1 /* $Id: he.c,v 1.18 2003/05/06 22:57:15 chas Exp $ */
7 ForeRunnerHE ATM Adapter driver for ATM on Linux
8 Copyright (C) 1999-2001 Naval Research Laboratory
10 This library is free software; you can redistribute it and/or
11 modify it under the terms of the GNU Lesser General Public
12 License as published by the Free Software Foundation; either
13 version 2.1 of the License, or (at your option) any later version.
15 This library is distributed in the hope that it will be useful,
16 but WITHOUT ANY WARRANTY; without even the implied warranty of
17 MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
18 Lesser General Public License for more details.
20 You should have received a copy of the GNU Lesser General Public
21 License along with this library; if not, write to the Free Software
22 Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
30 ForeRunnerHE ATM Adapter driver for ATM on Linux
31 Copyright (C) 1999-2001 Naval Research Laboratory
33 Permission to use, copy, modify and distribute this software and its
34 documentation is hereby granted, provided that both the copyright
35 notice and this permission notice appear in all copies of the software,
36 derivative works or modified versions, and any portions thereof, and
37 that both notices appear in supporting documentation.
39 NRL ALLOWS FREE USE OF THIS SOFTWARE IN ITS "AS IS" CONDITION AND
40 DISCLAIMS ANY LIABILITY OF ANY KIND FOR ANY DAMAGES WHATSOEVER
41 RESULTING FROM THE USE OF THIS SOFTWARE.
43 This driver was written using the "Programmer's Reference Manual for
44 ForeRunnerHE(tm)", MANU0361-01 - Rev. A, 08/21/98.
47 chas williams <chas@cmf.nrl.navy.mil>
48 eric kinzie <ekinzie@cmf.nrl.navy.mil>
51 4096 supported 'connections'
52 group 0 is used for all traffic
53 interrupt queue 0 is used for all interrupts
54 aal0 support (based on work from ulrich.u.muller@nokia.com)
58 #include <linux/config.h>
59 #include <linux/module.h>
60 #include <linux/version.h>
61 #include <linux/kernel.h>
62 #include <linux/skbuff.h>
63 #include <linux/pci.h>
64 #include <linux/errno.h>
65 #include <linux/types.h>
66 #include <linux/string.h>
67 #include <linux/delay.h>
68 #include <linux/init.h>
70 #include <linux/sched.h>
71 #include <linux/timer.h>
72 #include <linux/interrupt.h>
74 #include <asm/byteorder.h>
75 #include <asm/uaccess.h>
77 #include <linux/atmdev.h>
78 #include <linux/atm.h>
79 #include <linux/sonet.h>
82 #undef USE_SCATTERGATHER
83 #undef USE_CHECKSUM_HW /* still confused about this */
85 #undef USE_RBPS_POOL /* if memory is tight try this */
86 #undef USE_RBPL_POOL /* if memory is tight try this */
88 /* #undef CONFIG_ATM_HE_USE_SUNI */
92 #if LINUX_VERSION_CODE < KERNEL_VERSION(2,5,69)
93 typedef void irqreturn_t;
99 #if LINUX_VERSION_CODE < KERNEL_VERSION(2,4,9)
100 #define __devexit_p(func) func
103 #ifndef MODULE_LICENSE
104 #define MODULE_LICENSE(x)
107 #if LINUX_VERSION_CODE < KERNEL_VERSION(2,4,3)
108 #define pci_set_drvdata(pci_dev, data) (pci_dev)->driver_data = (data)
109 #define pci_get_drvdata(pci_dev) (pci_dev)->driver_data
116 #include <linux/atm_he.h>
118 #define hprintk(fmt,args...) printk(KERN_ERR DEV_LABEL "%d: " fmt, he_dev->number , ##args)
122 #define HPRINTK(fmt,args...) printk(KERN_DEBUG DEV_LABEL "%d: " fmt, he_dev->number , ##args)
124 #define HPRINTK(fmt,args...) do { } while (0)
128 /* version definition */
130 static char *version = "$Id: he.c,v 1.18 2003/05/06 22:57:15 chas Exp $";
134 static int he_open(struct atm_vcc *vcc);
135 static void he_close(struct atm_vcc *vcc);
136 static int he_send(struct atm_vcc *vcc, struct sk_buff *skb);
137 static int he_ioctl(struct atm_dev *dev, unsigned int cmd, void __user *arg);
138 static irqreturn_t he_irq_handler(int irq, void *dev_id, struct pt_regs *regs);
139 static void he_tasklet(unsigned long data);
140 static int he_proc_read(struct atm_dev *dev,loff_t *pos,char *page);
141 static int he_start(struct atm_dev *dev);
142 static void he_stop(struct he_dev *dev);
143 static void he_phy_put(struct atm_dev *, unsigned char, unsigned long);
144 static unsigned char he_phy_get(struct atm_dev *, unsigned long);
146 static u8 read_prom_byte(struct he_dev *he_dev, int addr);
150 struct he_dev *he_devs = NULL;
151 static short disable64 = -1;
152 static short nvpibits = -1;
153 static short nvcibits = -1;
154 static short rx_skb_reserve = 16;
155 static short irq_coalesce = 1;
156 static short sdh = 0;
158 static struct atmdev_ops he_ops =
164 .phy_put = he_phy_put,
165 .phy_get = he_phy_get,
166 .proc_read = he_proc_read,
170 #define he_writel(dev, val, reg) do { writel(val, (dev)->membase + (reg)); wmb(); } while (0)
171 #define he_readl(dev, reg) readl((dev)->membase + (reg))
173 /* section 2.12 connection memory access */
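/* a connection memory access works as follows: load CON_DAT, kick CON_CTL
 * with the read/write strobe, the target select (TCM/RCM/mailbox) and the
 * address, then poll CON_CTL until the controller clears CON_CTL_BUSY */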
175 static __inline__ void
176 he_writel_internal(struct he_dev *he_dev, unsigned val, unsigned addr,
179 he_writel(he_dev, val, CON_DAT);
180 (void) he_readl(he_dev, CON_DAT); /* flush posted writes */
181 he_writel(he_dev, flags | CON_CTL_WRITE | CON_CTL_ADDR(addr), CON_CTL);
182 while (he_readl(he_dev, CON_CTL) & CON_CTL_BUSY);
185 #define he_writel_rcm(dev, val, reg) \
186 he_writel_internal(dev, val, reg, CON_CTL_RCM)
188 #define he_writel_tcm(dev, val, reg) \
189 he_writel_internal(dev, val, reg, CON_CTL_TCM)
191 #define he_writel_mbox(dev, val, reg) \
192 he_writel_internal(dev, val, reg, CON_CTL_MBOX)
195 he_readl_internal(struct he_dev *he_dev, unsigned addr, unsigned flags)
197 he_writel(he_dev, flags | CON_CTL_READ | CON_CTL_ADDR(addr), CON_CTL);
198 while (he_readl(he_dev, CON_CTL) & CON_CTL_BUSY);
199 return he_readl(he_dev, CON_DAT);
202 #define he_readl_rcm(dev, reg) \
203 he_readl_internal(dev, reg, CON_CTL_RCM)
205 #define he_readl_tcm(dev, reg) \
206 he_readl_internal(dev, reg, CON_CTL_TCM)
208 #define he_readl_mbox(dev, reg) \
209 he_readl_internal(dev, reg, CON_CTL_MBOX)
212 /* figure 2.2 connection id */
214 #define he_mkcid(dev, vpi, vci) (((vpi << (dev)->vcibits) | vci) & 0x1fff)
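/* illustrative: with a split of 0 vpi bits and 12 vci bits, vpi 1 / vci 100
 * maps to cid ((1 << 12) | 100) & 0x1fff = 0x1064 */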
216 /* 2.5.1 per connection transmit state registers */
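/* tsra holds eight words per connection (hence cid << 3), tsrb four
 * (cid << 2), tsrc two (cid << 1) and tsrd one */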
218 #define he_writel_tsr0(dev, val, cid) \
219 he_writel_tcm(dev, val, CONFIG_TSRA | (cid << 3) | 0)
220 #define he_readl_tsr0(dev, cid) \
221 he_readl_tcm(dev, CONFIG_TSRA | (cid << 3) | 0)
223 #define he_writel_tsr1(dev, val, cid) \
224 he_writel_tcm(dev, val, CONFIG_TSRA | (cid << 3) | 1)
226 #define he_writel_tsr2(dev, val, cid) \
227 he_writel_tcm(dev, val, CONFIG_TSRA | (cid << 3) | 2)
229 #define he_writel_tsr3(dev, val, cid) \
230 he_writel_tcm(dev, val, CONFIG_TSRA | (cid << 3) | 3)
232 #define he_writel_tsr4(dev, val, cid) \
233 he_writel_tcm(dev, val, CONFIG_TSRA | (cid << 3) | 4)
237 * NOTE While the transmit connection is active, bits 23 through 0
238 * of this register must not be written by the host. Byte
239 * enables should be used during normal operation when writing
240 * the most significant byte.
243 #define he_writel_tsr4_upper(dev, val, cid) \
244 he_writel_internal(dev, val, CONFIG_TSRA | (cid << 3) | 4, \
246 | CON_BYTE_DISABLE_2 \
247 | CON_BYTE_DISABLE_1 \
248 | CON_BYTE_DISABLE_0)
250 #define he_readl_tsr4(dev, cid) \
251 he_readl_tcm(dev, CONFIG_TSRA | (cid << 3) | 4)
253 #define he_writel_tsr5(dev, val, cid) \
254 he_writel_tcm(dev, val, CONFIG_TSRA | (cid << 3) | 5)
256 #define he_writel_tsr6(dev, val, cid) \
257 he_writel_tcm(dev, val, CONFIG_TSRA | (cid << 3) | 6)
259 #define he_writel_tsr7(dev, val, cid) \
260 he_writel_tcm(dev, val, CONFIG_TSRA | (cid << 3) | 7)
263 #define he_writel_tsr8(dev, val, cid) \
264 he_writel_tcm(dev, val, CONFIG_TSRB | (cid << 2) | 0)
266 #define he_writel_tsr9(dev, val, cid) \
267 he_writel_tcm(dev, val, CONFIG_TSRB | (cid << 2) | 1)
269 #define he_writel_tsr10(dev, val, cid) \
270 he_writel_tcm(dev, val, CONFIG_TSRB | (cid << 2) | 2)
272 #define he_writel_tsr11(dev, val, cid) \
273 he_writel_tcm(dev, val, CONFIG_TSRB | (cid << 2) | 3)
276 #define he_writel_tsr12(dev, val, cid) \
277 he_writel_tcm(dev, val, CONFIG_TSRC | (cid << 1) | 0)
279 #define he_writel_tsr13(dev, val, cid) \
280 he_writel_tcm(dev, val, CONFIG_TSRC | (cid << 1) | 1)
283 #define he_writel_tsr14(dev, val, cid) \
284 he_writel_tcm(dev, val, CONFIG_TSRD | cid)
286 #define he_writel_tsr14_upper(dev, val, cid) \
287 he_writel_internal(dev, val, CONFIG_TSRD | cid, \
289 | CON_BYTE_DISABLE_2 \
290 | CON_BYTE_DISABLE_1 \
291 | CON_BYTE_DISABLE_0)
293 /* 2.7.1 per connection receive state registers */
295 #define he_writel_rsr0(dev, val, cid) \
296 he_writel_rcm(dev, val, 0x00000 | (cid << 3) | 0)
297 #define he_readl_rsr0(dev, cid) \
298 he_readl_rcm(dev, 0x00000 | (cid << 3) | 0)
300 #define he_writel_rsr1(dev, val, cid) \
301 he_writel_rcm(dev, val, 0x00000 | (cid << 3) | 1)
303 #define he_writel_rsr2(dev, val, cid) \
304 he_writel_rcm(dev, val, 0x00000 | (cid << 3) | 2)
306 #define he_writel_rsr3(dev, val, cid) \
307 he_writel_rcm(dev, val, 0x00000 | (cid << 3) | 3)
309 #define he_writel_rsr4(dev, val, cid) \
310 he_writel_rcm(dev, val, 0x00000 | (cid << 3) | 4)
312 #define he_writel_rsr5(dev, val, cid) \
313 he_writel_rcm(dev, val, 0x00000 | (cid << 3) | 5)
315 #define he_writel_rsr6(dev, val, cid) \
316 he_writel_rcm(dev, val, 0x00000 | (cid << 3) | 6)
318 #define he_writel_rsr7(dev, val, cid) \
319 he_writel_rcm(dev, val, 0x00000 | (cid << 3) | 7)
321 static __inline__ struct atm_vcc*
322 __find_vcc(struct he_dev *he_dev, unsigned cid)
324 struct hlist_head *head;
326 struct hlist_node *node;
331 vpi = cid >> he_dev->vcibits;
332 vci = cid & ((1 << he_dev->vcibits) - 1);
333 head = &vcc_hash[vci & (VCC_HTABLE_SIZE -1)];
335 sk_for_each(s, node, head) {
337 if (vcc->dev == he_dev->atm_dev &&
338 vcc->vci == vci && vcc->vpi == vpi &&
339 vcc->qos.rxtp.traffic_class != ATM_NONE) {
347 he_init_one(struct pci_dev *pci_dev, const struct pci_device_id *pci_ent)
349 struct atm_dev *atm_dev = NULL;
350 struct he_dev *he_dev = NULL;
353 printk(KERN_INFO "he: %s\n", version);
355 if (pci_enable_device(pci_dev))
357 if (pci_set_dma_mask(pci_dev, HE_DMA_MASK) != 0) {
358 printk(KERN_WARNING "he: no suitable dma available\n");
360 goto init_one_failure;
363 atm_dev = atm_dev_register(DEV_LABEL, &he_ops, -1, NULL);
366 goto init_one_failure;
368 pci_set_drvdata(pci_dev, atm_dev);
370 he_dev = (struct he_dev *) kmalloc(sizeof(struct he_dev),
374 goto init_one_failure;
376 memset(he_dev, 0, sizeof(struct he_dev));
378 he_dev->pci_dev = pci_dev;
379 he_dev->atm_dev = atm_dev;
380 he_dev->atm_dev->dev_data = he_dev;
381 atm_dev->dev_data = he_dev;
382 he_dev->number = atm_dev->number;
383 if (he_start(atm_dev)) {
386 goto init_one_failure;
390 he_dev->next = he_devs;
396 atm_dev_deregister(atm_dev);
399 pci_disable_device(pci_dev);
403 static void __devexit
404 he_remove_one (struct pci_dev *pci_dev)
406 struct atm_dev *atm_dev;
407 struct he_dev *he_dev;
409 atm_dev = pci_get_drvdata(pci_dev);
410 he_dev = HE_DEV(atm_dev);
412 /* need to remove from he_devs */
415 atm_dev_deregister(atm_dev);
418 pci_set_drvdata(pci_dev, NULL);
419 pci_disable_device(pci_dev);
424 rate_to_atmf(unsigned rate) /* cps to atm forum format */
426 #define NONZERO (1 << 14)
434 while (rate > 0x3ff) {
439 return (NONZERO | (exp << 9) | (rate & 0x1ff));
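/* illustrative example of the atm forum rate format (nz bit, 5-bit exponent,
 * 9-bit mantissa; value = 2^exp * (1 + mantissa/512) cells/s): the OC-3 PCR
 * of 353207 cps encodes as exp 18, mantissa 177, which decodes back to
 * 2^18 * (1 + 177/512) = 352768 cps */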
443 he_init_rx_lbfp0(struct he_dev *he_dev)
445 unsigned i, lbm_offset, lbufd_index, lbuf_addr, lbuf_count;
446 unsigned lbufs_per_row = he_dev->cells_per_row / he_dev->cells_per_lbuf;
447 unsigned lbuf_bufsize = he_dev->cells_per_lbuf * ATM_CELL_PAYLOAD;
448 unsigned row_offset = he_dev->r0_startrow * he_dev->bytes_per_row;
451 lbm_offset = he_readl(he_dev, RCMLBM_BA);
453 he_writel(he_dev, lbufd_index, RLBF0_H);
455 for (i = 0, lbuf_count = 0; i < he_dev->r0_numbuffs; ++i) {
457 lbuf_addr = (row_offset + (lbuf_count * lbuf_bufsize)) / 32;
459 he_writel_rcm(he_dev, lbuf_addr, lbm_offset);
460 he_writel_rcm(he_dev, lbufd_index, lbm_offset + 1);
462 if (++lbuf_count == lbufs_per_row) {
464 row_offset += he_dev->bytes_per_row;
469 he_writel(he_dev, lbufd_index - 2, RLBF0_T);
470 he_writel(he_dev, he_dev->r0_numbuffs, RLBF0_C);
474 he_init_rx_lbfp1(struct he_dev *he_dev)
476 unsigned i, lbm_offset, lbufd_index, lbuf_addr, lbuf_count;
477 unsigned lbufs_per_row = he_dev->cells_per_row / he_dev->cells_per_lbuf;
478 unsigned lbuf_bufsize = he_dev->cells_per_lbuf * ATM_CELL_PAYLOAD;
479 unsigned row_offset = he_dev->r1_startrow * he_dev->bytes_per_row;
482 lbm_offset = he_readl(he_dev, RCMLBM_BA) + (2 * lbufd_index);
484 he_writel(he_dev, lbufd_index, RLBF1_H);
486 for (i = 0, lbuf_count = 0; i < he_dev->r1_numbuffs; ++i) {
488 lbuf_addr = (row_offset + (lbuf_count * lbuf_bufsize)) / 32;
490 he_writel_rcm(he_dev, lbuf_addr, lbm_offset);
491 he_writel_rcm(he_dev, lbufd_index, lbm_offset + 1);
493 if (++lbuf_count == lbufs_per_row) {
495 row_offset += he_dev->bytes_per_row;
500 he_writel(he_dev, lbufd_index - 2, RLBF1_T);
501 he_writel(he_dev, he_dev->r1_numbuffs, RLBF1_C);
505 he_init_tx_lbfp(struct he_dev *he_dev)
507 unsigned i, lbm_offset, lbufd_index, lbuf_addr, lbuf_count;
508 unsigned lbufs_per_row = he_dev->cells_per_row / he_dev->cells_per_lbuf;
509 unsigned lbuf_bufsize = he_dev->cells_per_lbuf * ATM_CELL_PAYLOAD;
510 unsigned row_offset = he_dev->tx_startrow * he_dev->bytes_per_row;
512 lbufd_index = he_dev->r0_numbuffs + he_dev->r1_numbuffs;
513 lbm_offset = he_readl(he_dev, RCMLBM_BA) + (2 * lbufd_index);
515 he_writel(he_dev, lbufd_index, TLBF_H);
517 for (i = 0, lbuf_count = 0; i < he_dev->tx_numbuffs; ++i) {
519 lbuf_addr = (row_offset + (lbuf_count * lbuf_bufsize)) / 32;
521 he_writel_rcm(he_dev, lbuf_addr, lbm_offset);
522 he_writel_rcm(he_dev, lbufd_index, lbm_offset + 1);
524 if (++lbuf_count == lbufs_per_row) {
526 row_offset += he_dev->bytes_per_row;
531 he_writel(he_dev, lbufd_index - 1, TLBF_T);
535 he_init_tpdrq(struct he_dev *he_dev)
537 he_dev->tpdrq_base = pci_alloc_consistent(he_dev->pci_dev,
538 CONFIG_TPDRQ_SIZE * sizeof(struct he_tpdrq), &he_dev->tpdrq_phys);
539 if (he_dev->tpdrq_base == NULL) {
540 hprintk("failed to alloc tpdrq\n");
543 memset(he_dev->tpdrq_base, 0,
544 CONFIG_TPDRQ_SIZE * sizeof(struct he_tpdrq));
546 he_dev->tpdrq_tail = he_dev->tpdrq_base;
547 he_dev->tpdrq_head = he_dev->tpdrq_base;
549 he_writel(he_dev, he_dev->tpdrq_phys, TPDRQ_B_H);
550 he_writel(he_dev, 0, TPDRQ_T);
551 he_writel(he_dev, CONFIG_TPDRQ_SIZE - 1, TPDRQ_S);
557 he_init_cs_block(struct he_dev *he_dev)
559 unsigned clock, rate, delta;
562 /* 5.1.7 cs block initialization */
564 for (reg = 0; reg < 0x20; ++reg)
565 he_writel_mbox(he_dev, 0x0, CS_STTIM0 + reg);
567 /* rate grid timer reload values */
569 clock = he_is622(he_dev) ? 66667000 : 50000000;
570 rate = he_dev->atm_dev->link_rate;
571 delta = rate / 16 / 2;
573 for (reg = 0; reg < 0x10; ++reg) {
574 /* 2.4 internal transmit function
576 * we initialize the first row in the rate grid.
577 * the values are the timer period in clock cycles
579 unsigned period = clock / rate;
581 he_writel_mbox(he_dev, period, CS_TGRLD0 + reg);
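		/* illustrative: for an OC-3 card (50 MHz clock, 353207 cps link
		   rate) the first reload value is 50000000 / 353207 ~= 141 cycles */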
585 if (he_is622(he_dev)) {
586 /* table 5.2 (4 cells per lbuf) */
587 he_writel_mbox(he_dev, 0x000800fa, CS_ERTHR0);
588 he_writel_mbox(he_dev, 0x000c33cb, CS_ERTHR1);
589 he_writel_mbox(he_dev, 0x0010101b, CS_ERTHR2);
590 he_writel_mbox(he_dev, 0x00181dac, CS_ERTHR3);
591 he_writel_mbox(he_dev, 0x00280600, CS_ERTHR4);
593 /* table 5.3, 5.4, 5.5, 5.6, 5.7 */
594 he_writel_mbox(he_dev, 0x023de8b3, CS_ERCTL0);
595 he_writel_mbox(he_dev, 0x1801, CS_ERCTL1);
596 he_writel_mbox(he_dev, 0x68b3, CS_ERCTL2);
597 he_writel_mbox(he_dev, 0x1280, CS_ERSTAT0);
598 he_writel_mbox(he_dev, 0x68b3, CS_ERSTAT1);
599 he_writel_mbox(he_dev, 0x14585, CS_RTFWR);
601 he_writel_mbox(he_dev, 0x4680, CS_RTATR);
604 he_writel_mbox(he_dev, 0x00159ece, CS_TFBSET);
605 he_writel_mbox(he_dev, 0x68b3, CS_WCRMAX);
606 he_writel_mbox(he_dev, 0x5eb3, CS_WCRMIN);
607 he_writel_mbox(he_dev, 0xe8b3, CS_WCRINC);
608 he_writel_mbox(he_dev, 0xdeb3, CS_WCRDEC);
609 he_writel_mbox(he_dev, 0x68b3, CS_WCRCEIL);
612 he_writel_mbox(he_dev, 0x5, CS_OTPPER);
613 he_writel_mbox(he_dev, 0x14, CS_OTWPER);
615 /* table 5.1 (4 cells per lbuf) */
616 he_writel_mbox(he_dev, 0x000400ea, CS_ERTHR0);
617 he_writel_mbox(he_dev, 0x00063388, CS_ERTHR1);
618 he_writel_mbox(he_dev, 0x00081018, CS_ERTHR2);
619 he_writel_mbox(he_dev, 0x000c1dac, CS_ERTHR3);
620 he_writel_mbox(he_dev, 0x0014051a, CS_ERTHR4);
622 /* table 5.3, 5.4, 5.5, 5.6, 5.7 */
623 he_writel_mbox(he_dev, 0x0235e4b1, CS_ERCTL0);
624 he_writel_mbox(he_dev, 0x4701, CS_ERCTL1);
625 he_writel_mbox(he_dev, 0x64b1, CS_ERCTL2);
626 he_writel_mbox(he_dev, 0x1280, CS_ERSTAT0);
627 he_writel_mbox(he_dev, 0x64b1, CS_ERSTAT1);
628 he_writel_mbox(he_dev, 0xf424, CS_RTFWR);
630 he_writel_mbox(he_dev, 0x4680, CS_RTATR);
633 he_writel_mbox(he_dev, 0x000563b7, CS_TFBSET);
634 he_writel_mbox(he_dev, 0x64b1, CS_WCRMAX);
635 he_writel_mbox(he_dev, 0x5ab1, CS_WCRMIN);
636 he_writel_mbox(he_dev, 0xe4b1, CS_WCRINC);
637 he_writel_mbox(he_dev, 0xdab1, CS_WCRDEC);
638 he_writel_mbox(he_dev, 0x64b1, CS_WCRCEIL);
641 he_writel_mbox(he_dev, 0x6, CS_OTPPER);
642 he_writel_mbox(he_dev, 0x1e, CS_OTWPER);
645 he_writel_mbox(he_dev, 0x8, CS_OTTLIM);
647 for (reg = 0; reg < 0x8; ++reg)
648 he_writel_mbox(he_dev, 0x0, CS_HGRRT0 + reg);
653 he_init_cs_block_rcm(struct he_dev *he_dev)
655 unsigned (*rategrid)[16][16];
656 unsigned rate, delta;
659 unsigned rate_atmf, exp, man;
660 unsigned long long rate_cps;
661 int mult, buf, buf_limit = 4;
663 rategrid = kmalloc( sizeof(unsigned) * 16 * 16, GFP_KERNEL);
667 /* initialize rate grid group table */
669 for (reg = 0x0; reg < 0xff; ++reg)
670 he_writel_rcm(he_dev, 0x0, CONFIG_RCMABR + reg);
672 /* initialize rate controller groups */
674 for (reg = 0x100; reg < 0x1ff; ++reg)
675 he_writel_rcm(he_dev, 0x0, CONFIG_RCMABR + reg);
677 /* initialize tNrm lookup table */
679 /* the manual makes reference to a routine in a sample driver
680 for proper configuration; fortunately, we only need this
681 in order to support abr connections */
683 /* initialize rate to group table */
685 rate = he_dev->atm_dev->link_rate;
689 * 2.4 transmit internal functions
691 * we construct a copy of the rate grid used by the scheduler
692 * in order to construct the rate to group table below
695 for (j = 0; j < 16; j++) {
696 (*rategrid)[0][j] = rate;
700 for (i = 1; i < 16; i++)
701 for (j = 0; j < 16; j++)
703 (*rategrid)[i][j] = (*rategrid)[i - 1][j] / 4;
705 (*rategrid)[i][j] = (*rategrid)[i - 1][j] / 2;
708 * 2.4 transmit internal function
710 * this table maps the upper 5 bits of exponent and mantissa
711 * of the atm forum representation of the rate into an index
716 while (rate_atmf < 0x400) {
717 man = (rate_atmf & 0x1f) << 4;
718 exp = rate_atmf >> 5;
721 instead of '/ 512', use '>> 9' to prevent a call
722 to divdu3 on x86 platforms
724 rate_cps = (unsigned long long) (1 << exp) * (man + 512) >> 9;
727 rate_cps = 10; /* 2.2.1 minimum payload rate is 10 cps */
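		/* illustrative: table index 0x24b (exp 18, upper mantissa bits
		   01011) decodes to 2^18 * (176 + 512) / 512 = 352256 cps */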
729 for (i = 255; i > 0; i--)
730 if ((*rategrid)[i/16][i%16] >= rate_cps)
731 break; /* pick nearest rate instead? */
734 * each table entry is 16 bits: a rate grid index (8 bits)
735 * and a buffer limit (8 bits);
736 * there are two table entries in each 32-bit register
740 buf = rate_cps * he_dev->tx_numbuffs /
741 (he_dev->atm_dev->link_rate * 2);
743 /* this is pretty, but avoids _divdu3 and is mostly correct */
744 mult = he_dev->atm_dev->link_rate / ATM_OC3_PCR;
745 if (rate_cps > (272 * mult))
747 else if (rate_cps > (204 * mult))
749 else if (rate_cps > (136 * mult))
751 else if (rate_cps > (68 * mult))
758 reg = (reg << 16) | ((i << 8) | buf);
760 #define RTGTBL_OFFSET 0x400
763 he_writel_rcm(he_dev, reg,
764 CONFIG_RCMABR + RTGTBL_OFFSET + (rate_atmf >> 1));
774 he_init_group(struct he_dev *he_dev, int group)
779 /* small buffer pool */
781 he_dev->rbps_pool = pci_pool_create("rbps", he_dev->pci_dev,
782 CONFIG_RBPS_BUFSIZE, 8, 0);
783 if (he_dev->rbps_pool == NULL) {
784 hprintk("unable to create rbps pages\n");
787 #else /* !USE_RBPS_POOL */
788 he_dev->rbps_pages = pci_alloc_consistent(he_dev->pci_dev,
789 CONFIG_RBPS_SIZE * CONFIG_RBPS_BUFSIZE, &he_dev->rbps_pages_phys);
790 if (he_dev->rbps_pages == NULL) {
791 hprintk("unable to create rbps page pool\n");
794 #endif /* USE_RBPS_POOL */
796 he_dev->rbps_base = pci_alloc_consistent(he_dev->pci_dev,
797 CONFIG_RBPS_SIZE * sizeof(struct he_rbp), &he_dev->rbps_phys);
798 if (he_dev->rbps_base == NULL) {
799 hprintk("failed to alloc rbps\n");
802 memset(he_dev->rbps_base, 0, CONFIG_RBPS_SIZE * sizeof(struct he_rbp));
803 he_dev->rbps_virt = kmalloc(CONFIG_RBPS_SIZE * sizeof(struct he_virt), GFP_KERNEL);
805 for (i = 0; i < CONFIG_RBPS_SIZE; ++i) {
806 dma_addr_t dma_handle;
810 cpuaddr = pci_pool_alloc(he_dev->rbps_pool, SLAB_KERNEL|SLAB_DMA, &dma_handle);
814 cpuaddr = he_dev->rbps_pages + (i * CONFIG_RBPS_BUFSIZE);
815 dma_handle = he_dev->rbps_pages_phys + (i * CONFIG_RBPS_BUFSIZE);
818 he_dev->rbps_virt[i].virt = cpuaddr;
819 he_dev->rbps_base[i].status = RBP_LOANED | RBP_SMALLBUF | (i << RBP_INDEX_OFF);
820 he_dev->rbps_base[i].phys = dma_handle;
823 he_dev->rbps_tail = &he_dev->rbps_base[CONFIG_RBPS_SIZE - 1];
825 he_writel(he_dev, he_dev->rbps_phys, G0_RBPS_S + (group * 32));
826 he_writel(he_dev, RBPS_MASK(he_dev->rbps_tail),
827 G0_RBPS_T + (group * 32));
828 he_writel(he_dev, CONFIG_RBPS_BUFSIZE/4,
829 G0_RBPS_BS + (group * 32));
831 RBP_THRESH(CONFIG_RBPS_THRESH) |
832 RBP_QSIZE(CONFIG_RBPS_SIZE - 1) |
834 G0_RBPS_QI + (group * 32));
835 #else /* !USE_RBPS */
836 he_writel(he_dev, 0x0, G0_RBPS_S + (group * 32));
837 he_writel(he_dev, 0x0, G0_RBPS_T + (group * 32));
838 he_writel(he_dev, 0x0, G0_RBPS_QI + (group * 32));
839 he_writel(he_dev, RBP_THRESH(0x1) | RBP_QSIZE(0x0),
840 G0_RBPS_BS + (group * 32));
841 #endif /* USE_RBPS */
843 /* large buffer pool */
845 he_dev->rbpl_pool = pci_pool_create("rbpl", he_dev->pci_dev,
846 CONFIG_RBPL_BUFSIZE, 8, 0);
847 if (he_dev->rbpl_pool == NULL) {
848 hprintk("unable to create rbpl pool\n");
851 #else /* !USE_RBPL_POOL */
852 he_dev->rbpl_pages = (void *) pci_alloc_consistent(he_dev->pci_dev,
853 CONFIG_RBPL_SIZE * CONFIG_RBPL_BUFSIZE, &he_dev->rbpl_pages_phys);
854 if (he_dev->rbpl_pages == NULL) {
855 hprintk("unable to create rbpl pages\n");
858 #endif /* USE_RBPL_POOL */
860 he_dev->rbpl_base = pci_alloc_consistent(he_dev->pci_dev,
861 CONFIG_RBPL_SIZE * sizeof(struct he_rbp), &he_dev->rbpl_phys);
862 if (he_dev->rbpl_base == NULL) {
863 hprintk("failed to alloc rbpl\n");
866 memset(he_dev->rbpl_base, 0, CONFIG_RBPL_SIZE * sizeof(struct he_rbp));
867 he_dev->rbpl_virt = kmalloc(CONFIG_RBPL_SIZE * sizeof(struct he_virt), GFP_KERNEL);
869 for (i = 0; i < CONFIG_RBPL_SIZE; ++i) {
870 dma_addr_t dma_handle;
874 cpuaddr = pci_pool_alloc(he_dev->rbpl_pool, SLAB_KERNEL|SLAB_DMA, &dma_handle);
878 cpuaddr = he_dev->rbpl_pages + (i * CONFIG_RBPL_BUFSIZE);
879 dma_handle = he_dev->rbpl_pages_phys + (i * CONFIG_RBPL_BUFSIZE);
882 he_dev->rbpl_virt[i].virt = cpuaddr;
883 he_dev->rbpl_base[i].status = RBP_LOANED | (i << RBP_INDEX_OFF);
884 he_dev->rbpl_base[i].phys = dma_handle;
886 he_dev->rbpl_tail = &he_dev->rbpl_base[CONFIG_RBPL_SIZE - 1];
888 he_writel(he_dev, he_dev->rbpl_phys, G0_RBPL_S + (group * 32));
889 he_writel(he_dev, RBPL_MASK(he_dev->rbpl_tail),
890 G0_RBPL_T + (group * 32));
891 he_writel(he_dev, CONFIG_RBPL_BUFSIZE/4,
892 G0_RBPL_BS + (group * 32));
894 RBP_THRESH(CONFIG_RBPL_THRESH) |
895 RBP_QSIZE(CONFIG_RBPL_SIZE - 1) |
897 G0_RBPL_QI + (group * 32));
899 /* rx buffer ready queue */
901 he_dev->rbrq_base = pci_alloc_consistent(he_dev->pci_dev,
902 CONFIG_RBRQ_SIZE * sizeof(struct he_rbrq), &he_dev->rbrq_phys);
903 if (he_dev->rbrq_base == NULL) {
904 hprintk("failed to allocate rbrq\n");
907 memset(he_dev->rbrq_base, 0, CONFIG_RBRQ_SIZE * sizeof(struct he_rbrq));
909 he_dev->rbrq_head = he_dev->rbrq_base;
910 he_writel(he_dev, he_dev->rbrq_phys, G0_RBRQ_ST + (group * 16));
911 he_writel(he_dev, 0, G0_RBRQ_H + (group * 16));
913 RBRQ_THRESH(CONFIG_RBRQ_THRESH) | RBRQ_SIZE(CONFIG_RBRQ_SIZE - 1),
914 G0_RBRQ_Q + (group * 16));
916 hprintk("coalescing interrupts\n");
917 he_writel(he_dev, RBRQ_TIME(768) | RBRQ_COUNT(7),
918 G0_RBRQ_I + (group * 16));
920 he_writel(he_dev, RBRQ_TIME(0) | RBRQ_COUNT(1),
921 G0_RBRQ_I + (group * 16));
923 /* tx buffer ready queue */
925 he_dev->tbrq_base = pci_alloc_consistent(he_dev->pci_dev,
926 CONFIG_TBRQ_SIZE * sizeof(struct he_tbrq), &he_dev->tbrq_phys);
927 if (he_dev->tbrq_base == NULL) {
928 hprintk("failed to allocate tbrq\n");
931 memset(he_dev->tbrq_base, 0, CONFIG_TBRQ_SIZE * sizeof(struct he_tbrq));
933 he_dev->tbrq_head = he_dev->tbrq_base;
935 he_writel(he_dev, he_dev->tbrq_phys, G0_TBRQ_B_T + (group * 16));
936 he_writel(he_dev, 0, G0_TBRQ_H + (group * 16));
937 he_writel(he_dev, CONFIG_TBRQ_SIZE - 1, G0_TBRQ_S + (group * 16));
938 he_writel(he_dev, CONFIG_TBRQ_THRESH, G0_TBRQ_THRESH + (group * 16));
944 he_init_irq(struct he_dev *he_dev)
948 /* 2.9.3.5 tail offset for each interrupt queue is located after the
949 end of the interrupt queue */
951 he_dev->irq_base = pci_alloc_consistent(he_dev->pci_dev,
952 (CONFIG_IRQ_SIZE+1) * sizeof(struct he_irq), &he_dev->irq_phys);
953 if (he_dev->irq_base == NULL) {
954 hprintk("failed to allocate irq\n");
957 he_dev->irq_tailoffset = (unsigned *)
958 &he_dev->irq_base[CONFIG_IRQ_SIZE];
959 *he_dev->irq_tailoffset = 0;
960 he_dev->irq_head = he_dev->irq_base;
961 he_dev->irq_tail = he_dev->irq_base;
963 for (i = 0; i < CONFIG_IRQ_SIZE; ++i)
964 he_dev->irq_base[i].isw = ITYPE_INVALID;
966 he_writel(he_dev, he_dev->irq_phys, IRQ0_BASE);
968 IRQ_SIZE(CONFIG_IRQ_SIZE) | IRQ_THRESH(CONFIG_IRQ_THRESH),
970 he_writel(he_dev, IRQ_INT_A | IRQ_TYPE_LINE, IRQ0_CNTL);
971 he_writel(he_dev, 0x0, IRQ0_DATA);
973 he_writel(he_dev, 0x0, IRQ1_BASE);
974 he_writel(he_dev, 0x0, IRQ1_HEAD);
975 he_writel(he_dev, 0x0, IRQ1_CNTL);
976 he_writel(he_dev, 0x0, IRQ1_DATA);
978 he_writel(he_dev, 0x0, IRQ2_BASE);
979 he_writel(he_dev, 0x0, IRQ2_HEAD);
980 he_writel(he_dev, 0x0, IRQ2_CNTL);
981 he_writel(he_dev, 0x0, IRQ2_DATA);
983 he_writel(he_dev, 0x0, IRQ3_BASE);
984 he_writel(he_dev, 0x0, IRQ3_HEAD);
985 he_writel(he_dev, 0x0, IRQ3_CNTL);
986 he_writel(he_dev, 0x0, IRQ3_DATA);
988 /* 2.9.3.2 interrupt queue mapping registers */
990 he_writel(he_dev, 0x0, GRP_10_MAP);
991 he_writel(he_dev, 0x0, GRP_32_MAP);
992 he_writel(he_dev, 0x0, GRP_54_MAP);
993 he_writel(he_dev, 0x0, GRP_76_MAP);
995 if (request_irq(he_dev->pci_dev->irq, he_irq_handler, SA_INTERRUPT|SA_SHIRQ, DEV_LABEL, he_dev)) {
996 hprintk("irq %d already in use\n", he_dev->pci_dev->irq);
1000 he_dev->irq = he_dev->pci_dev->irq;
1006 he_start(struct atm_dev *dev)
1008 struct he_dev *he_dev;
1009 struct pci_dev *pci_dev;
1010 unsigned long membase;
1013 u32 gen_cntl_0, host_cntl, lb_swap;
1014 u8 cache_size, timer;
1017 unsigned int status, reg;
1020 he_dev = HE_DEV(dev);
1021 pci_dev = he_dev->pci_dev;
1023 membase = pci_resource_start(pci_dev, 0);
1024 HPRINTK("membase = 0x%lx irq = %d.\n", membase, pci_dev->irq);
1027 * pci bus controller initialization
1030 /* 4.3 pci bus controller-specific initialization */
1031 if (pci_read_config_dword(pci_dev, GEN_CNTL_0, &gen_cntl_0) != 0) {
1032 hprintk("can't read GEN_CNTL_0\n");
1035 gen_cntl_0 |= (MRL_ENB | MRM_ENB | IGNORE_TIMEOUT);
1036 if (pci_write_config_dword(pci_dev, GEN_CNTL_0, gen_cntl_0) != 0) {
1037 hprintk("can't write GEN_CNTL_0.\n");
1041 if (pci_read_config_word(pci_dev, PCI_COMMAND, &command) != 0) {
1042 hprintk("can't read PCI_COMMAND.\n");
1046 command |= (PCI_COMMAND_MEMORY | PCI_COMMAND_MASTER | PCI_COMMAND_INVALIDATE);
1047 if (pci_write_config_word(pci_dev, PCI_COMMAND, command) != 0) {
1048 hprintk("can't enable memory.\n");
1052 if (pci_read_config_byte(pci_dev, PCI_CACHE_LINE_SIZE, &cache_size)) {
1053 hprintk("can't read cache line size?\n");
1057 if (cache_size < 16) {
1059 if (pci_write_config_byte(pci_dev, PCI_CACHE_LINE_SIZE, cache_size))
1060 hprintk("can't set cache line size to %d\n", cache_size);
1063 if (pci_read_config_byte(pci_dev, PCI_LATENCY_TIMER, &timer)) {
1064 hprintk("can't read latency timer?\n");
1070 * LAT_TIMER = 1 + AVG_LAT + BURST_SIZE/BUS_SIZE
1072 * AVG_LAT: The average first data read/write latency [maximum 16 clock cycles]
1073 * BURST_SIZE: 1536 bytes (read) for 622, 768 bytes (read) for 155 [192 clock cycles]
1076 #define LAT_TIMER 209
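/* e.g. 1 + 16 + 192 = 209 (both the 622 and 155 read bursts take 192 bus clocks) */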
1077 if (timer < LAT_TIMER) {
1078 HPRINTK("latency timer was %d, setting to %d\n", timer, LAT_TIMER);
1080 if (pci_write_config_byte(pci_dev, PCI_LATENCY_TIMER, timer))
1081 hprintk("can't set latency timer to %d\n", timer);
1084 if (!(he_dev->membase = ioremap(membase, HE_REGMAP_SIZE))) {
1085 hprintk("can't set up page mapping\n");
1089 /* 4.4 card reset */
1090 he_writel(he_dev, 0x0, RESET_CNTL);
1091 he_writel(he_dev, 0xff, RESET_CNTL);
1093 udelay(16*1000); /* 16 ms */
1094 status = he_readl(he_dev, RESET_CNTL);
1095 if ((status & BOARD_RST_STATUS) == 0) {
1096 hprintk("reset failed\n");
1100 /* 4.5 set bus width */
1101 host_cntl = he_readl(he_dev, HOST_CNTL);
1102 if (host_cntl & PCI_BUS_SIZE64)
1103 gen_cntl_0 |= ENBL_64;
1105 gen_cntl_0 &= ~ENBL_64;
1107 if (disable64 == 1) {
1108 hprintk("disabling 64-bit pci bus transfers\n");
1109 gen_cntl_0 &= ~ENBL_64;
1112 if (gen_cntl_0 & ENBL_64)
1113 hprintk("64-bit transfers enabled\n");
1115 pci_write_config_dword(pci_dev, GEN_CNTL_0, gen_cntl_0);
1117 /* 4.7 read prom contents */
1118 for (i = 0; i < PROD_ID_LEN; ++i)
1119 he_dev->prod_id[i] = read_prom_byte(he_dev, PROD_ID + i);
1121 he_dev->media = read_prom_byte(he_dev, MEDIA);
1123 for (i = 0; i < 6; ++i)
1124 dev->esi[i] = read_prom_byte(he_dev, MAC_ADDR + i);
1126 hprintk("%s%s, %x:%x:%x:%x:%x:%x\n",
1128 he_dev->media & 0x40 ? "SM" : "MM",
1135 he_dev->atm_dev->link_rate = he_is622(he_dev) ?
1136 ATM_OC12_PCR : ATM_OC3_PCR;
1138 /* 4.6 set host endianness */
1139 lb_swap = he_readl(he_dev, LB_SWAP);
1140 if (he_is622(he_dev))
1141 lb_swap &= ~XFER_SIZE; /* 4 cells */
1143 lb_swap |= XFER_SIZE; /* 8 cells */
1145 lb_swap |= DESC_WR_SWAP | INTR_SWAP | BIG_ENDIAN_HOST;
1147 lb_swap &= ~(DESC_WR_SWAP | INTR_SWAP | BIG_ENDIAN_HOST |
1148 DATA_WR_SWAP | DATA_RD_SWAP | DESC_RD_SWAP);
1149 #endif /* __BIG_ENDIAN */
1150 he_writel(he_dev, lb_swap, LB_SWAP);
1152 /* 4.8 sdram controller initialization */
1153 he_writel(he_dev, he_is622(he_dev) ? LB_64_ENB : 0x0, SDRAM_CTL);
1155 /* 4.9 initialize rnum value */
1156 lb_swap |= SWAP_RNUM_MAX(0xf);
1157 he_writel(he_dev, lb_swap, LB_SWAP);
1159 /* 4.10 initialize the interrupt queues */
1160 if ((err = he_init_irq(he_dev)) != 0)
1164 tasklet_init(&he_dev->tasklet, he_tasklet, (unsigned long) he_dev);
1166 spin_lock_init(&he_dev->global_lock);
1168 /* 4.11 enable pci bus controller state machines */
1169 host_cntl |= (OUTFF_ENB | CMDFF_ENB |
1170 QUICK_RD_RETRY | QUICK_WR_RETRY | PERR_INT_ENB);
1171 he_writel(he_dev, host_cntl, HOST_CNTL);
1173 gen_cntl_0 |= INT_PROC_ENBL|INIT_ENB;
1174 pci_write_config_dword(pci_dev, GEN_CNTL_0, gen_cntl_0);
1177 * atm network controller initialization
1180 /* 5.1.1 generic configuration state */
1183 * local (cell) buffer memory map
1187 *
1189 * 155 Mb/s (1024-byte rows): rows 0-5 utility, rows 6-517 rx0,
1190 * rows 518-1535 tx, rows 1536-2047 rx1
1193 *
1195 * 622 Mb/s (2048-byte rows): rows 0-255 rx0, rows 256-767 tx,
1196 * rows 768-1023 rx1, plus a utility column
1210 /* total 4096 connections */
1211 he_dev->vcibits = CONFIG_DEFAULT_VCIBITS;
1212 he_dev->vpibits = CONFIG_DEFAULT_VPIBITS;
1214 if (nvpibits != -1 && nvcibits != -1 && nvpibits+nvcibits != HE_MAXCIDBITS) {
1215 hprintk("nvpibits + nvcibits != %d\n", HE_MAXCIDBITS);
1219 if (nvpibits != -1) {
1220 he_dev->vpibits = nvpibits;
1221 he_dev->vcibits = HE_MAXCIDBITS - nvpibits;
1224 if (nvcibits != -1) {
1225 he_dev->vcibits = nvcibits;
1226 he_dev->vpibits = HE_MAXCIDBITS - nvcibits;
1230 if (he_is622(he_dev)) {
1231 he_dev->cells_per_row = 40;
1232 he_dev->bytes_per_row = 2048;
1233 he_dev->r0_numrows = 256;
1234 he_dev->tx_numrows = 512;
1235 he_dev->r1_numrows = 256;
1236 he_dev->r0_startrow = 0;
1237 he_dev->tx_startrow = 256;
1238 he_dev->r1_startrow = 768;
1240 he_dev->cells_per_row = 20;
1241 he_dev->bytes_per_row = 1024;
1242 he_dev->r0_numrows = 512;
1243 he_dev->tx_numrows = 1018;
1244 he_dev->r1_numrows = 512;
1245 he_dev->r0_startrow = 6;
1246 he_dev->tx_startrow = 518;
1247 he_dev->r1_startrow = 1536;
1250 he_dev->cells_per_lbuf = 4;
1251 he_dev->buffer_limit = 4;
1252 he_dev->r0_numbuffs = he_dev->r0_numrows *
1253 he_dev->cells_per_row / he_dev->cells_per_lbuf;
1254 if (he_dev->r0_numbuffs > 2560)
1255 he_dev->r0_numbuffs = 2560;
1257 he_dev->r1_numbuffs = he_dev->r1_numrows *
1258 he_dev->cells_per_row / he_dev->cells_per_lbuf;
1259 if (he_dev->r1_numbuffs > 2560)
1260 he_dev->r1_numbuffs = 2560;
1262 he_dev->tx_numbuffs = he_dev->tx_numrows *
1263 he_dev->cells_per_row / he_dev->cells_per_lbuf;
1264 if (he_dev->tx_numbuffs > 5120)
1265 he_dev->tx_numbuffs = 5120;
1267 /* 5.1.2 configure hardware dependent registers */
1270 SLICE_X(0x2) | ARB_RNUM_MAX(0xf) | TH_PRTY(0x3) |
1271 RH_PRTY(0x3) | TL_PRTY(0x2) | RL_PRTY(0x1) |
1272 (he_is622(he_dev) ? BUS_MULTI(0x28) : BUS_MULTI(0x46)) |
1273 (he_is622(he_dev) ? NET_PREF(0x50) : NET_PREF(0x8c)),
1276 he_writel(he_dev, BANK_ON |
1277 (he_is622(he_dev) ? (REF_RATE(0x384) | WIDE_DATA) : REF_RATE(0x150)),
1281 (he_is622(he_dev) ? RM_BANK_WAIT(1) : RM_BANK_WAIT(0)) |
1282 RM_RW_WAIT(1), RCMCONFIG);
1284 (he_is622(he_dev) ? TM_BANK_WAIT(2) : TM_BANK_WAIT(1)) |
1285 TM_RW_WAIT(1), TCMCONFIG);
1287 he_writel(he_dev, he_dev->cells_per_lbuf * ATM_CELL_PAYLOAD, LB_CONFIG);
1290 (he_is622(he_dev) ? UT_RD_DELAY(8) : UT_RD_DELAY(0)) |
1291 (he_is622(he_dev) ? RC_UT_MODE(0) : RC_UT_MODE(1)) |
1292 RX_VALVP(he_dev->vpibits) |
1293 RX_VALVC(he_dev->vcibits), RC_CONFIG);
1295 he_writel(he_dev, DRF_THRESH(0x20) |
1296 (he_is622(he_dev) ? TX_UT_MODE(0) : TX_UT_MODE(1)) |
1297 TX_VCI_MASK(he_dev->vcibits) |
1298 LBFREE_CNT(he_dev->tx_numbuffs), TX_CONFIG);
1300 he_writel(he_dev, 0x0, TXAAL5_PROTO);
1302 he_writel(he_dev, PHY_INT_ENB |
1303 (he_is622(he_dev) ? PTMR_PRE(67 - 1) : PTMR_PRE(50 - 1)),
1306 /* 5.1.3 initialize connection memory */
1308 for (i = 0; i < TCM_MEM_SIZE; ++i)
1309 he_writel_tcm(he_dev, 0, i);
1311 for (i = 0; i < RCM_MEM_SIZE; ++i)
1312 he_writel_rcm(he_dev, 0, i);
1315 * transmit connection memory map
1318 *
1324 * 0x00000 tsr a (eight words per cid)
1327 * 0x08000 tsr b (four words per cid)
1330 * 0x0c000 tsr c (two words per cid)
1332 * 0x0e000 tsr d (one word per cid)
1334 * 0x0f000 tmabr
1337 * 0x10000 tpd
1340 * 0x1ffff top of transmit connection memory
1345 he_writel(he_dev, CONFIG_TSRB, TSRB_BA);
1346 he_writel(he_dev, CONFIG_TSRC, TSRC_BA);
1347 he_writel(he_dev, CONFIG_TSRD, TSRD_BA);
1348 he_writel(he_dev, CONFIG_TMABR, TMABR_BA);
1349 he_writel(he_dev, CONFIG_TPDBA, TPD_BA);
1353 * receive connection memory map
1355 *
1361 * 0x0000 rsr entries (eight words per cid)
1364 * 0x8000 LBM -- link lists of local buffer memory
1365 * (rx and tx; internal boundary at 0xd000)
1370 * 0xe000 rsr b
1376 * 0xffff top of receive connection memory
1379 he_writel(he_dev, 0x08000, RCMLBM_BA);
1380 he_writel(he_dev, 0x0e000, RCMRSRB_BA);
1381 he_writel(he_dev, 0x0d800, RCMABR_BA);
1383 /* 5.1.4 initialize local buffer free pools linked lists */
1385 he_init_rx_lbfp0(he_dev);
1386 he_init_rx_lbfp1(he_dev);
1388 he_writel(he_dev, 0x0, RLBC_H);
1389 he_writel(he_dev, 0x0, RLBC_T);
1390 he_writel(he_dev, 0x0, RLBC_H2);
1392 he_writel(he_dev, 512, RXTHRSH); /* 10% of r0+r1 buffers */
1393 he_writel(he_dev, 256, LITHRSH); /* 5% of r0+r1 buffers */
1395 he_init_tx_lbfp(he_dev);
1397 he_writel(he_dev, he_is622(he_dev) ? 0x104780 : 0x800, UBUFF_BA);
1399 /* 5.1.5 initialize intermediate receive queues */
1401 if (he_is622(he_dev)) {
1402 he_writel(he_dev, 0x000f, G0_INMQ_S);
1403 he_writel(he_dev, 0x200f, G0_INMQ_L);
1405 he_writel(he_dev, 0x001f, G1_INMQ_S);
1406 he_writel(he_dev, 0x201f, G1_INMQ_L);
1408 he_writel(he_dev, 0x002f, G2_INMQ_S);
1409 he_writel(he_dev, 0x202f, G2_INMQ_L);
1411 he_writel(he_dev, 0x003f, G3_INMQ_S);
1412 he_writel(he_dev, 0x203f, G3_INMQ_L);
1414 he_writel(he_dev, 0x004f, G4_INMQ_S);
1415 he_writel(he_dev, 0x204f, G4_INMQ_L);
1417 he_writel(he_dev, 0x005f, G5_INMQ_S);
1418 he_writel(he_dev, 0x205f, G5_INMQ_L);
1420 he_writel(he_dev, 0x006f, G6_INMQ_S);
1421 he_writel(he_dev, 0x206f, G6_INMQ_L);
1423 he_writel(he_dev, 0x007f, G7_INMQ_S);
1424 he_writel(he_dev, 0x207f, G7_INMQ_L);
1426 he_writel(he_dev, 0x0000, G0_INMQ_S);
1427 he_writel(he_dev, 0x0008, G0_INMQ_L);
1429 he_writel(he_dev, 0x0001, G1_INMQ_S);
1430 he_writel(he_dev, 0x0009, G1_INMQ_L);
1432 he_writel(he_dev, 0x0002, G2_INMQ_S);
1433 he_writel(he_dev, 0x000a, G2_INMQ_L);
1435 he_writel(he_dev, 0x0003, G3_INMQ_S);
1436 he_writel(he_dev, 0x000b, G3_INMQ_L);
1438 he_writel(he_dev, 0x0004, G4_INMQ_S);
1439 he_writel(he_dev, 0x000c, G4_INMQ_L);
1441 he_writel(he_dev, 0x0005, G5_INMQ_S);
1442 he_writel(he_dev, 0x000d, G5_INMQ_L);
1444 he_writel(he_dev, 0x0006, G6_INMQ_S);
1445 he_writel(he_dev, 0x000e, G6_INMQ_L);
1447 he_writel(he_dev, 0x0007, G7_INMQ_S);
1448 he_writel(he_dev, 0x000f, G7_INMQ_L);
1451 /* 5.1.6 application tunable parameters */
1453 he_writel(he_dev, 0x0, MCC);
1454 he_writel(he_dev, 0x0, OEC);
1455 he_writel(he_dev, 0x0, DCC);
1456 he_writel(he_dev, 0x0, CEC);
1458 /* 5.1.7 cs block initialization */
1460 he_init_cs_block(he_dev);
1462 /* 5.1.8 cs block connection memory initialization */
1464 if (he_init_cs_block_rcm(he_dev) < 0)
1467 /* 5.1.10 initialize host structures */
1469 he_init_tpdrq(he_dev);
1472 he_dev->tpd_pool = pci_pool_create("tpd", he_dev->pci_dev,
1473 sizeof(struct he_tpd), TPD_ALIGNMENT, 0);
1474 if (he_dev->tpd_pool == NULL) {
1475 hprintk("unable to create tpd pci_pool\n");
1479 INIT_LIST_HEAD(&he_dev->outstanding_tpds);
1481 he_dev->tpd_base = (void *) pci_alloc_consistent(he_dev->pci_dev,
1482 CONFIG_NUMTPDS * sizeof(struct he_tpd), &he_dev->tpd_base_phys);
1483 if (!he_dev->tpd_base)
1486 for (i = 0; i < CONFIG_NUMTPDS; ++i) {
1487 he_dev->tpd_base[i].status = (i << TPD_ADDR_SHIFT);
1488 he_dev->tpd_base[i].inuse = 0;
1491 he_dev->tpd_head = he_dev->tpd_base;
1492 he_dev->tpd_end = &he_dev->tpd_base[CONFIG_NUMTPDS - 1];
1495 if (he_init_group(he_dev, 0) != 0)
1498 for (group = 1; group < HE_NUM_GROUPS; ++group) {
1499 he_writel(he_dev, 0x0, G0_RBPS_S + (group * 32));
1500 he_writel(he_dev, 0x0, G0_RBPS_T + (group * 32));
1501 he_writel(he_dev, 0x0, G0_RBPS_QI + (group * 32));
1502 he_writel(he_dev, RBP_THRESH(0x1) | RBP_QSIZE(0x0),
1503 G0_RBPS_BS + (group * 32));
1505 he_writel(he_dev, 0x0, G0_RBPL_S + (group * 32));
1506 he_writel(he_dev, 0x0, G0_RBPL_T + (group * 32));
1507 he_writel(he_dev, RBP_THRESH(0x1) | RBP_QSIZE(0x0),
1508 G0_RBPL_QI + (group * 32));
1509 he_writel(he_dev, 0x0, G0_RBPL_BS + (group * 32));
1511 he_writel(he_dev, 0x0, G0_RBRQ_ST + (group * 16));
1512 he_writel(he_dev, 0x0, G0_RBRQ_H + (group * 16));
1513 he_writel(he_dev, RBRQ_THRESH(0x1) | RBRQ_SIZE(0x0),
1514 G0_RBRQ_Q + (group * 16));
1515 he_writel(he_dev, 0x0, G0_RBRQ_I + (group * 16));
1517 he_writel(he_dev, 0x0, G0_TBRQ_B_T + (group * 16));
1518 he_writel(he_dev, 0x0, G0_TBRQ_H + (group * 16));
1519 he_writel(he_dev, TBRQ_THRESH(0x1),
1520 G0_TBRQ_THRESH + (group * 16));
1521 he_writel(he_dev, 0x0, G0_TBRQ_S + (group * 16));
1524 /* host status page */
1526 he_dev->hsp = pci_alloc_consistent(he_dev->pci_dev,
1527 sizeof(struct he_hsp), &he_dev->hsp_phys);
1528 if (he_dev->hsp == NULL) {
1529 hprintk("failed to allocate host status page\n");
1532 memset(he_dev->hsp, 0, sizeof(struct he_hsp));
1533 he_writel(he_dev, he_dev->hsp_phys, HSP_BA);
1535 /* initialize framer */
1537 #ifdef CONFIG_ATM_HE_USE_SUNI
1538 suni_init(he_dev->atm_dev);
1539 if (he_dev->atm_dev->phy && he_dev->atm_dev->phy->start)
1540 he_dev->atm_dev->phy->start(he_dev->atm_dev);
1541 #endif /* CONFIG_ATM_HE_USE_SUNI */
1544 /* this really should be in suni.c but for now... */
1547 val = he_phy_get(he_dev->atm_dev, SUNI_TPOP_APM);
1548 val = (val & ~SUNI_TPOP_APM_S) | (SUNI_TPOP_S_SDH << SUNI_TPOP_APM_S_SHIFT);
1549 he_phy_put(he_dev->atm_dev, val, SUNI_TPOP_APM);
1552 /* 5.1.12 enable transmit and receive */
1554 reg = he_readl_mbox(he_dev, CS_ERCTL0);
1555 reg |= TX_ENABLE|ER_ENABLE;
1556 he_writel_mbox(he_dev, reg, CS_ERCTL0);
1558 reg = he_readl(he_dev, RC_CONFIG);
1560 he_writel(he_dev, reg, RC_CONFIG);
1562 for (i = 0; i < HE_NUM_CS_STPER; ++i) {
1563 he_dev->cs_stper[i].inuse = 0;
1564 he_dev->cs_stper[i].pcr = -1;
1566 he_dev->total_bw = 0;
1569 /* atm linux initialization */
1571 he_dev->atm_dev->ci_range.vpi_bits = he_dev->vpibits;
1572 he_dev->atm_dev->ci_range.vci_bits = he_dev->vcibits;
1574 he_dev->irq_peak = 0;
1575 he_dev->rbrq_peak = 0;
1576 he_dev->rbpl_peak = 0;
1577 he_dev->tbrq_peak = 0;
1579 HPRINTK("hell bent for leather!\n");
1585 he_stop(struct he_dev *he_dev)
1588 u32 gen_cntl_0, reg;
1589 struct pci_dev *pci_dev;
1591 pci_dev = he_dev->pci_dev;
1593 /* disable interrupts */
1595 if (he_dev->membase) {
1596 pci_read_config_dword(pci_dev, GEN_CNTL_0, &gen_cntl_0);
1597 gen_cntl_0 &= ~(INT_PROC_ENBL | INIT_ENB);
1598 pci_write_config_dword(pci_dev, GEN_CNTL_0, gen_cntl_0);
1601 tasklet_disable(&he_dev->tasklet);
1604 /* disable recv and transmit */
1606 reg = he_readl_mbox(he_dev, CS_ERCTL0);
1607 reg &= ~(TX_ENABLE|ER_ENABLE);
1608 he_writel_mbox(he_dev, reg, CS_ERCTL0);
1610 reg = he_readl(he_dev, RC_CONFIG);
1611 reg &= ~(RX_ENABLE);
1612 he_writel(he_dev, reg, RC_CONFIG);
1615 #ifdef CONFIG_ATM_HE_USE_SUNI
1616 if (he_dev->atm_dev->phy && he_dev->atm_dev->phy->stop)
1617 he_dev->atm_dev->phy->stop(he_dev->atm_dev);
1618 #endif /* CONFIG_ATM_HE_USE_SUNI */
1621 free_irq(he_dev->irq, he_dev);
1623 if (he_dev->irq_base)
1624 pci_free_consistent(he_dev->pci_dev, (CONFIG_IRQ_SIZE+1)
1625 * sizeof(struct he_irq), he_dev->irq_base, he_dev->irq_phys);
1628 pci_free_consistent(he_dev->pci_dev, sizeof(struct he_hsp),
1629 he_dev->hsp, he_dev->hsp_phys);
1631 if (he_dev->rbpl_base) {
1632 #ifdef USE_RBPL_POOL
1633 for (i = 0; i < CONFIG_RBPL_SIZE; ++i) {
1634 void *cpuaddr = he_dev->rbpl_virt[i].virt;
1635 dma_addr_t dma_handle = he_dev->rbpl_base[i].phys;
1637 pci_pool_free(he_dev->rbpl_pool, cpuaddr, dma_handle);
1640 pci_free_consistent(he_dev->pci_dev, CONFIG_RBPL_SIZE
1641 * CONFIG_RBPL_BUFSIZE, he_dev->rbpl_pages, he_dev->rbpl_pages_phys);
1643 pci_free_consistent(he_dev->pci_dev, CONFIG_RBPL_SIZE
1644 * sizeof(struct he_rbp), he_dev->rbpl_base, he_dev->rbpl_phys);
1647 #ifdef USE_RBPL_POOL
1648 if (he_dev->rbpl_pool)
1649 pci_pool_destroy(he_dev->rbpl_pool);
1653 if (he_dev->rbps_base) {
1654 #ifdef USE_RBPS_POOL
1655 for (i = 0; i < CONFIG_RBPS_SIZE; ++i) {
1656 void *cpuaddr = he_dev->rbps_virt[i].virt;
1657 dma_addr_t dma_handle = he_dev->rbps_base[i].phys;
1659 pci_pool_free(he_dev->rbps_pool, cpuaddr, dma_handle);
1662 pci_free_consistent(he_dev->pci_dev, CONFIG_RBPS_SIZE
1663 * CONFIG_RBPS_BUFSIZE, he_dev->rbps_pages, he_dev->rbps_pages_phys);
1665 pci_free_consistent(he_dev->pci_dev, CONFIG_RBPS_SIZE
1666 * sizeof(struct he_rbp), he_dev->rbps_base, he_dev->rbps_phys);
1669 #ifdef USE_RBPS_POOL
1670 if (he_dev->rbps_pool)
1671 pci_pool_destroy(he_dev->rbps_pool);
1674 #endif /* USE_RBPS */
1676 if (he_dev->rbrq_base)
1677 pci_free_consistent(he_dev->pci_dev, CONFIG_RBRQ_SIZE * sizeof(struct he_rbrq),
1678 he_dev->rbrq_base, he_dev->rbrq_phys);
1680 if (he_dev->tbrq_base)
1681 pci_free_consistent(he_dev->pci_dev, CONFIG_TBRQ_SIZE * sizeof(struct he_tbrq),
1682 he_dev->tbrq_base, he_dev->tbrq_phys);
1684 if (he_dev->tpdrq_base)
1685 pci_free_consistent(he_dev->pci_dev, CONFIG_TPDRQ_SIZE * sizeof(struct he_tpdrq),
1686 he_dev->tpdrq_base, he_dev->tpdrq_phys);
1689 if (he_dev->tpd_pool)
1690 pci_pool_destroy(he_dev->tpd_pool);
1692 if (he_dev->tpd_base)
1693 pci_free_consistent(he_dev->pci_dev, CONFIG_NUMTPDS * sizeof(struct he_tpd),
1694 he_dev->tpd_base, he_dev->tpd_base_phys);
1697 if (he_dev->pci_dev) {
1698 pci_read_config_word(he_dev->pci_dev, PCI_COMMAND, &command);
1699 command &= ~(PCI_COMMAND_MEMORY | PCI_COMMAND_MASTER);
1700 pci_write_config_word(he_dev->pci_dev, PCI_COMMAND, command);
1703 if (he_dev->membase)
1704 iounmap((void *) he_dev->membase);
1707 static struct he_tpd *
1708 __alloc_tpd(struct he_dev *he_dev)
1712 dma_addr_t dma_handle;
1714 tpd = pci_pool_alloc(he_dev->tpd_pool, SLAB_ATOMIC|SLAB_DMA, &dma_handle);
1718 tpd->status = TPD_ADDR(dma_handle);
1720 tpd->iovec[0].addr = 0; tpd->iovec[0].len = 0;
1721 tpd->iovec[1].addr = 0; tpd->iovec[1].len = 0;
1722 tpd->iovec[2].addr = 0; tpd->iovec[2].len = 0;
1728 for (i = 0; i < CONFIG_NUMTPDS; ++i) {
1730 if (he_dev->tpd_head > he_dev->tpd_end) {
1731 he_dev->tpd_head = he_dev->tpd_base;
1734 if (!he_dev->tpd_head->inuse) {
1735 he_dev->tpd_head->inuse = 1;
1736 he_dev->tpd_head->status &= TPD_MASK;
1737 he_dev->tpd_head->iovec[0].addr = 0; he_dev->tpd_head->iovec[0].len = 0;
1738 he_dev->tpd_head->iovec[1].addr = 0; he_dev->tpd_head->iovec[1].len = 0;
1739 he_dev->tpd_head->iovec[2].addr = 0; he_dev->tpd_head->iovec[2].len = 0;
1740 return he_dev->tpd_head;
1743 hprintk("out of tpds -- increase CONFIG_NUMTPDS (%d)\n", CONFIG_NUMTPDS);
1748 #define AAL5_LEN(buf,len) \
1749 ((((unsigned char *)(buf))[(len)-6] << 8) | \
1750 (((unsigned char *)(buf))[(len)-5]))
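/* the aal5 cpcs trailer is 8 bytes (uu, cpi, 2-byte length, 4-byte crc),
 * so the length field sits at offsets len-6 and len-5 of the reassembled pdu */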
1754 * aal5 packets can optionally return the tcp checksum in the lower
1755 * 16 bits of the crc (RSR0_TCP_CKSUM)
1758 #define TCP_CKSUM(buf,len) \
1759 ((((unsigned char *)(buf))[(len)-2] << 8) | \
1760 (((unsigned char *)(buf))[(len-1)]))
1763 he_service_rbrq(struct he_dev *he_dev, int group)
1765 struct he_rbrq *rbrq_tail = (struct he_rbrq *)
1766 ((unsigned long)he_dev->rbrq_base |
1767 he_dev->hsp->group[group].rbrq_tail);
1768 struct he_rbp *rbp = NULL;
1769 unsigned cid, lastcid = -1;
1770 unsigned buf_len = 0;
1771 struct sk_buff *skb;
1772 struct atm_vcc *vcc = NULL;
1773 struct he_vcc *he_vcc;
1774 struct he_iovec *iov;
1775 int pdus_assembled = 0;
1778 read_lock(&vcc_sklist_lock);
1779 while (he_dev->rbrq_head != rbrq_tail) {
1782 HPRINTK("%p rbrq%d 0x%x len=%d cid=0x%x %s%s%s%s%s%s\n",
1783 he_dev->rbrq_head, group,
1784 RBRQ_ADDR(he_dev->rbrq_head),
1785 RBRQ_BUFLEN(he_dev->rbrq_head),
1786 RBRQ_CID(he_dev->rbrq_head),
1787 RBRQ_CRC_ERR(he_dev->rbrq_head) ? " CRC_ERR" : "",
1788 RBRQ_LEN_ERR(he_dev->rbrq_head) ? " LEN_ERR" : "",
1789 RBRQ_END_PDU(he_dev->rbrq_head) ? " END_PDU" : "",
1790 RBRQ_AAL5_PROT(he_dev->rbrq_head) ? " AAL5_PROT" : "",
1791 RBRQ_CON_CLOSED(he_dev->rbrq_head) ? " CON_CLOSED" : "",
1792 RBRQ_HBUF_ERR(he_dev->rbrq_head) ? " HBUF_ERR" : "");
1795 if (RBRQ_ADDR(he_dev->rbrq_head) & RBP_SMALLBUF)
1796 rbp = &he_dev->rbps_base[RBP_INDEX(RBRQ_ADDR(he_dev->rbrq_head))];
1799 rbp = &he_dev->rbpl_base[RBP_INDEX(RBRQ_ADDR(he_dev->rbrq_head))];
1801 buf_len = RBRQ_BUFLEN(he_dev->rbrq_head) * 4;
1802 cid = RBRQ_CID(he_dev->rbrq_head);
1805 vcc = __find_vcc(he_dev, cid);
1809 hprintk("vcc == NULL (cid 0x%x)\n", cid);
1810 if (!RBRQ_HBUF_ERR(he_dev->rbrq_head))
1811 rbp->status &= ~RBP_LOANED;
1813 goto next_rbrq_entry;
1816 he_vcc = HE_VCC(vcc);
1817 if (he_vcc == NULL) {
1818 hprintk("he_vcc == NULL (cid 0x%x)\n", cid);
1819 if (!RBRQ_HBUF_ERR(he_dev->rbrq_head))
1820 rbp->status &= ~RBP_LOANED;
1821 goto next_rbrq_entry;
1824 if (RBRQ_HBUF_ERR(he_dev->rbrq_head)) {
1825 hprintk("HBUF_ERR! (cid 0x%x)\n", cid);
1826 atomic_inc(&vcc->stats->rx_drop);
1827 goto return_host_buffers;
1830 he_vcc->iov_tail->iov_base = RBRQ_ADDR(he_dev->rbrq_head);
1831 he_vcc->iov_tail->iov_len = buf_len;
1832 he_vcc->pdu_len += buf_len;
1835 if (RBRQ_CON_CLOSED(he_dev->rbrq_head)) {
1837 HPRINTK("wake_up rx_waitq (cid 0x%x)\n", cid);
1838 wake_up(&he_vcc->rx_waitq);
1839 goto return_host_buffers;
1843 if ((he_vcc->iov_tail - he_vcc->iov_head) > HE_MAXIOV) {
1844 hprintk("iovec full! cid 0x%x\n", cid);
1845 goto return_host_buffers;
1848 if (!RBRQ_END_PDU(he_dev->rbrq_head))
1849 goto next_rbrq_entry;
1851 if (RBRQ_LEN_ERR(he_dev->rbrq_head)
1852 || RBRQ_CRC_ERR(he_dev->rbrq_head)) {
1853 HPRINTK("%s%s (%d.%d)\n",
1854 RBRQ_CRC_ERR(he_dev->rbrq_head)
1856 RBRQ_LEN_ERR(he_dev->rbrq_head)
1858 vcc->vpi, vcc->vci);
1859 atomic_inc(&vcc->stats->rx_err);
1860 goto return_host_buffers;
1863 skb = atm_alloc_charge(vcc, he_vcc->pdu_len + rx_skb_reserve,
1866 HPRINTK("charge failed (%d.%d)\n", vcc->vpi, vcc->vci);
1867 goto return_host_buffers;
1870 if (rx_skb_reserve > 0)
1871 skb_reserve(skb, rx_skb_reserve);
1873 do_gettimeofday(&skb->stamp);
1875 for (iov = he_vcc->iov_head;
1876 iov < he_vcc->iov_tail; ++iov) {
1878 if (iov->iov_base & RBP_SMALLBUF)
1879 memcpy(skb_put(skb, iov->iov_len),
1880 he_dev->rbps_virt[RBP_INDEX(iov->iov_base)].virt, iov->iov_len);
1883 memcpy(skb_put(skb, iov->iov_len),
1884 he_dev->rbpl_virt[RBP_INDEX(iov->iov_base)].virt, iov->iov_len);
1887 switch (vcc->qos.aal) {
1889 /* 2.10.1.5 raw cell receive */
1890 skb->len = ATM_AAL0_SDU;
1891 skb->tail = skb->data + skb->len;
1894 /* 2.10.1.2 aal5 receive */
1896 skb->len = AAL5_LEN(skb->data, he_vcc->pdu_len);
1897 skb->tail = skb->data + skb->len;
1898 #ifdef USE_CHECKSUM_HW
1899 if (vcc->vpi == 0 && vcc->vci >= ATM_NOT_RSV_VCI) {
1900 skb->ip_summed = CHECKSUM_HW;
1901 skb->csum = TCP_CKSUM(skb->data,
1908 #ifdef should_never_happen
1909 if (skb->len > vcc->qos.rxtp.max_sdu)
1910 hprintk("pdu_len (%d) > vcc->qos.rxtp.max_sdu (%d)! cid 0x%x\n", skb->len, vcc->qos.rxtp.max_sdu, cid);
1914 ATM_SKB(skb)->vcc = vcc;
1916 vcc->push(vcc, skb);
1918 atomic_inc(&vcc->stats->rx);
1920 return_host_buffers:
1923 for (iov = he_vcc->iov_head;
1924 iov < he_vcc->iov_tail; ++iov) {
1926 if (iov->iov_base & RBP_SMALLBUF)
1927 rbp = &he_dev->rbps_base[RBP_INDEX(iov->iov_base)];
1930 rbp = &he_dev->rbpl_base[RBP_INDEX(iov->iov_base)];
1932 rbp->status &= ~RBP_LOANED;
1935 he_vcc->iov_tail = he_vcc->iov_head;
1936 he_vcc->pdu_len = 0;
1939 he_dev->rbrq_head = (struct he_rbrq *)
1940 ((unsigned long) he_dev->rbrq_base |
1941 RBRQ_MASK(++he_dev->rbrq_head));
1944 read_unlock(&vcc_sklist_lock);
1947 if (updated > he_dev->rbrq_peak)
1948 he_dev->rbrq_peak = updated;
1950 he_writel(he_dev, RBRQ_MASK(he_dev->rbrq_head),
1951 G0_RBRQ_H + (group * 16));
1954 return pdus_assembled;
1958 he_service_tbrq(struct he_dev *he_dev, int group)
1960 struct he_tbrq *tbrq_tail = (struct he_tbrq *)
1961 ((unsigned long)he_dev->tbrq_base |
1962 he_dev->hsp->group[group].tbrq_tail);
1964 int slot, updated = 0;
1966 struct he_tpd *__tpd;
1969 /* 2.1.6 transmit buffer return queue */
1971 while (he_dev->tbrq_head != tbrq_tail) {
1974 HPRINTK("tbrq%d 0x%x%s%s\n",
1976 TBRQ_TPD(he_dev->tbrq_head),
1977 TBRQ_EOS(he_dev->tbrq_head) ? " EOS" : "",
1978 TBRQ_MULTIPLE(he_dev->tbrq_head) ? " MULTIPLE" : "");
1981 list_for_each_entry(__tpd, &he_dev->outstanding_tpds, entry) {
1982 if (TPD_ADDR(__tpd->status) == TBRQ_TPD(he_dev->tbrq_head)) {
1984 list_del(&__tpd->entry);
1990 hprintk("unable to locate tpd for dma buffer %x\n",
1991 TBRQ_TPD(he_dev->tbrq_head));
1992 goto next_tbrq_entry;
1995 tpd = &he_dev->tpd_base[ TPD_INDEX(TBRQ_TPD(he_dev->tbrq_head)) ];
1998 if (TBRQ_EOS(he_dev->tbrq_head)) {
1999 HPRINTK("wake_up(tx_waitq) cid 0x%x\n",
2000 he_mkcid(he_dev, tpd->vcc->vpi, tpd->vcc->vci));
2002 wake_up(&HE_VCC(tpd->vcc)->tx_waitq);
2004 goto next_tbrq_entry;
2007 for (slot = 0; slot < TPD_MAXIOV; ++slot) {
2008 if (tpd->iovec[slot].addr)
2009 pci_unmap_single(he_dev->pci_dev,
2010 tpd->iovec[slot].addr,
2011 tpd->iovec[slot].len & TPD_LEN_MASK,
2013 if (tpd->iovec[slot].len & TPD_LST)
2018 if (tpd->skb) { /* && !TBRQ_MULTIPLE(he_dev->tbrq_head) */
2019 if (tpd->vcc && tpd->vcc->pop)
2020 tpd->vcc->pop(tpd->vcc, tpd->skb);
2022 dev_kfree_skb_any(tpd->skb);
2028 pci_pool_free(he_dev->tpd_pool, tpd, TPD_ADDR(tpd->status));
2032 he_dev->tbrq_head = (struct he_tbrq *)
2033 ((unsigned long) he_dev->tbrq_base |
2034 TBRQ_MASK(++he_dev->tbrq_head));
2038 if (updated > he_dev->tbrq_peak)
2039 he_dev->tbrq_peak = updated;
2041 he_writel(he_dev, TBRQ_MASK(he_dev->tbrq_head),
2042 G0_TBRQ_H + (group * 16));
2048 he_service_rbpl(struct he_dev *he_dev, int group)
2050 struct he_rbp *newtail;
2051 struct he_rbp *rbpl_head;
2054 rbpl_head = (struct he_rbp *) ((unsigned long)he_dev->rbpl_base |
2055 RBPL_MASK(he_readl(he_dev, G0_RBPL_S)));
2058 newtail = (struct he_rbp *) ((unsigned long)he_dev->rbpl_base |
2059 RBPL_MASK(he_dev->rbpl_tail+1));
2061 /* table 3.42 -- rbpl_tail should never be set to rbpl_head */
2062 if ((newtail == rbpl_head) || (newtail->status & RBP_LOANED))
2065 newtail->status |= RBP_LOANED;
2066 he_dev->rbpl_tail = newtail;
2071 he_writel(he_dev, RBPL_MASK(he_dev->rbpl_tail), G0_RBPL_T);
2076 he_service_rbps(struct he_dev *he_dev, int group)
2078 struct he_rbp *newtail;
2079 struct he_rbp *rbps_head;
2082 rbps_head = (struct he_rbp *) ((unsigned long)he_dev->rbps_base |
2083 RBPS_MASK(he_readl(he_dev, G0_RBPS_S)));
2086 newtail = (struct he_rbp *) ((unsigned long)he_dev->rbps_base |
2087 RBPS_MASK(he_dev->rbps_tail+1));
2089 /* table 3.42 -- rbps_tail should never be set to rbps_head */
2090 if ((newtail == rbps_head) || (newtail->status & RBP_LOANED))
2093 newtail->status |= RBP_LOANED;
2094 he_dev->rbps_tail = newtail;
2099 he_writel(he_dev, RBPS_MASK(he_dev->rbps_tail), G0_RBPS_T);
2101 #endif /* USE_RBPS */
2104 he_tasklet(unsigned long data)
2106 unsigned long flags;
2107 struct he_dev *he_dev = (struct he_dev *) data;
2111 HPRINTK("tasklet (0x%lx)\n", data);
2113 spin_lock_irqsave(&he_dev->global_lock, flags);
2116 while (he_dev->irq_head != he_dev->irq_tail) {
2119 type = ITYPE_TYPE(he_dev->irq_head->isw);
2120 group = ITYPE_GROUP(he_dev->irq_head->isw);
2123 case ITYPE_RBRQ_THRESH:
2124 HPRINTK("rbrq%d threshold\n", group);
2126 case ITYPE_RBRQ_TIMER:
2127 if (he_service_rbrq(he_dev, group)) {
2128 he_service_rbpl(he_dev, group);
2130 he_service_rbps(he_dev, group);
2131 #endif /* USE_RBPS */
2134 case ITYPE_TBRQ_THRESH:
2135 HPRINTK("tbrq%d threshold\n", group);
2137 case ITYPE_TPD_COMPLETE:
2138 he_service_tbrq(he_dev, group);
2140 case ITYPE_RBPL_THRESH:
2141 he_service_rbpl(he_dev, group);
2143 case ITYPE_RBPS_THRESH:
2145 he_service_rbps(he_dev, group);
2146 #endif /* USE_RBPS */
2149 HPRINTK("phy interrupt\n");
2150 #ifdef CONFIG_ATM_HE_USE_SUNI
2151 spin_unlock_irqrestore(&he_dev->global_lock, flags);
2152 if (he_dev->atm_dev->phy && he_dev->atm_dev->phy->interrupt)
2153 he_dev->atm_dev->phy->interrupt(he_dev->atm_dev);
2154 spin_lock_irqsave(&he_dev->global_lock, flags);
2158 switch (type|group) {
2160 hprintk("parity error\n");
2163 hprintk("abort 0x%x\n", he_readl(he_dev, ABORT_ADDR));
2167 case ITYPE_TYPE(ITYPE_INVALID):
2168 /* see 8.1.1 -- check all queues */
2170 HPRINTK("isw not updated 0x%x\n", he_dev->irq_head->isw);
2172 he_service_rbrq(he_dev, 0);
2173 he_service_rbpl(he_dev, 0);
2175 he_service_rbps(he_dev, 0);
2176 #endif /* USE_RBPS */
2177 he_service_tbrq(he_dev, 0);
2180 hprintk("bad isw 0x%x?\n", he_dev->irq_head->isw);
2183 he_dev->irq_head->isw = ITYPE_INVALID;
2185 he_dev->irq_head = (struct he_irq *) NEXT_ENTRY(he_dev->irq_base, he_dev->irq_head, IRQ_MASK);
2189 if (updated > he_dev->irq_peak)
2190 he_dev->irq_peak = updated;
2193 IRQ_SIZE(CONFIG_IRQ_SIZE) |
2194 IRQ_THRESH(CONFIG_IRQ_THRESH) |
2195 IRQ_TAIL(he_dev->irq_tail), IRQ0_HEAD);
2196 (void) he_readl(he_dev, INT_FIFO); /* 8.1.2 controller errata; flush posted writes */
2199 spin_unlock_irqrestore(&he_dev->global_lock, flags);
2204 he_irq_handler(int irq, void *dev_id, struct pt_regs *regs)
2206 unsigned long flags;
2207 struct he_dev *he_dev = (struct he_dev * )dev_id;
2213 spin_lock_irqsave(&he_dev->global_lock, flags);
2215 he_dev->irq_tail = (struct he_irq *) (((unsigned long)he_dev->irq_base) |
2216 (*he_dev->irq_tailoffset << 2));
2218 if (he_dev->irq_tail == he_dev->irq_head) {
2219 HPRINTK("tailoffset not updated?\n");
2220 he_dev->irq_tail = (struct he_irq *) ((unsigned long)he_dev->irq_base |
2221 ((he_readl(he_dev, IRQ0_BASE) & IRQ_MASK) << 2));
2222 (void) he_readl(he_dev, INT_FIFO); /* 8.1.2 controller errata */
2226 if (he_dev->irq_head == he_dev->irq_tail /* && !IRQ_PENDING */)
2227 hprintk("spurious (or shared) interrupt?\n");
2230 if (he_dev->irq_head != he_dev->irq_tail) {
2233 tasklet_schedule(&he_dev->tasklet);
2235 he_tasklet((unsigned long) he_dev);
2237 he_writel(he_dev, INT_CLEAR_A, INT_FIFO); /* clear interrupt */
2238 (void) he_readl(he_dev, INT_FIFO); /* flush posted writes */
2240 spin_unlock_irqrestore(&he_dev->global_lock, flags);
2241 return IRQ_RETVAL(handled);
2245 static __inline__ void
2246 __enqueue_tpd(struct he_dev *he_dev, struct he_tpd *tpd, unsigned cid)
2248 struct he_tpdrq *new_tail;
2250 HPRINTK("tpdrq %p cid 0x%x -> tpdrq_tail %p\n",
2251 tpd, cid, he_dev->tpdrq_tail);
2253 /* new_tail = he_dev->tpdrq_tail; */
2254 new_tail = (struct he_tpdrq *) ((unsigned long) he_dev->tpdrq_base |
2255 TPDRQ_MASK(he_dev->tpdrq_tail+1));
2258 * check to see if we are about to set the tail == head
2259 * if true, update the head pointer from the adapter
2260 * to see if this is really the case (reading the queue
2261 * head for every enqueue would be unnecessarily slow)
2264 if (new_tail == he_dev->tpdrq_head) {
2265 he_dev->tpdrq_head = (struct he_tpdrq *)
2266 (((unsigned long)he_dev->tpdrq_base) |
2267 TPDRQ_MASK(he_readl(he_dev, TPDRQ_B_H)));
2269 if (new_tail == he_dev->tpdrq_head) {
2270 hprintk("tpdrq full (cid 0x%x)\n", cid);
2273 * push tpd onto a transmit backlog queue
2274 * after service_tbrq, service the backlog
2275 * for now, we just drop the pdu
2279 tpd->vcc->pop(tpd->vcc, tpd->skb);
2281 dev_kfree_skb_any(tpd->skb);
2282 atomic_inc(&tpd->vcc->stats->tx_err);
2285 pci_pool_free(he_dev->tpd_pool, tpd, TPD_ADDR(tpd->status));
2293 /* 2.1.5 transmit packet descriptor ready queue */
2295 list_add_tail(&tpd->entry, &he_dev->outstanding_tpds);
2296 he_dev->tpdrq_tail->tpd = TPD_ADDR(tpd->status);
2298 he_dev->tpdrq_tail->tpd = he_dev->tpd_base_phys +
2299 (TPD_INDEX(tpd->status) * sizeof(struct he_tpd));
2301 he_dev->tpdrq_tail->cid = cid;
2304 he_dev->tpdrq_tail = new_tail;
2306 he_writel(he_dev, TPDRQ_MASK(he_dev->tpdrq_tail), TPDRQ_T);
2307 (void) he_readl(he_dev, TPDRQ_T); /* flush posted writes */
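/*
 * he_open -- per-vcc open.  Builds the connection id from vpi/vci,
 * allocates the per-connection he_vcc bookkeeping, then programs the
 * transmit side (TSR0..TSR14: AAL type plus UBR or CBR scheduling) and
 * the receive side (RSR0/1/4: group and buffer-pool selection, EPD/PPD
 * for UBR) before marking the vcc ATM_VF_READY.
 */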
2311 he_open(struct atm_vcc *vcc)
2313 unsigned long flags;
2314 struct he_dev *he_dev = HE_DEV(vcc->dev);
2315 struct he_vcc *he_vcc;
2317 unsigned cid, rsr0, rsr1, rsr4, tsr0, tsr0_aal, tsr4, period, reg, clock;
2318 short vpi = vcc->vpi;
2321 if (vci == ATM_VCI_UNSPEC || vpi == ATM_VPI_UNSPEC)
2324 HPRINTK("open vcc %p %d.%d\n", vcc, vpi, vci);
2326 set_bit(ATM_VF_ADDR, &vcc->flags);
2328 cid = he_mkcid(he_dev, vpi, vci);
2330 he_vcc = (struct he_vcc *) kmalloc(sizeof(struct he_vcc), GFP_ATOMIC);
2331 if (he_vcc == NULL) {
2332 hprintk("unable to allocate he_vcc during open\n");
2336 he_vcc->iov_tail = he_vcc->iov_head;
2337 he_vcc->pdu_len = 0;
2338 he_vcc->rc_index = -1;
2340 init_waitqueue_head(&he_vcc->rx_waitq);
2341 init_waitqueue_head(&he_vcc->tx_waitq);
2343 vcc->dev_data = he_vcc;
2345 if (vcc->qos.txtp.traffic_class != ATM_NONE) {
2348 pcr_goal = atm_pcr_goal(&vcc->qos.txtp);
2350 pcr_goal = he_dev->atm_dev->link_rate;
2351 if (pcr_goal < 0) /* means round down, technically */
2352 pcr_goal = -pcr_goal;
2354 HPRINTK("open tx cid 0x%x pcr_goal %d\n", cid, pcr_goal);
2356 switch (vcc->qos.aal) {
2358 tsr0_aal = TSR0_AAL5;
2362 tsr0_aal = TSR0_AAL0_SDU;
2363 tsr4 = TSR4_AAL0_SDU;
2370 spin_lock_irqsave(&he_dev->global_lock, flags);
2371 tsr0 = he_readl_tsr0(he_dev, cid);
2372 spin_unlock_irqrestore(&he_dev->global_lock, flags);
2374 if (TSR0_CONN_STATE(tsr0) != 0) {
2375 hprintk("cid 0x%x not idle (tsr0 = 0x%x)\n", cid, tsr0);
2380 switch (vcc->qos.txtp.traffic_class) {
2382 /* 2.3.3.1 open connection ubr */
2384 tsr0 = TSR0_UBR | TSR0_GROUP(0) | tsr0_aal |
2385 TSR0_USE_WMIN | TSR0_UPDATE_GER;
2389 /* 2.3.3.2 open connection cbr */
2391 /* 8.2.3 cbr scheduler wrap problem -- limit to 90% total link rate */
2392 if ((he_dev->total_bw + pcr_goal)
2393 > (he_dev->atm_dev->link_rate * 9 / 10))
2399 spin_lock_irqsave(&he_dev->global_lock, flags); /* also protects he_dev->cs_stper[] */
2401 /* find an unused cs_stper register */
2402 for (reg = 0; reg < HE_NUM_CS_STPER; ++reg)
2403 if (he_dev->cs_stper[reg].inuse == 0 ||
2404 he_dev->cs_stper[reg].pcr == pcr_goal)
2407 if (reg == HE_NUM_CS_STPER) {
2409 spin_unlock_irqrestore(&he_dev->global_lock, flags);
2413 he_dev->total_bw += pcr_goal;
2415 he_vcc->rc_index = reg;
2416 ++he_dev->cs_stper[reg].inuse;
2417 he_dev->cs_stper[reg].pcr = pcr_goal;
2419 clock = he_is622(he_dev) ? 66667000 : 50000000;
2420 period = clock / pcr_goal;
2422 HPRINTK("rc_index = %d period = %d\n",
2425 he_writel_mbox(he_dev, rate_to_atmf(period/2),
2427 spin_unlock_irqrestore(&he_dev->global_lock, flags);
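/*
 * Illustrative numbers (not taken from the manual): period is the card
 * clock divided by the requested rate, so on a non-622 card (50 MHz) a
 * pcr_goal of 100000 cells/s gives period = 500; half of that is
 * converted with rate_to_atmf() and written to the cs_stper slot chosen
 * above.
 */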
2429 tsr0 = TSR0_CBR | TSR0_GROUP(0) | tsr0_aal |
2438 spin_lock_irqsave(&he_dev->global_lock, flags);
2440 he_writel_tsr0(he_dev, tsr0, cid);
2441 he_writel_tsr4(he_dev, tsr4 | 1, cid);
2442 he_writel_tsr1(he_dev, TSR1_MCR(rate_to_atmf(0)) |
2443 TSR1_PCR(rate_to_atmf(pcr_goal)), cid);
2444 he_writel_tsr2(he_dev, TSR2_ACR(rate_to_atmf(pcr_goal)), cid);
2445 he_writel_tsr9(he_dev, TSR9_OPEN_CONN, cid);
2447 he_writel_tsr3(he_dev, 0x0, cid);
2448 he_writel_tsr5(he_dev, 0x0, cid);
2449 he_writel_tsr6(he_dev, 0x0, cid);
2450 he_writel_tsr7(he_dev, 0x0, cid);
2451 he_writel_tsr8(he_dev, 0x0, cid);
2452 he_writel_tsr10(he_dev, 0x0, cid);
2453 he_writel_tsr11(he_dev, 0x0, cid);
2454 he_writel_tsr12(he_dev, 0x0, cid);
2455 he_writel_tsr13(he_dev, 0x0, cid);
2456 he_writel_tsr14(he_dev, 0x0, cid);
2457 (void) he_readl_tsr0(he_dev, cid); /* flush posted writes */
2458 spin_unlock_irqrestore(&he_dev->global_lock, flags);
2461 if (vcc->qos.rxtp.traffic_class != ATM_NONE) {
2464 HPRINTK("open rx cid 0x%x (rx_waitq %p)\n", cid,
2465 &HE_VCC(vcc)->rx_waitq);
2467 switch (vcc->qos.aal) {
2479 spin_lock_irqsave(&he_dev->global_lock, flags);
2481 rsr0 = he_readl_rsr0(he_dev, cid);
2482 if (rsr0 & RSR0_OPEN_CONN) {
2483 spin_unlock_irqrestore(&he_dev->global_lock, flags);
2485 hprintk("cid 0x%x not idle (rsr0 = 0x%x)\n", cid, rsr0);
2491 rsr1 = RSR1_GROUP(0);
2492 rsr4 = RSR4_GROUP(0);
2493 #else /* !USE_RBPS */
2494 rsr1 = RSR1_GROUP(0)|RSR1_RBPL_ONLY;
2495 rsr4 = RSR4_GROUP(0)|RSR4_RBPL_ONLY;
2496 #endif /* USE_RBPS */
2497 rsr0 = vcc->qos.rxtp.traffic_class == ATM_UBR ?
2498 (RSR0_EPD_ENABLE|RSR0_PPD_ENABLE) : 0;
2500 #ifdef USE_CHECKSUM_HW
2501 if (vpi == 0 && vci >= ATM_NOT_RSV_VCI)
2502 rsr0 |= RSR0_TCP_CKSUM;
2505 he_writel_rsr4(he_dev, rsr4, cid);
2506 he_writel_rsr1(he_dev, rsr1, cid);
2507 /* 5.1.11 last parameter initialized should be
2508 the open/closed indication in rsr0 */
2509 he_writel_rsr0(he_dev,
2510 rsr0 | RSR0_START_PDU | RSR0_OPEN_CONN | aal, cid);
2511 (void) he_readl_rsr0(he_dev, cid); /* flush posted writes */
2513 spin_unlock_irqrestore(&he_dev->global_lock, flags);
2521 clear_bit(ATM_VF_ADDR, &vcc->flags);
2524 set_bit(ATM_VF_READY, &vcc->flags);
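/*
 * he_close -- per-vcc teardown (2.7.2.2 receive, 2.3.1.1 transmit).  The
 * receive side waits for any previous close to drain (RCC_BUSY), then
 * issues RSR0_CLOSE_CONN plus the RXCON_CLOSE mailbox command and sleeps
 * on rx_waitq.  The transmit side waits for the socket write memory to
 * drain, flushes the connection with TSR4_FLUSH_CONN, queues a final TPD
 * marked TPD_EOS|TPD_INT and sleeps on tx_waitq, then polls for
 * TSR4_SESSION_ENDED and an idle TSR0 connection state.  CBR connections
 * also release their cs_stper slot and reserved bandwidth.
 */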
2530 he_close(struct atm_vcc *vcc)
2532 unsigned long flags;
2533 DECLARE_WAITQUEUE(wait, current);
2534 struct he_dev *he_dev = HE_DEV(vcc->dev);
2537 struct he_vcc *he_vcc = HE_VCC(vcc);
2538 #define MAX_RETRY 30
2539 int retry = 0, sleep = 1, tx_inuse;
2541 HPRINTK("close vcc %p %d.%d\n", vcc, vcc->vpi, vcc->vci);
2543 clear_bit(ATM_VF_READY, &vcc->flags);
2544 cid = he_mkcid(he_dev, vcc->vpi, vcc->vci);
2546 if (vcc->qos.rxtp.traffic_class != ATM_NONE) {
2549 HPRINTK("close rx cid 0x%x\n", cid);
2551 /* 2.7.2.2 close receive operation */
2553 /* wait for previous close (if any) to finish */
2555 spin_lock_irqsave(&he_dev->global_lock, flags);
2556 while (he_readl(he_dev, RCC_STAT) & RCC_BUSY) {
2557 HPRINTK("close cid 0x%x RCC_BUSY\n", cid);
2561 add_wait_queue(&he_vcc->rx_waitq, &wait);
2562 set_current_state(TASK_UNINTERRUPTIBLE);
2564 he_writel_rsr0(he_dev, RSR0_CLOSE_CONN, cid);
2565 (void) he_readl_rsr0(he_dev, cid); /* flush posted writes */
2566 he_writel_mbox(he_dev, cid, RXCON_CLOSE);
2567 spin_unlock_irqrestore(&he_dev->global_lock, flags);
2569 timeout = schedule_timeout(30*HZ);
2571 remove_wait_queue(&he_vcc->rx_waitq, &wait);
2572 set_current_state(TASK_RUNNING);
2575 hprintk("close rx timeout cid 0x%x\n", cid);
2577 HPRINTK("close rx cid 0x%x complete\n", cid);
2581 if (vcc->qos.txtp.traffic_class != ATM_NONE) {
2582 volatile unsigned tsr4, tsr0;
2585 HPRINTK("close tx cid 0x%x\n", cid);
2589 * ... the host must first stop queueing packets to the TPDRQ
2590 * on the connection to be closed, then wait for all outstanding
2591 * packets to be transmitted and their buffers returned to the
2592 * TBRQ. When the last packet on the connection arrives in the
2593 * TBRQ, the host issues the close command to the adapter.
2596 while (((tx_inuse = atomic_read(&vcc->sk->sk_wmem_alloc)) > 0) &&
2597 (retry < MAX_RETRY)) {
2606 hprintk("close tx cid 0x%x tx_inuse = %d\n", cid, tx_inuse);
2608 /* 2.3.1.1 generic close operations with flush */
2610 spin_lock_irqsave(&he_dev->global_lock, flags);
2611 he_writel_tsr4_upper(he_dev, TSR4_FLUSH_CONN, cid);
2612 /* also clears TSR4_SESSION_ENDED */
2614 switch (vcc->qos.txtp.traffic_class) {
2616 he_writel_tsr1(he_dev,
2617 TSR1_MCR(rate_to_atmf(200000))
2618 | TSR1_PCR(0), cid);
2621 he_writel_tsr14_upper(he_dev, TSR14_DELETE, cid);
2624 (void) he_readl_tsr4(he_dev, cid); /* flush posted writes */
2626 tpd = __alloc_tpd(he_dev);
2628 hprintk("close tx he_alloc_tpd failed cid 0x%x\n", cid);
2629 goto close_tx_incomplete;
2631 tpd->status |= TPD_EOS | TPD_INT;
2636 add_wait_queue(&he_vcc->tx_waitq, &wait);
2637 set_current_state(TASK_UNINTERRUPTIBLE);
2638 __enqueue_tpd(he_dev, tpd, cid);
2639 spin_unlock_irqrestore(&he_dev->global_lock, flags);
2641 timeout = schedule_timeout(30*HZ);
2643 remove_wait_queue(&he_vcc->tx_waitq, &wait);
2644 set_current_state(TASK_RUNNING);
2646 spin_lock_irqsave(&he_dev->global_lock, flags);
2649 hprintk("close tx timeout cid 0x%x\n", cid);
2650 goto close_tx_incomplete;
2653 while (!((tsr4 = he_readl_tsr4(he_dev, cid)) & TSR4_SESSION_ENDED)) {
2654 HPRINTK("close tx cid 0x%x !TSR4_SESSION_ENDED (tsr4 = 0x%x)\n", cid, tsr4);
2658 while (TSR0_CONN_STATE(tsr0 = he_readl_tsr0(he_dev, cid)) != 0) {
2659 HPRINTK("close tx cid 0x%x TSR0_CONN_STATE != 0 (tsr0 = 0x%x)\n", cid, tsr0);
2663 close_tx_incomplete:
2665 if (vcc->qos.txtp.traffic_class == ATM_CBR) {
2666 int reg = he_vcc->rc_index;
2668 HPRINTK("cs_stper reg = %d\n", reg);
2670 if (he_dev->cs_stper[reg].inuse == 0)
2671 hprintk("cs_stper[%d].inuse = 0!\n", reg);
2673 --he_dev->cs_stper[reg].inuse;
2675 he_dev->total_bw -= he_dev->cs_stper[reg].pcr;
2677 spin_unlock_irqrestore(&he_dev->global_lock, flags);
2679 HPRINTK("close tx cid 0x%x complete\n", cid);
2684 clear_bit(ATM_VF_ADDR, &vcc->flags);
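/*
 * he_send -- transmit one skb.  AAL5 pdus are tagged TPD_USERCELL; for
 * raw AAL0 the skb must be exactly ATM_AAL0_SDU bytes, the PTI and CLP
 * bits are lifted from byte 3 of the cell header and the header is then
 * pulled off, leaving the 48-byte payload.  Without USE_SCATTERGATHER the
 * whole (linear) skb is mapped as a single buffer and nonlinear skbs are
 * rejected.
 */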
2688 he_send(struct atm_vcc *vcc, struct sk_buff *skb)
2690 unsigned long flags;
2691 struct he_dev *he_dev = HE_DEV(vcc->dev);
2692 unsigned cid = he_mkcid(he_dev, vcc->vpi, vcc->vci);
2694 #ifdef USE_SCATTERGATHER
2698 #define HE_TPD_BUFSIZE 0xffff
2700 HPRINTK("send %d.%d\n", vcc->vpi, vcc->vci);
2702 if ((skb->len > HE_TPD_BUFSIZE) ||
2703 ((vcc->qos.aal == ATM_AAL0) && (skb->len != ATM_AAL0_SDU))) {
2704 hprintk("buffer too large (or small) -- %d bytes\n", skb->len );
2708 dev_kfree_skb_any(skb);
2709 atomic_inc(&vcc->stats->tx_err);
2713 #ifndef USE_SCATTERGATHER
2714 if (skb_shinfo(skb)->nr_frags) {
2715 hprintk("no scatter/gather support\n");
2719 dev_kfree_skb_any(skb);
2720 atomic_inc(&vcc->stats->tx_err);
2724 spin_lock_irqsave(&he_dev->global_lock, flags);
2726 tpd = __alloc_tpd(he_dev);
2731 dev_kfree_skb_any(skb);
2732 atomic_inc(&vcc->stats->tx_err);
2733 spin_unlock_irqrestore(&he_dev->global_lock, flags);
2737 if (vcc->qos.aal == ATM_AAL5)
2738 tpd->status |= TPD_CELLTYPE(TPD_USERCELL);
2740 char *pti_clp = (void *) (skb->data + 3);
2743 pti = (*pti_clp & ATM_HDR_PTI_MASK) >> ATM_HDR_PTI_SHIFT;
2744 clp = (*pti_clp & ATM_HDR_CLP);
2745 tpd->status |= TPD_CELLTYPE(pti);
2747 tpd->status |= TPD_CLP;
2749 skb_pull(skb, ATM_AAL0_SDU - ATM_CELL_PAYLOAD);
2752 #ifdef USE_SCATTERGATHER
2753 tpd->iovec[slot].addr = pci_map_single(he_dev->pci_dev, skb->data,
2754 skb->len - skb->data_len, PCI_DMA_TODEVICE);
2755 tpd->iovec[slot].len = skb->len - skb->data_len;
2758 for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) {
2759 skb_frag_t *frag = &skb_shinfo(skb)->frags[i];
2761 if (slot == TPD_MAXIOV) { /* queue tpd; start new tpd */
2763 tpd->skb = NULL; /* not the last fragment,
2764 so don't ->pop() the skb yet */
2767 __enqueue_tpd(he_dev, tpd, cid);
2768 tpd = __alloc_tpd(he_dev);
2773 dev_kfree_skb_any(skb);
2774 atomic_inc(&vcc->stats->tx_err);
2775 spin_unlock_irqrestore(&he_dev->global_lock, flags);
2778 tpd->status |= TPD_USERCELL;
2782 tpd->iovec[slot].addr = pci_map_single(he_dev->pci_dev,
2783 (void *) page_address(frag->page) + frag->page_offset,
2784 frag->size, PCI_DMA_TODEVICE);
2785 tpd->iovec[slot].len = frag->size;
2790 tpd->iovec[slot - 1].len |= TPD_LST;
2792 tpd->address0 = pci_map_single(he_dev->pci_dev, skb->data, skb->len, PCI_DMA_TODEVICE);
2793 tpd->length0 = skb->len | TPD_LST;
2795 tpd->status |= TPD_INT;
2800 ATM_SKB(skb)->vcc = vcc;
2802 __enqueue_tpd(he_dev, tpd, cid);
2803 spin_unlock_irqrestore(&he_dev->global_lock, flags);
2805 atomic_inc(&vcc->stats->tx);
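/*
 * he_ioctl -- driver-private register peek for CAP_NET_ADMIN users: the
 * he_ioctl_reg argument selects the PCI, RCM, TCM or mailbox register
 * space, the value is read under global_lock and copied back to user
 * space.  Other commands are passed to the phy's ioctl when the SUNI
 * layer is compiled in.
 */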
2811 he_ioctl(struct atm_dev *atm_dev, unsigned int cmd, void __user *arg)
2813 unsigned long flags;
2814 struct he_dev *he_dev = HE_DEV(atm_dev);
2815 struct he_ioctl_reg reg;
2820 if (!capable(CAP_NET_ADMIN))
2823 if (copy_from_user(&reg, arg,
2824 sizeof(struct he_ioctl_reg)))
2827 spin_lock_irqsave(&he_dev->global_lock, flags);
2829 case HE_REGTYPE_PCI:
2830 reg.val = he_readl(he_dev, reg.addr);
2832 case HE_REGTYPE_RCM:
2834 he_readl_rcm(he_dev, reg.addr);
2836 case HE_REGTYPE_TCM:
2838 he_readl_tcm(he_dev, reg.addr);
2840 case HE_REGTYPE_MBOX:
2842 he_readl_mbox(he_dev, reg.addr);
2848 spin_unlock_irqrestore(&he_dev->global_lock, flags);
2850 if (copy_to_user(arg, &reg,
2851 sizeof(struct he_ioctl_reg)))
2855 #ifdef CONFIG_ATM_HE_USE_SUNI
2856 if (atm_dev->phy && atm_dev->phy->ioctl)
2857 err = atm_dev->phy->ioctl(atm_dev, cmd, arg);
2858 #else /* CONFIG_ATM_HE_USE_SUNI */
2860 #endif /* CONFIG_ATM_HE_USE_SUNI */
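/*
 * he_phy_put/he_phy_get give the phy driver (e.g. suni) byte access to
 * the framer: phy register 'addr' maps to the word at FRAMER + addr*4,
 * and writes are followed by a readback to flush posted writes.
 */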
2868 he_phy_put(struct atm_dev *atm_dev, unsigned char val, unsigned long addr)
2870 unsigned long flags;
2871 struct he_dev *he_dev = HE_DEV(atm_dev);
2873 HPRINTK("phy_put(val 0x%x, addr 0x%lx)\n", val, addr);
2875 spin_lock_irqsave(&he_dev->global_lock, flags);
2876 he_writel(he_dev, val, FRAMER + (addr*4));
2877 (void) he_readl(he_dev, FRAMER + (addr*4)); /* flush posted writes */
2878 spin_unlock_irqrestore(&he_dev->global_lock, flags);
2882 static unsigned char
2883 he_phy_get(struct atm_dev *atm_dev, unsigned long addr)
2885 unsigned long flags;
2886 struct he_dev *he_dev = HE_DEV(atm_dev);
2889 spin_lock_irqsave(&he_dev->global_lock, flags);
2890 reg = he_readl(he_dev, FRAMER + (addr*4));
2891 spin_unlock_irqrestore(&he_dev->global_lock, flags);
2893 HPRINTK("phy_get(addr 0x%lx) = 0x%x\n", addr, reg);
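/*
 * he_proc_read -- one line of proc output per call: driver version,
 * board id, the error-cell counters accumulated from MCC/OEC/DCC/CEC,
 * queue sizes and peaks, receive buffer pool usage, and the cbr
 * rate-controller (cs_stper) table.
 */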
2898 he_proc_read(struct atm_dev *dev, loff_t *pos, char *page)
2900 unsigned long flags;
2901 struct he_dev *he_dev = HE_DEV(dev);
2904 struct he_rbrq *rbrq_tail;
2905 struct he_tpdrq *tpdrq_head;
2906 int rbpl_head, rbpl_tail;
2908 static long mcc = 0, oec = 0, dcc = 0, cec = 0;
2913 return sprintf(page, "%s\n", version);
2916 return sprintf(page, "%s%s\n\n",
2917 he_dev->prod_id, he_dev->media & 0x40 ? "SM" : "MM");
2920 return sprintf(page, "Mismatched Cells VPI/VCI Not Open Dropped Cells RCM Dropped Cells\n");
2922 spin_lock_irqsave(&he_dev->global_lock, flags);
2923 mcc += he_readl(he_dev, MCC);
2924 oec += he_readl(he_dev, OEC);
2925 dcc += he_readl(he_dev, DCC);
2926 cec += he_readl(he_dev, CEC);
2927 spin_unlock_irqrestore(&he_dev->global_lock, flags);
2930 return sprintf(page, "%16ld %16ld %13ld %17ld\n\n",
2931 mcc, oec, dcc, cec);
2934 return sprintf(page, "irq_size = %d inuse = ? peak = %d\n",
2935 CONFIG_IRQ_SIZE, he_dev->irq_peak);
2938 return sprintf(page, "tpdrq_size = %d inuse = ?\n",
2942 return sprintf(page, "rbrq_size = %d inuse = ? peak = %d\n",
2943 CONFIG_RBRQ_SIZE, he_dev->rbrq_peak);
2946 return sprintf(page, "tbrq_size = %d peak = %d\n",
2947 CONFIG_TBRQ_SIZE, he_dev->tbrq_peak);
2951 rbpl_head = RBPL_MASK(he_readl(he_dev, G0_RBPL_S));
2952 rbpl_tail = RBPL_MASK(he_readl(he_dev, G0_RBPL_T));
2954 inuse = rbpl_head - rbpl_tail;
2956 inuse += CONFIG_RBPL_SIZE * sizeof(struct he_rbp);
2957 inuse /= sizeof(struct he_rbp);
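	/* the head/tail difference read from the registers is a byte offset
	 * and may have wrapped, hence the ring-size correction above before
	 * the conversion to a count of he_rbp entries (a reading of the
	 * code, not taken from the manual) */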
2960 return sprintf(page, "rbpl_size = %d inuse = %d\n\n",
2961 CONFIG_RBPL_SIZE, inuse);
2965 return sprintf(page, "rate controller periods (cbr)\n pcr #vc\n");
2967 for (i = 0; i < HE_NUM_CS_STPER; ++i)
2969 return sprintf(page, "cs_stper%-2d %8ld %3d\n", i,
2970 he_dev->cs_stper[i].pcr,
2971 he_dev->cs_stper[i].inuse);
2974 return sprintf(page, "total bw (cbr): %d (limit %d)\n",
2975 he_dev->total_bw, he_dev->atm_dev->link_rate * 9 / 10); /* same 90% cap enforced in he_open() */
2980 /* eeprom routines -- see 4.7 */
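/*
 * read_prom_byte below bit-bangs the serial EEPROM: with the write-enable
 * bit set in HOST_CNTL it clocks out the READ opcode (readtab[]) and the
 * eight address bits MSB first via clocktab[], drops write enable, clocks
 * eight data bits back in by sampling ID_DOUT, and finally writes ID_CS
 * to HOST_CNTL to finish the cycle.  EEPROM_DELAY paces every edge.
 */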
2983 read_prom_byte(struct he_dev *he_dev, int addr)
2985 u32 val = 0, tmp_read = 0;
2989 val = readl(he_dev->membase + HOST_CNTL);
2992 /* Turn on write enable */
2994 he_writel(he_dev, val, HOST_CNTL);
2996 /* Send READ instruction */
2997 for (i = 0; i < sizeof(readtab)/sizeof(readtab[0]); i++) {
2998 he_writel(he_dev, val | readtab[i], HOST_CNTL);
2999 udelay(EEPROM_DELAY);
3002 /* Next, we need to send the byte address to read from */
3003 for (i = 7; i >= 0; i--) {
3004 he_writel(he_dev, val | clocktab[j++] | (((addr >> i) & 1) << 9), HOST_CNTL);
3005 udelay(EEPROM_DELAY);
3006 he_writel(he_dev, val | clocktab[j++] | (((addr >> i) & 1) << 9), HOST_CNTL);
3007 udelay(EEPROM_DELAY);
3012 val &= 0xFFFFF7FF; /* Turn off write enable */
3013 he_writel(he_dev, val, HOST_CNTL);
3015 /* Now, we can read data from the EEPROM by clocking it in */
3016 for (i = 7; i >= 0; i--) {
3017 he_writel(he_dev, val | clocktab[j++], HOST_CNTL);
3018 udelay(EEPROM_DELAY);
3019 tmp_read = he_readl(he_dev, HOST_CNTL);
3020 byte_read |= (unsigned char)
3021 ((tmp_read & ID_DOUT) >> ID_DOFFSET << i);
3022 he_writel(he_dev, val | clocktab[j++], HOST_CNTL);
3023 udelay(EEPROM_DELAY);
3026 he_writel(he_dev, val | ID_CS, HOST_CNTL);
3027 udelay(EEPROM_DELAY);
3032 MODULE_LICENSE("GPL");
3033 MODULE_AUTHOR("chas williams <chas@cmf.nrl.navy.mil>");
3034 MODULE_DESCRIPTION("ForeRunnerHE ATM Adapter driver");
3035 MODULE_PARM(disable64, "h");
3036 MODULE_PARM_DESC(disable64, "disable 64-bit pci bus transfers");
3037 MODULE_PARM(nvpibits, "i");
3038 MODULE_PARM_DESC(nvpibits, "number of bits for vpi (default 0)");
3039 MODULE_PARM(nvcibits, "i");
3040 MODULE_PARM_DESC(nvcibits, "number of bits for vci (default 12)");
3041 MODULE_PARM(rx_skb_reserve, "i");
3042 MODULE_PARM_DESC(rx_skb_reserve, "padding for receive skb (default 16)");
3043 MODULE_PARM(irq_coalesce, "i");
3044 MODULE_PARM_DESC(irq_coalesce, "use interrupt coalescing (default 1)");
3045 MODULE_PARM(sdh, "i");
3046 MODULE_PARM_DESC(sdh, "use SDH framing (default 0)");
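/*
 * Illustrative module load (parameter names from the MODULE_PARM list
 * above; the module name "he" is an assumption, not taken from this
 * file):
 *
 *     modprobe he nvcibits=10 irq_coalesce=1 sdh=1
 */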
3048 static struct pci_device_id he_pci_tbl[] = {
3049 { PCI_VENDOR_ID_FORE, PCI_DEVICE_ID_FORE_HE, PCI_ANY_ID, PCI_ANY_ID,
3054 static struct pci_driver he_driver = {
3056 .probe = he_init_one,
3057 .remove = __devexit_p(he_remove_one),
3058 .id_table = he_pci_tbl,
3061 static int __init he_init(void)
3063 return pci_module_init(&he_driver);
3066 static void __exit he_cleanup(void)
3068 pci_unregister_driver(&he_driver);
3071 module_init(he_init);
3072 module_exit(he_cleanup);