2 $Id: fore200e.c,v 1.5 2000/04/14 10:10:34 davem Exp $
4 A FORE Systems 200E-series driver for ATM on Linux.
5 Christophe Lizzi (lizzi@cnam.fr), October 1999-March 2003.
7 Based on the PCA-200E driver from Uwe Dannowski (Uwe.Dannowski@inf.tu-dresden.de).
9 This driver simultaneously supports PCA-200E and SBA-200E adapters
10 on i386, alpha (untested), powerpc, sparc and sparc64 architectures.
12 This program is free software; you can redistribute it and/or modify
13 it under the terms of the GNU General Public License as published by
14 the Free Software Foundation; either version 2 of the License, or
15 (at your option) any later version.
17 This program is distributed in the hope that it will be useful,
18 but WITHOUT ANY WARRANTY; without even the implied warranty of
19 MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
20 GNU General Public License for more details.
22 You should have received a copy of the GNU General Public License
23 along with this program; if not, write to the Free Software
24 Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
28 #include <linux/config.h>
29 #include <linux/kernel.h>
30 #include <linux/slab.h>
31 #include <linux/init.h>
32 #include <linux/capability.h>
33 #include <linux/sched.h>
34 #include <linux/interrupt.h>
35 #include <linux/bitops.h>
36 #include <linux/pci.h>
37 #include <linux/module.h>
38 #include <linux/atmdev.h>
39 #include <linux/sonet.h>
40 #include <linux/atm_suni.h>
41 #include <linux/dma-mapping.h>
42 #include <linux/delay.h>
44 #include <asm/string.h>
48 #include <asm/byteorder.h>
49 #include <asm/uaccess.h>
50 #include <asm/atomic.h>
52 #ifdef CONFIG_ATM_FORE200E_SBA
53 #include <asm/idprom.h>
55 #include <asm/openprom.h>
56 #include <asm/oplib.h>
57 #include <asm/pgtable.h>
60 #if defined(CONFIG_ATM_FORE200E_USE_TASKLET) /* defer interrupt work to a tasklet */
61 #define FORE200E_USE_TASKLET
64 #if 0 /* enable the debugging code of the buffer supply queues */
65 #define FORE200E_BSQ_DEBUG
68 #if 1 /* ensure correct handling of 52-byte AAL0 SDUs expected by atmdump-like apps */
69 #define FORE200E_52BYTE_AAL0_SDU
75 #define FORE200E_VERSION "0.3e"
77 #define FORE200E "fore200e: "
79 #if 0 /* override .config */
80 #define CONFIG_ATM_FORE200E_DEBUG 1
82 #if defined(CONFIG_ATM_FORE200E_DEBUG) && (CONFIG_ATM_FORE200E_DEBUG > 0)
83 #define DPRINTK(level, format, args...) do { if (CONFIG_ATM_FORE200E_DEBUG >= (level)) \
84 printk(FORE200E format, ##args); } while (0)
86 #define DPRINTK(level, format, args...) do {} while (0)
/* number of padding bytes needed to round 'addr' up to the next 'alignment'
   boundary ('alignment' is assumed to be a power of two) */
90 #define FORE200E_ALIGN(addr, alignment) \
91 ((((unsigned long)(addr) + (alignment - 1)) & ~(alignment - 1)) - (unsigned long)(addr))
/* bus address of the index'th element of type 'type' within a DMA region */
93 #define FORE200E_DMA_INDEX(dma_addr, type, index) ((dma_addr) + (index) * sizeof(type))
/* virtual address of the index'th element of type 'type' within a host region */
95 #define FORE200E_INDEX(virt_addr, type, index) (&((type *)(virt_addr))[ index ])
/* advance a circular queue index, wrapping at 'modulo'.
   NOTE: the previous form, (index = ++(index) % (modulo)), modified 'index'
   twice with no intervening sequence point, which is undefined behavior in C;
   the rewritten expansion is well-defined and yields the intended result. */
97 #define FORE200E_NEXT_ENTRY(index, modulo) (index = ((index) + 1) % (modulo))
/* convert milliseconds to jiffies, rounding up by one tick */
100 #define MSECS(ms) (((ms)*HZ/1000)+1)
/* debug-build assertion: log the failing expression and panic.
   NOTE(review): the #ifdef/#else/#endif lines selecting between the two
   ASSERT definitions appear to have been elided from this copy of the file. */
104 #define ASSERT(expr) if (!(expr)) { \
105 printk(FORE200E "assertion failed! %s[%d]: %s\n", \
106 __FUNCTION__, __LINE__, #expr); \
107 panic(FORE200E "%s", __FUNCTION__); \
/* non-debug build: assertions compile to nothing */
110 #define ASSERT(expr) do {} while (0)
114 static const struct atmdev_ops fore200e_ops;
115 static const struct fore200e_bus fore200e_bus[];
117 static struct fore200e* fore200e_boards = NULL;
120 MODULE_AUTHOR("Christophe Lizzi - credits to Uwe Dannowski and Heikki Vatiainen");
121 MODULE_DESCRIPTION("FORE Systems 200E-series ATM driver - version " FORE200E_VERSION);
122 MODULE_SUPPORTED_DEVICE("PCA-200E, SBA-200E");
/* number of rx buffers per [buffer scheme][buffer magnitude] queue */
125 static const int fore200e_rx_buf_nbr[ BUFFER_SCHEME_NBR ][ BUFFER_MAGN_NBR ] = {
126 { BUFFER_S1_NBR, BUFFER_L1_NBR },
127 { BUFFER_S2_NBR, BUFFER_L2_NBR }
/* size of each rx buffer per [buffer scheme][buffer magnitude] queue */
130 static const int fore200e_rx_buf_size[ BUFFER_SCHEME_NBR ][ BUFFER_MAGN_NBR ] = {
131 { BUFFER_S1_SIZE, BUFFER_L1_SIZE },
132 { BUFFER_S2_SIZE, BUFFER_L2_SIZE }
136 #if defined(CONFIG_ATM_FORE200E_DEBUG) && (CONFIG_ATM_FORE200E_DEBUG > 0)
137 static const char* fore200e_traffic_class[] = { "NONE", "UBR", "CBR", "VBR", "ABR", "ANY" };
81 141 #if 0 /* currently unused */
/* map a FORE firmware AAL code to the corresponding Linux ATM AAL constant
   (compiled out via #if 0 above) */
143 fore200e_fore2atm_aal(enum fore200e_aal aal)
146 case FORE200E_AAL0: return ATM_AAL0;
147 case FORE200E_AAL34: return ATM_AAL34;
148 case FORE200E_AAL5: return ATM_AAL5;
/* map a Linux ATM AAL constant to the corresponding FORE firmware AAL code */
156 static enum fore200e_aal
157 fore200e_atm2fore_aal(int aal)
160 case ATM_AAL0: return FORE200E_AAL0;
161 case ATM_AAL34: return FORE200E_AAL34;
164 case ATM_AAL5: return FORE200E_AAL5;
/* format an IRQ number as a printable string; sparc64 has its own helper,
   other architectures just print the decimal value into a static buffer */
172 fore200e_irq_itoa(int irq)
174 #if defined(__sparc_v9__)
175 return __irq_itoa(irq);
178 sprintf(str, "%d", irq);
/* kmalloc wrapper: zero the chunk on success, log the failure otherwise.
   NOTE(review): the surrounding if/else/return lines appear elided here. */
185 fore200e_kmalloc(int size, int flags)
187 void* chunk = kmalloc(size, flags);
190 memset(chunk, 0x00, size);
192 printk(FORE200E "kmalloc() failed, requested size = %d, flags = 0x%x\n", size, flags);
/* kfree wrapper, counterpart of fore200e_kmalloc() */
199 fore200e_kfree(void* chunk)
205 /* allocate and align a chunk of memory intended to hold the data being exchanged
206 between the driver and the adapter (using streaming DVMA) */
/* over-allocate by 'alignment' bytes, then offset to an aligned address and
   set up a streaming DMA mapping for the aligned region */
209 fore200e_chunk_alloc(struct fore200e* fore200e, struct chunk* chunk, int size, int alignment, int direction)
211 unsigned long offset = 0;
/* kmalloc already guarantees int-sized alignment, so no padding is needed */
213 if (alignment <= sizeof(int))
216 chunk->alloc_size = size + alignment;
217 chunk->align_size = size;
218 chunk->direction = direction;
220 chunk->alloc_addr = fore200e_kmalloc(chunk->alloc_size, GFP_KERNEL | GFP_DMA);
221 if (chunk->alloc_addr == NULL)
/* padding needed to reach the requested alignment */
225 offset = FORE200E_ALIGN(chunk->alloc_addr, alignment);
227 chunk->align_addr = chunk->alloc_addr + offset;
/* bus-specific streaming DMA mapping of the aligned region */
229 chunk->dma_addr = fore200e->bus->dma_map(fore200e, chunk->align_addr, chunk->align_size, direction);
235 /* free a chunk of memory */
/* tear down the DMA mapping, then release the underlying allocation */
238 fore200e_chunk_free(struct fore200e* fore200e, struct chunk* chunk)
240 fore200e->bus->dma_unmap(fore200e, chunk->dma_addr, chunk->dma_size, chunk->direction);
242 fore200e_kfree(chunk->alloc_addr);
/* busy-wait for 'msecs' milliseconds (no sleeping) */
247 fore200e_spin(int msecs)
249 unsigned long timeout = jiffies + MSECS(msecs);
250 while (time_before(jiffies, timeout));
/* busy-poll a host-memory status word until it equals 'val', an error bit is
   raised, or 'msecs' milliseconds elapse; returns non-zero on success */
255 fore200e_poll(struct fore200e* fore200e, volatile u32* addr, u32 val, int msecs)
257 unsigned long timeout = jiffies + MSECS(msecs);
262 if ((ok = (*addr == val)) || (*addr & STATUS_ERROR))
265 } while (time_before(jiffies, timeout));
269 printk(FORE200E "cmd polling failed, got status 0x%08x, expected 0x%08x\n",
/* busy-poll a device register (via the bus read accessor) until it equals
   'val' or 'msecs' milliseconds elapse; returns non-zero on success */
279 fore200e_io_poll(struct fore200e* fore200e, volatile u32 __iomem *addr, u32 val, int msecs)
281 unsigned long timeout = jiffies + MSECS(msecs);
285 if ((ok = (fore200e->bus->read(addr) == val)))
288 } while (time_before(jiffies, timeout));
292 printk(FORE200E "I/O polling failed, got status 0x%08x, expected 0x%08x\n",
293 fore200e->bus->read(addr), val);
/* release every allocated rx data buffer across all buffer scheme/magnitude
   queues; buffers whose alloc_addr is NULL were never allocated and are skipped */
302 fore200e_free_rx_buf(struct fore200e* fore200e)
304 int scheme, magn, nbr;
305 struct buffer* buffer;
307 for (scheme = 0; scheme < BUFFER_SCHEME_NBR; scheme++) {
308 for (magn = 0; magn < BUFFER_MAGN_NBR; magn++) {
310 if ((buffer = fore200e->host_bsq[ scheme ][ magn ].buffer) != NULL) {
312 for (nbr = 0; nbr < fore200e_rx_buf_nbr[ scheme ][ magn ]; nbr++) {
314 struct chunk* data = &buffer[ nbr ].data;
316 if (data->alloc_addr != NULL)
317 fore200e_chunk_free(fore200e, data);
/* release the per-queue status and RBD-block DMA chunks of every buffer
   supply queue */
326 fore200e_uninit_bs_queue(struct fore200e* fore200e)
330 for (scheme = 0; scheme < BUFFER_SCHEME_NBR; scheme++) {
331 for (magn = 0; magn < BUFFER_MAGN_NBR; magn++) {
333 struct chunk* status = &fore200e->host_bsq[ scheme ][ magn ].status;
334 struct chunk* rbd_block = &fore200e->host_bsq[ scheme ][ magn ].rbd_block;
336 if (status->alloc_addr)
337 fore200e->bus->dma_chunk_free(fore200e, status);
339 if (rbd_block->alloc_addr)
340 fore200e->bus->dma_chunk_free(fore200e, rbd_block);
/* hard-reset the adapter and wait for the on-board self-test to complete;
   'diag' presumably controls whether the self-test result is reported —
   TODO(review): confirm, the branches using it appear elided from this copy */
347 fore200e_reset(struct fore200e* fore200e, int diag)
351 fore200e->cp_monitor = fore200e->virt_base + FORE200E_CP_MONITOR_OFFSET;
/* tell the control processor monitor we are cold-starting */
353 fore200e->bus->write(BSTAT_COLD_START, &fore200e->cp_monitor->bstat);
355 fore200e->bus->reset(fore200e);
/* wait up to 1s for the board to report a successful self-test */
358 ok = fore200e_io_poll(fore200e, &fore200e->cp_monitor->bstat, BSTAT_SELFTEST_OK, 1000);
361 printk(FORE200E "device %s self-test failed\n", fore200e->name);
365 printk(FORE200E "device %s self-test passed\n", fore200e->name);
367 fore200e->state = FORE200E_STATE_RESET;
/* tear down a device: reset the board to quiesce it, then release resources
   in reverse order of initialization. The switch cases intentionally fall
   through, so starting from the current state everything acquired in earlier
   states is also released. */
375 fore200e_shutdown(struct fore200e* fore200e)
377 printk(FORE200E "removing device %s at 0x%lx, IRQ %s\n",
378 fore200e->name, fore200e->phys_base,
379 fore200e_irq_itoa(fore200e->irq));
381 if (fore200e->state > FORE200E_STATE_RESET) {
382 /* first, reset the board to prevent further interrupts or data transfers */
383 fore200e_reset(fore200e, 0);
386 /* then, release all allocated resources */
387 switch(fore200e->state) {
389 case FORE200E_STATE_COMPLETE:
391 kfree(fore200e->stats);
393 case FORE200E_STATE_IRQ:
394 free_irq(fore200e->irq, fore200e->atm_dev);
396 case FORE200E_STATE_ALLOC_BUF:
397 fore200e_free_rx_buf(fore200e);
399 case FORE200E_STATE_INIT_BSQ:
400 fore200e_uninit_bs_queue(fore200e);
402 case FORE200E_STATE_INIT_RXQ:
403 fore200e->bus->dma_chunk_free(fore200e, &fore200e->host_rxq.status);
404 fore200e->bus->dma_chunk_free(fore200e, &fore200e->host_rxq.rpd);
406 case FORE200E_STATE_INIT_TXQ:
407 fore200e->bus->dma_chunk_free(fore200e, &fore200e->host_txq.status);
408 fore200e->bus->dma_chunk_free(fore200e, &fore200e->host_txq.tpd);
410 case FORE200E_STATE_INIT_CMDQ:
411 fore200e->bus->dma_chunk_free(fore200e, &fore200e->host_cmdq.status);
413 case FORE200E_STATE_INITIALIZE:
414 /* nothing to do for that state */
416 case FORE200E_STATE_START_FW:
417 /* nothing to do for that state */
419 case FORE200E_STATE_LOAD_FW:
420 /* nothing to do for that state */
422 case FORE200E_STATE_RESET:
423 /* nothing to do for that state */
425 case FORE200E_STATE_MAP:
426 fore200e->bus->unmap(fore200e);
428 case FORE200E_STATE_CONFIGURE:
429 /* nothing to do for that state */
431 case FORE200E_STATE_REGISTER:
432 /* XXX shouldn't we *start* by deregistering the device? */
433 atm_dev_deregister(fore200e->atm_dev);
435 case FORE200E_STATE_BLANK:
436 /* nothing to do for that state */
442 #ifdef CONFIG_ATM_FORE200E_PCA
/* PCA-200E (PCI) register read accessor */
444 static u32 fore200e_pca_read(volatile u32 __iomem *addr)
446 /* on big-endian hosts, the board is configured to convert
447 the endianness of slave RAM accesses */
448 return le32_to_cpu(readl(addr));
/* PCA-200E (PCI) register write accessor */
452 static void fore200e_pca_write(u32 val, volatile u32 __iomem *addr)
454 /* on big-endian hosts, the board is configured to convert
455 the endianness of slave RAM accesses */
456 writel(cpu_to_le32(val), addr);
/* create a streaming PCI DMA mapping for a host buffer */
461 fore200e_pca_dma_map(struct fore200e* fore200e, void* virt_addr, int size, int direction)
463 u32 dma_addr = pci_map_single((struct pci_dev*)fore200e->bus_dev, virt_addr, size, direction);
465 DPRINTK(3, "PCI DVMA mapping: virt_addr = 0x%p, size = %d, direction = %d, --> dma_addr = 0x%08x\n",
466 virt_addr, size, direction, dma_addr);
/* release a streaming PCI DMA mapping */
473 fore200e_pca_dma_unmap(struct fore200e* fore200e, u32 dma_addr, int size, int direction)
475 DPRINTK(3, "PCI DVMA unmapping: dma_addr = 0x%08x, size = %d, direction = %d\n",
476 dma_addr, size, direction);
478 pci_unmap_single((struct pci_dev*)fore200e->bus_dev, dma_addr, size, direction);
/* make a device-written DMA region visible to the CPU */
483 fore200e_pca_dma_sync_for_cpu(struct fore200e* fore200e, u32 dma_addr, int size, int direction)
485 DPRINTK(3, "PCI DVMA sync: dma_addr = 0x%08x, size = %d, direction = %d\n", dma_addr, size, direction);
487 pci_dma_sync_single_for_cpu((struct pci_dev*)fore200e->bus_dev, dma_addr, size, direction);
/* hand a DMA region back to the device after CPU access */
491 fore200e_pca_dma_sync_for_device(struct fore200e* fore200e, u32 dma_addr, int size, int direction)
493 DPRINTK(3, "PCI DVMA sync: dma_addr = 0x%08x, size = %d, direction = %d\n", dma_addr, size, direction);
495 pci_dma_sync_single_for_device((struct pci_dev*)fore200e->bus_dev, dma_addr, size, direction);
499 /* allocate a DMA consistent chunk of memory intended to act as a communication mechanism
500 (to hold descriptors, status, queues, etc.) shared by the driver and the adapter */
503 fore200e_pca_dma_chunk_alloc(struct fore200e* fore200e, struct chunk* chunk,
504 int size, int nbr, int alignment)
506 /* returned chunks are page-aligned */
507 chunk->alloc_size = size * nbr;
508 chunk->alloc_addr = pci_alloc_consistent((struct pci_dev*)fore200e->bus_dev,
512 if ((chunk->alloc_addr == NULL) || (chunk->dma_addr == 0))
/* consistent allocations are already aligned; no extra offset needed */
515 chunk->align_addr = chunk->alloc_addr;
521 /* free a DMA consistent chunk of memory */
524 fore200e_pca_dma_chunk_free(struct fore200e* fore200e, struct chunk* chunk)
526 pci_free_consistent((struct pci_dev*)fore200e->bus_dev,
/* check whether this PCA board raised the interrupt being serviced */
534 fore200e_pca_irq_check(struct fore200e* fore200e)
536 /* this is a 1 bit register */
537 int irq_posted = readl(fore200e->regs.pca.psr);
539 #if defined(CONFIG_ATM_FORE200E_DEBUG) && (CONFIG_ATM_FORE200E_DEBUG == 2)
540 if (irq_posted && (readl(fore200e->regs.pca.hcr) & PCA200E_HCR_OUTFULL)) {
541 DPRINTK(2,"FIFO OUT full, device %d\n", fore200e->atm_dev->number);
/* acknowledge the board's interrupt via the host control register */
550 fore200e_pca_irq_ack(struct fore200e* fore200e)
552 writel(PCA200E_HCR_CLRINTR, fore200e->regs.pca.hcr);
/* pulse the reset bit in the host control register */
557 fore200e_pca_reset(struct fore200e* fore200e)
559 writel(PCA200E_HCR_RESET, fore200e->regs.pca.hcr);
561 writel(0, fore200e->regs.pca.hcr);
/* ioremap the PCA board's PCI memory window and locate the HCR/IMR/PSR
   registers within it; sets state to FORE200E_STATE_MAP on success */
566 fore200e_pca_map(struct fore200e* fore200e)
568 DPRINTK(2, "device %s being mapped in memory\n", fore200e->name);
570 fore200e->virt_base = ioremap(fore200e->phys_base, PCA200E_IOSPACE_LENGTH);
572 if (fore200e->virt_base == NULL) {
573 printk(FORE200E "can't map device %s\n", fore200e->name);
577 DPRINTK(1, "device %s mapped to 0x%p\n", fore200e->name, fore200e->virt_base);
579 /* gain access to the PCA specific registers */
580 fore200e->regs.pca.hcr = fore200e->virt_base + PCA200E_HCR_OFFSET;
581 fore200e->regs.pca.imr = fore200e->virt_base + PCA200E_IMR_OFFSET;
582 fore200e->regs.pca.psr = fore200e->virt_base + PCA200E_PSR_OFFSET;
584 fore200e->state = FORE200E_STATE_MAP;
/* undo fore200e_pca_map(): release the ioremap'd window if present */
590 fore200e_pca_unmap(struct fore200e* fore200e)
592 DPRINTK(2, "device %s being unmapped from memory\n", fore200e->name);
594 if (fore200e->virt_base != NULL)
595 iounmap(fore200e->virt_base);
/* configure the PCA board's PCI behavior: sanity-check the IRQ line, tune the
   bus-master control register, and raise the PCI latency timer */
600 fore200e_pca_configure(struct fore200e* fore200e)
602 struct pci_dev* pci_dev = (struct pci_dev*)fore200e->bus_dev;
603 u8 master_ctrl, latency;
605 DPRINTK(2, "device %s being configured\n", fore200e->name);
/* 0 or 0xFF means the BIOS/bridge never assigned a usable IRQ */
607 if ((pci_dev->irq == 0) || (pci_dev->irq == 0xFF)) {
608 printk(FORE200E "incorrect IRQ setting - misconfigured PCI-PCI bridge?\n");
612 pci_read_config_byte(pci_dev, PCA200E_PCI_MASTER_CTRL, &master_ctrl);
614 master_ctrl = master_ctrl
615 #if defined(__BIG_ENDIAN)
616 /* request the PCA board to convert the endianness of slave RAM accesses */
617 | PCA200E_CTRL_CONVERT_ENDIAN
620 | PCA200E_CTRL_DIS_CACHE_RD
621 | PCA200E_CTRL_DIS_WRT_INVAL
622 | PCA200E_CTRL_ENA_CONT_REQ_MODE
623 | PCA200E_CTRL_2_CACHE_WRT_INVAL
625 | PCA200E_CTRL_LARGE_PCI_BURSTS;
627 pci_write_config_byte(pci_dev, PCA200E_PCI_MASTER_CTRL, master_ctrl);
629 /* raise latency from 32 (default) to 192, as this seems to prevent NIC
630 lockups (under heavy rx loads) due to continuous 'FIFO OUT full' condition.
631 this may impact the performances of other PCI devices on the same bus, though */
633 pci_write_config_byte(pci_dev, PCI_LATENCY_TIMER, latency);
635 fore200e->state = FORE200E_STATE_CONFIGURE;
/* probe the PCI bus for the index'th PCA-200E adapter; on success allocates
   and partially initializes a struct fore200e (bus handle, IRQ, base address,
   name) and enables PCI bus mastering for the device */
640 static struct fore200e* __init
641 fore200e_pca_detect(const struct fore200e_bus* bus, int index)
643 struct fore200e* fore200e;
644 struct pci_dev* pci_dev = NULL;
648 pci_dev = pci_find_device(PCI_VENDOR_ID_FORE, PCI_DEVICE_ID_FORE_PCA200E, pci_dev);
653 if (pci_enable_device(pci_dev))
656 fore200e = fore200e_kmalloc(sizeof(struct fore200e), GFP_KERNEL);
657 if (fore200e == NULL)
661 fore200e->bus_dev = pci_dev;
662 fore200e->irq = pci_dev->irq;
663 fore200e->phys_base = pci_resource_start(pci_dev, 0);
665 sprintf(fore200e->name, "%s-%d", bus->model_name, index - 1);
667 pci_set_master(pci_dev);
/* read the adapter's PROM (MAC address, serial number, ...) by issuing a
   GET_PROM command through the command queue and DMA'ing the result into
   'prom'; returns an error if the command does not complete in time */
674 fore200e_pca_prom_read(struct fore200e* fore200e, struct prom_data* prom)
676 struct host_cmdq* cmdq = &fore200e->host_cmdq;
677 struct host_cmdq_entry* entry = &cmdq->host_entry[ cmdq->head ];
678 struct prom_opcode opcode;
682 FORE200E_NEXT_ENTRY(cmdq->head, QUEUE_SIZE_CMD);
684 opcode.opcode = OPCODE_GET_PROM;
/* map the destination buffer so the adapter can DMA the PROM data into it */
687 prom_dma = fore200e->bus->dma_map(fore200e, prom, sizeof(struct prom_data), DMA_FROM_DEVICE);
689 fore200e->bus->write(prom_dma, &entry->cp_entry->cmd.prom_block.prom_haddr);
691 *entry->status = STATUS_PENDING;
/* writing the opcode kicks the command; NOTE(review): the u32 punning of
   'opcode' assumes struct prom_opcode fits exactly in 32 bits */
693 fore200e->bus->write(*(u32*)&opcode, (u32 __iomem *)&entry->cp_entry->cmd.prom_block.opcode);
695 ok = fore200e_poll(fore200e, entry->status, STATUS_COMPLETE, 400);
697 *entry->status = STATUS_FREE;
699 fore200e->bus->dma_unmap(fore200e, prom_dma, sizeof(struct prom_data), DMA_FROM_DEVICE);
702 printk(FORE200E "unable to get PROM data from device %s\n", fore200e->name);
706 #if defined(__BIG_ENDIAN)
708 #define swap_here(addr) (*((u32*)(addr)) = swab32( *((u32*)(addr)) ))
710 /* MAC address is stored as little-endian */
711 swap_here(&prom->mac_addr[0]);
712 swap_here(&prom->mac_addr[4]);
/* /proc helper: report the board's PCI bus/slot/function */
720 fore200e_pca_proc_read(struct fore200e* fore200e, char *page)
722 struct pci_dev* pci_dev = (struct pci_dev*)fore200e->bus_dev;
724 return sprintf(page, " PCI bus/slot/function:\t%d/%d/%d\n",
725 pci_dev->bus->number, PCI_SLOT(pci_dev->devfn), PCI_FUNC(pci_dev->devfn));
728 #endif /* CONFIG_ATM_FORE200E_PCA */
731 #ifdef CONFIG_ATM_FORE200E_SBA
/* SBA-200E (SBus) register read accessor */
734 fore200e_sba_read(volatile u32 __iomem *addr)
736 return sbus_readl(addr);
/* SBA-200E (SBus) register write accessor */
741 fore200e_sba_write(u32 val, volatile u32 __iomem *addr)
743 sbus_writel(val, addr);
/* create a streaming SBus DVMA mapping for a host buffer */
748 fore200e_sba_dma_map(struct fore200e* fore200e, void* virt_addr, int size, int direction)
750 u32 dma_addr = sbus_map_single((struct sbus_dev*)fore200e->bus_dev, virt_addr, size, direction);
752 DPRINTK(3, "SBUS DVMA mapping: virt_addr = 0x%p, size = %d, direction = %d --> dma_addr = 0x%08x\n",
753 virt_addr, size, direction, dma_addr);
/* release a streaming SBus DVMA mapping */
760 fore200e_sba_dma_unmap(struct fore200e* fore200e, u32 dma_addr, int size, int direction)
762 DPRINTK(3, "SBUS DVMA unmapping: dma_addr = 0x%08x, size = %d, direction = %d,\n",
763 dma_addr, size, direction);
765 sbus_unmap_single((struct sbus_dev*)fore200e->bus_dev, dma_addr, size, direction);
/* make a device-written DVMA region visible to the CPU */
770 fore200e_sba_dma_sync_for_cpu(struct fore200e* fore200e, u32 dma_addr, int size, int direction)
772 DPRINTK(3, "SBUS DVMA sync: dma_addr = 0x%08x, size = %d, direction = %d\n", dma_addr, size, direction);
774 sbus_dma_sync_single_for_cpu((struct sbus_dev*)fore200e->bus_dev, dma_addr, size, direction);
/* hand a DVMA region back to the device after CPU access */
778 fore200e_sba_dma_sync_for_device(struct fore200e* fore200e, u32 dma_addr, int size, int direction)
780 DPRINTK(3, "SBUS DVMA sync: dma_addr = 0x%08x, size = %d, direction = %d\n", dma_addr, size, direction);
782 sbus_dma_sync_single_for_device((struct sbus_dev*)fore200e->bus_dev, dma_addr, size, direction);
786 /* allocate a DVMA consistent chunk of memory intended to act as a communication mechanism
787 (to hold descriptors, status, queues, etc.) shared by the driver and the adapter */
790 fore200e_sba_dma_chunk_alloc(struct fore200e* fore200e, struct chunk* chunk,
791 int size, int nbr, int alignment)
793 chunk->alloc_size = chunk->align_size = size * nbr;
795 /* returned chunks are page-aligned */
796 chunk->alloc_addr = sbus_alloc_consistent((struct sbus_dev*)fore200e->bus_dev,
800 if ((chunk->alloc_addr == NULL) || (chunk->dma_addr == 0))
/* consistent allocations are already aligned; no extra offset needed */
803 chunk->align_addr = chunk->alloc_addr;
809 /* free a DVMA consistent chunk of memory */
812 fore200e_sba_dma_chunk_free(struct fore200e* fore200e, struct chunk* chunk)
814 sbus_free_consistent((struct sbus_dev*)fore200e->bus_dev,
/* enable board interrupts, preserving the HCR's sticky bits */
822 fore200e_sba_irq_enable(struct fore200e* fore200e)
824 u32 hcr = fore200e->bus->read(fore200e->regs.sba.hcr) & SBA200E_HCR_STICKY;
825 fore200e->bus->write(hcr | SBA200E_HCR_INTR_ENA, fore200e->regs.sba.hcr);
/* check whether this SBA board raised the interrupt being serviced */
830 fore200e_sba_irq_check(struct fore200e* fore200e)
832 return fore200e->bus->read(fore200e->regs.sba.hcr) & SBA200E_HCR_INTR_REQ;
/* acknowledge the board's interrupt, preserving the HCR's sticky bits */
837 fore200e_sba_irq_ack(struct fore200e* fore200e)
839 u32 hcr = fore200e->bus->read(fore200e->regs.sba.hcr) & SBA200E_HCR_STICKY;
840 fore200e->bus->write(hcr | SBA200E_HCR_INTR_CLR, fore200e->regs.sba.hcr);
/* pulse the reset bit in the host control register */
845 fore200e_sba_reset(struct fore200e* fore200e)
847 fore200e->bus->write(SBA200E_HCR_RESET, fore200e->regs.sba.hcr);
849 fore200e->bus->write(0, fore200e->regs.sba.hcr);
/* map the SBA board's four SBus register/RAM resources, set the interrupt
   level, and configure DVMA burst sizes from the PROM; sets state to
   FORE200E_STATE_MAP on success */
854 fore200e_sba_map(struct fore200e* fore200e)
856 struct sbus_dev* sbus_dev = (struct sbus_dev*)fore200e->bus_dev;
859 /* gain access to the SBA specific registers */
860 fore200e->regs.sba.hcr = sbus_ioremap(&sbus_dev->resource[0], 0, SBA200E_HCR_LENGTH, "SBA HCR");
861 fore200e->regs.sba.bsr = sbus_ioremap(&sbus_dev->resource[1], 0, SBA200E_BSR_LENGTH, "SBA BSR");
862 fore200e->regs.sba.isr = sbus_ioremap(&sbus_dev->resource[2], 0, SBA200E_ISR_LENGTH, "SBA ISR");
863 fore200e->virt_base = sbus_ioremap(&sbus_dev->resource[3], 0, SBA200E_RAM_LENGTH, "SBA RAM");
865 if (fore200e->virt_base == NULL) {
866 printk(FORE200E "unable to map RAM of device %s\n", fore200e->name);
870 DPRINTK(1, "device %s mapped to 0x%p\n", fore200e->name, fore200e->virt_base);
872 fore200e->bus->write(0x02, fore200e->regs.sba.isr); /* XXX hardwired interrupt level */
874 /* get the supported DVMA burst sizes */
875 bursts = prom_getintdefault(sbus_dev->bus->prom_node, "burst-sizes", 0x00);
877 if (sbus_can_dma_64bit(sbus_dev))
878 sbus_set_sbus64(sbus_dev, bursts);
880 fore200e->state = FORE200E_STATE_MAP;
/* undo fore200e_sba_map(): release all four mapped resources */
886 fore200e_sba_unmap(struct fore200e* fore200e)
888 sbus_iounmap(fore200e->regs.sba.hcr, SBA200E_HCR_LENGTH);
889 sbus_iounmap(fore200e->regs.sba.bsr, SBA200E_BSR_LENGTH);
890 sbus_iounmap(fore200e->regs.sba.isr, SBA200E_ISR_LENGTH);
891 sbus_iounmap(fore200e->virt_base, SBA200E_RAM_LENGTH);
/* no SBus-specific configuration is needed; just advance the state machine */
896 fore200e_sba_configure(struct fore200e* fore200e)
898 fore200e->state = FORE200E_STATE_CONFIGURE;
/* probe all SBus buses for the index'th SBA-200E adapter (matched by PROM
   name); on success allocates and partially initializes a struct fore200e */
903 static struct fore200e* __init
904 fore200e_sba_detect(const struct fore200e_bus* bus, int index)
906 struct fore200e* fore200e;
907 struct sbus_bus* sbus_bus;
908 struct sbus_dev* sbus_dev = NULL;
910 unsigned int count = 0;
912 for_each_sbus (sbus_bus) {
913 for_each_sbusdev (sbus_dev, sbus_bus) {
914 if (strcmp(sbus_dev->prom_name, SBA200E_PROM_NAME) == 0) {
/* the board must expose exactly 4 register resources (HCR/BSR/ISR/RAM) */
924 if (sbus_dev->num_registers != 4) {
925 printk(FORE200E "this %s device has %d instead of 4 registers\n",
926 bus->model_name, sbus_dev->num_registers);
930 fore200e = fore200e_kmalloc(sizeof(struct fore200e), GFP_KERNEL);
931 if (fore200e == NULL)
935 fore200e->bus_dev = sbus_dev;
936 fore200e->irq = sbus_dev->irqs[ 0 ];
938 fore200e->phys_base = (unsigned long)sbus_dev;
940 sprintf(fore200e->name, "%s-%d", bus->model_name, index - 1);
/* read the MAC address, serial number and hardware revision out of the
   OpenPROM properties rather than issuing a firmware command */
947 fore200e_sba_prom_read(struct fore200e* fore200e, struct prom_data* prom)
949 struct sbus_dev* sbus_dev = (struct sbus_dev*) fore200e->bus_dev;
952 len = prom_getproperty(sbus_dev->prom_node, "macaddrlo2", &prom->mac_addr[ 4 ], 4);
956 len = prom_getproperty(sbus_dev->prom_node, "macaddrhi4", &prom->mac_addr[ 2 ], 4);
960 prom_getproperty(sbus_dev->prom_node, "serialnumber",
961 (char*)&prom->serial_number, sizeof(prom->serial_number));
963 prom_getproperty(sbus_dev->prom_node, "promversion",
964 (char*)&prom->hw_revision, sizeof(prom->hw_revision));
/* /proc helper: report the board's SBus slot and PROM name */
971 fore200e_sba_proc_read(struct fore200e* fore200e, char *page)
973 struct sbus_dev* sbus_dev = (struct sbus_dev*)fore200e->bus_dev;
975 return sprintf(page, " SBUS slot/device:\t\t%d/'%s'\n", sbus_dev->slot, sbus_dev->prom_name);
977 #endif /* CONFIG_ATM_FORE200E_SBA */
/* reap completed tx queue entries: unmap the DMA buffer, account the skb to
   the owning vcc (or drop it if the vcc was closed or re-incarnated since
   submission), update stats, and free the queue slot */
981 fore200e_tx_irq(struct fore200e* fore200e)
983 struct host_txq* txq = &fore200e->host_txq;
984 struct host_txq_entry* entry;
986 struct fore200e_vc_map* vc_map;
/* nothing in flight, nothing to reap */
988 if (fore200e->host_txq.txing == 0)
993 entry = &txq->host_entry[ txq->tail ];
/* oldest in-flight entry not yet completed by the adapter */
995 if ((*entry->status & STATUS_COMPLETE) == 0) {
999 DPRINTK(3, "TX COMPLETED: entry = %p [tail = %d], vc_map = %p, skb = %p\n",
1000 entry, txq->tail, entry->vc_map, entry->skb);
1002 /* free copy of misaligned data */
1006 /* remove DMA mapping */
1007 fore200e->bus->dma_unmap(fore200e, entry->tpd->tsd[ 0 ].buffer, entry->tpd->tsd[ 0 ].length,
1010 vc_map = entry->vc_map;
1012 /* vcc closed since the time the entry was submitted for tx? */
1013 if ((vc_map->vcc == NULL) ||
1014 (test_bit(ATM_VF_READY, &vc_map->vcc->flags) == 0)) {
1016 DPRINTK(1, "no ready vcc found for PDU sent on device %d\n",
1017 fore200e->atm_dev->number);
1019 dev_kfree_skb_any(entry->skb);
1022 ASSERT(vc_map->vcc);
1024 /* vcc closed then immediately re-opened? */
1025 if (vc_map->incarn != entry->incarn) {
1027 /* when a vcc is closed, some PDUs may be still pending in the tx queue.
1028 if the same vcc is immediately re-opened, those pending PDUs must
1029 not be popped after the completion of their emission, as they refer
1030 to the prior incarnation of that vcc. otherwise, vcc->sk->sk_wmem_alloc
1031 would be decremented by the size of the (unrelated) skb, possibly
1032 leading to a negative sk->sk_wmem_alloc count, ultimately freezing the vcc.
1033 we thus bind the tx entry to the current incarnation of the vcc
1034 when the entry is submitted for tx. When the tx later completes,
1035 if the incarnation number of the tx entry does not match the one
1036 of the vcc, then this implies that the vcc has been closed then re-opened.
1037 we thus just drop the skb here. */
1039 DPRINTK(1, "vcc closed-then-re-opened; dropping PDU sent on device %d\n",
1040 fore200e->atm_dev->number);
1042 dev_kfree_skb_any(entry->skb);
1048 /* notify tx completion */
/* prefer the vcc's pop callback when set; otherwise just free the skb */
1050 vcc->pop(vcc, entry->skb);
1053 dev_kfree_skb_any(entry->skb);
1056 /* race fixed by the above incarnation mechanism, but... */
1057 if (atomic_read(&vcc->sk->sk_wmem_alloc) < 0) {
1058 atomic_set(&vcc->sk->sk_wmem_alloc, 0);
1061 /* check error condition */
1062 if (*entry->status & STATUS_ERROR)
1063 atomic_inc(&vcc->stats->tx_err);
1065 atomic_inc(&vcc->stats->tx);
/* recycle the queue slot and advance the tail */
1069 *entry->status = STATUS_FREE;
1071 fore200e->host_txq.txing--;
1073 FORE200E_NEXT_ENTRY(txq->tail, QUEUE_SIZE_TX);
1078 #ifdef FORE200E_BSQ_DEBUG
/* debug-only consistency check of a buffer supply queue's free list:
   verifies no buffer is marked 'supplied', that each buffer's scheme/magn
   and index are in range, and that the list length matches freebuf_count;
   'where' tags the call site in the log output */
1079 int bsq_audit(int where, struct host_bsq* bsq, int scheme, int magn)
1081 struct buffer* buffer;
1084 buffer = bsq->freebuf;
1087 if (buffer->supplied) {
1088 printk(FORE200E "bsq_audit(%d): queue %d.%d, buffer %ld supplied but in free list!\n",
1089 where, scheme, magn, buffer->index);
1092 if (buffer->magn != magn) {
1093 printk(FORE200E "bsq_audit(%d): queue %d.%d, buffer %ld, unexpected magn = %d\n",
1094 where, scheme, magn, buffer->index, buffer->magn);
1097 if (buffer->scheme != scheme) {
1098 printk(FORE200E "bsq_audit(%d): queue %d.%d, buffer %ld, unexpected scheme = %d\n",
1099 where, scheme, magn, buffer->index, buffer->scheme);
1102 if ((buffer->index < 0) || (buffer->index >= fore200e_rx_buf_nbr[ scheme ][ magn ])) {
1103 printk(FORE200E "bsq_audit(%d): queue %d.%d, out of range buffer index = %ld !\n",
1104 where, scheme, magn, buffer->index);
1108 buffer = buffer->next;
1111 if (count != bsq->freebuf_count) {
1112 printk(FORE200E "bsq_audit(%d): queue %d.%d, %d bufs in free list, but freebuf_count = %d\n",
1113 where, scheme, magn, count, bsq->freebuf_count);
/* replenish the adapter's rx buffer supply queues: for each scheme/magnitude
   queue, while at least RBD_BLK_SIZE free buffers are available, fill an RBD
   block with buffers popped from the free list and hand it to the adapter */
1121 fore200e_supply(struct fore200e* fore200e)
1123 int scheme, magn, i;
1125 struct host_bsq* bsq;
1126 struct host_bsq_entry* entry;
1127 struct buffer* buffer;
1129 for (scheme = 0; scheme < BUFFER_SCHEME_NBR; scheme++) {
1130 for (magn = 0; magn < BUFFER_MAGN_NBR; magn++) {
1132 bsq = &fore200e->host_bsq[ scheme ][ magn ];
1134 #ifdef FORE200E_BSQ_DEBUG
1135 bsq_audit(1, bsq, scheme, magn);
1137 while (bsq->freebuf_count >= RBD_BLK_SIZE) {
1139 DPRINTK(2, "supplying %d rx buffers to queue %d / %d, freebuf_count = %d\n",
1140 RBD_BLK_SIZE, scheme, magn, bsq->freebuf_count);
1142 entry = &bsq->host_entry[ bsq->head ];
1144 for (i = 0; i < RBD_BLK_SIZE; i++) {
1146 /* take the first buffer in the free buffer list */
1147 buffer = bsq->freebuf;
/* free list shorter than freebuf_count claims: inconsistency */
1149 printk(FORE200E "no more free bufs in queue %d.%d, but freebuf_count = %d\n",
1150 scheme, magn, bsq->freebuf_count);
1153 bsq->freebuf = buffer->next;
1155 #ifdef FORE200E_BSQ_DEBUG
1156 if (buffer->supplied)
1157 printk(FORE200E "queue %d.%d, buffer %lu already supplied\n",
1158 scheme, magn, buffer->index);
1159 buffer->supplied = 1;
/* record the buffer's bus address and an opaque handle used to find
   the host buffer again when the adapter returns it */
1161 entry->rbd_block->rbd[ i ].buffer_haddr = buffer->data.dma_addr;
1162 entry->rbd_block->rbd[ i ].handle = FORE200E_BUF2HDL(buffer);
1165 FORE200E_NEXT_ENTRY(bsq->head, QUEUE_SIZE_BS);
1167 /* decrease accordingly the number of free rx buffers */
1168 bsq->freebuf_count -= RBD_BLK_SIZE;
/* hand the RBD block address to the adapter to kick the supply */
1170 *entry->status = STATUS_PENDING;
1171 fore200e->bus->write(entry->rbd_block_dma, &entry->cp_entry->rbd_block_haddr);
/* deliver a received PDU (described by 'rpd') up to the given vcc: compute
   the total length, allocate an skb, optionally prepend the raw ATM cell
   header for 52-byte AAL0 SDUs, copy each rx segment into the skb (with the
   required DMA syncs around the copy), charge the vcc, and push the skb */
1179 fore200e_push_rpd(struct fore200e* fore200e, struct atm_vcc* vcc, struct rpd* rpd)
1181 struct sk_buff* skb;
1182 struct buffer* buffer;
1183 struct fore200e_vcc* fore200e_vcc;
1185 #ifdef FORE200E_52BYTE_AAL0_SDU
1186 u32 cell_header = 0;
1191 fore200e_vcc = FORE200E_VCC(vcc);
1192 ASSERT(fore200e_vcc);
1194 #ifdef FORE200E_52BYTE_AAL0_SDU
/* atmdump-like apps expect the 4-byte cell header in front of the payload */
1195 if ((vcc->qos.aal == ATM_AAL0) && (vcc->qos.rxtp.max_sdu == ATM_AAL0_SDU)) {
1197 cell_header = (rpd->atm_header.gfc << ATM_HDR_GFC_SHIFT) |
1198 (rpd->atm_header.vpi << ATM_HDR_VPI_SHIFT) |
1199 (rpd->atm_header.vci << ATM_HDR_VCI_SHIFT) |
1200 (rpd->atm_header.plt << ATM_HDR_PTI_SHIFT) |
1201 rpd->atm_header.clp;
1206 /* compute total PDU length */
1207 for (i = 0; i < rpd->nseg; i++)
1208 pdu_len += rpd->rsd[ i ].length;
1210 skb = alloc_skb(pdu_len, GFP_ATOMIC);
1212 DPRINTK(2, "unable to alloc new skb, rx PDU length = %d\n", pdu_len);
1214 atomic_inc(&vcc->stats->rx_drop);
1218 do_gettimeofday(&skb->stamp);
1220 #ifdef FORE200E_52BYTE_AAL0_SDU
1222 *((u32*)skb_put(skb, 4)) = cell_header;
1226 /* reassemble segments */
1227 for (i = 0; i < rpd->nseg; i++) {
1229 /* rebuild rx buffer address from rsd handle */
1230 buffer = FORE200E_HDL2BUF(rpd->rsd[ i ].handle);
1232 /* Make device DMA transfer visible to CPU. */
1233 fore200e->bus->dma_sync_for_cpu(fore200e, buffer->data.dma_addr, rpd->rsd[ i ].length, DMA_FROM_DEVICE);
1235 memcpy(skb_put(skb, rpd->rsd[ i ].length), buffer->data.align_addr, rpd->rsd[ i ].length);
1237 /* Now let the device get at it again. */
1238 fore200e->bus->dma_sync_for_device(fore200e, buffer->data.dma_addr, rpd->rsd[ i ].length, DMA_FROM_DEVICE);
1241 DPRINTK(3, "rx skb: len = %d, truesize = %d\n", skb->len, skb->truesize);
/* per-vcc rx PDU size statistics */
1243 if (pdu_len < fore200e_vcc->rx_min_pdu)
1244 fore200e_vcc->rx_min_pdu = pdu_len;
1245 if (pdu_len > fore200e_vcc->rx_max_pdu)
1246 fore200e_vcc->rx_max_pdu = pdu_len;
1247 fore200e_vcc->rx_pdu++;
/* drop the PDU if the vcc's rx memory quota is exhausted */
1250 if (atm_charge(vcc, skb->truesize) == 0) {
1252 DPRINTK(2, "receive buffers saturated for %d.%d.%d - PDU dropped\n",
1253 vcc->itf, vcc->vpi, vcc->vci);
1255 dev_kfree_skb_any(skb);
1257 atomic_inc(&vcc->stats->rx_drop);
1261 ASSERT(atomic_read(&vcc->sk->sk_wmem_alloc) >= 0);
1263 vcc->push(vcc, skb);
1264 atomic_inc(&vcc->stats->rx);
1266 ASSERT(atomic_read(&vcc->sk->sk_wmem_alloc) >= 0);
/* return the rx buffers referenced by a consumed rpd to their supply queue's
   free list, so they can be handed back to the adapter by fore200e_supply() */
1273 fore200e_collect_rpd(struct fore200e* fore200e, struct rpd* rpd)
1275 struct host_bsq* bsq;
1276 struct buffer* buffer;
1279 for (i = 0; i < rpd->nseg; i++) {
1281 /* rebuild rx buffer address from rsd handle */
1282 buffer = FORE200E_HDL2BUF(rpd->rsd[ i ].handle);
1284 bsq = &fore200e->host_bsq[ buffer->scheme ][ buffer->magn ];
1286 #ifdef FORE200E_BSQ_DEBUG
1287 bsq_audit(2, bsq, buffer->scheme, buffer->magn);
1289 if (buffer->supplied == 0)
1290 printk(FORE200E "queue %d.%d, buffer %ld was not supplied\n",
1291 buffer->scheme, buffer->magn, buffer->index);
1292 buffer->supplied = 0;
1295 /* re-insert the buffer into the free buffer list */
1296 buffer->next = bsq->freebuf;
1297 bsq->freebuf = buffer;
1299 /* then increment the number of free rx buffers */
1300 bsq->freebuf_count++;
/* drain the rx queue: for each completed entry, look up the owning vcc from
   the PDU's vpi/vci, deliver the PDU (or count an error), recycle the rx
   buffers, acknowledge the rpd to the adapter, and replenish buffer supply */
1306 fore200e_rx_irq(struct fore200e* fore200e)
1308 struct host_rxq* rxq = &fore200e->host_rxq;
1309 struct host_rxq_entry* entry;
1310 struct atm_vcc* vcc;
1311 struct fore200e_vc_map* vc_map;
1315 entry = &rxq->host_entry[ rxq->head ];
1317 /* no more received PDUs */
1318 if ((*entry->status & STATUS_COMPLETE) == 0)
1321 vc_map = FORE200E_VC_MAP(fore200e, entry->rpd->atm_header.vpi, entry->rpd->atm_header.vci);
/* PDU arrived for a closed or not-yet-ready vcc: nobody to deliver it to */
1323 if ((vc_map->vcc == NULL) ||
1324 (test_bit(ATM_VF_READY, &vc_map->vcc->flags) == 0)) {
1326 DPRINTK(1, "no ready VC found for PDU received on %d.%d.%d\n",
1327 fore200e->atm_dev->number,
1328 entry->rpd->atm_header.vpi, entry->rpd->atm_header.vci);
1334 if ((*entry->status & STATUS_ERROR) == 0) {
1336 fore200e_push_rpd(fore200e, vcc, entry->rpd);
1339 DPRINTK(2, "damaged PDU on %d.%d.%d\n",
1340 fore200e->atm_dev->number,
1341 entry->rpd->atm_header.vpi, entry->rpd->atm_header.vci);
1342 atomic_inc(&vcc->stats->rx_err);
1346 FORE200E_NEXT_ENTRY(rxq->head, QUEUE_SIZE_RX);
/* give the rx buffers back to the free lists before acking the rpd */
1348 fore200e_collect_rpd(fore200e, entry->rpd);
1350 /* rewrite the rpd address to ack the received PDU */
1351 fore200e->bus->write(entry->rpd_dma, &entry->cp_entry->rpd_haddr);
1352 *entry->status = STATUS_FREE;
1354 fore200e_supply(fore200e);
1359 #ifndef FORE200E_USE_TASKLET
/* Non-tasklet interrupt work: service rx then tx queues.  The lock is
 * dropped and re-taken between the two passes rather than held across both,
 * presumably to bound irq-off latency — confirm before changing. */
1361 fore200e_irq(struct fore200e* fore200e)
1363 unsigned long flags;
1365 spin_lock_irqsave(&fore200e->q_lock, flags);
1366 fore200e_rx_irq(fore200e);
1367 spin_unlock_irqrestore(&fore200e->q_lock, flags);
1369 spin_lock_irqsave(&fore200e->q_lock, flags);
1370 fore200e_tx_irq(fore200e);
1371 spin_unlock_irqrestore(&fore200e->q_lock, flags);
/* Top-half interrupt handler (shared IRQ).  Checks whether this device
 * raised the interrupt, defers the work to tasklets or handles it inline,
 * then acks the interrupt on the board. */
1377 fore200e_interrupt(int irq, void* dev, struct pt_regs* regs)
1379 struct fore200e* fore200e = FORE200E_DEV((struct atm_dev*)dev);
/* shared IRQ line: bail out if this board did not trigger */
1381 if (fore200e->bus->irq_check(fore200e) == 0) {
1383 DPRINTK(3, "interrupt NOT triggered by device %d\n", fore200e->atm_dev->number);
1386 DPRINTK(3, "interrupt triggered by device %d\n", fore200e->atm_dev->number);
1388 #ifdef FORE200E_USE_TASKLET
1389 tasklet_schedule(&fore200e->tx_tasklet);
1390 tasklet_schedule(&fore200e->rx_tasklet);
1392 fore200e_irq(fore200e);
1395 fore200e->bus->irq_ack(fore200e);
1400 #ifdef FORE200E_USE_TASKLET
/* Deferred tx completion work; 'data' is the struct fore200e* passed to
 * tasklet_init() in fore200e_irq_request(). */
1402 fore200e_tx_tasklet(unsigned long data)
1404 struct fore200e* fore200e = (struct fore200e*) data;
1405 unsigned long flags;
1407 DPRINTK(3, "tx tasklet scheduled for device %d\n", fore200e->atm_dev->number);
1409 spin_lock_irqsave(&fore200e->q_lock, flags);
1410 fore200e_tx_irq(fore200e);
1411 spin_unlock_irqrestore(&fore200e->q_lock, flags);
/* Deferred rx work; 'data' is the struct fore200e* passed to tasklet_init().
 * Fix: use the already-cast 'fore200e' local instead of re-casting 'data',
 * matching fore200e_tx_tasklet(). */
1416 fore200e_rx_tasklet(unsigned long data)
1418 struct fore200e* fore200e = (struct fore200e*) data;
1419 unsigned long flags;
1421 DPRINTK(3, "rx tasklet scheduled for device %d\n", fore200e->atm_dev->number);
1423 spin_lock_irqsave(&fore200e->q_lock, flags);
1424 fore200e_rx_irq(fore200e);
1425 spin_unlock_irqrestore(&fore200e->q_lock, flags);
/* Pick a buffer scheme for a new VC based on vci parity, so VCs are spread
 * evenly over the two (identical) schemes. */
1431 fore200e_select_scheme(struct atm_vcc* vcc)
1433 /* fairly balance the VCs over (identical) buffer schemes */
1434 int scheme = vcc->vci % 2 ? BUFFER_SCHEME_ONE : BUFFER_SCHEME_TWO;
1436 DPRINTK(1, "VC %d.%d.%d uses buffer scheme %d\n",
1437 vcc->itf, vcc->vpi, vcc->vci, scheme);
/* Issue an (de)activate-VC command to the cp through the command queue and
 * poll for completion.  'activate' selects the opcode; 'mtu' is only
 * meaningful to the cp for AAL0 (see FORE200E_52BYTE_AAL0_SDU below). */
1444 fore200e_activate_vcin(struct fore200e* fore200e, int activate, struct atm_vcc* vcc, int mtu)
1446 struct host_cmdq* cmdq = &fore200e->host_cmdq;
1447 struct host_cmdq_entry* entry = &cmdq->host_entry[ cmdq->head ];
1448 struct activate_opcode activ_opcode;
1449 struct deactivate_opcode deactiv_opcode;
1452 enum fore200e_aal aal = fore200e_atm2fore_aal(vcc->qos.aal);
1454 FORE200E_NEXT_ENTRY(cmdq->head, QUEUE_SIZE_CMD);
1457 FORE200E_VCC(vcc)->scheme = fore200e_select_scheme(vcc);
1459 activ_opcode.opcode = OPCODE_ACTIVATE_VCIN;
1460 activ_opcode.aal = aal;
1461 activ_opcode.scheme = FORE200E_VCC(vcc)->scheme;
1462 activ_opcode.pad = 0;
1465 deactiv_opcode.opcode = OPCODE_DEACTIVATE_VCIN;
1466 deactiv_opcode.pad = 0;
1469 vpvc.vci = vcc->vci;
1470 vpvc.vpi = vcc->vpi;
1471 
/* the opcode word is written LAST: the cp snoops it to detect a new command */
1472 *entry->status = STATUS_PENDING;
1476 #ifdef FORE200E_52BYTE_AAL0_SDU
1479 /* the MTU is not used by the cp, except in the case of AAL0 */
1480 fore200e->bus->write(mtu, &entry->cp_entry->cmd.activate_block.mtu);
1481 fore200e->bus->write(*(u32*)&vpvc, (u32 __iomem *)&entry->cp_entry->cmd.activate_block.vpvc);
1482 fore200e->bus->write(*(u32*)&activ_opcode, (u32 __iomem *)&entry->cp_entry->cmd.activate_block.opcode);
1485 fore200e->bus->write(*(u32*)&vpvc, (u32 __iomem *)&entry->cp_entry->cmd.deactivate_block.vpvc);
1486 fore200e->bus->write(*(u32*)&deactiv_opcode, (u32 __iomem *)&entry->cp_entry->cmd.deactivate_block.opcode);
1489 ok = fore200e_poll(fore200e, entry->status, STATUS_COMPLETE, 400);
1491 *entry->status = STATUS_FREE;
1494 printk(FORE200E "unable to %s VC %d.%d.%d\n",
1495 activate ? "open" : "close", vcc->itf, vcc->vpi, vcc->vci);
1499 DPRINTK(1, "VC %d.%d.%d %sed\n", vcc->itf, vcc->vpi, vcc->vci,
1500 activate ? "open" : "clos");
1506 #define FORE200E_MAX_BACK2BACK_CELLS 255 /* XXX depends on CDVT */
/* Translate a requested tx PCR into the cp's rate-control pair: out of every
 * FORE200E_MAX_BACK2BACK_CELLS cell slots, send 'data_cells' and idle for
 * 'idle_cells'.  A PCR at or above link rate disables rate control (0/0). */
1509 fore200e_rate_ctrl(struct atm_qos* qos, struct tpd_rate* rate)
1511 if (qos->txtp.max_pcr < ATM_OC3_PCR) {
1513 /* compute the data cells to idle cells ratio from the tx PCR */
1514 rate->data_cells = qos->txtp.max_pcr * FORE200E_MAX_BACK2BACK_CELLS / ATM_OC3_PCR;
1515 rate->idle_cells = FORE200E_MAX_BACK2BACK_CELLS - rate->data_cells;
1518 /* disable rate control */
1519 rate->data_cells = rate->idle_cells = 0;
/* atmdev .open handler: validate the vpi/vci, claim the vc_map slot under
 * q_lock, allocate per-VC state, reserve CBR bandwidth if requested,
 * activate the VC on the cp, then mark the VC ready. */
1525 fore200e_open(struct atm_vcc *vcc)
1527 struct fore200e* fore200e = FORE200E_DEV(vcc->dev);
1528 struct fore200e_vcc* fore200e_vcc;
1529 struct fore200e_vc_map* vc_map;
1530 unsigned long flags;
1532 short vpi = vcc->vpi;
1534 ASSERT((vpi >= 0) && (vpi < 1<<FORE200E_VPI_BITS));
1535 ASSERT((vci >= 0) && (vci < 1<<FORE200E_VCI_BITS));
1537 spin_lock_irqsave(&fore200e->q_lock, flags);
1539 vc_map = FORE200E_VC_MAP(fore200e, vpi, vci);
1542 spin_unlock_irqrestore(&fore200e->q_lock, flags);
1544 printk(FORE200E "VC %d.%d.%d already in use\n",
1545 fore200e->atm_dev->number, vpi, vci);
1552 spin_unlock_irqrestore(&fore200e->q_lock, flags);
1554 fore200e_vcc = fore200e_kmalloc(sizeof(struct fore200e_vcc), GFP_ATOMIC);
1555 if (fore200e_vcc == NULL) {
1560 DPRINTK(2, "opening %d.%d.%d:%d QoS = (tx: cl=%s, pcr=%d-%d, cdv=%d, max_sdu=%d; "
1561 "rx: cl=%s, pcr=%d-%d, cdv=%d, max_sdu=%d)\n",
1562 vcc->itf, vcc->vpi, vcc->vci, fore200e_atm2fore_aal(vcc->qos.aal),
1563 fore200e_traffic_class[ vcc->qos.txtp.traffic_class ],
1564 vcc->qos.txtp.min_pcr, vcc->qos.txtp.max_pcr, vcc->qos.txtp.max_cdv, vcc->qos.txtp.max_sdu,
1565 fore200e_traffic_class[ vcc->qos.rxtp.traffic_class ],
1566 vcc->qos.rxtp.min_pcr, vcc->qos.rxtp.max_pcr, vcc->qos.rxtp.max_cdv, vcc->qos.rxtp.max_sdu);
1568 /* pseudo-CBR bandwidth requested? */
1569 if ((vcc->qos.txtp.traffic_class == ATM_CBR) && (vcc->qos.txtp.max_pcr > 0)) {
/* rate_sf semaphore serializes all available_cell_rate accounting */
1571 down(&fore200e->rate_sf);
1572 if (fore200e->available_cell_rate < vcc->qos.txtp.max_pcr) {
1573 up(&fore200e->rate_sf);
1575 fore200e_kfree(fore200e_vcc);
1580 /* reserve bandwidth */
1581 fore200e->available_cell_rate -= vcc->qos.txtp.max_pcr;
1582 up(&fore200e->rate_sf);
1585 vcc->itf = vcc->dev->number;
1587 set_bit(ATM_VF_PARTIAL,&vcc->flags);
1588 set_bit(ATM_VF_ADDR, &vcc->flags);
1590 vcc->dev_data = fore200e_vcc;
1592 if (fore200e_activate_vcin(fore200e, 1, vcc, vcc->qos.rxtp.max_sdu) < 0) {
/* activation failed: unwind flags, per-VC state and reserved bandwidth */
1596 clear_bit(ATM_VF_ADDR, &vcc->flags);
1597 clear_bit(ATM_VF_PARTIAL,&vcc->flags);
1599 vcc->dev_data = NULL;
1601 fore200e->available_cell_rate += vcc->qos.txtp.max_pcr;
1603 fore200e_kfree(fore200e_vcc);
1607 /* compute rate control parameters */
1608 if ((vcc->qos.txtp.traffic_class == ATM_CBR) && (vcc->qos.txtp.max_pcr > 0)) {
1610 fore200e_rate_ctrl(&vcc->qos, &fore200e_vcc->rate);
1611 set_bit(ATM_VF_HASQOS, &vcc->flags);
1613 DPRINTK(3, "tx on %d.%d.%d:%d, tx PCR = %d, rx PCR = %d, data_cells = %u, idle_cells = %u\n",
1614 vcc->itf, vcc->vpi, vcc->vci, fore200e_atm2fore_aal(vcc->qos.aal),
1615 vcc->qos.txtp.max_pcr, vcc->qos.rxtp.max_pcr,
1616 fore200e_vcc->rate.data_cells, fore200e_vcc->rate.idle_cells);
1618 
/* initialize per-VC PDU statistics (min primed above the possible maximum) */
1619 fore200e_vcc->tx_min_pdu = fore200e_vcc->rx_min_pdu = MAX_PDU_SIZE + 1;
1620 fore200e_vcc->tx_max_pdu = fore200e_vcc->rx_max_pdu = 0;
1621 fore200e_vcc->tx_pdu = fore200e_vcc->rx_pdu = 0;
1623 /* new incarnation of the vcc */
1624 vc_map->incarn = ++fore200e->incarn_count;
1626 /* VC unusable before this flag is set */
1627 set_bit(ATM_VF_READY, &vcc->flags);
/* atmdev .close handler: mark the VC not ready, deactivate it on the cp,
 * release the vc_map slot under q_lock, give back any reserved CBR
 * bandwidth and free the per-VC state. */
1634 fore200e_close(struct atm_vcc* vcc)
1636 struct fore200e* fore200e = FORE200E_DEV(vcc->dev);
1637 struct fore200e_vcc* fore200e_vcc;
1638 struct fore200e_vc_map* vc_map;
1639 unsigned long flags;
1642 ASSERT((vcc->vpi >= 0) && (vcc->vpi < 1<<FORE200E_VPI_BITS));
1643 ASSERT((vcc->vci >= 0) && (vcc->vci < 1<<FORE200E_VCI_BITS));
1645 DPRINTK(2, "closing %d.%d.%d:%d\n", vcc->itf, vcc->vpi, vcc->vci, fore200e_atm2fore_aal(vcc->qos.aal));
/* stop rx delivery and tx submission before tearing the VC down */
1647 clear_bit(ATM_VF_READY, &vcc->flags);
1649 fore200e_activate_vcin(fore200e, 0, vcc, 0);
1651 spin_lock_irqsave(&fore200e->q_lock, flags);
1653 vc_map = FORE200E_VC_MAP(fore200e, vcc->vpi, vcc->vci);
1655 /* the vc is no longer considered as "in use" by fore200e_open() */
1658 vcc->itf = vcc->vci = vcc->vpi = 0;
1660 fore200e_vcc = FORE200E_VCC(vcc);
1661 vcc->dev_data = NULL;
1663 spin_unlock_irqrestore(&fore200e->q_lock, flags);
1665 /* release reserved bandwidth, if any */
1666 if ((vcc->qos.txtp.traffic_class == ATM_CBR) && (vcc->qos.txtp.max_pcr > 0)) {
1668 down(&fore200e->rate_sf);
1669 fore200e->available_cell_rate += vcc->qos.txtp.max_pcr;
1670 up(&fore200e->rate_sf);
1672 clear_bit(ATM_VF_HASQOS, &vcc->flags);
1675 clear_bit(ATM_VF_ADDR, &vcc->flags);
1676 clear_bit(ATM_VF_PARTIAL,&vcc->flags);
1678 ASSERT(fore200e_vcc);
1679 fore200e_kfree(fore200e_vcc);
/* atmdev .send handler: map (or bounce-copy and pad) the skb payload, claim
 * a tx queue entry under q_lock, fill the tpd (rate control + ATM header)
 * and kick the cp by writing the tpd address into its tx queue entry.
 * Fix: the not-ready DPRINTK passed vcc->vpi twice; the third "%d" of
 * "VC %d.%d.%d" is the vci. */
1684 fore200e_send(struct atm_vcc *vcc, struct sk_buff *skb)
1686 struct fore200e* fore200e = FORE200E_DEV(vcc->dev);
1687 struct fore200e_vcc* fore200e_vcc = FORE200E_VCC(vcc);
1688 struct fore200e_vc_map* vc_map;
1689 struct host_txq* txq = &fore200e->host_txq;
1690 struct host_txq_entry* entry;
1692 struct tpd_haddr tpd_haddr;
1693 int retry = CONFIG_ATM_FORE200E_TX_RETRY;
1695 int tx_len = skb->len;
1696 u32* cell_header = NULL;
1697 unsigned char* skb_data;
1699 unsigned char* data;
1700 unsigned long flags;
1703 ASSERT(atomic_read(&vcc->sk->sk_wmem_alloc) >= 0);
1705 ASSERT(fore200e_vcc);
1707 if (!test_bit(ATM_VF_READY, &vcc->flags)) {
1708 DPRINTK(1, "VC %d.%d.%d not ready for tx\n", vcc->itf, vcc->vpi, vcc->vci);
1709 dev_kfree_skb_any(skb);
1713 #ifdef FORE200E_52BYTE_AAL0_SDU
/* raw AAL0 with 52-byte SDUs: first 4 bytes of the skb carry the cell header */
1714 if ((vcc->qos.aal == ATM_AAL0) && (vcc->qos.txtp.max_sdu == ATM_AAL0_SDU)) {
1715 cell_header = (u32*) skb->data;
1716 skb_data = skb->data + 4; /* skip 4-byte cell header */
1717 skb_len = tx_len = skb->len - 4;
1719 DPRINTK(3, "user-supplied cell header = 0x%08x\n", *cell_header);
1724 skb_data = skb->data;
/* the hardware needs 32-bit aligned tx buffers; misaligned PDUs are copied */
1728 if (((unsigned long)skb_data) & 0x3) {
1730 DPRINTK(2, "misaligned tx PDU on device %s\n", fore200e->name);
1735 if ((vcc->qos.aal == ATM_AAL0) && (skb_len % ATM_CELL_PAYLOAD)) {
1737 /* this simply NUKES the PCA board */
1738 DPRINTK(2, "incomplete tx AAL0 PDU on device %s\n", fore200e->name);
/* round the tx length up to a whole number of cells; the tail is zeroed */
1740 tx_len = ((skb_len / ATM_CELL_PAYLOAD) + 1) * ATM_CELL_PAYLOAD;
1744 data = kmalloc(tx_len, GFP_ATOMIC | GFP_DMA);
1750 dev_kfree_skb_any(skb);
1755 memcpy(data, skb_data, skb_len);
1756 if (skb_len < tx_len)
1757 memset(data + skb_len, 0x00, tx_len - skb_len);
1763 vc_map = FORE200E_VC_MAP(fore200e, vcc->vpi, vcc->vci);
1764 ASSERT(vc_map->vcc == vcc);
1768 spin_lock_irqsave(&fore200e->q_lock, flags);
1770 entry = &txq->host_entry[ txq->head ];
1772 if ((*entry->status != STATUS_FREE) || (txq->txing >= QUEUE_SIZE_TX - 2)) {
1774 /* try to free completed tx queue entries */
1775 fore200e_tx_irq(fore200e);
1777 if (*entry->status != STATUS_FREE) {
1779 spin_unlock_irqrestore(&fore200e->q_lock, flags);
1781 /* retry once again? */
1787 atomic_inc(&vcc->stats->tx_err);
1790 DPRINTK(2, "tx queue of device %s is saturated, PDU dropped - heartbeat is %08x\n",
1791 fore200e->name, fore200e->cp_queues->heartbeat);
1796 dev_kfree_skb_any(skb);
/* record VC incarnation so stale completions for a reopened VC are ignored */
1806 entry->incarn = vc_map->incarn;
1807 entry->vc_map = vc_map;
1809 entry->data = tx_copy ? data : NULL;
1812 tpd->tsd[ 0 ].buffer = fore200e->bus->dma_map(fore200e, data, tx_len, DMA_TO_DEVICE);
1813 tpd->tsd[ 0 ].length = tx_len;
1815 FORE200E_NEXT_ENTRY(txq->head, QUEUE_SIZE_TX);
1818 /* The dma_map call above implies a dma_sync so the device can use it,
1819 * thus no explicit dma_sync call is necessary here.
1822 DPRINTK(3, "tx on %d.%d.%d:%d, len = %u (%u)\n",
1823 vcc->itf, vcc->vpi, vcc->vci, fore200e_atm2fore_aal(vcc->qos.aal),
1824 tpd->tsd[0].length, skb_len);
1826 if (skb_len < fore200e_vcc->tx_min_pdu)
1827 fore200e_vcc->tx_min_pdu = skb_len;
1828 if (skb_len > fore200e_vcc->tx_max_pdu)
1829 fore200e_vcc->tx_max_pdu = skb_len;
1830 fore200e_vcc->tx_pdu++;
1832 /* set tx rate control information */
1833 tpd->rate.data_cells = fore200e_vcc->rate.data_cells;
1834 tpd->rate.idle_cells = fore200e_vcc->rate.idle_cells;
/* 52-byte AAL0: take the ATM header fields from the user-supplied cell header */
1837 tpd->atm_header.clp = (*cell_header & ATM_HDR_CLP);
1838 tpd->atm_header.plt = (*cell_header & ATM_HDR_PTI_MASK) >> ATM_HDR_PTI_SHIFT;
1839 tpd->atm_header.vci = (*cell_header & ATM_HDR_VCI_MASK) >> ATM_HDR_VCI_SHIFT;
1840 tpd->atm_header.vpi = (*cell_header & ATM_HDR_VPI_MASK) >> ATM_HDR_VPI_SHIFT;
1841 tpd->atm_header.gfc = (*cell_header & ATM_HDR_GFC_MASK) >> ATM_HDR_GFC_SHIFT;
1844 /* set the ATM header, common to all cells conveying the PDU */
1845 tpd->atm_header.clp = 0;
1846 tpd->atm_header.plt = 0;
1847 tpd->atm_header.vci = vcc->vci;
1848 tpd->atm_header.vpi = vcc->vpi;
1849 tpd->atm_header.gfc = 0;
1852 tpd->spec.length = tx_len;
1854 tpd->spec.aal = fore200e_atm2fore_aal(vcc->qos.aal);
1857 tpd_haddr.size = sizeof(struct tpd) / (1<<TPD_HADDR_SHIFT); /* size is expressed in 32 byte blocks */
1859 tpd_haddr.haddr = entry->tpd_dma >> TPD_HADDR_SHIFT; /* shift the address, as we are in a bitfield */
/* writing the tpd address is what notifies the cp of the new PDU */
1861 *entry->status = STATUS_PENDING;
1862 fore200e->bus->write(*(u32*)&tpd_haddr, (u32 __iomem *)&entry->cp_entry->tpd_haddr);
1864 spin_unlock_irqrestore(&fore200e->q_lock, flags);
/* Fetch the cp's statistics block into fore200e->stats via a GET_STATS
 * command (DMA from device), polling for completion.  The stats buffer is
 * lazily allocated on first use and kept for the device lifetime. */
1871 fore200e_getstats(struct fore200e* fore200e)
1873 struct host_cmdq* cmdq = &fore200e->host_cmdq;
1874 struct host_cmdq_entry* entry = &cmdq->host_entry[ cmdq->head ];
1875 struct stats_opcode opcode;
1879 if (fore200e->stats == NULL) {
1880 fore200e->stats = fore200e_kmalloc(sizeof(struct stats), GFP_KERNEL | GFP_DMA);
1881 if (fore200e->stats == NULL)
1885 stats_dma_addr = fore200e->bus->dma_map(fore200e, fore200e->stats,
1886 sizeof(struct stats), DMA_FROM_DEVICE);
1888 FORE200E_NEXT_ENTRY(cmdq->head, QUEUE_SIZE_CMD);
1890 opcode.opcode = OPCODE_GET_STATS;
/* the opcode is written after the DMA address: it triggers the command */
1893 fore200e->bus->write(stats_dma_addr, &entry->cp_entry->cmd.stats_block.stats_haddr);
1895 *entry->status = STATUS_PENDING;
1897 fore200e->bus->write(*(u32*)&opcode, (u32 __iomem *)&entry->cp_entry->cmd.stats_block.opcode);
1899 ok = fore200e_poll(fore200e, entry->status, STATUS_COMPLETE, 400);
1901 *entry->status = STATUS_FREE;
1903 fore200e->bus->dma_unmap(fore200e, stats_dma_addr, sizeof(struct stats), DMA_FROM_DEVICE);
1906 printk(FORE200E "unable to get statistics from device %s\n", fore200e->name);
/* atmdev .getsockopt handler: no device-specific options; only logs. */
1915 fore200e_getsockopt(struct atm_vcc* vcc, int level, int optname, void __user *optval, int optlen)
1917 /* struct fore200e* fore200e = FORE200E_DEV(vcc->dev); */
1919 DPRINTK(2, "getsockopt %d.%d.%d, level = %d, optname = 0x%x, optval = 0x%p, optlen = %d\n",
1920 vcc->itf, vcc->vpi, vcc->vci, level, optname, optval, optlen);
/* atmdev .setsockopt handler: no device-specific options; only logs. */
1927 fore200e_setsockopt(struct atm_vcc* vcc, int level, int optname, void __user *optval, int optlen)
1929 /* struct fore200e* fore200e = FORE200E_DEV(vcc->dev); */
1931 DPRINTK(2, "setsockopt %d.%d.%d, level = %d, optname = 0x%x, optval = 0x%p, optlen = %d\n",
1932 vcc->itf, vcc->vpi, vcc->vci, level, optname, optval, optlen);
1938 #if 0 /* currently unused */
/* Read the SUNI OC-3 register block from the cp into 'regs' (GET_OC3
 * command, DMA from device).  Dead code, kept under #if 0.
 * NOTE(review): the opcode write below casts to (u32*) rather than
 * (u32 __iomem *) as the live code paths do — fix if ever re-enabled. */
1940 fore200e_get_oc3(struct fore200e* fore200e, struct oc3_regs* regs)
1942 struct host_cmdq* cmdq = &fore200e->host_cmdq;
1943 struct host_cmdq_entry* entry = &cmdq->host_entry[ cmdq->head ];
1944 struct oc3_opcode opcode;
1946 u32 oc3_regs_dma_addr;
1948 oc3_regs_dma_addr = fore200e->bus->dma_map(fore200e, regs, sizeof(struct oc3_regs), DMA_FROM_DEVICE);
1950 FORE200E_NEXT_ENTRY(cmdq->head, QUEUE_SIZE_CMD);
1952 opcode.opcode = OPCODE_GET_OC3;
1957 fore200e->bus->write(oc3_regs_dma_addr, &entry->cp_entry->cmd.oc3_block.regs_haddr);
1959 *entry->status = STATUS_PENDING;
1961 fore200e->bus->write(*(u32*)&opcode, (u32*)&entry->cp_entry->cmd.oc3_block.opcode);
1963 ok = fore200e_poll(fore200e, entry->status, STATUS_COMPLETE, 400);
1965 *entry->status = STATUS_FREE;
1967 fore200e->bus->dma_unmap(fore200e, oc3_regs_dma_addr, sizeof(struct oc3_regs), DMA_FROM_DEVICE);
1970 printk(FORE200E "unable to get OC-3 regs of device %s\n", fore200e->name);
/* Write a SUNI OC-3 register on the cp: read-modify-write of 'reg' with
 * 'value' under 'mask', via a SET_OC3 command polled to completion. */
1980 fore200e_set_oc3(struct fore200e* fore200e, u32 reg, u32 value, u32 mask)
1982 struct host_cmdq* cmdq = &fore200e->host_cmdq;
1983 struct host_cmdq_entry* entry = &cmdq->host_entry[ cmdq->head ];
1984 struct oc3_opcode opcode;
1987 DPRINTK(2, "set OC-3 reg = 0x%02x, value = 0x%02x, mask = 0x%02x\n", reg, value, mask);
1989 FORE200E_NEXT_ENTRY(cmdq->head, QUEUE_SIZE_CMD);
1991 opcode.opcode = OPCODE_SET_OC3;
1993 opcode.value = value;
/* no DMA buffer is involved for a register write */
1996 fore200e->bus->write(0, &entry->cp_entry->cmd.oc3_block.regs_haddr);
1998 *entry->status = STATUS_PENDING;
2000 fore200e->bus->write(*(u32*)&opcode, (u32 __iomem *)&entry->cp_entry->cmd.oc3_block.opcode);
2002 ok = fore200e_poll(fore200e, entry->status, STATUS_COMPLETE, 400);
2004 *entry->status = STATUS_FREE;
2007 printk(FORE200E "unable to set OC-3 reg 0x%02x of device %s\n", reg, fore200e->name);
/* Set the PHY loopback mode (ATM_LM_*) by programming the SUNI master
 * control register.  Requires CAP_NET_ADMIN. */
2016 fore200e_setloop(struct fore200e* fore200e, int loop_mode)
2018 u32 mct_value, mct_mask;
2021 if (!capable(CAP_NET_ADMIN))
2024 switch (loop_mode) {
2028 mct_mask = SUNI_MCT_DLE | SUNI_MCT_LLE;
2031 case ATM_LM_LOC_PHY:
2032 mct_value = mct_mask = SUNI_MCT_DLE;
2035 case ATM_LM_RMT_PHY:
2036 mct_value = mct_mask = SUNI_MCT_LLE;
/* remember the mode only if the register write succeeded */
2043 error = fore200e_set_oc3(fore200e, SUNI_MCT, mct_value, mct_mask);
2045 fore200e->loop_mode = loop_mode;
/* Convert a 32-bit counter read from the cp to host order — presumably a
 * byte swap on little-endian hosts only (body not fully visible here;
 * TODO confirm against the full source). */
2051 static inline unsigned int
2052 fore200e_swap(unsigned int in)
2054 #if defined(__LITTLE_ENDIAN)
/* SONET_GETSTAT backend: refresh the cp statistics, convert the OC-3 and
 * per-AAL counters to host order, and copy a struct sonet_stats to user
 * space. */
2063 fore200e_fetch_stats(struct fore200e* fore200e, struct sonet_stats __user *arg)
2065 struct sonet_stats tmp;
2067 if (fore200e_getstats(fore200e) < 0)
2070 tmp.section_bip = fore200e_swap(fore200e->stats->oc3.section_bip8_errors);
2071 tmp.line_bip = fore200e_swap(fore200e->stats->oc3.line_bip24_errors);
2072 tmp.path_bip = fore200e_swap(fore200e->stats->oc3.path_bip8_errors);
2073 tmp.line_febe = fore200e_swap(fore200e->stats->oc3.line_febe_errors);
2074 tmp.path_febe = fore200e_swap(fore200e->stats->oc3.path_febe_errors);
2075 tmp.corr_hcs = fore200e_swap(fore200e->stats->oc3.corr_hcs_errors);
2076 tmp.uncorr_hcs = fore200e_swap(fore200e->stats->oc3.ucorr_hcs_errors);
/* cell counts aggregate all three AAL types */
2077 tmp.tx_cells = fore200e_swap(fore200e->stats->aal0.cells_transmitted) +
2078 fore200e_swap(fore200e->stats->aal34.cells_transmitted) +
2079 fore200e_swap(fore200e->stats->aal5.cells_transmitted);
2080 tmp.rx_cells = fore200e_swap(fore200e->stats->aal0.cells_received) +
2081 fore200e_swap(fore200e->stats->aal34.cells_received) +
2082 fore200e_swap(fore200e->stats->aal5.cells_received);
2085 return copy_to_user(arg, &tmp, sizeof(struct sonet_stats)) ? -EFAULT : 0;
/* atmdev .ioctl handler: dispatch the SONET/loopback ioctls; everything
 * else returns -ENOSYS. */
2092 fore200e_ioctl(struct atm_dev* dev, unsigned int cmd, void __user * arg)
2094 struct fore200e* fore200e = FORE200E_DEV(dev);
2096 DPRINTK(2, "ioctl cmd = 0x%x (%u), arg = 0x%p (%lu)\n", cmd, cmd, arg, (unsigned long)arg);
2101 return fore200e_fetch_stats(fore200e, (struct sonet_stats __user *)arg);
2104 return put_user(0, (int __user *)arg) ? -EFAULT : 0;
/* arg carries the loop mode by value, not a pointer */
2107 return fore200e_setloop(fore200e, (int)(unsigned long)arg);
2110 return put_user(fore200e->loop_mode, (int __user *)arg) ? -EFAULT : 0;
2113 return put_user(ATM_LM_LOC_PHY | ATM_LM_RMT_PHY, (int __user *)arg) ? -EFAULT : 0;
2116 return -ENOSYS; /* not implemented */
/* atmdev .change_qos handler: re-account reserved CBR bandwidth for the new
 * tx PCR, install the new qos and recompute the rate-control parameters.
 * Fix: the not-ready DPRINTK passed vcc->vpi twice; the third "%d" of
 * "VC %d.%d.%d" is the vci. */
2121 fore200e_change_qos(struct atm_vcc* vcc,struct atm_qos* qos, int flags)
2123 struct fore200e_vcc* fore200e_vcc = FORE200E_VCC(vcc);
2124 struct fore200e* fore200e = FORE200E_DEV(vcc->dev);
2126 if (!test_bit(ATM_VF_READY, &vcc->flags)) {
2127 DPRINTK(1, "VC %d.%d.%d not ready for QoS change\n", vcc->itf, vcc->vpi, vcc->vci);
2131 DPRINTK(2, "change_qos %d.%d.%d, "
2132 "(tx: cl=%s, pcr=%d-%d, cdv=%d, max_sdu=%d; "
2133 "rx: cl=%s, pcr=%d-%d, cdv=%d, max_sdu=%d), flags = 0x%x\n"
2134 "available_cell_rate = %u",
2135 vcc->itf, vcc->vpi, vcc->vci,
2136 fore200e_traffic_class[ qos->txtp.traffic_class ],
2137 qos->txtp.min_pcr, qos->txtp.max_pcr, qos->txtp.max_cdv, qos->txtp.max_sdu,
2138 fore200e_traffic_class[ qos->rxtp.traffic_class ],
2139 qos->rxtp.min_pcr, qos->rxtp.max_pcr, qos->rxtp.max_cdv, qos->rxtp.max_sdu,
2140 flags, fore200e->available_cell_rate);
2142 if ((qos->txtp.traffic_class == ATM_CBR) && (qos->txtp.max_pcr > 0)) {
/* rate_sf serializes available_cell_rate accounting with open/close */
2144 down(&fore200e->rate_sf);
2145 if (fore200e->available_cell_rate + vcc->qos.txtp.max_pcr < qos->txtp.max_pcr) {
2146 up(&fore200e->rate_sf);
/* swap the old reservation for the new one */
2150 fore200e->available_cell_rate += vcc->qos.txtp.max_pcr;
2151 fore200e->available_cell_rate -= qos->txtp.max_pcr;
2153 up(&fore200e->rate_sf);
2155 memcpy(&vcc->qos, qos, sizeof(struct atm_qos));
2157 /* update rate control parameters */
2158 fore200e_rate_ctrl(qos, &fore200e_vcc->rate);
2160 set_bit(ATM_VF_HASQOS, &vcc->flags);
/* Reserve the device IRQ (shared) with the atm_dev as the cookie, set up
 * the optional tx/rx tasklets, and advance the init state machine. */
2170 fore200e_irq_request(struct fore200e* fore200e)
2172 if (request_irq(fore200e->irq, fore200e_interrupt, SA_SHIRQ, fore200e->name, fore200e->atm_dev) < 0) {
2174 printk(FORE200E "unable to reserve IRQ %s for device %s\n",
2175 fore200e_irq_itoa(fore200e->irq), fore200e->name);
2179 printk(FORE200E "IRQ %s reserved for device %s\n",
2180 fore200e_irq_itoa(fore200e->irq), fore200e->name);
2182 #ifdef FORE200E_USE_TASKLET
2183 tasklet_init(&fore200e->tx_tasklet, fore200e_tx_tasklet, (unsigned long)fore200e);
2184 tasklet_init(&fore200e->rx_tasklet, fore200e_rx_tasklet, (unsigned long)fore200e);
2187 fore200e->state = FORE200E_STATE_IRQ;
/* Read the board PROM and extract the End System Identifier (ESI): the ESI
 * is bytes 2..7 of the PROM MAC address field, copied into both the driver
 * and the atm_dev. */
2193 fore200e_get_esi(struct fore200e* fore200e)
2195 struct prom_data* prom = fore200e_kmalloc(sizeof(struct prom_data), GFP_KERNEL | GFP_DMA);
2201 ok = fore200e->bus->prom_read(fore200e, prom);
2203 fore200e_kfree(prom);
2207 printk(FORE200E "device %s, rev. %c, S/N: %d, ESI: %02x:%02x:%02x:%02x:%02x:%02x\n",
2209 (prom->hw_revision & 0xFF) + '@', /* probably meaningless with SBA boards */
2210 prom->serial_number & 0xFFFF,
2211 prom->mac_addr[ 2 ], prom->mac_addr[ 3 ], prom->mac_addr[ 4 ],
2212 prom->mac_addr[ 5 ], prom->mac_addr[ 6 ], prom->mac_addr[ 7 ]);
2214 for (i = 0; i < ESI_LEN; i++) {
2215 fore200e->esi[ i ] = fore200e->atm_dev->esi[ i ] = prom->mac_addr[ i + 2 ];
2218 fore200e_kfree(prom);
/* Allocate the rx buffers for every (scheme, magnitude) buffer supply
 * queue: a descriptor array plus one DMA-able data chunk per buffer, all
 * threaded onto the queue's free list. */
2225 fore200e_alloc_rx_buf(struct fore200e* fore200e)
2227 int scheme, magn, nbr, size, i;
2229 struct host_bsq* bsq;
2230 struct buffer* buffer;
2232 for (scheme = 0; scheme < BUFFER_SCHEME_NBR; scheme++) {
2233 for (magn = 0; magn < BUFFER_MAGN_NBR; magn++) {
2235 bsq = &fore200e->host_bsq[ scheme ][ magn ];
2237 nbr = fore200e_rx_buf_nbr[ scheme ][ magn ];
2238 size = fore200e_rx_buf_size[ scheme ][ magn ];
2240 DPRINTK(2, "rx buffers %d / %d are being allocated\n", scheme, magn);
2242 /* allocate the array of receive buffers */
2243 buffer = bsq->buffer = fore200e_kmalloc(nbr * sizeof(struct buffer), GFP_KERNEL);
2248 bsq->freebuf = NULL;
2250 for (i = 0; i < nbr; i++) {
2252 buffer[ i ].scheme = scheme;
2253 buffer[ i ].magn = magn;
2254 #ifdef FORE200E_BSQ_DEBUG
2255 buffer[ i ].index = i;
2256 buffer[ i ].supplied = 0;
2259 /* allocate the receive buffer body */
2260 if (fore200e_chunk_alloc(fore200e,
2261 &buffer[ i ].data, size, fore200e->bus->buffer_alignment,
2262 DMA_FROM_DEVICE) < 0) {
2264 
/* allocation failed mid-way: free the chunks allocated so far */
2265 fore200e_chunk_free(fore200e, &buffer[ --i ].data);
2266 fore200e_kfree(buffer);
2271 /* insert the buffer into the free buffer list */
2272 buffer[ i ].next = bsq->freebuf;
2273 bsq->freebuf = &buffer[ i ];
2275 /* all the buffers are free, initially */
2276 bsq->freebuf_count = nbr;
2278 #ifdef FORE200E_BSQ_DEBUG
2279 bsq_audit(3, bsq, scheme, magn);
2283 
/* advance the init state machine */
2284 fore200e->state = FORE200E_STATE_ALLOC_BUF;
/* Initialize every buffer supply queue: allocate aligned status-word and
 * rbd-block arrays, then wire each host entry to its cp resident entry and
 * publish the status DMA addresses to the cp. */
2290 fore200e_init_bs_queue(struct fore200e* fore200e)
2292 int scheme, magn, i;
2294 struct host_bsq* bsq;
2295 struct cp_bsq_entry __iomem * cp_entry;
2297 for (scheme = 0; scheme < BUFFER_SCHEME_NBR; scheme++) {
2298 for (magn = 0; magn < BUFFER_MAGN_NBR; magn++) {
2300 DPRINTK(2, "buffer supply queue %d / %d is being initialized\n", scheme, magn);
2302 bsq = &fore200e->host_bsq[ scheme ][ magn ];
2304 /* allocate and align the array of status words */
2305 if (fore200e->bus->dma_chunk_alloc(fore200e,
2307 sizeof(enum status),
2309 fore200e->bus->status_alignment) < 0) {
2313 /* allocate and align the array of receive buffer descriptors */
2314 if (fore200e->bus->dma_chunk_alloc(fore200e,
2316 sizeof(struct rbd_block),
2318 fore200e->bus->descr_alignment) < 0) {
2319 
/* undo the first allocation on failure */
2320 fore200e->bus->dma_chunk_free(fore200e, &bsq->status);
2324 /* get the base address of the cp resident buffer supply queue entries */
2325 cp_entry = fore200e->virt_base +
2326 fore200e->bus->read(&fore200e->cp_queues->cp_bsq[ scheme ][ magn ]);
2328 /* fill the host resident and cp resident buffer supply queue entries */
2329 for (i = 0; i < QUEUE_SIZE_BS; i++) {
2331 bsq->host_entry[ i ].status =
2332 FORE200E_INDEX(bsq->status.align_addr, enum status, i);
2333 bsq->host_entry[ i ].rbd_block =
2334 FORE200E_INDEX(bsq->rbd_block.align_addr, struct rbd_block, i);
2335 bsq->host_entry[ i ].rbd_block_dma =
2336 FORE200E_DMA_INDEX(bsq->rbd_block.dma_addr, struct rbd_block, i);
2337 bsq->host_entry[ i ].cp_entry = &cp_entry[ i ];
2339 *bsq->host_entry[ i ].status = STATUS_FREE;
2341 fore200e->bus->write(FORE200E_DMA_INDEX(bsq->status.dma_addr, enum status, i),
2342 &cp_entry[ i ].status_haddr);
2347 fore200e->state = FORE200E_STATE_INIT_BSQ;
/* Initialize the host rx queue: allocate aligned status and rpd arrays,
 * bind each host entry to its cp resident entry, and publish both the
 * status and rpd DMA addresses to the cp. */
2353 fore200e_init_rx_queue(struct fore200e* fore200e)
2355 struct host_rxq* rxq = &fore200e->host_rxq;
2356 struct cp_rxq_entry __iomem * cp_entry;
2359 DPRINTK(2, "receive queue is being initialized\n");
2361 /* allocate and align the array of status words */
2362 if (fore200e->bus->dma_chunk_alloc(fore200e,
2364 sizeof(enum status),
2366 fore200e->bus->status_alignment) < 0) {
2370 /* allocate and align the array of receive PDU descriptors */
2371 if (fore200e->bus->dma_chunk_alloc(fore200e,
2375 fore200e->bus->descr_alignment) < 0) {
2376 
/* undo the first allocation on failure */
2377 fore200e->bus->dma_chunk_free(fore200e, &rxq->status);
2381 /* get the base address of the cp resident rx queue entries */
2382 cp_entry = fore200e->virt_base + fore200e->bus->read(&fore200e->cp_queues->cp_rxq);
2384 /* fill the host resident and cp resident rx entries */
2385 for (i=0; i < QUEUE_SIZE_RX; i++) {
2387 rxq->host_entry[ i ].status =
2388 FORE200E_INDEX(rxq->status.align_addr, enum status, i);
2389 rxq->host_entry[ i ].rpd =
2390 FORE200E_INDEX(rxq->rpd.align_addr, struct rpd, i);
2391 rxq->host_entry[ i ].rpd_dma =
2392 FORE200E_DMA_INDEX(rxq->rpd.dma_addr, struct rpd, i);
2393 rxq->host_entry[ i ].cp_entry = &cp_entry[ i ];
2395 *rxq->host_entry[ i ].status = STATUS_FREE;
2397 fore200e->bus->write(FORE200E_DMA_INDEX(rxq->status.dma_addr, enum status, i),
2398 &cp_entry[ i ].status_haddr);
2400 fore200e->bus->write(FORE200E_DMA_INDEX(rxq->rpd.dma_addr, struct rpd, i),
2401 &cp_entry[ i ].rpd_haddr);
2404 /* set the head entry of the queue */
2407 fore200e->state = FORE200E_STATE_INIT_RXQ;
/* Initialize the host tx queue: allocate aligned status and tpd arrays and
 * bind each host entry to its cp resident entry.  Unlike the rx queue, the
 * tpd DMA addresses are NOT pre-published — see the comment below. */
2413 fore200e_init_tx_queue(struct fore200e* fore200e)
2415 struct host_txq* txq = &fore200e->host_txq;
2416 struct cp_txq_entry __iomem * cp_entry;
2419 DPRINTK(2, "transmit queue is being initialized\n");
2421 /* allocate and align the array of status words */
2422 if (fore200e->bus->dma_chunk_alloc(fore200e,
2424 sizeof(enum status),
2426 fore200e->bus->status_alignment) < 0) {
2430 /* allocate and align the array of transmit PDU descriptors */
2431 if (fore200e->bus->dma_chunk_alloc(fore200e,
2435 fore200e->bus->descr_alignment) < 0) {
2436 
/* undo the first allocation on failure */
2437 fore200e->bus->dma_chunk_free(fore200e, &txq->status);
2441 /* get the base address of the cp resident tx queue entries */
2442 cp_entry = fore200e->virt_base + fore200e->bus->read(&fore200e->cp_queues->cp_txq);
2444 /* fill the host resident and cp resident tx entries */
2445 for (i=0; i < QUEUE_SIZE_TX; i++) {
2447 txq->host_entry[ i ].status =
2448 FORE200E_INDEX(txq->status.align_addr, enum status, i);
2449 txq->host_entry[ i ].tpd =
2450 FORE200E_INDEX(txq->tpd.align_addr, struct tpd, i);
2451 txq->host_entry[ i ].tpd_dma =
2452 FORE200E_DMA_INDEX(txq->tpd.dma_addr, struct tpd, i);
2453 txq->host_entry[ i ].cp_entry = &cp_entry[ i ];
2455 *txq->host_entry[ i ].status = STATUS_FREE;
2457 fore200e->bus->write(FORE200E_DMA_INDEX(txq->status.dma_addr, enum status, i),
2458 &cp_entry[ i ].status_haddr);
2460 /* although there is a one-to-one mapping of tx queue entries and tpds,
2461 we do not write here the DMA (physical) base address of each tpd into
2462 the related cp resident entry, because the cp relies on this write
2463 operation to detect that a new pdu has been submitted for tx */
2466 /* set the head and tail entries of the queue */
2470 fore200e->state = FORE200E_STATE_INIT_TXQ;
/* Initialize the host command queue: allocate the aligned status array and
 * bind each host entry to its cp resident entry, publishing the status DMA
 * addresses to the cp. */
2476 fore200e_init_cmd_queue(struct fore200e* fore200e)
2478 struct host_cmdq* cmdq = &fore200e->host_cmdq;
2479 struct cp_cmdq_entry __iomem * cp_entry;
2482 DPRINTK(2, "command queue is being initialized\n");
2484 /* allocate and align the array of status words */
2485 if (fore200e->bus->dma_chunk_alloc(fore200e,
2487 sizeof(enum status),
2489 fore200e->bus->status_alignment) < 0) {
2493 /* get the base address of the cp resident cmd queue entries */
2494 cp_entry = fore200e->virt_base + fore200e->bus->read(&fore200e->cp_queues->cp_cmdq);
2496 /* fill the host resident and cp resident cmd entries */
2497 for (i=0; i < QUEUE_SIZE_CMD; i++) {
2499 cmdq->host_entry[ i ].status =
2500 FORE200E_INDEX(cmdq->status.align_addr, enum status, i);
2501 cmdq->host_entry[ i ].cp_entry = &cp_entry[ i ];
2503 *cmdq->host_entry[ i ].status = STATUS_FREE;
2505 fore200e->bus->write(FORE200E_DMA_INDEX(cmdq->status.dma_addr, enum status, i),
2506 &cp_entry[ i ].status_haddr);
2509 /* set the head entry of the queue */
2512 fore200e->state = FORE200E_STATE_INIT_CMDQ;
/* Write the parameters of one buffer supply queue (length, buffer size,
 * pool size, supply block size) into the cp's init area before the
 * INITIALIZE command is issued. */
2518 fore200e_param_bs_queue(struct fore200e* fore200e,
2519 enum buffer_scheme scheme, enum buffer_magn magn,
2520 int queue_length, int pool_size, int supply_blksize)
2522 struct bs_spec __iomem * bs_spec = &fore200e->cp_queues->init.bs_spec[ scheme ][ magn ];
2524 fore200e->bus->write(queue_length, &bs_spec->queue_length);
2525 fore200e->bus->write(fore200e_rx_buf_size[ scheme ][ magn ], &bs_spec->buffer_size);
2526 fore200e->bus->write(pool_size, &bs_spec->pool_size);
2527 fore200e->bus->write(supply_blksize, &bs_spec->supply_blksize);
/* Program the cp's init area (queue sizes, connection count, buffer scheme
 * parameters), enable cp-to-host interrupts, issue the INITIALIZE opcode
 * and poll until the cp reports completion. */
2532 fore200e_initialize(struct fore200e* fore200e)
2534 struct cp_queues __iomem * cpq;
2535 int ok, scheme, magn;
2537 DPRINTK(2, "device %s being initialized\n", fore200e->name);
2539 init_MUTEX(&fore200e->rate_sf);
2540 spin_lock_init(&fore200e->q_lock);
2542 cpq = fore200e->cp_queues = fore200e->virt_base + FORE200E_CP_QUEUES_OFFSET;
2544 /* enable cp to host interrupts */
2545 fore200e->bus->write(1, &cpq->imask);
2547 if (fore200e->bus->irq_enable)
2548 fore200e->bus->irq_enable(fore200e);
2550 fore200e->bus->write(NBR_CONNECT, &cpq->init.num_connect);
2552 fore200e->bus->write(QUEUE_SIZE_CMD, &cpq->init.cmd_queue_len);
2553 fore200e->bus->write(QUEUE_SIZE_RX, &cpq->init.rx_queue_len);
2554 fore200e->bus->write(QUEUE_SIZE_TX, &cpq->init.tx_queue_len);
2556 fore200e->bus->write(RSD_EXTENSION, &cpq->init.rsd_extension);
2557 fore200e->bus->write(TSD_EXTENSION, &cpq->init.tsd_extension);
2558 
/* describe every buffer supply queue to the cp */
2559 for (scheme = 0; scheme < BUFFER_SCHEME_NBR; scheme++)
2560 for (magn = 0; magn < BUFFER_MAGN_NBR; magn++)
2561 fore200e_param_bs_queue(fore200e, scheme, magn,
2563 fore200e_rx_buf_nbr[ scheme ][ magn ],
2566 /* issue the initialize command */
2567 fore200e->bus->write(STATUS_PENDING, &cpq->init.status);
2568 fore200e->bus->write(OPCODE_INITIALIZE, &cpq->init.opcode);
2570 ok = fore200e_io_poll(fore200e, &cpq->init.status, STATUS_COMPLETE, 3000);
2572 printk(FORE200E "device %s initialization failed\n", fore200e->name);
2576 printk(FORE200E "device %s initialized\n", fore200e->name);
2578 fore200e->state = FORE200E_STATE_INITIALIZE;
/* Send one character to the on-board i960 monitor via its soft UART:
 * the character is written with the AVAIL flag set so the monitor knows
 * a new byte is pending. */
2584 fore200e_monitor_putc(struct fore200e* fore200e, char c)
2586 struct cp_monitor __iomem * monitor = fore200e->cp_monitor;
2591 fore200e->bus->write(((u32) c) | FORE200E_CP_MONITOR_UART_AVAIL, &monitor->soft_uart.send);
/* Read one character from the i960 monitor's soft UART, polling for up
 * to 50 ms.  A received byte is echoed to the console and the UART slot
 * is marked free again.  NOTE(review): the timeout/return path is not
 * visible in this chunk — presumably returns the byte on success and a
 * negative value on timeout (callers test for >= 0); confirm upstream. */
2596 fore200e_monitor_getc(struct fore200e* fore200e)
2598 struct cp_monitor __iomem * monitor = fore200e->cp_monitor;
2599 unsigned long timeout = jiffies + MSECS(50);
2602 while (time_before(jiffies, timeout)) {
2604 c = (int) fore200e->bus->read(&monitor->soft_uart.recv);
/* AVAIL flag set means the monitor has deposited a fresh byte */
2606 if (c & FORE200E_CP_MONITOR_UART_AVAIL) {
/* release the slot so the monitor can send the next byte */
2608 fore200e->bus->write(FORE200E_CP_MONITOR_UART_FREE, &monitor->soft_uart.recv);
/* echo the received character to the kernel log */
2610 printk("%c", c & 0xFF);
/* Send a NUL-terminated command string to the i960 monitor one character
 * at a time, draining any pending monitor output before each character
 * and after the whole string. */
2621 fore200e_monitor_puts(struct fore200e* fore200e, char* str)
2625 /* the i960 monitor doesn't accept any new character if it has something to say */
2626 while (fore200e_monitor_getc(fore200e) >= 0);
2628 fore200e_monitor_putc(fore200e, *str++);
/* flush whatever the monitor printed in response */
2631 while (fore200e_monitor_getc(fore200e) >= 0);
/* Start the previously loaded firmware by typing a "go <addr>" command
 * into the i960 monitor, then poll the boot status word until the cell
 * processor reports it is running. */
2636 fore200e_start_fw(struct fore200e* fore200e)
2640 struct fw_header* fw_header = (struct fw_header*) fore200e->bus->fw_data;
2642 DPRINTK(2, "device %s firmware being started\n", fore200e->name);
2644 #if defined(__sparc_v9__)
2645 /* reported to be required by SBA cards on some sparc64 hosts */
/* firmware header fields are little-endian on the wire */
2649 sprintf(cmd, "\rgo %x\r", le32_to_cpu(fw_header->start_offset));
2651 fore200e_monitor_puts(fore200e, cmd);
/* wait (up to 1000 polls) for the cp to enter the RUNNING boot state */
2653 ok = fore200e_io_poll(fore200e, &fore200e->cp_monitor->bstat, BSTAT_CP_RUNNING, 1000);
2655 printk(FORE200E "device %s firmware didn't start\n", fore200e->name);
2659 printk(FORE200E "device %s firmware started\n", fore200e->name);
2661 fore200e->state = FORE200E_STATE_START_FW;
/* Copy the bus-specific firmware image, word by word, into the board's
 * memory at the load offset announced in the firmware header.  The image
 * magic is validated first to reject a corrupted build. */
2667 fore200e_load_fw(struct fore200e* fore200e)
2669 u32* fw_data = (u32*) fore200e->bus->fw_data;
/* image size is stored in bytes; convert to a 32-bit word count */
2670 u32 fw_size = (u32) *fore200e->bus->fw_size / sizeof(u32);
2672 struct fw_header* fw_header = (struct fw_header*) fw_data;
2674 u32 __iomem *load_addr = fore200e->virt_base + le32_to_cpu(fw_header->load_offset);
2676 DPRINTK(2, "device %s firmware being loaded at 0x%p (%d words)\n",
2677 fore200e->name, load_addr, fw_size);
2679 if (le32_to_cpu(fw_header->magic) != FW_HEADER_MAGIC) {
2680 printk(FORE200E "corrupted %s firmware image\n", fore200e->bus->model_name);
/* words are little-endian in the image; the bus accessor handles the
   byte order expected by the card */
2684 for (; fw_size--; fw_data++, load_addr++)
2685 fore200e->bus->write(le32_to_cpu(*fw_data), load_addr);
2687 fore200e->state = FORE200E_STATE_LOAD_FW;
/* Register the board with the Linux ATM core (atm_dev_register) and link
 * the atm_dev and fore200e structures to each other.  Also advertises the
 * VPI/VCI address space and the initial OC-3 cell rate. */
2693 fore200e_register(struct fore200e* fore200e)
2695 struct atm_dev* atm_dev;
2697 DPRINTK(2, "device %s being registered\n", fore200e->name);
/* -1 asks the ATM core to assign the next free device number */
2699 atm_dev = atm_dev_register(fore200e->bus->proc_name, &fore200e_ops, -1,
2701 if (atm_dev == NULL) {
2702 printk(FORE200E "unable to register device %s\n", fore200e->name);
/* cross-link the generic ATM device and the driver-private state */
2706 atm_dev->dev_data = fore200e;
2707 fore200e->atm_dev = atm_dev;
2709 atm_dev->ci_range.vpi_bits = FORE200E_VPI_BITS;
2710 atm_dev->ci_range.vci_bits = FORE200E_VCI_BITS;
/* full OC-3 payload cell rate is available until connections reserve it */
2712 fore200e->available_cell_rate = ATM_OC3_PCR;
2714 fore200e->state = FORE200E_STATE_REGISTER;
/* Bring one board all the way up: ATM registration, bus configuration,
 * memory mapping, reset, firmware load/start, cp initialization, host
 * queue setup, buffer allocation, ESI retrieval and IRQ hookup.  Each
 * step advances fore200e->state so fore200e_shutdown can unwind exactly
 * as far as initialization got.  (Error-return lines are elided in this
 * view; each failed step bails out early.) */
2720 fore200e_init(struct fore200e* fore200e)
2722 if (fore200e_register(fore200e) < 0)
2725 if (fore200e->bus->configure(fore200e) < 0)
2728 if (fore200e->bus->map(fore200e) < 0)
2731 if (fore200e_reset(fore200e, 1) < 0)
2734 if (fore200e_load_fw(fore200e) < 0)
2737 if (fore200e_start_fw(fore200e) < 0)
2740 if (fore200e_initialize(fore200e) < 0)
2743 if (fore200e_init_cmd_queue(fore200e) < 0)
2746 if (fore200e_init_tx_queue(fore200e) < 0)
2749 if (fore200e_init_rx_queue(fore200e) < 0)
2752 if (fore200e_init_bs_queue(fore200e) < 0)
2755 if (fore200e_alloc_rx_buf(fore200e) < 0)
2758 if (fore200e_get_esi(fore200e) < 0)
2761 if (fore200e_irq_request(fore200e) < 0)
/* hand the freshly allocated receive buffers to the card */
2764 fore200e_supply(fore200e);
2766 /* all done, board initialization is now complete */
2767 fore200e->state = FORE200E_STATE_COMPLETE;
/* Module entry point: probe every configured bus interface (PCA/PCI,
 * SBA/SBus), initialize each detected board, and chain the successfully
 * initialized ones onto the global fore200e_boards list.  Boards that
 * fail fore200e_init are shut down and skipped. */
2772 fore200e_module_init(void)
2774 const struct fore200e_bus* bus;
2775 struct fore200e* fore200e;
2778 printk(FORE200E "FORE Systems 200E-series ATM driver - version " FORE200E_VERSION "\n");
2780 /* for each configured bus interface */
2781 for (link = 0, bus = fore200e_bus; bus->model_name; bus++) {
2783 /* detect all boards present on that bus */
2784 for (index = 0; (fore200e = bus->detect(bus, index)); index++) {
2786 printk(FORE200E "device %s found at 0x%lx, IRQ %s\n",
2787 fore200e->bus->model_name,
2788 fore200e->phys_base, fore200e_irq_itoa(fore200e->irq));
/* e.g. "pca200e-0", "sba200e-1", ... */
2790 sprintf(fore200e->name, "%s-%d", bus->model_name, index);
2792 if (fore200e_init(fore200e) < 0) {
/* partial init: unwind whatever state was reached, then skip the board */
2794 fore200e_shutdown(fore200e);
/* prepend the working board to the global singly-linked list */
2800 fore200e->next = fore200e_boards;
2801 fore200e_boards = fore200e;
/* Module exit point: walk the global board list, shutting each board
 * down and unlinking it. */
2812 fore200e_module_cleanup(void)
2814 while (fore200e_boards) {
2815 struct fore200e* fore200e = fore200e_boards;
2817 fore200e_shutdown(fore200e);
/* advance the list head; note the node is read after shutdown, which
   assumes shutdown does not free the fore200e structure itself */
2818 fore200e_boards = fore200e->next;
2821 DPRINTK(1, "module being removed\n");
/* /proc read callback for the ATM core: emits one "section" of device
 * information per call, selected by *pos (general info, buffer counts,
 * heartbeat, firmware/media info, monitor state, PHY/OC-3/ATM/AAL
 * statistics, and finally one line per active VCC).  Returns the number
 * of bytes written into 'page'.  (The section-dispatch lines between
 * blocks are elided in this view.) */
2826 fore200e_proc_read(struct atm_dev *dev, loff_t* pos, char* page)
2828 struct fore200e* fore200e = FORE200E_DEV(dev);
2829 struct fore200e_vcc* fore200e_vcc;
2830 struct atm_vcc* vcc;
/* NOTE(review): 'left' narrows loff_t to int — harmless for realistic
   /proc offsets but technically truncating; confirm against callers */
2831 int i, len, left = *pos;
2832 unsigned long flags;
/* refresh the on-board statistics snapshot before printing anything */
2836 if (fore200e_getstats(fore200e) < 0)
2839 len = sprintf(page,"\n"
2841 " internal name:\t\t%s\n", fore200e->name);
2843 /* print bus-specific information */
2844 if (fore200e->bus->proc_read)
2845 len += fore200e->bus->proc_read(fore200e, page + len);
2847 len += sprintf(page + len,
2848 " interrupt line:\t\t%s\n"
2849 " physical base address:\t0x%p\n"
2850 " virtual base address:\t0x%p\n"
2851 " factory address (ESI):\t%02x:%02x:%02x:%02x:%02x:%02x\n"
2852 " board serial number:\t\t%d\n\n",
2853 fore200e_irq_itoa(fore200e->irq),
2854 (void*)fore200e->phys_base,
2855 fore200e->virt_base,
2856 fore200e->esi[0], fore200e->esi[1], fore200e->esi[2],
2857 fore200e->esi[3], fore200e->esi[4], fore200e->esi[5],
/* serial number is encoded in the last two ESI bytes */
2858 fore200e->esi[4] * 256 + fore200e->esi[5]);
/* free receive buffer counts, per scheme and magnitude */
2864 return sprintf(page,
2865 " free small bufs, scheme 1:\t%d\n"
2866 " free large bufs, scheme 1:\t%d\n"
2867 " free small bufs, scheme 2:\t%d\n"
2868 " free large bufs, scheme 2:\t%d\n",
2869 fore200e->host_bsq[ BUFFER_SCHEME_ONE ][ BUFFER_MAGN_SMALL ].freebuf_count,
2870 fore200e->host_bsq[ BUFFER_SCHEME_ONE ][ BUFFER_MAGN_LARGE ].freebuf_count,
2871 fore200e->host_bsq[ BUFFER_SCHEME_TWO ][ BUFFER_MAGN_SMALL ].freebuf_count,
2872 fore200e->host_bsq[ BUFFER_SCHEME_TWO ][ BUFFER_MAGN_LARGE ].freebuf_count);
/* cell processor heartbeat: upper half 0xDEAD means a fatal firmware
   error, with the error code in the lower 16 bits */
2875 u32 hb = fore200e->bus->read(&fore200e->cp_queues->heartbeat);
2877 len = sprintf(page,"\n\n"
2878 " cell processor:\n"
2879 " heartbeat state:\t\t");
2881 if (hb >> 16 != 0xDEAD)
2882 len += sprintf(page + len, "0x%08x\n", hb);
2884 len += sprintf(page + len, "*** FATAL ERROR %04x ***\n", hb & 0xFFFF);
2890 static const char* media_name[] = {
2891 "unshielded twisted pair",
2892 "multimode optical fiber ST",
2893 "multimode optical fiber SC",
2894 "single-mode optical fiber ST",
2895 "single-mode optical fiber SC",
2899 static const char* oc3_mode[] = {
2901 "diagnostic loopback",
2906 u32 fw_release = fore200e->bus->read(&fore200e->cp_queues->fw_release);
2907 u32 mon960_release = fore200e->bus->read(&fore200e->cp_queues->mon960_release);
2908 u32 oc3_revision = fore200e->bus->read(&fore200e->cp_queues->oc3_revision);
2909 u32 media_index = FORE200E_MEDIA_INDEX(fore200e->bus->read(&fore200e->cp_queues->media_type));
/* NOTE(review): media_index is u32, so the '< 0' test below is always
   false (unsigned comparison); only the '> 4' half of this range guard
   is live.  Upstream later changed this to a signed type — worth fixing. */
2912 if ((media_index < 0) || (media_index > 4))
2915 switch (fore200e->loop_mode) {
2916 case ATM_LM_NONE: oc3_index = 0;
2918 case ATM_LM_LOC_PHY: oc3_index = 1;
2920 case ATM_LM_RMT_PHY: oc3_index = 2;
2922 default: oc3_index = 3;
2925 return sprintf(page,
2926 " firmware release:\t\t%d.%d.%d\n"
2927 " monitor release:\t\t%d.%d\n"
2928 " media type:\t\t\t%s\n"
2929 " OC-3 revision:\t\t0x%x\n"
2930 " OC-3 mode:\t\t\t%s",
/* shift pairs extract the major/minor/patch byte fields of the release words */
2931 fw_release >> 16, fw_release << 16 >> 24, fw_release << 24 >> 24,
2932 mon960_release >> 16, mon960_release << 16 >> 16,
2933 media_name[ media_index ],
2935 oc3_mode[ oc3_index ]);
2939 struct cp_monitor __iomem * cp_monitor = fore200e->cp_monitor;
2941 return sprintf(page,
2944 " version number:\t\t%d\n"
2945 " boot status word:\t\t0x%08x\n",
2946 fore200e->bus->read(&cp_monitor->mon_version),
2947 fore200e->bus->read(&cp_monitor->bstat));
/* PHY layer statistics (values are big-endian on the card; fore200e_swap
   converts to host order) */
2951 return sprintf(page,
2953 " device statistics:\n"
2955 " crc_header_errors:\t\t%10u\n"
2956 " framing_errors:\t\t%10u\n",
2957 fore200e_swap(fore200e->stats->phy.crc_header_errors),
2958 fore200e_swap(fore200e->stats->phy.framing_errors));
/* SONET/OC-3 section, line and path error counters */
2961 return sprintf(page, "\n"
2963 " section_bip8_errors:\t%10u\n"
2964 " path_bip8_errors:\t\t%10u\n"
2965 " line_bip24_errors:\t\t%10u\n"
2966 " line_febe_errors:\t\t%10u\n"
2967 " path_febe_errors:\t\t%10u\n"
2968 " corr_hcs_errors:\t\t%10u\n"
2969 " ucorr_hcs_errors:\t\t%10u\n",
2970 fore200e_swap(fore200e->stats->oc3.section_bip8_errors),
2971 fore200e_swap(fore200e->stats->oc3.path_bip8_errors),
2972 fore200e_swap(fore200e->stats->oc3.line_bip24_errors),
2973 fore200e_swap(fore200e->stats->oc3.line_febe_errors),
2974 fore200e_swap(fore200e->stats->oc3.path_febe_errors),
2975 fore200e_swap(fore200e->stats->oc3.corr_hcs_errors),
2976 fore200e_swap(fore200e->stats->oc3.ucorr_hcs_errors));
/* ATM cell-level counters */
2979 return sprintf(page,"\n"
2980 " ATM:\t\t\t\t cells\n"
2983 " vpi out of range:\t\t%10u\n"
2984 " vpi no conn:\t\t%10u\n"
2985 " vci out of range:\t\t%10u\n"
2986 " vci no conn:\t\t%10u\n",
2987 fore200e_swap(fore200e->stats->atm.cells_transmitted),
2988 fore200e_swap(fore200e->stats->atm.cells_received),
2989 fore200e_swap(fore200e->stats->atm.vpi_bad_range),
2990 fore200e_swap(fore200e->stats->atm.vpi_no_conn),
2991 fore200e_swap(fore200e->stats->atm.vci_bad_range),
2992 fore200e_swap(fore200e->stats->atm.vci_no_conn));
/* AAL0 counters */
2995 return sprintf(page,"\n"
2996 " AAL0:\t\t\t cells\n"
2999 " dropped:\t\t\t%10u\n",
3000 fore200e_swap(fore200e->stats->aal0.cells_transmitted),
3001 fore200e_swap(fore200e->stats->aal0.cells_received),
3002 fore200e_swap(fore200e->stats->aal0.cells_dropped));
/* AAL3/4 SAR and CS sublayer counters */
3005 return sprintf(page,"\n"
3007 " SAR sublayer:\t\t cells\n"
3010 " dropped:\t\t\t%10u\n"
3011 " CRC errors:\t\t%10u\n"
3012 " protocol errors:\t\t%10u\n\n"
3013 " CS sublayer:\t\t PDUs\n"
3016 " dropped:\t\t\t%10u\n"
3017 " protocol errors:\t\t%10u\n",
3018 fore200e_swap(fore200e->stats->aal34.cells_transmitted),
3019 fore200e_swap(fore200e->stats->aal34.cells_received),
3020 fore200e_swap(fore200e->stats->aal34.cells_dropped),
3021 fore200e_swap(fore200e->stats->aal34.cells_crc_errors),
3022 fore200e_swap(fore200e->stats->aal34.cells_protocol_errors),
3023 fore200e_swap(fore200e->stats->aal34.cspdus_transmitted),
3024 fore200e_swap(fore200e->stats->aal34.cspdus_received),
3025 fore200e_swap(fore200e->stats->aal34.cspdus_dropped),
3026 fore200e_swap(fore200e->stats->aal34.cspdus_protocol_errors));
/* AAL5 SAR and CS sublayer counters */
3029 return sprintf(page,"\n"
3031 " SAR sublayer:\t\t cells\n"
3034 " dropped:\t\t\t%10u\n"
3035 " congestions:\t\t%10u\n\n"
3036 " CS sublayer:\t\t PDUs\n"
3039 " dropped:\t\t\t%10u\n"
3040 " CRC errors:\t\t%10u\n"
3041 " protocol errors:\t\t%10u\n",
3042 fore200e_swap(fore200e->stats->aal5.cells_transmitted),
3043 fore200e_swap(fore200e->stats->aal5.cells_received),
3044 fore200e_swap(fore200e->stats->aal5.cells_dropped),
3045 fore200e_swap(fore200e->stats->aal5.congestion_experienced),
3046 fore200e_swap(fore200e->stats->aal5.cspdus_transmitted),
3047 fore200e_swap(fore200e->stats->aal5.cspdus_received),
3048 fore200e_swap(fore200e->stats->aal5.cspdus_dropped),
3049 fore200e_swap(fore200e->stats->aal5.cspdus_crc_errors),
3050 fore200e_swap(fore200e->stats->aal5.cspdus_protocol_errors));
/* auxiliary counters: host-side allocation failures */
3053 return sprintf(page,"\n"
3054 " AUX:\t\t allocation failures\n"
3055 " small b1:\t\t\t%10u\n"
3056 " large b1:\t\t\t%10u\n"
3057 " small b2:\t\t\t%10u\n"
3058 " large b2:\t\t\t%10u\n"
3059 " RX PDUs:\t\t\t%10u\n"
3060 " TX PDUs:\t\t\t%10lu\n",
3061 fore200e_swap(fore200e->stats->aux.small_b1_failed),
3062 fore200e_swap(fore200e->stats->aux.large_b1_failed),
3063 fore200e_swap(fore200e->stats->aux.small_b2_failed),
3064 fore200e_swap(fore200e->stats->aux.large_b2_failed),
3065 fore200e_swap(fore200e->stats->aux.rpd_alloc_failed),
3069 return sprintf(page,"\n"
3070 " receive carrier:\t\t\t%s\n",
3071 fore200e->stats->aux.receive_carrier ? "ON" : "OFF!");
/* per-VCC table header, then one row per ready connection */
3074 return sprintf(page,"\n"
3075 " VCCs:\n address VPI VCI AAL "
3076 "TX PDUs TX min/max size RX PDUs RX min/max size\n");
3079 for (i = 0; i < NBR_CONNECT; i++) {
3081 vcc = fore200e->vc_map[i].vcc;
/* q_lock protects the vc_map entry while we read the VCC state */
3086 spin_lock_irqsave(&fore200e->q_lock, flags);
/* '!left--' implements /proc paging: skip rows already returned on
   earlier calls and emit exactly one row per invocation */
3088 if (vcc && test_bit(ATM_VF_READY, &vcc->flags) && !left--) {
3090 fore200e_vcc = FORE200E_VCC(vcc);
3091 ASSERT(fore200e_vcc);
3094 " %08x %03d %05d %1d %09lu %05d/%05d %09lu %05d/%05d\n",
3095 (u32)(unsigned long)vcc,
3096 vcc->vpi, vcc->vci, fore200e_atm2fore_aal(vcc->qos.aal),
3097 fore200e_vcc->tx_pdu,
/* min PDU sizes start above 0xFFFF as "not seen yet"; print 0 then */
3098 fore200e_vcc->tx_min_pdu > 0xFFFF ? 0 : fore200e_vcc->tx_min_pdu,
3099 fore200e_vcc->tx_max_pdu,
3100 fore200e_vcc->rx_pdu,
3101 fore200e_vcc->rx_min_pdu > 0xFFFF ? 0 : fore200e_vcc->rx_min_pdu,
3102 fore200e_vcc->rx_max_pdu);
3104 spin_unlock_irqrestore(&fore200e->q_lock, flags);
3108 spin_unlock_irqrestore(&fore200e->q_lock, flags);
/* wire the module entry/exit points into the kernel module loader */
3114 module_init(fore200e_module_init);
3115 module_exit(fore200e_module_cleanup);
/* ATM device operations table handed to the ATM core by
 * atm_dev_register() in fore200e_register(). */
3118 static const struct atmdev_ops fore200e_ops =
3120 .open = fore200e_open,
3121 .close = fore200e_close,
3122 .ioctl = fore200e_ioctl,
3123 .getsockopt = fore200e_getsockopt,
3124 .setsockopt = fore200e_setsockopt,
3125 .send = fore200e_send,
3126 .change_qos = fore200e_change_qos,
3127 .proc_read = fore200e_proc_read,
3128 .owner = THIS_MODULE
/* firmware images are linked into the module as raw byte arrays,
 * one per supported bus type */
3132 #ifdef CONFIG_ATM_FORE200E_PCA
3133 extern const unsigned char _fore200e_pca_fw_data[];
3134 extern const unsigned int _fore200e_pca_fw_size;
3136 #ifdef CONFIG_ATM_FORE200E_SBA
3137 extern const unsigned char _fore200e_sba_fw_data[];
3138 extern const unsigned int _fore200e_sba_fw_size;
/* Table of supported bus back-ends, terminated by a NULL model_name
 * (tested in fore200e_module_init).  Each entry uses positional
 * initialization of struct fore200e_bus: model/proc names, a few
 * numeric bus parameters, the linked-in firmware image, and the set of
 * bus-specific operations (DMA helpers, detect/configure, PROM read,
 * IRQ handling, proc output).  Several slots are elided in this view. */
3141 static const struct fore200e_bus fore200e_bus[] = {
3142 #ifdef CONFIG_ATM_FORE200E_PCA
/* PCA-200E: the PCI variant of the board */
3143 { "PCA-200E", "pca200e", 32, 4, 32,
3144 _fore200e_pca_fw_data, &_fore200e_pca_fw_size,
3147 fore200e_pca_dma_map,
3148 fore200e_pca_dma_unmap,
3149 fore200e_pca_dma_sync_for_cpu,
3150 fore200e_pca_dma_sync_for_device,
3151 fore200e_pca_dma_chunk_alloc,
3152 fore200e_pca_dma_chunk_free,
3153 fore200e_pca_detect,
3154 fore200e_pca_configure,
3157 fore200e_pca_prom_read,
3160 fore200e_pca_irq_check,
3161 fore200e_pca_irq_ack,
3162 fore200e_pca_proc_read,
3165 #ifdef CONFIG_ATM_FORE200E_SBA
/* SBA-200E: the SBus (sparc) variant of the board */
3166 { "SBA-200E", "sba200e", 32, 64, 32,
3167 _fore200e_sba_fw_data, &_fore200e_sba_fw_size,
3170 fore200e_sba_dma_map,
3171 fore200e_sba_dma_unmap,
3172 fore200e_sba_dma_sync_for_cpu,
3173 fore200e_sba_dma_sync_for_device,
3174 fore200e_sba_dma_chunk_alloc,
3175 fore200e_sba_dma_chunk_free,
3176 fore200e_sba_detect,
3177 fore200e_sba_configure,
3180 fore200e_sba_prom_read,
3182 fore200e_sba_irq_enable,
3183 fore200e_sba_irq_check,
3184 fore200e_sba_irq_ack,
3185 fore200e_sba_proc_read,
/* guard for very old kernels that predate the MODULE_LICENSE macro */
3191 #ifdef MODULE_LICENSE
3192 MODULE_LICENSE("GPL");