1 /* lasi_82596.c -- driver for the intel 82596 ethernet controller, as
2 munged into HPPA boxen.
4 This driver is based upon 82596.c, original credits are below...
5 but there were too many hoops which HP wants jumped through to
6 keep this code in there in a sane manner.
8 3 primary sources of the mess --
9 1) hppa needs *lots* of cacheline flushing to keep this kind of
12 2) The 82596 needs to see all of its pointers as their physical
13 address. Thus virt_to_bus/bus_to_virt are *everywhere*.
15 3) The implementation HP is using seems to be significantly pickier
16 about when and how the command and RX units are started. some
17 command ordering was changed.
19 Examination of the mach driver leads one to believe that there
20 might be a saner way to pull this off... anyone who feels like a
21 full rewrite can be my guest.
23 Split 02/13/2000 Sam Creasey (sammy@oh.verio.com)
25 02/01/2000 Initial modifications for parisc by Helge Deller (deller@gmx.de)
26 03/02/2000 changes for better/correct(?) cache-flushing (deller)
29 /* 82596.c: A generic 82596 ethernet driver for linux. */
32 Written 1994 by Mark Evans.
33 This driver is for the Apricot 82596 bus-master interface
35 Modularised 12/94 Mark Evans
38 Modified to support the 82596 ethernet chips on 680x0 VME boards.
39 by Richard Hirst <richard@sleepie.demon.co.uk>
42 980825: Changed to receive directly in to sk_buffs which are
43 allocated at open() time. Eliminates copy on incoming frames
44 (small ones are still copied). Shared data now held in a
45 non-cached page, so we can run on 68060 in copyback mode.
48 * look at deferring rx frames rather than discarding (as per tulip)
49 * handle tx ring full as per tulip
50 * performance test to tune rx_copybreak
52 Most of my modifications relate to the braindead big-endian
53 implementation by Intel. When the i596 is operating in
54 'big-endian' mode, it thinks a 32 bit value of 0x12345678
55 should be stored as 0x56781234. This is a real pain, when
56 you have linked lists which are shared by the 680x0 and the
60 Written 1993 by Donald Becker.
61 Copyright 1993 United States Government as represented by the Director,
62 National Security Agency. This software may only be used and distributed
63 according to the terms of the GNU General Public License as modified by SRC,
64 incorporated herein by reference.
66 The author may be reached as becker@scyld.com, or C/O
67 Scyld Computing Corporation, 410 Severn Ave., Suite 210, Annapolis MD 21403
71 #include <linux/module.h>
72 #include <linux/kernel.h>
73 #include <linux/string.h>
74 #include <linux/ptrace.h>
75 #include <linux/errno.h>
76 #include <linux/ioport.h>
77 #include <linux/slab.h>
78 #include <linux/interrupt.h>
79 #include <linux/delay.h>
80 #include <linux/netdevice.h>
81 #include <linux/etherdevice.h>
82 #include <linux/skbuff.h>
83 #include <linux/init.h>
84 #include <linux/pci.h>
85 #include <linux/types.h>
87 #include <asm/bitops.h>
89 #include <asm/pgtable.h>
90 #include <asm/pgalloc.h>
93 #include <asm/cache.h>
94 #include <asm/parisc-device.h>
96 static char version[] __devinitdata =
97 "82596.c $Revision: 1.29 $\n";
102 #define DEB_INIT 0x0001
103 #define DEB_PROBE 0x0002
104 #define DEB_SERIOUS 0x0004
105 #define DEB_ERRORS 0x0008
106 #define DEB_MULTI 0x0010
107 #define DEB_TDR 0x0020
108 #define DEB_OPEN 0x0040
109 #define DEB_RESET 0x0080
110 #define DEB_ADDCMD 0x0100
111 #define DEB_STATUS 0x0200
112 #define DEB_STARTTX 0x0400
113 #define DEB_RXADDR 0x0800
114 #define DEB_TXADDR 0x1000
115 #define DEB_RXFRAME 0x2000
116 #define DEB_INTS 0x4000
117 #define DEB_STRUCT 0x8000
118 #define DEB_ANY 0xffff
121 #define DEB(x,y) if (i596_debug & (x)) { y; }
124 #define CHECK_WBACK(addr,len) \
125 do { dma_cache_sync((void *)addr, len, DMA_TO_DEVICE); } while (0)
127 #define CHECK_INV(addr,len) \
128 do { dma_cache_sync((void *)addr,len, DMA_FROM_DEVICE); } while(0)
130 #define CHECK_WBACK_INV(addr,len) \
131 do { dma_cache_sync((void *)addr,len, DMA_BIDIRECTIONAL); } while (0)
134 #define PA_I82596_RESET 0 /* Offsets relative to LASI-LAN-Addr.*/
135 #define PA_CPU_PORT_L_ACCESS 4
136 #define PA_CHANNEL_ATTENTION 8
140 * Define various macros for Channel Attention, word swapping etc., dependent
141 * on architecture. MVME and BVME are 680x0 based, otherwise it is Intel.
145 #define WSWAPrfd(x) (((u32)(x)<<16) | ((((u32)(x)))>>16))
146 #define WSWAPrbd(x) (((u32)(x)<<16) | ((((u32)(x)))>>16))
147 #define WSWAPiscp(x) (((u32)(x)<<16) | ((((u32)(x)))>>16))
148 #define WSWAPscb(x) (((u32)(x)<<16) | ((((u32)(x)))>>16))
149 #define WSWAPcmd(x) (((u32)(x)<<16) | ((((u32)(x)))>>16))
150 #define WSWAPtbd(x) (((u32)(x)<<16) | ((((u32)(x)))>>16))
151 #define WSWAPchar(x) (((u32)(x)<<16) | ((((u32)(x)))>>16))
152 #define ISCP_BUSY 0x00010000
153 #define MACH_IS_APRICOT 0
155 #define WSWAPrfd(x) ((struct i596_rfd *)(x))
156 #define WSWAPrbd(x) ((struct i596_rbd *)(x))
157 #define WSWAPiscp(x) ((struct i596_iscp *)(x))
158 #define WSWAPscb(x) ((struct i596_scb *)(x))
159 #define WSWAPcmd(x) ((struct i596_cmd *)(x))
160 #define WSWAPtbd(x) ((struct i596_tbd *)(x))
161 #define WSWAPchar(x) ((char *)(x))
162 #define ISCP_BUSY 0x0001
163 #define MACH_IS_APRICOT 1
167 * The MPU_PORT command allows direct access to the 82596. With PORT access
168 * the following commands are available (p5-18). The 32-bit port command
169 * must be word-swapped with the most significant word written first.
170 * This only applies to VME boards.
172 #define PORT_RESET 0x00 /* reset 82596 */
173 #define PORT_SELFTEST 0x01 /* selftest */
174 #define PORT_ALTSCP 0x02 /* alternate SCB address */
175 #define PORT_ALTDUMP 0x03 /* Alternate DUMP address */
177 static int i596_debug = (DEB_SERIOUS|DEB_PROBE);
179 MODULE_AUTHOR("Richard Hirst");
180 MODULE_DESCRIPTION("i82596 driver");
181 MODULE_LICENSE("GPL");
182 MODULE_PARM(i596_debug, "i");
183 MODULE_PARM_DESC(i596_debug, "lasi_82596 debug mask");
185 /* Copy frames shorter than rx_copybreak, otherwise pass on up in
186 * a full sized sk_buff. Value of 100 stolen from tulip.c (!alpha).
188 static int rx_copybreak = 100;
190 #define MAX_DRIVERS 4 /* max count of drivers */
192 #define PKT_BUF_SZ 1536
193 #define MAX_MC_CNT 64
195 #define I596_NULL ((u32)0xffffffff)
197 #define CMD_EOL 0x8000 /* The last command of the list, stop. */
198 #define CMD_SUSP 0x4000 /* Suspend after doing cmd. */
199 #define CMD_INTR 0x2000 /* Interrupt after doing cmd. */
201 #define CMD_FLEX 0x0008 /* Enable flexible memory model */
204 CmdNOp = 0, CmdSASetup = 1, CmdConfigure = 2, CmdMulticastList = 3,
205 CmdTx = 4, CmdTDR = 5, CmdDump = 6, CmdDiagnose = 7
208 #define STAT_C 0x8000 /* Set to 0 after execution */
209 #define STAT_B 0x4000 /* Command being executed */
210 #define STAT_OK 0x2000 /* Command executed ok */
211 #define STAT_A 0x1000 /* Command aborted */
213 #define CUC_START 0x0100
214 #define CUC_RESUME 0x0200
215 #define CUC_SUSPEND 0x0300
216 #define CUC_ABORT 0x0400
217 #define RX_START 0x0010
218 #define RX_RESUME 0x0020
219 #define RX_SUSPEND 0x0030
220 #define RX_ABORT 0x0040
224 #define OPT_SWAP_PORT 0x0001 /* Need to wordswp on the MPU port */
228 unsigned short porthi;
229 unsigned short portlo;
234 #define SIZE_MASK 0x3fff
241 u32 cache_pad[5]; /* Total 32 bytes... */
244 /* The command structure has two 'next' pointers; v_next is the address of
245 * the next command as seen by the CPU, b_next is the address of the next
246 * command as seen by the 82596. The b_next pointer, as used by the 82596
247 * always references the status field of the next command, rather than the
248 * v_next field, because the 82596 is unaware of v_next. It may seem more
249 * logical to put v_next at the end of the structure, but we cannot do that
250 * because the 82596 expects other fields to be there, depending on command
255 struct i596_cmd *v_next; /* Address from CPUs viewpoint */
256 unsigned short status;
257 unsigned short command;
258 dma_addr_t b_next; /* Address from i596 viewpoint */
266 struct sk_buff *skb; /* So we can free it after tx */
269 u32 cache_pad[6]; /* Total 64 bytes... */
271 u32 cache_pad[1]; /* Total 32 bytes... */
277 unsigned short status;
284 char mc_addrs[MAX_MC_CNT*6];
294 char i596_config[16];
300 dma_addr_t b_next; /* Address from i596 viewpoint */
302 unsigned short count;
304 struct i596_rfd *v_next; /* Address from CPUs viewpoint */
305 struct i596_rfd *v_prev;
307 u32 cache_pad[2]; /* Total 32 bytes... */
313 unsigned short count;
314 unsigned short zero1;
316 dma_addr_t b_data; /* Address from i596 viewpoint */
318 unsigned short zero2;
321 struct i596_rbd *v_next;
322 dma_addr_t b_addr; /* This rbd addr from i596 view */
323 unsigned char *v_data; /* Address from CPUs viewpoint */
324 /* Total 32 bytes... */
330 /* These values as chosen so struct i596_private fits in one page... */
332 #define TX_RING_SIZE 32
333 #define RX_RING_SIZE 16
336 unsigned short status;
337 unsigned short command;
347 unsigned short t_off;
361 struct i596_private {
362 volatile struct i596_scp scp __attribute__((aligned(32)));
363 volatile struct i596_iscp iscp __attribute__((aligned(32)));
364 volatile struct i596_scb scb __attribute__((aligned(32)));
365 struct sa_cmd sa_cmd __attribute__((aligned(32)));
366 struct cf_cmd cf_cmd __attribute__((aligned(32)));
367 struct tdr_cmd tdr_cmd __attribute__((aligned(32)));
368 struct mc_cmd mc_cmd __attribute__((aligned(32)));
369 struct i596_rfd rfds[RX_RING_SIZE] __attribute__((aligned(32)));
370 struct i596_rbd rbds[RX_RING_SIZE] __attribute__((aligned(32)));
371 struct tx_cmd tx_cmds[TX_RING_SIZE] __attribute__((aligned(32)));
372 struct i596_tbd tbds[TX_RING_SIZE] __attribute__((aligned(32)));
375 struct i596_rfd *rfd_head;
376 struct i596_rbd *rbd_head;
377 struct i596_cmd *cmd_tail;
378 struct i596_cmd *cmd_head;
381 struct net_device_stats stats;
389 static char init_setup[] =
391 0x8E, /* length, prefetch on */
392 0xC8, /* fifo to 8, monitor off */
393 0x80, /* don't save bad frames */
394 0x2E, /* No source address insertion, 8 byte preamble */
395 0x00, /* priority and backoff defaults */
396 0x60, /* interframe spacing */
397 0x00, /* slot time LSB */
398 0xf2, /* slot time and retries */
399 0x00, /* promiscuous mode */
400 0x00, /* collision detect */
401 0x40, /* minimum frame length */
404 0x7f /* *multi IA */ };
406 static int i596_open(struct net_device *dev);
407 static int i596_start_xmit(struct sk_buff *skb, struct net_device *dev);
408 static irqreturn_t i596_interrupt(int irq, void *dev_id, struct pt_regs *regs);
409 static int i596_close(struct net_device *dev);
410 static struct net_device_stats *i596_get_stats(struct net_device *dev);
411 static void i596_add_cmd(struct net_device *dev, struct i596_cmd *cmd);
412 static void i596_tx_timeout (struct net_device *dev);
413 static void print_eth(unsigned char *buf, char *str);
414 static void set_multicast_list(struct net_device *dev);
416 static int rx_ring_size = RX_RING_SIZE;
417 static int ticks_limit = 100;
418 static int max_cmd_backlog = TX_RING_SIZE-1;
/* Issue a Channel Attention to the 82596: a write to the LASI CA
 * register (any value; 0 is used here) tells the chip to go look at
 * the SCB for new work. */
421 static inline void CA(struct net_device *dev)
423 gsc_writel(0, dev->base_addr + PA_CHANNEL_ATTENTION);
/* Issue a PORT command (PORT_RESET/PORT_SELFTEST/PORT_ALTSCP/...) to
 * the 82596 via the LASI CPU port register.  The opcode c is OR'd into
 * the (16-byte-aligned) dma address x; when OPT_SWAP_PORT is set the
 * two halves of the 32-bit value are word-swapped before being written
 * (see the comment above the PORT_* defines).  The value is delivered
 * as two consecutive writes to PA_CPU_PORT_L_ACCESS.
 * NOTE(review): parts of this routine (the a/b derivation) are not
 * visible in this listing — confirm swap details against the chip manual. */
427 static inline void MPU_PORT(struct net_device *dev, int c, dma_addr_t x)
429 struct i596_private *lp = dev->priv;
431 u32 v = (u32) (c) | (u32) (x);
434 if (lp->options & OPT_SWAP_PORT) {
442 gsc_writel(a, dev->base_addr + PA_CPU_PORT_L_ACCESS);
444 gsc_writel(b, dev->base_addr + PA_CPU_PORT_L_ACCESS);
/* Busy-wait (up to delcnt iterations) for the chip to clear iscp.stat,
 * i.e. for ISCP_BUSY to drop after initialization.  The cache is
 * invalidated before each read so CPU sees the device's DMA writes.
 * On timeout the message str is logged.
 * NOTE(review): return statements are not visible here; callers treat
 * a nonzero return as "timed out" (see init_i596_mem) — confirm. */
448 static inline int wait_istat(struct net_device *dev, struct i596_private *lp, int delcnt, char *str)
450 CHECK_INV(&(lp->iscp), sizeof(struct i596_iscp));
451 while (--delcnt && lp->iscp.stat) {
453 CHECK_INV(&(lp->iscp), sizeof(struct i596_iscp));
/* Timed out: report which caller-supplied operation failed. */
456 printk("%s: %s, iscp.stat %04x, didn't clear\n",
457 dev->name, str, lp->iscp.stat);
/* Busy-wait (up to delcnt iterations) for the 82596 to accept the
 * previous SCB command, i.e. for scb.command to read back as zero.
 * Cache is invalidated before each poll so the device write is seen.
 * Logs str with the stuck status/command on timeout.
 * NOTE(review): return statements are not visible here; callers treat
 * a nonzero return as failure — confirm. */
465 static inline int wait_cmd(struct net_device *dev, struct i596_private *lp, int delcnt, char *str)
467 CHECK_INV(&(lp->scb), sizeof(struct i596_scb));
468 while (--delcnt && lp->scb.command) {
470 CHECK_INV(&(lp->scb), sizeof(struct i596_scb));
/* Timed out waiting for the command field to clear. */
473 printk("%s: %s, status %4.4x, cmd %4.4x.\n",
474 dev->name, str, lp->scb.status, lp->scb.command);
/* Debug helper: dump the state of every shared structure (SCP, ISCP,
 * SCB, the pending command chain, and the RFD/RBD receive rings) to
 * the kernel log.  Purely diagnostic — no state is modified; the final
 * CHECK_INV just re-invalidates the private area after reading it. */
482 static void i596_display_data(struct net_device *dev)
484 struct i596_private *lp = dev->priv;
485 struct i596_cmd *cmd;
486 struct i596_rfd *rfd;
487 struct i596_rbd *rbd;
489 printk("lp and scp at %p, .sysbus = %08x, .iscp = %08x\n",
490 &lp->scp, lp->scp.sysbus, lp->scp.iscp);
491 printk("iscp at %p, iscp.stat = %08x, .scb = %08x\n",
492 &lp->iscp, lp->iscp.stat, lp->iscp.scb);
493 printk("scb at %p, scb.status = %04x, .command = %04x,"
494 " .cmd = %08x, .rfd = %08x\n",
495 &lp->scb, lp->scb.status, lp->scb.command,
496 lp->scb.cmd, lp->scb.rfd);
497 printk(" errors: crc %x, align %x, resource %x,"
498 " over %x, rcvdt %x, short %x\n",
499 lp->scb.crc_err, lp->scb.align_err, lp->scb.resource_err,
500 lp->scb.over_err, lp->scb.rcvdt_err, lp->scb.short_err);
/* Walk the CPU-side command list (v_next chain). */
502 while (cmd != NULL) {
503 printk("cmd at %p, .status = %04x, .command = %04x, .b_next = %08x\n",
504 cmd, cmd->status, cmd->command, cmd->b_next);
/* Walk the circular receive-frame-descriptor ring once. */
508 printk("rfd_head = %p\n", rfd);
510 printk (" %p .stat %04x, .cmd %04x, b_next %08x, rbd %08x,"
512 rfd, rfd->stat, rfd->cmd, rfd->b_next, rfd->rbd,
515 } while (rfd != lp->rfd_head);
/* Walk the circular receive-buffer-descriptor ring once. */
517 printk("rbd_head = %p\n", rbd);
519 printk(" %p .count %04x, b_next %08x, b_data %08x, size %04x\n",
520 rbd, rbd->count, rbd->b_next, rbd->b_data, rbd->size);
522 } while (rbd != lp->rbd_head);
523 CHECK_INV(lp, sizeof(struct i596_private));
/* 680x0 VME-board-only error interrupt handler: logs the error and
 * dumps all shared-memory state.  The hard-coded pcc2 pointer is the
 * MVME16x PCCchip2 register base (memory-mapped I/O). */
527 #if defined(ENABLE_MVME16x_NET) || defined(ENABLE_BVME6000_NET)
528 static void i596_error(int irq, void *dev_id, struct pt_regs *regs)
530 struct net_device *dev = dev_id;
531 volatile unsigned char *pcc2 = (unsigned char *) 0xfff42000;
535 printk("%s: Error interrupt\n", dev->name);
536 i596_display_data(dev);
/* Translate a CPU virtual address v that lies inside the i596_private
 * block into the bus/DMA address the 82596 must use, by offsetting
 * from the block's base DMA address (lp->dma_addr from
 * dma_alloc_noncoherent in i82596_probe). */
540 #define virt_to_dma(lp,v) ((lp)->dma_addr + (dma_addr_t)((unsigned long)(v)-(unsigned long)(lp)))
/* Build the receive rings at open() time: allocate one sk_buff per
 * receive buffer descriptor, DMA-map it, and chain the RBDs and RFDs
 * into circular lists (both CPU-side v_next and chip-side b_next
 * pointers).  The final RFD is marked CMD_EOL.  Panics on skb
 * allocation failure (boot-time-only policy inherited from 82596.c). */
542 static inline void init_rx_bufs(struct net_device *dev)
544 struct i596_private *lp = dev->priv;
546 struct i596_rfd *rfd;
547 struct i596_rbd *rbd;
549 /* First build the Receive Buffer Descriptor List */
551 for (i = 0, rbd = lp->rbds; i < rx_ring_size; i++, rbd++) {
/* +4 gives headroom so the IP header can be 16-byte aligned. */
553 struct sk_buff *skb = dev_alloc_skb(PKT_BUF_SZ + 4);
556 panic("82596: alloc_skb() failed");
558 dma_addr = dma_map_single(lp->dev, skb->tail,PKT_BUF_SZ,
/* Chip-visible pointers must be bus addresses (word-swapped on VME). */
562 rbd->b_next = WSWAPrbd(virt_to_dma(lp,rbd+1));
563 rbd->b_addr = WSWAPrbd(virt_to_dma(lp,rbd));
565 rbd->v_data = skb->tail;
566 rbd->b_data = WSWAPchar(dma_addr);
567 rbd->size = PKT_BUF_SZ;
/* Close the RBD ring: last descriptor points back to the first. */
569 lp->rbd_head = lp->rbds;
570 rbd = lp->rbds + rx_ring_size - 1;
571 rbd->v_next = lp->rbds;
572 rbd->b_next = WSWAPrbd(virt_to_dma(lp,lp->rbds));
574 /* Now build the Receive Frame Descriptor List */
576 for (i = 0, rfd = lp->rfds; i < rx_ring_size; i++, rfd++) {
577 rfd->rbd = I596_NULL;
580 rfd->b_next = WSWAPrfd(virt_to_dma(lp,rfd+1));
/* Only the first RFD carries the RBD chain pointer; close the ring
 * and mark the last RFD as end-of-list. */
583 lp->rfd_head = lp->rfds;
584 lp->scb.rfd = WSWAPrfd(virt_to_dma(lp,lp->rfds));
586 rfd->rbd = WSWAPrbd(virt_to_dma(lp,lp->rbd_head));
587 rfd->v_prev = lp->rfds + rx_ring_size - 1;
588 rfd = lp->rfds + rx_ring_size - 1;
589 rfd->v_next = lp->rfds;
590 rfd->b_next = WSWAPrfd(virt_to_dma(lp,lp->rfds));
591 rfd->cmd = CMD_EOL|CMD_FLEX;
/* Flush everything so the 82596 sees the freshly built rings. */
593 CHECK_WBACK_INV(lp, sizeof(struct i596_private));
/* Tear down the receive buffers built by init_rx_bufs(): for every
 * RBD that holds an skb, undo the DMA mapping and free the skb.
 * Called on close and on failed open. */
596 static inline void remove_rx_bufs(struct net_device *dev)
598 struct i596_private *lp = dev->priv;
599 struct i596_rbd *rbd;
602 for (i = 0, rbd = lp->rbds; i < rx_ring_size; i++, rbd++) {
/* Slots without an skb were never populated — nothing to free. */
603 if (rbd->skb == NULL)
/* b_data was stored WSWAPped; unswap to recover the dma handle. */
605 dma_unmap_single(lp->dev,
606 (dma_addr_t)WSWAPchar(rbd->b_data),
607 PKT_BUF_SZ, DMA_FROM_DEVICE);
608 dev_kfree_skb(rbd->skb);
/* Reset the RFD/RBD ring bookkeeping to its pristine state without
 * touching the skbs themselves: clear every RFD's rbd pointer, re-mark
 * the last RFD as end-of-list, and point RFD 0 back at the start of
 * the RBD ring.  Used after chip (re)initialization and RX restarts. */
613 static void rebuild_rx_bufs(struct net_device *dev)
615 struct i596_private *lp = dev->priv;
618 /* Ensure rx frame/buffer descriptors are tidy */
620 for (i = 0; i < rx_ring_size; i++) {
621 lp->rfds[i].rbd = I596_NULL;
622 lp->rfds[i].cmd = CMD_FLEX;
624 lp->rfds[rx_ring_size-1].cmd = CMD_EOL|CMD_FLEX;
625 lp->rfd_head = lp->rfds;
626 lp->scb.rfd = WSWAPrfd(virt_to_dma(lp,lp->rfds));
627 lp->rbd_head = lp->rbds;
628 lp->rfds[0].rbd = WSWAPrbd(virt_to_dma(lp,lp->rbds));
/* Flush so the device sees the rebuilt descriptors. */
630 CHECK_WBACK_INV(lp, sizeof(struct i596_private));
/* Bring the 82596 out of reset and into a running state:
 *  1. hard-reset via the LASI reset register (IRQ disabled meanwhile);
 *  2. point the chip at our SCP/ISCP/SCB via PORT_ALTSCP + CA and wait
 *     for ISCP_BUSY to clear;
 *  3. tidy the receive rings and queue Configure, SASetup and TDR
 *     commands;
 *  4. issue RX_START under the lock and confirm it was accepted.
 * Returns 0 on success; on any timeout it falls through to the failure
 * path which resets the chip via PORT_RESET.
 * NOTE(review): cleanup/return lines are partly missing from this
 * listing — confirm the error-path ordering against the full source. */
634 static int init_i596_mem(struct net_device *dev)
636 struct i596_private *lp = dev->priv;
639 disable_irq(dev->irq); /* disable IRQs from LAN */
641 printk("RESET 82596 port: %08lX (with IRQ%d disabled)\n",
642 dev->base_addr + PA_I82596_RESET,
645 gsc_writel(0, (void*)(dev->base_addr + PA_I82596_RESET)); /* Hard Reset */
646 udelay(100); /* Wait 100us - seems to help */
648 /* change the scp address */
650 lp->last_cmd = jiffies;
/* sysbus 0x6c selects the bus-interface mode used on these boxes. */
653 lp->scp.sysbus = 0x0000006c;
654 lp->scp.iscp = WSWAPiscp(virt_to_dma(lp,&(lp->iscp)));
655 lp->iscp.scb = WSWAPscb(virt_to_dma(lp,&(lp->scb)));
/* Chip clears ISCP_BUSY once it has read the ISCP. */
656 lp->iscp.stat = ISCP_BUSY;
660 lp->scb.cmd = I596_NULL;
662 DEB(DEB_INIT,printk("%s: starting i82596.\n", dev->name));
/* Flush SCP/ISCP to memory before telling the chip where they are. */
664 CHECK_WBACK(&(lp->scp), sizeof(struct i596_scp));
665 CHECK_WBACK(&(lp->iscp), sizeof(struct i596_iscp));
667 MPU_PORT(dev, PORT_ALTSCP, virt_to_dma(lp,&lp->scp));
671 if (wait_istat(dev,lp,1000,"initialization timed out"))
673 DEB(DEB_INIT,printk("%s: i82596 initialization successful\n", dev->name));
675 /* Ensure rx frame/buffer descriptors are tidy */
676 rebuild_rx_bufs(dev);
679 CHECK_WBACK(&(lp->scb), sizeof(struct i596_scb));
681 enable_irq(dev->irq); /* enable IRQs from LAN */
/* Queue the three standard bring-up commands via the CU. */
683 DEB(DEB_INIT,printk("%s: queuing CmdConfigure\n", dev->name));
684 memcpy(lp->cf_cmd.i596_config, init_setup, 14);
685 lp->cf_cmd.cmd.command = CmdConfigure;
686 CHECK_WBACK(&(lp->cf_cmd), sizeof(struct cf_cmd));
687 i596_add_cmd(dev, &lp->cf_cmd.cmd);
689 DEB(DEB_INIT,printk("%s: queuing CmdSASetup\n", dev->name));
690 memcpy(lp->sa_cmd.eth_addr, dev->dev_addr, 6);
691 lp->sa_cmd.cmd.command = CmdSASetup;
692 CHECK_WBACK(&(lp->sa_cmd), sizeof(struct sa_cmd));
693 i596_add_cmd(dev, &lp->sa_cmd.cmd);
695 DEB(DEB_INIT,printk("%s: queuing CmdTDR\n", dev->name));
696 lp->tdr_cmd.cmd.command = CmdTDR;
697 CHECK_WBACK(&(lp->tdr_cmd), sizeof(struct tdr_cmd));
698 i596_add_cmd(dev, &lp->tdr_cmd.cmd);
/* Start the receive unit; SCB accesses must hold the lock. */
700 spin_lock_irqsave (&lp->lock, flags);
702 if (wait_cmd(dev,lp,1000,"timed out waiting to issue RX_START")) {
703 spin_unlock_irqrestore (&lp->lock, flags);
706 DEB(DEB_INIT,printk("%s: Issuing RX_START\n", dev->name));
707 lp->scb.command = RX_START;
708 lp->scb.rfd = WSWAPrfd(virt_to_dma(lp,lp->rfds));
709 CHECK_WBACK(&(lp->scb), sizeof(struct i596_scb));
713 spin_unlock_irqrestore (&lp->lock, flags);
715 if (wait_cmd(dev,lp,1000,"RX_START not processed"))
717 DEB(DEB_INIT,printk("%s: Receive unit started OK\n", dev->name));
/* Failure path: log and put the chip back into reset. */
722 printk("%s: Failed to initialise 82596\n", dev->name);
723 MPU_PORT(dev, PORT_RESET, 0);
/* Receive path: walk the RFD ring from rfd_head, processing every
 * frame the chip has marked complete (STAT_C).  Good frames longer
 * than rx_copybreak are handed up in their ring skb (replaced by a
 * fresh one); short frames are copied into a right-sized skb so the
 * big ring buffer can be reused immediately.  Descriptors are recycled
 * in place and the end-of-list marker advanced.  Returns the number of
 * frames processed (return line not visible in this listing — confirm). */
728 static inline int i596_rx(struct net_device *dev)
730 struct i596_private *lp = dev->priv;
731 struct i596_rfd *rfd;
732 struct i596_rbd *rbd;
735 DEB(DEB_RXFRAME,printk ("i596_rx(), rfd_head %p, rbd_head %p\n",
736 lp->rfd_head, lp->rbd_head));
739 rfd = lp->rfd_head; /* Ref next frame to check */
741 CHECK_INV(rfd, sizeof(struct i596_rfd));
742 while ((rfd->stat) & STAT_C) { /* Loop while complete frames */
/* Sanity-check that the RFD's buffer pointer matches our rbd_head;
 * anything else means the chip and driver disagree about the ring. */
743 if (rfd->rbd == I596_NULL)
745 else if (rfd->rbd == lp->rbd_head->b_addr) {
747 CHECK_INV(rbd, sizeof(struct i596_rbd));
750 printk("%s: rbd chain broken!\n", dev->name);
754 DEB(DEB_RXFRAME, printk(" rfd %p, rfd.rbd %08x, rfd.stat %04x\n",
755 rfd, rfd->rbd, rfd->stat));
757 if (rbd != NULL && ((rfd->stat) & STAT_OK)) {
/* Good frame; low 14 bits of count are the byte length. */
759 int pkt_len = rbd->count & 0x3fff;
760 struct sk_buff *skb = rbd->skb;
763 DEB(DEB_RXADDR,print_eth(rbd->v_data, "received"));
766 /* Check if the packet is long enough to just accept
767 * without copying to a properly sized skbuff.
770 if (pkt_len > rx_copybreak) {
771 struct sk_buff *newskb;
774 dma_unmap_single(lp->dev,(dma_addr_t)WSWAPchar(rbd->b_data), PKT_BUF_SZ, DMA_FROM_DEVICE);
775 /* Get fresh skbuff to replace filled one. */
776 newskb = dev_alloc_skb(PKT_BUF_SZ + 4);
777 if (newskb == NULL) {
778 skb = NULL; /* drop pkt */
/* 2-byte reserve: aligns the IP header on a 16-byte boundary. */
781 skb_reserve(newskb, 2);
783 /* Pass up the skb already on the Rx ring. */
784 skb_put(skb, pkt_len);
/* Re-arm the descriptor with the replacement buffer. */
788 dma_addr = dma_map_single(lp->dev, newskb->tail, PKT_BUF_SZ, DMA_FROM_DEVICE);
789 rbd->v_data = newskb->tail;
790 rbd->b_data = WSWAPchar(dma_addr);
791 CHECK_WBACK_INV(rbd, sizeof(struct i596_rbd));
/* Short frame: copy into a snugly-sized skb instead. */
794 skb = dev_alloc_skb(pkt_len + 2);
797 /* XXX tulip.c can defer packets here!! */
798 printk ("%s: i596_rx Memory squeeze, dropping packet.\n", dev->name);
799 lp->stats.rx_dropped++;
804 /* 16 byte align the data fields */
805 dma_sync_single_for_cpu(lp->dev, (dma_addr_t)WSWAPchar(rbd->b_data), PKT_BUF_SZ, DMA_FROM_DEVICE);
807 memcpy(skb_put(skb,pkt_len), rbd->v_data, pkt_len);
808 dma_sync_single_for_device(lp->dev, (dma_addr_t)WSWAPchar(rbd->b_data), PKT_BUF_SZ, DMA_FROM_DEVICE);
811 skb->protocol=eth_type_trans(skb,dev);
813 dev->last_rx = jiffies;
814 lp->stats.rx_packets++;
815 lp->stats.rx_bytes+=pkt_len;
/* Bad frame: decode the per-bit error flags into net stats. */
819 DEB(DEB_ERRORS, printk("%s: Error, rfd.stat = 0x%04x\n",
820 dev->name, rfd->stat));
821 lp->stats.rx_errors++;
822 if ((rfd->stat) & 0x0001)
823 lp->stats.collisions++;
824 if ((rfd->stat) & 0x0080)
825 lp->stats.rx_length_errors++;
826 if ((rfd->stat) & 0x0100)
827 lp->stats.rx_over_errors++;
828 if ((rfd->stat) & 0x0200)
829 lp->stats.rx_fifo_errors++;
830 if ((rfd->stat) & 0x0400)
831 lp->stats.rx_frame_errors++;
832 if ((rfd->stat) & 0x0800)
833 lp->stats.rx_crc_errors++;
834 if ((rfd->stat) & 0x1000)
835 lp->stats.rx_length_errors++;
838 /* Clear the buffer descriptor count and EOF + F flags */
840 if (rbd != NULL && (rbd->count & 0x4000)) {
842 lp->rbd_head = rbd->v_next;
843 CHECK_WBACK_INV(rbd, sizeof(struct i596_rbd));
846 /* Tidy the frame descriptor, marking it as end of list */
848 rfd->rbd = I596_NULL;
850 rfd->cmd = CMD_EOL|CMD_FLEX;
853 /* Remove end-of-list from old end descriptor */
855 rfd->v_prev->cmd = CMD_FLEX;
857 /* Update record of next frame descriptor to process */
859 lp->scb.rfd = rfd->b_next;
860 lp->rfd_head = rfd->v_next;
861 CHECK_WBACK_INV(rfd->v_prev, sizeof(struct i596_rfd));
862 CHECK_WBACK_INV(rfd, sizeof(struct i596_rfd));
864 CHECK_INV(rfd, sizeof(struct i596_rfd));
867 DEB(DEB_RXFRAME,printk ("frames %d\n", frames));
/* Abort every command still on the CPU-side queue (called from reset
 * and close paths): for Tx commands, unmap and account the skb as an
 * aborted transmit and mark the slot free; all commands get their
 * chip-side next pointer nulled and are flushed.  Finishes by waiting
 * for the SCB command field to clear and nulling scb.cmd. */
873 static inline void i596_cleanup_cmd(struct net_device *dev, struct i596_private *lp)
875 struct i596_cmd *ptr;
877 while (lp->cmd_head != NULL) {
879 lp->cmd_head = ptr->v_next;
/* Low 3 bits of the command word select the command type. */
882 switch ((ptr->command) & 0x7) {
885 struct tx_cmd *tx_cmd = (struct tx_cmd *) ptr;
886 struct sk_buff *skb = tx_cmd->skb;
887 dma_unmap_single(lp->dev, tx_cmd->dma_addr, skb->len, DMA_TO_DEVICE);
891 lp->stats.tx_errors++;
892 lp->stats.tx_aborted_errors++;
895 ptr->b_next = I596_NULL;
896 tx_cmd->cmd.command = 0; /* Mark as free */
901 ptr->b_next = I596_NULL;
903 CHECK_WBACK_INV(ptr, sizeof(struct i596_cmd));
906 wait_cmd(dev,lp,100,"i596_cleanup_cmd timed out");
907 lp->scb.cmd = I596_NULL;
908 CHECK_WBACK(&(lp->scb), sizeof(struct i596_scb));
/* Soft-reset of the command and receive units: stop the tx queue,
 * issue CUC_ABORT|RX_ABORT through the SCB (under the lock), wait for
 * the abort to complete, flush any queued commands, then restart the
 * queue.  The FIXME about lpmc (parisc low-priority machine check) is
 * inherited from the original author. */
912 static inline void i596_reset(struct net_device *dev, struct i596_private *lp)
916 DEB(DEB_RESET,printk("i596_reset\n"));
918 spin_lock_irqsave (&lp->lock, flags);
920 wait_cmd(dev,lp,100,"i596_reset timed out");
922 netif_stop_queue(dev);
924 /* FIXME: this command might cause an lpmc */
925 lp->scb.command = CUC_ABORT | RX_ABORT;
926 CHECK_WBACK(&(lp->scb), sizeof(struct i596_scb));
929 /* wait for shutdown */
930 wait_cmd(dev,lp,1000,"i596_reset 2 timed out");
931 spin_unlock_irqrestore (&lp->lock, flags);
933 i596_cleanup_cmd(dev,lp);
936 netif_start_queue(dev);
/* Append cmd to the command queue and hand it to the 82596.  The new
 * command is marked CMD_EOL|CMD_INTR and terminated; if the queue was
 * non-empty it is linked behind the current tail, otherwise it is
 * installed in scb.cmd and the CU started.  If the backlog exceeds
 * max_cmd_backlog and the last command is older than ticks_limit, the
 * adapter is assumed wedged and reset. */
941 static void i596_add_cmd(struct net_device *dev, struct i596_cmd *cmd)
943 struct i596_private *lp = dev->priv;
946 DEB(DEB_ADDCMD,printk("i596_add_cmd cmd_head %p\n", lp->cmd_head));
949 cmd->command |= (CMD_EOL | CMD_INTR);
951 cmd->b_next = I596_NULL;
952 CHECK_WBACK(cmd, sizeof(struct i596_cmd));
954 spin_lock_irqsave (&lp->lock, flags);
956 if (lp->cmd_head != NULL) {
/* Non-empty queue: chain behind the tail; the chip follows b_next
 * (which points at the status field of the next command). */
957 lp->cmd_tail->v_next = cmd;
958 lp->cmd_tail->b_next = WSWAPcmd(virt_to_dma(lp,&cmd->status));
959 CHECK_WBACK(lp->cmd_tail, sizeof(struct i596_cmd));
/* Empty queue: install directly in the SCB and start the CU. */
962 wait_cmd(dev,lp,100,"i596_add_cmd timed out");
963 lp->scb.cmd = WSWAPcmd(virt_to_dma(lp,&cmd->status));
964 lp->scb.command = CUC_START;
965 CHECK_WBACK(&(lp->scb), sizeof(struct i596_scb));
971 spin_unlock_irqrestore (&lp->lock, flags);
973 if (lp->cmd_backlog > max_cmd_backlog) {
974 unsigned long tickssofar = jiffies - lp->last_cmd;
976 if (tickssofar < ticks_limit)
979 printk("%s: command unit timed out, status resetting.\n", dev->name);
987 /* this function makes a perfectly adequate probe... but we have a
/* Run the chip's built-in self-test (PORT command 1 = PORT_SELFTEST):
 * the scp area is lent to the chip as the result buffer, then polled
 * (with cache invalidation) until the chip writes its result.
 * NOTE(review): loop body/exit and return lines are not visible in
 * this listing — confirm success criteria against the full source. */
989 static int i596_test(struct net_device *dev)
991 struct i596_private *lp = dev->priv;
995 tint = (volatile int *)(&(lp->scp));
996 data = virt_to_dma(lp,tint);
999 CHECK_WBACK(tint,PAGE_SIZE);
1001 MPU_PORT(dev, 1, data);
1003 for(data = 1000000; data; data--) {
1004 CHECK_INV(tint,PAGE_SIZE);
1010 printk("i596_test result %d\n", tint[1]);
/* net_device open(): grab the IRQ, build the receive rings, and run
 * init_i596_mem() to bring the chip up; on success start the tx queue.
 * On init failure the rx buffers are torn down and the IRQ released
 * (labels on the error path are partly missing from this listing). */
1016 static int i596_open(struct net_device *dev)
1018 DEB(DEB_OPEN,printk("%s: i596_open() irq %d.\n", dev->name, dev->irq));
1020 if (request_irq(dev->irq, &i596_interrupt, 0, "i82596", dev)) {
1021 printk("%s: IRQ %d not free\n", dev->name, dev->irq);
1027 if (init_i596_mem(dev)) {
1028 printk("%s: Failed to init memory\n", dev->name);
1029 goto out_remove_rx_bufs;
1032 netif_start_queue(dev);
/* Error unwind: free rings, then the IRQ. */
1037 remove_rx_bufs(dev);
1038 free_irq(dev->irq, dev);
/* netdev watchdog callback: transmitter stalled.  If nothing has been
 * transmitted since the previous restart (last_restart unchanged), do
 * a full chip reset; otherwise just re-kick the command/receive units
 * with CUC_START|RX_START and a Channel Attention.  Finally reset the
 * trans_start stamp and wake the queue. */
1043 static void i596_tx_timeout (struct net_device *dev)
1045 struct i596_private *lp = dev->priv;
1047 /* Transmitter timeout, serious problems. */
1048 DEB(DEB_ERRORS,printk("%s: transmit timed out, status resetting.\n",
1051 lp->stats.tx_errors++;
1053 /* Try to restart the adaptor */
1054 if (lp->last_restart == lp->stats.tx_packets) {
1055 DEB(DEB_ERRORS,printk ("Resetting board.\n"));
1056 /* Shutdown and restart */
1057 i596_reset (dev, lp);
1059 /* Issue a channel attention signal */
1060 DEB(DEB_ERRORS,printk ("Kicking board.\n"));
1061 lp->scb.command = CUC_START | RX_START;
1062 CHECK_WBACK_INV(&(lp->scb), sizeof(struct i596_scb));
1064 lp->last_restart = lp->stats.tx_packets;
1067 dev->trans_start = jiffies;
1068 netif_wake_queue (dev);
/* hard_start_xmit: queue one skb for transmission.  Frames shorter
 * than ETH_ZLEN are padded via skb_padto; a tx_cmd/tbd pair is claimed
 * from the ring at next_tx_cmd (dropping the packet if the slot is
 * still busy), the skb data is DMA-mapped, and the CmdTx command is
 * queued to the chip via i596_add_cmd().  Stats are updated and the
 * queue restarted before returning. */
1072 static int i596_start_xmit(struct sk_buff *skb, struct net_device *dev)
1074 struct i596_private *lp = dev->priv;
1075 struct tx_cmd *tx_cmd;
1076 struct i596_tbd *tbd;
1077 short length = skb->len;
1078 dev->trans_start = jiffies;
1080 DEB(DEB_STARTTX,printk("%s: i596_start_xmit(%x,%p) called\n", dev->name,
1081 skb->len, skb->data));
/* Pad runt frames to the 60-byte ethernet minimum. */
1083 if (length < ETH_ZLEN) {
1084 skb = skb_padto(skb, ETH_ZLEN);
1090 netif_stop_queue(dev);
1092 tx_cmd = lp->tx_cmds + lp->next_tx_cmd;
1093 tbd = lp->tbds + lp->next_tx_cmd;
/* Non-zero command means the slot hasn't been reaped yet: ring full. */
1095 if (tx_cmd->cmd.command) {
1096 DEB(DEB_ERRORS,printk ("%s: xmit ring full, dropping packet.\n",
1098 lp->stats.tx_dropped++;
1102 if (++lp->next_tx_cmd == TX_RING_SIZE)
1103 lp->next_tx_cmd = 0;
1104 tx_cmd->tbd = WSWAPtbd(virt_to_dma(lp,tbd));
1105 tbd->next = I596_NULL;
1107 tx_cmd->cmd.command = CMD_FLEX | CmdTx;
/* EOF flag marks this as the final (only) buffer of the frame. */
1113 tbd->size = EOF | length;
1115 tx_cmd->dma_addr = dma_map_single(lp->dev, skb->data, skb->len,
1117 tbd->data = WSWAPchar(tx_cmd->dma_addr);
1119 DEB(DEB_TXADDR,print_eth(skb->data, "tx-queued"));
1120 CHECK_WBACK_INV(tx_cmd, sizeof(struct tx_cmd));
1121 CHECK_WBACK_INV(tbd, sizeof(struct i596_tbd));
1122 i596_add_cmd(dev, &tx_cmd->cmd);
1124 lp->stats.tx_packets++;
1125 lp->stats.tx_bytes += length;
1128 netif_start_queue(dev);
/* Debug helper: print an ethernet header from the raw frame at add —
 * source MAC (bytes 6-11), destination MAC (bytes 0-5), then the
 * 2-byte ethertype (bytes 12-13) — tagged with the caller's str. */
1133 static void print_eth(unsigned char *add, char *str)
1137 printk("i596 0x%p, ", add);
1138 for (i = 0; i < 6; i++)
1139 printk(" %02X", add[i + 6]);
1141 for (i = 0; i < 6; i++)
1142 printk(" %02X", add[i]);
1143 printk(" %02X%02X, %s\n", add[12], add[13], str);
1147 #define LAN_PROM_ADDR 0xF0810000
/* Probe/attach: verify that the shared descriptor structures have the
 * cache-line-aligned sizes the hardware flushing scheme depends on,
 * obtain the MAC address (from PDC firmware, else from the LAN EEPROM
 * at LAN_PROM_ADDR), allocate the non-cache-coherent shared memory
 * block that holds struct i596_private, wire up the net_device ops,
 * and initialize the SCB/lock/dma_addr fields.  Returns 0 on success
 * (error returns and registration are partly outside this listing). */
1149 static int __devinit i82596_probe(struct net_device *dev,
1150 struct device *gen_dev)
1153 struct i596_private *lp;
1155 dma_addr_t dma_addr;
1157 /* This lot is ensure things have been cache line aligned. */
1158 if (sizeof(struct i596_rfd) != 32) {
1159 printk("82596: sizeof(struct i596_rfd) = %d\n",
1160 sizeof(struct i596_rfd));
1163 if ((sizeof(struct i596_rbd) % 32) != 0) {
1164 printk("82596: sizeof(struct i596_rbd) = %d\n",
1165 sizeof(struct i596_rbd));
1168 if ((sizeof(struct tx_cmd) % 32) != 0) {
1169 printk("82596: sizeof(struct tx_cmd) = %d\n",
1170 sizeof(struct tx_cmd));
1173 if (sizeof(struct i596_tbd) != 32) {
1174 printk("82596: sizeof(struct i596_tbd) = %d\n",
1175 sizeof(struct i596_tbd));
/* The whole private block must fit in one page (see struct comment). */
1179 if (sizeof(struct i596_private) > 4096) {
1180 printk("82596: sizeof(struct i596_private) = %d\n",
1181 sizeof(struct i596_private));
1186 if (!dev->base_addr || !dev->irq)
/* Ask PDC firmware for the station address; fall back to reading the
 * six MAC bytes straight out of the LAN PROM. */
1189 if (pdc_lan_station_id(eth_addr, dev->base_addr)) {
1190 for (i=0; i < 6; i++) {
1191 eth_addr[i] = gsc_readb(LAN_PROM_ADDR + i);
1193 printk("82596.c: MAC of HP700 LAN read from EEPROM\n");
/* Shared descriptors must be non-coherent DMA memory; dma_addr is the
 * bus-side base used by virt_to_dma(). */
1196 dev->mem_start = (unsigned long) dma_alloc_noncoherent(gen_dev,
1197 sizeof(struct i596_private), &dma_addr, GFP_KERNEL);
1198 if (!dev->mem_start) {
1199 printk("%s: Couldn't get shared memory\n", dev->name);
1203 DEB(DEB_PROBE,printk("%s: 82596 at %#3lx,", dev->name, dev->base_addr));
1205 for (i = 0; i < 6; i++)
1206 DEB(DEB_PROBE,printk(" %2.2X", dev->dev_addr[i] = eth_addr[i]));
1208 DEB(DEB_PROBE,printk(" IRQ %d.\n", dev->irq));
1210 DEB(DEB_PROBE,printk(version));
1212 /* The 82596-specific entries in the device structure. */
1213 dev->open = i596_open;
1214 dev->stop = i596_close;
1215 dev->hard_start_xmit = i596_start_xmit;
1216 dev->get_stats = i596_get_stats;
1217 dev->set_multicast_list = set_multicast_list;
1218 dev->tx_timeout = i596_tx_timeout;
1219 dev->watchdog_timeo = TX_TIMEOUT;
1221 dev->priv = (void *)(dev->mem_start);
1224 DEB(DEB_INIT,printk ("%s: lp at 0x%08lx (%d bytes), lp->scb at 0x%08lx\n",
1225 dev->name, (unsigned long)lp,
1226 sizeof(struct i596_private), (unsigned long)&lp->scb));
1227 memset(lp, 0, sizeof(struct i596_private));
1229 lp->scb.command = 0;
1230 lp->scb.cmd = I596_NULL;
1231 lp->scb.rfd = I596_NULL;
1232 lp->lock = SPIN_LOCK_UNLOCKED;
1233 lp->dma_addr = dma_addr;
1236 CHECK_WBACK_INV(dev->mem_start, sizeof(struct i596_private));
/*
 * i596_interrupt - IRQ handler for the i82596.
 *
 * Reads the SCB status word, acknowledges the four interrupt cause bits
 * (status & 0xf000), reaps completed commands from the software command
 * queue, and restarts the command/receive units as needed.
 *
 * Per the visible printk strings, the status bits decode as:
 *   0x8000  CX  - a command with its interrupt bit set completed
 *   0x4000  FR  - a frame was received
 *   0x2000  CNA - command unit became not active
 *   0x1000  RNR - receive unit became not ready
 *
 * NOTE(review): this excerpt is missing many original source lines
 * (early-exit paths, switch labels, closing braces); comments below
 * describe only what is visible.
 */
1242 static irqreturn_t i596_interrupt(int irq, void *dev_id, struct pt_regs *regs)
1244 struct net_device *dev = dev_id;
1245 struct i596_private *lp;
1246 unsigned short status, ack_cmd = 0;
/* Spurious interrupt: no net_device matched this IRQ. */
1249 printk("i596_interrupt(): irq %d for unknown device.\n", irq);
/* Serialise against i596_close()/command queueing on the same device. */
1255 spin_lock (&lp->lock);
/* Wait for any in-flight SCB command to be consumed before reading status. */
1257 wait_cmd(dev,lp,100,"i596 interrupt, timeout");
1258 status = lp->scb.status;
1260 DEB(DEB_INTS,printk("%s: i596 interrupt, IRQ %d, status %4.4x.\n",
1261 dev->name, irq, status));
/* Ack exactly the cause bits that are set (CX|FR|CNA|RNR). */
1263 ack_cmd = status & 0xf000;
/* No cause bits set: nothing to do, drop the lock and bail. */
1266 DEB(DEB_ERRORS, printk("%s: interrupt with no events\n", dev->name));
1267 spin_unlock (&lp->lock);
/* CX or CNA: walk the command queue reaping completed commands. */
1271 if ((status & 0x8000) || (status & 0x2000)) {
1272 struct i596_cmd *ptr;
1274 if ((status & 0x8000))
1275 DEB(DEB_INTS,printk("%s: i596 interrupt completed command.\n", dev->name));
1276 if ((status & 0x2000))
1277 DEB(DEB_INTS,printk("%s: i596 interrupt command unit inactive %x.\n", dev->name, status & 0x0700));
1279 while (lp->cmd_head != NULL) {
/* Invalidate cache before reading a descriptor the chip DMA-wrote. */
1280 CHECK_INV(lp->cmd_head, sizeof(struct i596_cmd));
/* STAT_C = command complete; stop reaping at the first unfinished one. */
1281 if (!(lp->cmd_head->status & STAT_C))
1286 DEB(DEB_STATUS,printk("cmd_head->status = %04x, ->command = %04x\n",
1287 lp->cmd_head->status, lp->cmd_head->command));
/* Unlink the completed command (ptr) from the head of the queue. */
1288 lp->cmd_head = ptr->v_next;
/* Dispatch on the low 3 command-opcode bits. */
1291 switch ((ptr->command) & 0x7) {
/* --- transmit completion: free the skb and account errors --- */
1294 struct tx_cmd *tx_cmd = (struct tx_cmd *) ptr;
1295 struct sk_buff *skb = tx_cmd->skb;
1297 if ((ptr->status) & STAT_OK) {
1298 DEB(DEB_TXADDR,print_eth(skb->data, "tx-done"));
1300 lp->stats.tx_errors++;
/* Individual TX error bits -> net_device_stats counters. */
1301 if ((ptr->status) & 0x0020)
1302 lp->stats.collisions++;
1303 if (!((ptr->status) & 0x0040))
1304 lp->stats.tx_heartbeat_errors++;
1305 if ((ptr->status) & 0x0400)
1306 lp->stats.tx_carrier_errors++;
1307 if ((ptr->status) & 0x0800)
1308 lp->stats.collisions++;
1309 if ((ptr->status) & 0x1000)
1310 lp->stats.tx_aborted_errors++;
/* Release the DMA mapping, then the skb (IRQ-safe variant). */
1312 dma_unmap_single(lp->dev, tx_cmd->dma_addr, skb->len, DMA_TO_DEVICE);
1313 dev_kfree_skb_irq(skb);
1315 tx_cmd->cmd.command = 0; /* Mark free */
/* --- TDR (cable test) completion: report the line condition --- */
1320 unsigned short status = ((struct tdr_cmd *)ptr)->status;
1322 if (status & 0x8000) {
1323 DEB(DEB_ANY,printk("%s: link ok.\n", dev->name));
1325 if (status & 0x4000)
1326 printk("%s: Transceiver problem.\n", dev->name);
1327 if (status & 0x2000)
1328 printk("%s: Termination problem.\n", dev->name);
1329 if (status & 0x1000)
1330 printk("%s: Short circuit.\n", dev->name);
/* Low 11 bits are the TDR time value. */
1332 DEB(DEB_TDR,printk("%s: Time %d.\n", dev->name, status & 0x07ff));
1337 /* Zap command so set_multicast_list() knows it is free */
/* Terminate the chip-visible link and flush the descriptor to memory. */
1342 ptr->b_next = I596_NULL;
1343 CHECK_WBACK(ptr, sizeof(struct i596_cmd));
1344 lp->last_cmd = jiffies;
1347 /* This mess is arranging that only the last of any outstanding
1348 * commands has the interrupt bit set. Should probably really
1349 * only add to the cmd queue when the CU is stopped.
/* Clear the interrupt bit (0x2000 region) on all but the tail command. */
1352 while ((ptr != NULL) && (ptr != lp->cmd_tail)) {
1353 struct i596_cmd *prev = ptr;
1355 ptr->command &= 0x1fff;
1357 CHECK_WBACK_INV(prev, sizeof(struct i596_cmd));
/* Commands still pending: point the SCB at the new head and restart the CU. */
1360 if ((lp->cmd_head != NULL))
1361 ack_cmd |= CUC_START;
1362 lp->scb.cmd = WSWAPcmd(virt_to_dma(lp,&lp->cmd_head->status));
1363 CHECK_WBACK_INV(&lp->scb, sizeof(struct i596_scb));
/* FR or RNR: receive-side events. */
1365 if ((status & 0x1000) || (status & 0x4000)) {
1366 if ((status & 0x4000))
1367 DEB(DEB_INTS,printk("%s: i596 interrupt received a frame.\n", dev->name));
1369 /* Only RX_START if stopped - RGH 07-07-96 */
1370 if (status & 0x1000) {
1371 if (netif_running(dev)) {
1372 DEB(DEB_ERRORS,printk("%s: i596 interrupt receive unit inactive, status 0x%x\n", dev->name, status));
/* RU went not-ready while we are up: count it, rebuild the ring, restart. */
1373 ack_cmd |= RX_START;
1374 lp->stats.rx_errors++;
1375 lp->stats.rx_fifo_errors++;
1376 rebuild_rx_bufs(dev);
/* Issue the accumulated ack/restart bits to the chip via the SCB. */
1380 wait_cmd(dev,lp,100,"i596 interrupt, timeout");
1381 lp->scb.command = ack_cmd;
1382 CHECK_WBACK(&lp->scb, sizeof(struct i596_scb));
1384 /* DANGER: I suspect that some kind of interrupt
1385 acknowledgement aside from acking the 82596 might be needed
1386 here... but it's running acceptably without */
1390 wait_cmd(dev,lp,100,"i596 interrupt, exit timeout");
1391 DEB(DEB_INTS,printk("%s: exiting interrupt.\n", dev->name));
1393 spin_unlock (&lp->lock);
/*
 * i596_close - netdev ->stop() hook: shut the interface down.
 *
 * Stops the TX queue, aborts the command and receive units via the SCB,
 * then tears down queued commands, the IRQ and the RX buffer ring.
 * NOTE(review): excerpt is missing some lines (e.g. the final return).
 */
1397 static int i596_close(struct net_device *dev)
1399 struct i596_private *lp = dev->priv;
1400 unsigned long flags;
/* Stop upper layers from handing us more TX packets. */
1402 netif_stop_queue(dev);
1404 DEB(DEB_INIT,printk("%s: Shutting down ethercard, status was %4.4x.\n",
1405 dev->name, lp->scb.status));
/* irqsave: this path can race with i596_interrupt() on the same lock. */
1407 spin_lock_irqsave(&lp->lock, flags);
1409 wait_cmd(dev,lp,100,"close1 timed out");
/* Tell the chip to abort both the command unit and the receive unit. */
1410 lp->scb.command = CUC_ABORT | RX_ABORT;
1411 CHECK_WBACK(&lp->scb, sizeof(struct i596_scb));
/* Wait for the abort to be accepted before releasing the lock. */
1415 wait_cmd(dev,lp,100,"close2 timed out");
1416 spin_unlock_irqrestore(&lp->lock, flags);
1417 DEB(DEB_STRUCT,i596_display_data(dev));
/* Free any commands still sitting on the software queue. */
1418 i596_cleanup_cmd(dev,lp);
/* Quiesce and release the interrupt, then the RX skb ring. */
1420 disable_irq(dev->irq);
1422 free_irq(dev->irq, dev);
1423 remove_rx_bufs(dev);
/*
 * i596_get_stats - netdev ->get_stats() hook.
 * Presumably returns &lp->stats (the counters updated in the interrupt
 * handler); the return statement is outside this excerpt -- confirm.
 */
1430 static struct net_device_stats *
1431 i596_get_stats(struct net_device *dev)
1433 struct i596_private *lp = dev->priv;
1439 * Set or clear the multicast filter for this adaptor.
/*
 * set_multicast_list - netdev ->set_multicast_list() hook.
 *
 * Edits the cached configure-command bytes to reflect IFF_PROMISC and
 * IFF_ALLMULTI, queues a CmdConfigure if anything changed, then (for a
 * non-empty mc_list) builds and queues a CmdMulticastList with up to
 * MAX_MC_CNT six-byte addresses.
 * NOTE(review): excerpt is fragmentary; some branches/braces missing.
 */
1442 static void set_multicast_list(struct net_device *dev)
1444 struct i596_private *lp = dev->priv;
1445 int config = 0, cnt;
1447 DEB(DEB_MULTI,printk("%s: set multicast list, %d entries, promisc %s, allmulti %s\n", dev->name, dev->mc_count, dev->flags & IFF_PROMISC ? "ON" : "OFF", dev->flags & IFF_ALLMULTI ? "ON" : "OFF"));
/* config byte 8 bit 0x01 toggles promiscuous mode; only touch it on change. */
1449 if ((dev->flags & IFF_PROMISC) && !(lp->cf_cmd.i596_config[8] & 0x01)) {
1450 lp->cf_cmd.i596_config[8] |= 0x01;
1453 if (!(dev->flags & IFF_PROMISC) && (lp->cf_cmd.i596_config[8] & 0x01)) {
1454 lp->cf_cmd.i596_config[8] &= ~0x01;
/* config byte 11 bit 0x20 appears to be a multicast-all *disable* bit:
 * cleared when IFF_ALLMULTI is requested, set otherwise -- TODO confirm
 * against the 82596 configure-command byte layout. */
1457 if ((dev->flags & IFF_ALLMULTI) && (lp->cf_cmd.i596_config[11] & 0x20)) {
1458 lp->cf_cmd.i596_config[11] &= ~0x20;
1461 if (!(dev->flags & IFF_ALLMULTI) && !(lp->cf_cmd.i596_config[11] & 0x20)) {
1462 lp->cf_cmd.i596_config[11] |= 0x20;
/* Non-zero command means the previous CmdConfigure is still in flight
 * (i596_interrupt() zeroes it on completion). */
1466 if (lp->cf_cmd.cmd.command)
1467 printk("%s: config change request already queued\n",
/* Flush the command block to memory before handing it to the chip. */
1470 lp->cf_cmd.cmd.command = CmdConfigure;
1471 CHECK_WBACK_INV(&lp->cf_cmd, sizeof(struct cf_cmd));
1472 i596_add_cmd(dev, &lp->cf_cmd.cmd);
/* Clamp the address count to what the hardware command can hold. */
1476 cnt = dev->mc_count;
1477 if (cnt > MAX_MC_CNT)
1480 printk("%s: Only %d multicast addresses supported",
1484 if (dev->mc_count > 0) {
1485 struct dev_mc_list *dmi;
/* mc_cnt is a byte count: 6 bytes per Ethernet address. */
1490 cmd->cmd.command = CmdMulticastList;
1491 cmd->mc_cnt = dev->mc_count * 6;
/* Copy each address from the kernel's mc_list into the command buffer. */
1493 for (dmi = dev->mc_list; cnt && dmi != NULL; dmi = dmi->next, cnt--, cp += 6) {
1494 memcpy(cp, dmi->dmi_addr, 6);
1496 DEB(DEB_MULTI,printk("%s: Adding address %02x:%02x:%02x:%02x:%02x:%02x\n",
1497 dev->name, cp[0],cp[1],cp[2],cp[3],cp[4],cp[5]));
1499 CHECK_WBACK_INV(&lp->mc_cmd, sizeof(struct mc_cmd));
1500 i596_add_cmd(dev, &cmd->cmd);
/* Module parameter: debug bitmask (DEB_* flags); -1 = use driver default. */
1504 MODULE_PARM(debug, "i");
1505 MODULE_PARM_DESC(debug, "lasi_82596 debug mask");
1506 static int debug = -1;
/* Registry of probed devices so lasi_82596_exit() can unwind them. */
1508 static int num_drivers;
1509 static struct net_device *netdevs[MAX_DRIVERS];
/*
 * lan_init_chip - parisc_driver ->probe(): set up one i82596 instance.
 *
 * Allocates an etherdev, probes the chip at dev->hpa, registers the
 * netdev, and records it in netdevs[] for module unload.
 * NOTE(review): error-return lines fall outside this excerpt.
 */
1511 static int __devinit
1512 lan_init_chip(struct parisc_device *dev)
1514 struct net_device *netdevice;
/* Refuse to probe more chips than the unload bookkeeping can track. */
1517 if (num_drivers >= MAX_DRIVERS) {
1518 /* max count of possible i82596 drivers reached */
/* The firmware/bus layer must have assigned us an IRQ. */
1523 printk(KERN_ERR __FILE__ ": IRQ not found for i82596 at 0x%lx\n", dev->hpa);
1527 printk(KERN_INFO "Found i82596 at 0x%lx, IRQ %d\n", dev->hpa, dev->irq);
/* 0 = no extra private area; i82596_probe() allocates its own state. */
1529 netdevice = alloc_etherdev(0);
1533 netdevice->base_addr = dev->hpa;
1534 netdevice->irq = dev->irq;
1536 retval = i82596_probe(netdevice, &dev->dev);
1538 free_netdev(netdevice);
1542 retval = register_netdev(netdevice);
/* register_netdev failed: undo the probe's DMA allocation and netdev. */
1544 struct i596_private *lp = netdevice->priv;
1545 printk(KERN_WARNING __FILE__ ": register_netdevice ret'd %d\n", retval);
1546 dma_free_noncoherent(lp->dev, sizeof(struct i596_private),
1547 (void *)netdevice->mem_start, lp->dma_addr);
1548 free_netdev(netdevice);
/* sversion 0x72 (second lan_tbl entry) needs byte-swapped port access. */
1551 if (dev->id.sversion == 0x72) {
1552 ((struct i596_private *)netdevice->priv)->options = OPT_SWAP_PORT;
/* Remember the device for lasi_82596_exit(). */
1555 netdevs[num_drivers++] = netdevice;
/* PA-RISC hardware IDs this driver binds to: sversion 0x8a and 0x72
 * (the latter gets OPT_SWAP_PORT in lan_init_chip()). */
1561 static struct parisc_device_id lan_tbl[] = {
1562 { HPHW_FIO, HVERSION_REV_ANY_ID, HVERSION_ANY_ID, 0x0008a },
1563 { HPHW_FIO, HVERSION_REV_ANY_ID, HVERSION_ANY_ID, 0x00072 },
/* Export the table so module tools can map hardware to this module. */
1567 MODULE_DEVICE_TABLE(parisc, lan_tbl);
/* PA-RISC bus driver glue: match lan_tbl, probe via lan_init_chip().
 * (The closing brace / remaining initializers are outside this excerpt.) */
1569 static struct parisc_driver lan_driver = {
1571 .id_table = lan_tbl,
1572 .probe = lan_init_chip,
/* Module entry point: register with the PA-RISC bus; probing of each
 * matching device then happens through lan_driver.probe. */
1575 static int __devinit lasi_82596_init(void)
1579 return register_parisc_driver(&lan_driver);
1582 module_init(lasi_82596_init);
/*
 * Module exit: unwind every device recorded in netdevs[] --
 * unregister the netdev, free the DMA-allocated private area, free the
 * netdev itself -- then drop the bus driver registration.
 */
1584 static void __exit lasi_82596_exit(void)
1588 for (i=0; i<MAX_DRIVERS; i++) {
1589 struct i596_private *lp;
1590 struct net_device *netdevice;
1592 netdevice = netdevs[i];
1596 unregister_netdev(netdevice);
/* Mirror of the allocation made during probe (see lan_init_chip()). */
1598 lp = netdevice->priv;
1599 dma_free_noncoherent(lp->dev, sizeof(struct i596_private),
1600 (void *)netdevice->mem_start, lp->dma_addr);
1601 free_netdev(netdevice);
1604 unregister_parisc_driver(&lan_driver);
1607 module_exit(lasi_82596_exit);