2 * olympic.c (c) 1999 Peter De Schrijver All Rights Reserved
3 * 1999/2000 Mike Phillips (mikep@linuxtr.net)
5 * Linux driver for IBM PCI tokenring cards based on the Pit/Pit-Phy/Olympic
8 * Base Driver Skeleton:
9 * Written 1993-94 by Donald Becker.
11 * Copyright 1993 United States Government as represented by the
12 * Director, National Security Agency.
 * Thanks to Erik De Cock, Adrian Bridgett and Frank Fiene for their
 * assistance and perseverance with the testing of this driver.
17 * This software may be used and distributed according to the terms
18 * of the GNU General Public License, incorporated herein by reference.
20 * 4/27/99 - Alpha Release 0.1.0
21 * First release to the public
23 * 6/8/99 - Official Release 0.2.0
24 * Merged into the kernel code
25 * 8/18/99 - Updated driver for 2.3.13 kernel to use new pci
26 * resource. Driver also reports the card name returned by
28 * 1/11/00 - Added spinlocks for smp
29 * 2/23/00 - Updated to dev_kfree_irq
30 * 3/10/00 - Fixed FDX enable which triggered other bugs also
32 * 5/20/00 - Changes to handle Olympic on LinuxPPC. Endian changes.
33 * The odd thing about the changes is that the fix for
34 * endian issues with the big-endian data in the arb, asb...
35 * was to always swab() the bytes, no matter what CPU.
36 * That's because the read[wl]() functions always swap the
37 * bytes on the way in on PPC.
38 * Fixing the hardware descriptors was another matter,
39 * because they weren't going through read[wl](), there all
40 * the results had to be in memory in le32 values. kdaaker
42 * 12/23/00 - Added minimal Cardbus support (Thanks Donald).
44 * 03/09/01 - Add new pci api, dev_base_lock, general clean up.
46 * 03/27/01 - Add new dma pci (Thanks to Kyle Lucke) and alloc_trdev
47 * Change proc_fs behaviour, now one entry per adapter.
49 * 04/09/01 - Couple of bug fixes to the dma unmaps and ejecting the
50 * adapter when live does not take the system down with it.
52 * 06/02/01 - Clean up, copy skb for small packets
54 * 06/22/01 - Add EISR error handling routines
56 * 07/19/01 - Improve bad LAA reporting, strip out freemem
57 * into a separate function, its called from 3
58 * different places now.
59 * 02/09/02 - Replaced sleep_on.
60 * 03/01/02 - Replace access to several registers from 32 bit to
61 * 16 bit. Fixes alignment errors on PPC 64 bit machines.
62 * Thanks to Al Trautman for this one.
63 * 03/10/02 - Fix BUG in arb_cmd. Bug was there all along but was
64 * silently ignored until the error checking code
65 * went into version 1.0.0
66 * 06/04/02 - Add correct start up sequence for the cardbus adapters.
67 * Required for strict compliance with pci power mgmt specs.
72 * If Problems do Occur
73 * Most problems can be rectified by either closing and opening the interface
74 * (ifconfig down and up) or rmmod and insmod'ing the driver (a bit difficult
75 * if compiled into the kernel).
78 /* Change OLYMPIC_DEBUG to 1 to get verbose, and I mean really verbose, messages */
80 #define OLYMPIC_DEBUG 0
83 #include <linux/config.h>
84 #include <linux/module.h>
85 #include <linux/kernel.h>
86 #include <linux/errno.h>
87 #include <linux/timer.h>
89 #include <linux/ioport.h>
90 #include <linux/string.h>
91 #include <linux/proc_fs.h>
92 #include <linux/ptrace.h>
93 #include <linux/skbuff.h>
94 #include <linux/interrupt.h>
95 #include <linux/delay.h>
96 #include <linux/netdevice.h>
97 #include <linux/trdevice.h>
98 #include <linux/stddef.h>
99 #include <linux/init.h>
100 #include <linux/pci.h>
101 #include <linux/spinlock.h>
103 #include <net/checksum.h>
106 #include <asm/system.h>
107 #include <asm/bitops.h>
111 /* I've got to put some intelligence into the version number so that Peter and I know
112 * which version of the code somebody has got.
113 * Version Number = a.b.c.d where a.b.c is the level of code and d is the latest author.
114 * So 0.0.1.pds = Peter, 0.0.1.mlp = Mike
116 * Official releases will only have an a.b.c version number format.
119 static char version[] __devinitdata =
120 "Olympic.c v1.0.5 6/04/02 - Peter De Schrijver & Mike Phillips" ;
122 static char *open_maj_error[] = {"No error", "Lobe Media Test", "Physical Insertion",
123 "Address Verification", "Neighbor Notification (Ring Poll)",
124 "Request Parameters","FDX Registration Request",
125 "FDX Duplicate Address Check", "Station registration Query Wait",
128 static char *open_min_error[] = {"No error", "Function Failure", "Signal Lost", "Wire Fault",
129 "Ring Speed Mismatch", "Timeout","Ring Failure","Ring Beaconing",
130 "Duplicate Node Address","Request Parameters","Remove Received",
131 "Reserved", "Reserved", "No Monitor Detected for RPL",
132 "Monitor Contention failer for RPL", "FDX Protocol Error"};
134 /* Module paramters */
136 MODULE_AUTHOR("Mike Phillips <mikep@linuxtr.net>") ;
137 MODULE_DESCRIPTION("Olympic PCI/Cardbus Chipset Driver") ;
139 /* Ring Speed 0,4,16,100
141 * 4,16 = Selected speed only, no autosense
142 * This allows the card to be the first on the ring
143 * and become the active monitor.
144 * 100 = Nothing at present, 100mbps is autodetected
145 * if FDX is turned on. May be implemented in the future to
146 * fail if 100mpbs is not detected.
148 * WARNING: Some hubs will allow you to insert
152 static int ringspeed[OLYMPIC_MAX_ADAPTERS] = {0,} ;
153 MODULE_PARM(ringspeed, "1-" __MODULE_STRING(OLYMPIC_MAX_ADAPTERS) "i");
155 /* Packet buffer size */
157 static int pkt_buf_sz[OLYMPIC_MAX_ADAPTERS] = {0,} ;
158 MODULE_PARM(pkt_buf_sz, "1-" __MODULE_STRING(OLYMPIC_MAX_ADAPTERS) "i") ;
162 static int message_level[OLYMPIC_MAX_ADAPTERS] = {0,} ;
163 MODULE_PARM(message_level, "1-" __MODULE_STRING(OLYMPIC_MAX_ADAPTERS) "i") ;
165 /* Change network_monitor to receive mac frames through the arb channel.
166 * Will also create a /proc/net/olympic_tr%d entry, where %d is the tr
167 * device, i.e. tr0, tr1 etc.
168 * Intended to be used to create a ring-error reporting network module
169 * i.e. it will give you the source address of beaconers on the ring
171 static int network_monitor[OLYMPIC_MAX_ADAPTERS] = {0,};
172 MODULE_PARM(network_monitor, "1-" __MODULE_STRING(OLYMPIC_MAX_ADAPTERS) "i");
174 static struct pci_device_id olympic_pci_tbl[] = {
175 {PCI_VENDOR_ID_IBM,PCI_DEVICE_ID_IBM_TR_WAKE,PCI_ANY_ID,PCI_ANY_ID,},
176 { } /* Terminating Entry */
178 MODULE_DEVICE_TABLE(pci,olympic_pci_tbl) ;
181 static int olympic_probe(struct pci_dev *pdev, const struct pci_device_id *ent);
182 static int olympic_init(struct net_device *dev);
183 static int olympic_open(struct net_device *dev);
184 static int olympic_xmit(struct sk_buff *skb, struct net_device *dev);
185 static int olympic_close(struct net_device *dev);
186 static void olympic_set_rx_mode(struct net_device *dev);
187 static void olympic_freemem(struct net_device *dev) ;
188 static irqreturn_t olympic_interrupt(int irq, void *dev_id, struct pt_regs *regs);
189 static struct net_device_stats * olympic_get_stats(struct net_device *dev);
190 static int olympic_set_mac_address(struct net_device *dev, void *addr) ;
191 static void olympic_arb_cmd(struct net_device *dev);
192 static int olympic_change_mtu(struct net_device *dev, int mtu);
193 static void olympic_srb_bh(struct net_device *dev) ;
194 static void olympic_asb_bh(struct net_device *dev) ;
195 static int olympic_proc_info(char *buffer, char **start, off_t offset, int length, int *eof, void *data) ;
197 static int __devinit olympic_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
199 struct net_device *dev ;
200 struct olympic_private *olympic_priv;
201 static int card_no = -1 ;
206 if ((i = pci_enable_device(pdev))) {
210 pci_set_master(pdev);
212 if ((i = pci_request_regions(pdev,"olympic"))) {
216 dev = alloc_trdev(sizeof(struct olympic_private)) ;
222 olympic_priv = dev->priv ;
224 init_waitqueue_head(&olympic_priv->srb_wait);
225 init_waitqueue_head(&olympic_priv->trb_wait);
227 printk(KERN_INFO "pci_device: %p, dev:%p, dev->priv: %p\n", pdev, dev, dev->priv);
230 dev->base_addr=pci_resource_start(pdev, 0);
231 olympic_priv->olympic_card_name = pci_name(pdev);
232 olympic_priv->pdev = pdev;
233 olympic_priv->olympic_mmio = ioremap(pci_resource_start(pdev,1),256);
234 olympic_priv->olympic_lap = ioremap(pci_resource_start(pdev,2),2048);
235 if (!olympic_priv->olympic_mmio || !olympic_priv->olympic_lap) {
239 if ((pkt_buf_sz[card_no] < 100) || (pkt_buf_sz[card_no] > 18000) )
240 olympic_priv->pkt_buf_sz = PKT_BUF_SZ ;
242 olympic_priv->pkt_buf_sz = pkt_buf_sz[card_no] ;
244 dev->mtu = olympic_priv->pkt_buf_sz - TR_HLEN ;
245 olympic_priv->olympic_ring_speed = ringspeed[card_no] ;
246 olympic_priv->olympic_message_level = message_level[card_no] ;
247 olympic_priv->olympic_network_monitor = network_monitor[card_no];
249 if ((i = olympic_init(dev))) {
253 dev->open=&olympic_open;
254 dev->hard_start_xmit=&olympic_xmit;
255 dev->change_mtu=&olympic_change_mtu;
256 dev->stop=&olympic_close;
258 dev->set_multicast_list=&olympic_set_rx_mode;
259 dev->get_stats=&olympic_get_stats ;
260 dev->set_mac_address=&olympic_set_mac_address ;
261 SET_MODULE_OWNER(dev) ;
262 SET_NETDEV_DEV(dev, &pdev->dev);
264 pci_set_drvdata(pdev,dev) ;
265 register_netdev(dev) ;
266 printk("Olympic: %s registered as: %s\n",olympic_priv->olympic_card_name,dev->name);
267 if (olympic_priv->olympic_network_monitor) { /* Must go after register_netdev as we need the device name */
269 strcpy(proc_name,"net/olympic_") ;
270 strcat(proc_name,dev->name) ;
271 create_proc_read_entry(proc_name,0,0,olympic_proc_info,(void *)dev) ;
272 printk("Olympic: Network Monitor information: /proc/%s\n",proc_name);
277 if (olympic_priv->olympic_mmio)
278 iounmap(olympic_priv->olympic_mmio);
279 if (olympic_priv->olympic_lap)
280 iounmap(olympic_priv->olympic_lap);
284 pci_release_regions(pdev);
287 pci_disable_device(pdev);
291 static int __devinit olympic_init(struct net_device *dev)
293 struct olympic_private *olympic_priv;
294 u8 *olympic_mmio, *init_srb,*adapter_addr;
296 unsigned int uaa_addr;
298 olympic_priv=(struct olympic_private *)dev->priv;
299 olympic_mmio=olympic_priv->olympic_mmio;
301 printk("%s \n", version);
302 printk("%s. I/O at %hx, MMIO at %p, LAP at %p, using irq %d\n", olympic_priv->olympic_card_name, (unsigned int) dev->base_addr,olympic_priv->olympic_mmio, olympic_priv->olympic_lap, dev->irq);
304 writel(readl(olympic_mmio+BCTL) | BCTL_SOFTRESET,olympic_mmio+BCTL);
306 while((readl(olympic_mmio+BCTL)) & BCTL_SOFTRESET) {
308 if(jiffies-t > 40*HZ) {
309 printk(KERN_ERR "IBM PCI tokenring card not responding.\n");
314 spin_lock_init(&olympic_priv->olympic_lock) ;
316 /* Needed for cardbus */
317 if(!(readl(olympic_mmio+BCTL) & BCTL_MODE_INDICATOR)) {
318 writel(readl(olympic_priv->olympic_mmio+FERMASK)|FERMASK_INT_BIT, olympic_mmio+FERMASK);
322 printk("BCTL: %x\n",readl(olympic_mmio+BCTL));
323 printk("GPR: %x\n",readw(olympic_mmio+GPR));
324 printk("SISRMASK: %x\n",readl(olympic_mmio+SISR_MASK));
326 /* Aaaahhh, You have got to be real careful setting GPR, the card
327 holds the previous values from flash memory, including autosense
330 writel(readl(olympic_mmio+BCTL)|BCTL_MIMREB,olympic_mmio+BCTL);
332 if (olympic_priv->olympic_ring_speed == 0) { /* Autosense */
333 writew(readw(olympic_mmio+GPR)|GPR_AUTOSENSE,olympic_mmio+GPR);
334 if (olympic_priv->olympic_message_level)
335 printk(KERN_INFO "%s: Ringspeed autosense mode on\n",olympic_priv->olympic_card_name);
336 } else if (olympic_priv->olympic_ring_speed == 16) {
337 if (olympic_priv->olympic_message_level)
338 printk(KERN_INFO "%s: Trying to open at 16 Mbps as requested\n", olympic_priv->olympic_card_name);
339 writew(GPR_16MBPS, olympic_mmio+GPR);
340 } else if (olympic_priv->olympic_ring_speed == 4) {
341 if (olympic_priv->olympic_message_level)
342 printk(KERN_INFO "%s: Trying to open at 4 Mbps as requested\n", olympic_priv->olympic_card_name) ;
343 writew(0, olympic_mmio+GPR);
346 writew(readw(olympic_mmio+GPR)|GPR_NEPTUNE_BF,olympic_mmio+GPR);
349 printk("GPR = %x\n",readw(olympic_mmio + GPR) ) ;
351 /* Solo has been paused to meet the Cardbus power
352 * specs if the adapter is cardbus. Check to
353 * see its been paused and then restart solo. The
354 * adapter should set the pause bit within 1 second.
357 if(!(readl(olympic_mmio+BCTL) & BCTL_MODE_INDICATOR)) {
359 while (!readl(olympic_mmio+CLKCTL) & CLKCTL_PAUSE) {
361 if(jiffies-t > 2*HZ) {
362 printk(KERN_ERR "IBM Cardbus tokenring adapter not responsing.\n") ;
366 writel(readl(olympic_mmio+CLKCTL) & ~CLKCTL_PAUSE, olympic_mmio+CLKCTL) ;
369 /* start solo init */
370 writel((1<<15),olympic_mmio+SISR_MASK_SUM);
373 while(!((readl(olympic_mmio+SISR_RR)) & SISR_SRB_REPLY)) {
375 if(jiffies-t > 15*HZ) {
376 printk(KERN_ERR "IBM PCI tokenring card not responding.\n");
381 writel(readw(olympic_mmio+LAPWWO),olympic_mmio+LAPA);
384 printk("LAPWWO: %x, LAPA: %x\n",readl(olympic_mmio+LAPWWO), readl(olympic_mmio+LAPA));
387 init_srb=olympic_priv->olympic_lap + ((readw(olympic_mmio+LAPWWO)) & (~0xf800));
392 printk("init_srb(%p): ",init_srb);
394 printk("%x ",readb(init_srb+i));
398 if(readw(init_srb+6)) {
399 printk(KERN_INFO "tokenring card initialization failed. errorcode : %x\n",readw(init_srb+6));
403 if (olympic_priv->olympic_message_level) {
404 if ( readb(init_srb +2) & 0x40) {
405 printk(KERN_INFO "Olympic: Adapter is FDX capable.\n") ;
407 printk(KERN_INFO "Olympic: Adapter cannot do FDX.\n");
411 uaa_addr=swab16(readw(init_srb+8));
414 printk("UAA resides at %x\n",uaa_addr);
417 writel(uaa_addr,olympic_mmio+LAPA);
418 adapter_addr=olympic_priv->olympic_lap + (uaa_addr & (~0xf800));
421 printk("adapter address: %02x:%02x:%02x:%02x:%02x:%02x\n",
422 readb(adapter_addr), readb(adapter_addr+1),readb(adapter_addr+2),
423 readb(adapter_addr+3),readb(adapter_addr+4),readb(adapter_addr+5));
426 memcpy_fromio(&dev->dev_addr[0], adapter_addr,6);
428 olympic_priv->olympic_addr_table_addr = swab16(readw(init_srb + 12));
429 olympic_priv->olympic_parms_addr = swab16(readw(init_srb + 14));
435 static int olympic_open(struct net_device *dev)
437 struct olympic_private *olympic_priv=(struct olympic_private *)dev->priv;
438 u8 *olympic_mmio=olympic_priv->olympic_mmio,*init_srb;
439 unsigned long flags, t;
440 char open_error[255] ;
441 int i, open_finished = 1 ;
443 DECLARE_WAITQUEUE(wait,current) ;
445 if(request_irq(dev->irq, &olympic_interrupt, SA_SHIRQ , "olympic", dev)) {
450 printk("BMCTL: %x\n",readl(olympic_mmio+BMCTL_SUM));
451 printk("pending ints: %x\n",readl(olympic_mmio+SISR_RR));
454 writel(SISR_MI,olympic_mmio+SISR_MASK_SUM);
456 writel(SISR_MI | SISR_SRB_REPLY, olympic_mmio+SISR_MASK); /* more ints later, doesn't stop arb cmd interrupt */
458 writel(LISR_LIE,olympic_mmio+LISR); /* more ints later */
460 /* adapter is closed, so SRB is pointed to by LAPWWO */
462 writel(readw(olympic_mmio+LAPWWO),olympic_mmio+LAPA);
463 init_srb=olympic_priv->olympic_lap + ((readw(olympic_mmio+LAPWWO)) & (~0xf800));
466 printk("LAPWWO: %x, LAPA: %x\n",readw(olympic_mmio+LAPWWO), readl(olympic_mmio+LAPA));
467 printk("SISR Mask = %04x\n", readl(olympic_mmio+SISR_MASK));
468 printk("Before the open command \n");
471 memset_io(init_srb,0,SRB_COMMAND_SIZE);
473 writeb(SRB_OPEN_ADAPTER,init_srb) ; /* open */
474 writeb(OLYMPIC_CLEAR_RET_CODE,init_srb+2);
476 /* If Network Monitor, instruct card to copy MAC frames through the ARB */
477 if (olympic_priv->olympic_network_monitor)
478 writew(swab16(OPEN_ADAPTER_ENABLE_FDX | OPEN_ADAPTER_PASS_ADC_MAC | OPEN_ADAPTER_PASS_ATT_MAC | OPEN_ADAPTER_PASS_BEACON), init_srb+8);
480 writew(swab16(OPEN_ADAPTER_ENABLE_FDX), init_srb+8);
482 /* Test OR of first 3 bytes as its totally possible for
483 * someone to set the first 2 bytes to be zero, although this
484 * is an error, the first byte must have bit 6 set to 1 */
486 if (olympic_priv->olympic_laa[0] | olympic_priv->olympic_laa[1] | olympic_priv->olympic_laa[2]) {
487 writeb(olympic_priv->olympic_laa[0],init_srb+12);
488 writeb(olympic_priv->olympic_laa[1],init_srb+13);
489 writeb(olympic_priv->olympic_laa[2],init_srb+14);
490 writeb(olympic_priv->olympic_laa[3],init_srb+15);
491 writeb(olympic_priv->olympic_laa[4],init_srb+16);
492 writeb(olympic_priv->olympic_laa[5],init_srb+17);
493 memcpy(dev->dev_addr,olympic_priv->olympic_laa,dev->addr_len) ;
495 writeb(1,init_srb+30);
497 spin_lock_irqsave(&olympic_priv->olympic_lock,flags);
498 olympic_priv->srb_queued=1;
500 writel(LISR_SRB_CMD,olympic_mmio+LISR_SUM);
501 spin_unlock_irqrestore(&olympic_priv->olympic_lock,flags);
505 add_wait_queue(&olympic_priv->srb_wait,&wait) ;
506 set_current_state(TASK_INTERRUPTIBLE) ;
508 while(olympic_priv->srb_queued) {
510 if(signal_pending(current)) {
511 printk(KERN_WARNING "%s: Signal received in open.\n",
513 printk(KERN_WARNING "SISR=%x LISR=%x\n",
514 readl(olympic_mmio+SISR),
515 readl(olympic_mmio+LISR));
516 olympic_priv->srb_queued=0;
519 if ((jiffies-t) > 10*HZ) {
520 printk(KERN_WARNING "%s: SRB timed out. \n",dev->name) ;
521 olympic_priv->srb_queued=0;
524 set_current_state(TASK_INTERRUPTIBLE) ;
526 remove_wait_queue(&olympic_priv->srb_wait,&wait) ;
527 set_current_state(TASK_RUNNING) ;
528 olympic_priv->srb_queued = 0 ;
530 printk("init_srb(%p): ",init_srb);
532 printk("%02x ",readb(init_srb+i));
536 /* If we get the same return response as we set, the interrupt wasn't raised and the open
540 if(readb(init_srb+2)== OLYMPIC_CLEAR_RET_CODE) {
541 printk(KERN_WARNING "%s: Adapter Open time out or error.\n", dev->name) ;
545 if(readb(init_srb+2)!=0) {
546 if (readb(init_srb+2) == 0x07) {
547 if (!olympic_priv->olympic_ring_speed && open_finished) { /* Autosense , first time around */
548 printk(KERN_WARNING "%s: Retrying at different ring speed \n", dev->name);
552 strcpy(open_error, open_maj_error[(readb(init_srb+7) & 0xf0) >> 4]) ;
553 strcat(open_error," - ") ;
554 strcat(open_error, open_min_error[(readb(init_srb+7) & 0x0f)]) ;
556 if (!olympic_priv->olympic_ring_speed && ((readb(init_srb+7) & 0x0f) == 0x0d)) {
557 printk(KERN_WARNING "%s: Tried to autosense ring speed with no monitors present\n",dev->name);
558 printk(KERN_WARNING "%s: Please try again with a specified ring speed \n",dev->name);
559 free_irq(dev->irq, dev);
563 printk(KERN_WARNING "%s: %s\n",dev->name,open_error);
564 free_irq(dev->irq,dev) ;
567 } /* if autosense && open_finished */
568 } else if (init_srb[2] == 0x32) {
569 printk(KERN_WARNING "%s: Invalid LAA: %02x:%02x:%02x:%02x:%02x:%02x\n",
571 olympic_priv->olympic_laa[0],
572 olympic_priv->olympic_laa[1],
573 olympic_priv->olympic_laa[2],
574 olympic_priv->olympic_laa[3],
575 olympic_priv->olympic_laa[4],
576 olympic_priv->olympic_laa[5]) ;
577 free_irq(dev->irq,dev) ;
580 printk(KERN_WARNING "%s: Bad OPEN response: %x\n", dev->name,init_srb[2]);
581 free_irq(dev->irq, dev);
586 } while (!(open_finished)) ; /* Will only loop if ring speed mismatch re-open attempted && autosense is on */
588 if (readb(init_srb+18) & (1<<3))
589 if (olympic_priv->olympic_message_level)
590 printk(KERN_INFO "%s: Opened in FDX Mode\n",dev->name);
592 if (readb(init_srb+18) & (1<<1))
593 olympic_priv->olympic_ring_speed = 100 ;
594 else if (readb(init_srb+18) & 1)
595 olympic_priv->olympic_ring_speed = 16 ;
597 olympic_priv->olympic_ring_speed = 4 ;
599 if (olympic_priv->olympic_message_level)
600 printk(KERN_INFO "%s: Opened in %d Mbps mode\n",dev->name, olympic_priv->olympic_ring_speed);
602 olympic_priv->asb = swab16(readw(init_srb+8));
603 olympic_priv->srb = swab16(readw(init_srb+10));
604 olympic_priv->arb = swab16(readw(init_srb+12));
605 olympic_priv->trb = swab16(readw(init_srb+16));
607 olympic_priv->olympic_receive_options = 0x01 ;
608 olympic_priv->olympic_copy_all_options = 0 ;
612 writel((3<<16),olympic_mmio+BMCTL_RWM); /* Ensure end of frame generated interrupts */
614 writel(BMCTL_RX_DIS|3,olympic_mmio+BMCTL_RWM); /* Yes, this the enables RX channel */
616 for(i=0;i<OLYMPIC_RX_RING_SIZE;i++) {
620 skb=dev_alloc_skb(olympic_priv->pkt_buf_sz);
626 olympic_priv->olympic_rx_ring[i].buffer = cpu_to_le32(pci_map_single(olympic_priv->pdev,
627 skb->data,olympic_priv->pkt_buf_sz, PCI_DMA_FROMDEVICE)) ;
628 olympic_priv->olympic_rx_ring[i].res_length = cpu_to_le32(olympic_priv->pkt_buf_sz);
629 olympic_priv->rx_ring_skb[i]=skb;
633 printk(KERN_WARNING "%s: Not enough memory to allocate rx buffers. Adapter disabled\n",dev->name);
634 free_irq(dev->irq, dev);
638 olympic_priv->rx_ring_dma_addr = pci_map_single(olympic_priv->pdev,olympic_priv->olympic_rx_ring,
639 sizeof(struct olympic_rx_desc) * OLYMPIC_RX_RING_SIZE, PCI_DMA_TODEVICE);
640 writel(olympic_priv->rx_ring_dma_addr, olympic_mmio+RXDESCQ);
641 writel(olympic_priv->rx_ring_dma_addr, olympic_mmio+RXCDA);
642 writew(i, olympic_mmio+RXDESCQCNT);
644 olympic_priv->rx_status_ring_dma_addr = pci_map_single(olympic_priv->pdev, olympic_priv->olympic_rx_status_ring,
645 sizeof(struct olympic_rx_status) * OLYMPIC_RX_RING_SIZE, PCI_DMA_FROMDEVICE);
646 writel(olympic_priv->rx_status_ring_dma_addr, olympic_mmio+RXSTATQ);
647 writel(olympic_priv->rx_status_ring_dma_addr, olympic_mmio+RXCSA);
649 olympic_priv->rx_ring_last_received = OLYMPIC_RX_RING_SIZE - 1; /* last processed rx status */
650 olympic_priv->rx_status_last_received = OLYMPIC_RX_RING_SIZE - 1;
652 writew(i, olympic_mmio+RXSTATQCNT);
655 printk("# of rx buffers: %d, RXENQ: %x\n",i, readw(olympic_mmio+RXENQ));
656 printk("RXCSA: %x, rx_status_ring[0]: %p\n",readl(olympic_mmio+RXCSA),&olympic_priv->olympic_rx_status_ring[0]);
657 printk(" stat_ring[1]: %p, stat_ring[2]: %p, stat_ring[3]: %p\n", &(olympic_priv->olympic_rx_status_ring[1]), &(olympic_priv->olympic_rx_status_ring[2]), &(olympic_priv->olympic_rx_status_ring[3]) );
658 printk(" stat_ring[4]: %p, stat_ring[5]: %p, stat_ring[6]: %p\n", &(olympic_priv->olympic_rx_status_ring[4]), &(olympic_priv->olympic_rx_status_ring[5]), &(olympic_priv->olympic_rx_status_ring[6]) );
659 printk(" stat_ring[7]: %p\n", &(olympic_priv->olympic_rx_status_ring[7]) );
661 printk("RXCDA: %x, rx_ring[0]: %p\n",readl(olympic_mmio+RXCDA),&olympic_priv->olympic_rx_ring[0]);
662 printk("Rx_ring_dma_addr = %08x, rx_status_dma_addr = %08x\n",
663 olympic_priv->rx_ring_dma_addr,olympic_priv->rx_status_ring_dma_addr) ;
666 writew((((readw(olympic_mmio+RXENQ)) & 0x8000) ^ 0x8000) | i,olympic_mmio+RXENQ);
669 printk("# of rx buffers: %d, RXENQ: %x\n",i, readw(olympic_mmio+RXENQ));
670 printk("RXCSA: %x, rx_ring[0]: %p\n",readl(olympic_mmio+RXCSA),&olympic_priv->olympic_rx_status_ring[0]);
671 printk("RXCDA: %x, rx_ring[0]: %p\n",readl(olympic_mmio+RXCDA),&olympic_priv->olympic_rx_ring[0]);
674 writel(SISR_RX_STATUS | SISR_RX_NOBUF,olympic_mmio+SISR_MASK_SUM);
678 writel(BMCTL_TX1_DIS,olympic_mmio+BMCTL_RWM); /* Yes, this enables TX channel 1 */
679 for(i=0;i<OLYMPIC_TX_RING_SIZE;i++)
680 olympic_priv->olympic_tx_ring[i].buffer=0xdeadbeef;
682 olympic_priv->free_tx_ring_entries=OLYMPIC_TX_RING_SIZE;
683 olympic_priv->tx_ring_dma_addr = pci_map_single(olympic_priv->pdev,olympic_priv->olympic_tx_ring,
684 sizeof(struct olympic_tx_desc) * OLYMPIC_TX_RING_SIZE,PCI_DMA_TODEVICE) ;
685 writel(olympic_priv->tx_ring_dma_addr, olympic_mmio+TXDESCQ_1);
686 writel(olympic_priv->tx_ring_dma_addr, olympic_mmio+TXCDA_1);
687 writew(OLYMPIC_TX_RING_SIZE, olympic_mmio+TXDESCQCNT_1);
689 olympic_priv->tx_status_ring_dma_addr = pci_map_single(olympic_priv->pdev, olympic_priv->olympic_tx_status_ring,
690 sizeof(struct olympic_tx_status) * OLYMPIC_TX_RING_SIZE, PCI_DMA_FROMDEVICE);
691 writel(olympic_priv->tx_status_ring_dma_addr,olympic_mmio+TXSTATQ_1);
692 writel(olympic_priv->tx_status_ring_dma_addr,olympic_mmio+TXCSA_1);
693 writew(OLYMPIC_TX_RING_SIZE,olympic_mmio+TXSTATQCNT_1);
695 olympic_priv->tx_ring_free=0; /* next entry in tx ring to use */
696 olympic_priv->tx_ring_last_status=OLYMPIC_TX_RING_SIZE-1; /* last processed tx status */
698 writel(0xffffffff, olympic_mmio+EISR_RWM) ; /* clean the eisr */
699 writel(0,olympic_mmio+EISR) ;
700 writel(EISR_MASK_OPTIONS,olympic_mmio+EISR_MASK) ; /* enables most of the TX error interrupts */
701 writel(SISR_TX1_EOF | SISR_ADAPTER_CHECK | SISR_ARB_CMD | SISR_TRB_REPLY | SISR_ASB_FREE | SISR_ERR,olympic_mmio+SISR_MASK_SUM);
704 printk("BMCTL: %x\n",readl(olympic_mmio+BMCTL_SUM));
705 printk("SISR MASK: %x\n",readl(olympic_mmio+SISR_MASK));
708 if (olympic_priv->olympic_network_monitor) {
711 oat = (u8 *)(olympic_priv->olympic_lap + olympic_priv->olympic_addr_table_addr) ;
712 opt = (u8 *)(olympic_priv->olympic_lap + olympic_priv->olympic_parms_addr) ;
714 printk("%s: Node Address: %02x:%02x:%02x:%02x:%02x:%02x\n",dev->name,
715 readb(oat+offsetof(struct olympic_adapter_addr_table,node_addr)),
716 readb(oat+offsetof(struct olympic_adapter_addr_table,node_addr)+1),
717 readb(oat+offsetof(struct olympic_adapter_addr_table,node_addr)+2),
718 readb(oat+offsetof(struct olympic_adapter_addr_table,node_addr)+3),
719 readb(oat+offsetof(struct olympic_adapter_addr_table,node_addr)+4),
720 readb(oat+offsetof(struct olympic_adapter_addr_table,node_addr)+5));
721 printk("%s: Functional Address: %02x:%02x:%02x:%02x\n",dev->name,
722 readb(oat+offsetof(struct olympic_adapter_addr_table,func_addr)),
723 readb(oat+offsetof(struct olympic_adapter_addr_table,func_addr)+1),
724 readb(oat+offsetof(struct olympic_adapter_addr_table,func_addr)+2),
725 readb(oat+offsetof(struct olympic_adapter_addr_table,func_addr)+3));
726 printk("%s: NAUN Address: %02x:%02x:%02x:%02x:%02x:%02x\n",dev->name,
727 readb(opt+offsetof(struct olympic_parameters_table, up_node_addr)),
728 readb(opt+offsetof(struct olympic_parameters_table, up_node_addr)+1),
729 readb(opt+offsetof(struct olympic_parameters_table, up_node_addr)+2),
730 readb(opt+offsetof(struct olympic_parameters_table, up_node_addr)+3),
731 readb(opt+offsetof(struct olympic_parameters_table, up_node_addr)+4),
732 readb(opt+offsetof(struct olympic_parameters_table, up_node_addr)+5));
735 netif_start_queue(dev);
741 * When we enter the rx routine we do not know how many frames have been
742 * queued on the rx channel. Therefore we start at the next rx status
743 * position and travel around the receive ring until we have completed
746 * This means that we may process the frame before we receive the end
747 * of frame interrupt. This is why we always test the status instead
748 * of blindly processing the next frame.
750 * We also remove the last 4 bytes from the packet as well, these are
751 * just token ring trailer info and upset protocols that don't check
752 * their own length, i.e. SNA.
755 static void olympic_rx(struct net_device *dev)
757 struct olympic_private *olympic_priv=(struct olympic_private *)dev->priv;
758 u8 *olympic_mmio=olympic_priv->olympic_mmio;
759 struct olympic_rx_status *rx_status;
760 struct olympic_rx_desc *rx_desc ;
761 int rx_ring_last_received,length, buffer_cnt, cpy_length, frag_len;
762 struct sk_buff *skb, *skb2;
765 rx_status=&(olympic_priv->olympic_rx_status_ring[(olympic_priv->rx_status_last_received + 1) & (OLYMPIC_RX_RING_SIZE - 1)]) ;
767 while (rx_status->status_buffercnt) {
768 u32 l_status_buffercnt;
770 olympic_priv->rx_status_last_received++ ;
771 olympic_priv->rx_status_last_received &= (OLYMPIC_RX_RING_SIZE -1);
773 printk("rx status: %x rx len: %x \n", le32_to_cpu(rx_status->status_buffercnt), le32_to_cpu(rx_status->fragmentcnt_framelen));
775 length = le32_to_cpu(rx_status->fragmentcnt_framelen) & 0xffff;
776 buffer_cnt = le32_to_cpu(rx_status->status_buffercnt) & 0xffff;
777 i = buffer_cnt ; /* Need buffer_cnt later for rxenq update */
778 frag_len = le32_to_cpu(rx_status->fragmentcnt_framelen) >> 16;
781 printk("length: %x, frag_len: %x, buffer_cnt: %x\n", length, frag_len, buffer_cnt);
783 l_status_buffercnt = le32_to_cpu(rx_status->status_buffercnt);
784 if(l_status_buffercnt & 0xC0000000) {
785 if (l_status_buffercnt & 0x3B000000) {
786 if (olympic_priv->olympic_message_level) {
787 if (l_status_buffercnt & (1<<29)) /* Rx Frame Truncated */
788 printk(KERN_WARNING "%s: Rx Frame Truncated \n",dev->name);
789 if (l_status_buffercnt & (1<<28)) /*Rx receive overrun */
790 printk(KERN_WARNING "%s: Rx Frame Receive overrun \n",dev->name);
791 if (l_status_buffercnt & (1<<27)) /* No receive buffers */
792 printk(KERN_WARNING "%s: No receive buffers \n",dev->name);
793 if (l_status_buffercnt & (1<<25)) /* Receive frame error detect */
794 printk(KERN_WARNING "%s: Receive frame error detect \n",dev->name);
795 if (l_status_buffercnt & (1<<24)) /* Received Error Detect */
796 printk(KERN_WARNING "%s: Received Error Detect \n",dev->name);
798 olympic_priv->rx_ring_last_received += i ;
799 olympic_priv->rx_ring_last_received &= (OLYMPIC_RX_RING_SIZE -1) ;
800 olympic_priv->olympic_stats.rx_errors++;
803 if (buffer_cnt == 1) {
804 skb = dev_alloc_skb(max_t(int, olympic_priv->pkt_buf_sz,length)) ;
806 skb = dev_alloc_skb(length) ;
810 printk(KERN_WARNING "%s: Not enough memory to copy packet to upper layers. \n",dev->name) ;
811 olympic_priv->olympic_stats.rx_dropped++ ;
812 /* Update counters even though we don't transfer the frame */
813 olympic_priv->rx_ring_last_received += i ;
814 olympic_priv->rx_ring_last_received &= (OLYMPIC_RX_RING_SIZE -1) ;
818 /* Optimise based upon number of buffers used.
819 If only one buffer is used we can simply swap the buffers around.
820 If more than one then we must use the new buffer and copy the information
821 first. Ideally all frames would be in a single buffer, this can be tuned by
822 altering the buffer size. If the length of the packet is less than
823 1500 bytes we're going to copy it over anyway to stop packets getting
824 dropped from sockets with buffers smaller than our pkt_buf_sz. */
827 olympic_priv->rx_ring_last_received++ ;
828 olympic_priv->rx_ring_last_received &= (OLYMPIC_RX_RING_SIZE -1);
829 rx_ring_last_received = olympic_priv->rx_ring_last_received ;
831 skb2=olympic_priv->rx_ring_skb[rx_ring_last_received] ;
833 pci_unmap_single(olympic_priv->pdev,
834 le32_to_cpu(olympic_priv->olympic_rx_ring[rx_ring_last_received].buffer),
835 olympic_priv->pkt_buf_sz,PCI_DMA_FROMDEVICE) ;
836 skb_put(skb2,length-4);
837 skb2->protocol = tr_type_trans(skb2,dev);
838 olympic_priv->olympic_rx_ring[rx_ring_last_received].buffer =
839 cpu_to_le32(pci_map_single(olympic_priv->pdev, skb->data,
840 olympic_priv->pkt_buf_sz, PCI_DMA_FROMDEVICE));
841 olympic_priv->olympic_rx_ring[rx_ring_last_received].res_length =
842 cpu_to_le32(olympic_priv->pkt_buf_sz);
843 olympic_priv->rx_ring_skb[rx_ring_last_received] = skb ;
846 pci_dma_sync_single_for_cpu(olympic_priv->pdev,
847 le32_to_cpu(olympic_priv->olympic_rx_ring[rx_ring_last_received].buffer),
848 olympic_priv->pkt_buf_sz,PCI_DMA_FROMDEVICE) ;
849 memcpy(skb_put(skb,length-4),olympic_priv->rx_ring_skb[rx_ring_last_received]->data,length-4) ;
850 pci_dma_sync_single_for_device(olympic_priv->pdev,
851 le32_to_cpu(olympic_priv->olympic_rx_ring[rx_ring_last_received].buffer),
852 olympic_priv->pkt_buf_sz,PCI_DMA_FROMDEVICE) ;
853 skb->protocol = tr_type_trans(skb,dev) ;
857 do { /* Walk the buffers */
858 olympic_priv->rx_ring_last_received++ ;
859 olympic_priv->rx_ring_last_received &= (OLYMPIC_RX_RING_SIZE -1);
860 rx_ring_last_received = olympic_priv->rx_ring_last_received ;
861 pci_dma_sync_single_for_cpu(olympic_priv->pdev,
862 le32_to_cpu(olympic_priv->olympic_rx_ring[rx_ring_last_received].buffer),
863 olympic_priv->pkt_buf_sz,PCI_DMA_FROMDEVICE) ;
864 rx_desc = &(olympic_priv->olympic_rx_ring[rx_ring_last_received]);
865 cpy_length = (i == 1 ? frag_len : le32_to_cpu(rx_desc->res_length));
866 memcpy(skb_put(skb, cpy_length), olympic_priv->rx_ring_skb[rx_ring_last_received]->data, cpy_length) ;
867 pci_dma_sync_single_for_device(olympic_priv->pdev,
868 le32_to_cpu(olympic_priv->olympic_rx_ring[rx_ring_last_received].buffer),
869 olympic_priv->pkt_buf_sz,PCI_DMA_FROMDEVICE) ;
871 skb_trim(skb,skb->len-4) ;
872 skb->protocol = tr_type_trans(skb,dev);
875 dev->last_rx = jiffies ;
876 olympic_priv->olympic_stats.rx_packets++ ;
877 olympic_priv->olympic_stats.rx_bytes += length ;
878 } /* if skb == null */
879 } /* If status & 0x3b */
881 } else { /*if buffercnt & 0xC */
882 olympic_priv->rx_ring_last_received += i ;
883 olympic_priv->rx_ring_last_received &= (OLYMPIC_RX_RING_SIZE - 1) ;
886 rx_status->fragmentcnt_framelen = 0 ;
887 rx_status->status_buffercnt = 0 ;
888 rx_status = &(olympic_priv->olympic_rx_status_ring[(olympic_priv->rx_status_last_received+1) & (OLYMPIC_RX_RING_SIZE -1) ]);
890 writew((((readw(olympic_mmio+RXENQ)) & 0x8000) ^ 0x8000) | buffer_cnt , olympic_mmio+RXENQ);
/*
 * olympic_freemem - release every rx skb and all PCI DMA mappings for the
 * rx/tx descriptor and status rings.
 *
 * A rx descriptor whose .buffer field equals 0xdeadbeef is treated as
 * "already unmapped" and its pci_unmap_single() is skipped.  Frees skbs
 * with dev_kfree_skb_irq(), so callers may be in interrupt context (and
 * the interrupt handler does call this on fatal errors).
 *
 * NOTE(review): the skb is freed before the 0xdeadbeef sentinel check —
 * presumably rx_ring_skb[] is always populated here; confirm against the
 * ring setup code (not visible in this chunk).
 */
895 static void olympic_freemem(struct net_device *dev)
897 	struct olympic_private *olympic_priv=(struct olympic_private *)dev->priv;
900 	for(i=0;i<OLYMPIC_RX_RING_SIZE;i++) {
901 		dev_kfree_skb_irq(olympic_priv->rx_ring_skb[olympic_priv->rx_status_last_received]);
902 		if (olympic_priv->olympic_rx_ring[olympic_priv->rx_status_last_received].buffer != 0xdeadbeef) {
903 			pci_unmap_single(olympic_priv->pdev,
904 				le32_to_cpu(olympic_priv->olympic_rx_ring[olympic_priv->rx_status_last_received].buffer),
905 				olympic_priv->pkt_buf_sz, PCI_DMA_FROMDEVICE);
/* Ring index wraps: OLYMPIC_RX_RING_SIZE must be a power of two for this mask. */
907 		olympic_priv->rx_status_last_received++;
908 		olympic_priv->rx_status_last_received&=OLYMPIC_RX_RING_SIZE-1;
/* Unmap the four coherent ring areas: rx status, rx descriptors, tx status,
 * tx descriptors.  Direction matches how each ring is used by the adapter. */
911 	pci_unmap_single(olympic_priv->pdev, olympic_priv->rx_status_ring_dma_addr,
912 		sizeof(struct olympic_rx_status) * OLYMPIC_RX_RING_SIZE, PCI_DMA_FROMDEVICE);
913 	pci_unmap_single(olympic_priv->pdev, olympic_priv->rx_ring_dma_addr,
914 		sizeof(struct olympic_rx_desc) * OLYMPIC_RX_RING_SIZE, PCI_DMA_TODEVICE);
916 	pci_unmap_single(olympic_priv->pdev, olympic_priv->tx_status_ring_dma_addr,
917 		sizeof(struct olympic_tx_status) * OLYMPIC_TX_RING_SIZE, PCI_DMA_FROMDEVICE);
918 	pci_unmap_single(olympic_priv->pdev, olympic_priv->tx_ring_dma_addr,
919 		sizeof(struct olympic_tx_desc) * OLYMPIC_TX_RING_SIZE, PCI_DMA_TODEVICE);
/*
 * olympic_interrupt - main interrupt service routine.
 *
 * Checks SISR_MI to see whether the interrupt belongs to this adapter,
 * then reads SISR via the read-and-reset register (SISR_RR) and dispatches
 * on the individual status bits: SRB reply, tx completion (TX1_EOF),
 * rx status, adapter check, ASB free, ARB command, TRB reply, rx no-buffer
 * and error (SISR_ERR).  Runs under olympic_priv->olympic_lock.
 *
 * Fatal paths (hotswap removal reading 0xffffffff, EISR errors, adapter
 * check) free the rings via olympic_freemem() and release the IRQ.
 */
924 static irqreturn_t olympic_interrupt(int irq, void *dev_id, struct pt_regs *regs)
926 	struct net_device *dev= (struct net_device *)dev_id;
927 	struct olympic_private *olympic_priv=(struct olympic_private *)dev->priv;
928 	u8 *olympic_mmio=olympic_priv->olympic_mmio;
930 	u8 *adapter_check_area ;
933 	 * Read sisr but don't reset it yet.
934 	 * The indication bit may have been set but the interrupt latch
935 	 * bit may not be set, so we'd lose the interrupt later.
937 	sisr=readl(olympic_mmio+SISR) ;
938 	if (!(sisr & SISR_MI))	/* Interrupt isn't for us */
940 	sisr=readl(olympic_mmio+SISR_RR) ;  /* Read & Reset sisr */
942 	spin_lock(&olympic_priv->olympic_lock);
944 	/* Hotswap gives us this on removal */
945 	if (sisr == 0xffffffff) {
946 		printk(KERN_WARNING "%s: Hotswap adapter removal.\n",dev->name) ;
947 		olympic_freemem(dev) ;
948 		free_irq(dev->irq, dev) ;
950 		spin_unlock(&olympic_priv->olympic_lock) ;
954 	if (sisr & (SISR_SRB_REPLY | SISR_TX1_EOF | SISR_RX_STATUS | SISR_ADAPTER_CHECK |
955 			SISR_ASB_FREE | SISR_ARB_CMD | SISR_TRB_REPLY | SISR_RX_NOBUF | SISR_ERR)) {
957 		/* If we ever get this the adapter is seriously dead. Only a reset is going to
958 		 * bring it back to life. We're talking pci bus errors and such like :( */
959 		if((sisr & SISR_ERR) && (readl(olympic_mmio+EISR) & EISR_MASK_OPTIONS)) {
960 			printk(KERN_ERR "Olympic: EISR Error, EISR=%08x\n",readl(olympic_mmio+EISR)) ;
961 			printk(KERN_ERR "The adapter must be reset to clear this condition.\n") ;
962 			printk(KERN_ERR "Please report this error to the driver maintainer and/\n") ;
963 			printk(KERN_ERR "or the linux-tr mailing list.\n") ;
964 			olympic_freemem(dev) ;
965 			free_irq(dev->irq, dev) ;
967 			spin_unlock(&olympic_priv->olympic_lock) ;
/* SRB completed: a sleeper (srb_queued==1) is woken, otherwise the
 * non-sleeping bottom-half path (srb_queued==2) is run inline. */
971 		if(sisr & SISR_SRB_REPLY) {
972 			if(olympic_priv->srb_queued==1) {
973 				wake_up_interruptible(&olympic_priv->srb_wait);
974 			} else if (olympic_priv->srb_queued==2) {
975 				olympic_srb_bh(dev) ;
977 			olympic_priv->srb_queued=0;
978 		} /* SISR_SRB_REPLY */
980 		/* We shouldn't ever miss the Tx interrupt, but the you never know, hence the loop to ensure
981 			we get all tx completions. */
982 		if (sisr & SISR_TX1_EOF) {
983 			while(olympic_priv->olympic_tx_status_ring[(olympic_priv->tx_ring_last_status + 1) & (OLYMPIC_TX_RING_SIZE-1)].status) {
984 				olympic_priv->tx_ring_last_status++;
985 				olympic_priv->tx_ring_last_status &= (OLYMPIC_TX_RING_SIZE-1);
986 				olympic_priv->free_tx_ring_entries++;
987 				olympic_priv->olympic_stats.tx_bytes += olympic_priv->tx_ring_skb[olympic_priv->tx_ring_last_status]->len;
988 				olympic_priv->olympic_stats.tx_packets++ ;
989 				pci_unmap_single(olympic_priv->pdev,
990 					le32_to_cpu(olympic_priv->olympic_tx_ring[olympic_priv->tx_ring_last_status].buffer),
991 					olympic_priv->tx_ring_skb[olympic_priv->tx_ring_last_status]->len,PCI_DMA_TODEVICE);
992 				dev_kfree_skb_irq(olympic_priv->tx_ring_skb[olympic_priv->tx_ring_last_status]);
/* Mark the descriptor unmapped (same sentinel olympic_freemem checks). */
993 				olympic_priv->olympic_tx_ring[olympic_priv->tx_ring_last_status].buffer=0xdeadbeef;
994 				olympic_priv->olympic_tx_status_ring[olympic_priv->tx_ring_last_status].status=0;
996 			netif_wake_queue(dev);
999 		if (sisr & SISR_RX_STATUS) {
1001 		} /* SISR_RX_STATUS */
/* Adapter check: dump the 8 diagnostic bytes the adapter placed in LAP
 * memory, then tear the interface down — the hardware has given up. */
1003 		if (sisr & SISR_ADAPTER_CHECK) {
1004 			netif_stop_queue(dev);
1005 			printk(KERN_WARNING "%s: Adapter Check Interrupt Raised, 8 bytes of information follow:\n", dev->name);
1006 			writel(readl(olympic_mmio+LAPWWC),olympic_mmio+LAPA);
1007 			adapter_check_area = olympic_priv->olympic_lap + ((readl(olympic_mmio+LAPWWC)) & (~0xf800)) ;
1008 			printk(KERN_WARNING "%s: Bytes %02x:%02x:%02x:%02x:%02x:%02x:%02x:%02x\n",dev->name, readb(adapter_check_area+0), readb(adapter_check_area+1), readb(adapter_check_area+2), readb(adapter_check_area+3), readb(adapter_check_area+4), readb(adapter_check_area+5), readb(adapter_check_area+6), readb(adapter_check_area+7)) ;
1009 			olympic_freemem(dev) ;
1010 			free_irq(dev->irq, dev) ;
1012 			spin_unlock(&olympic_priv->olympic_lock) ;
1014 		} /* SISR_ADAPTER_CHECK */
1016 		if (sisr & SISR_ASB_FREE) {
1017 			/* Wake up anything that is waiting for the asb response */
1018 			if (olympic_priv->asb_queued) {
1019 				olympic_asb_bh(dev) ;
1021 		} /* SISR_ASB_FREE */
1023 		if (sisr & SISR_ARB_CMD) {
1024 			olympic_arb_cmd(dev) ;
1025 		} /* SISR_ARB_CMD */
1027 		if (sisr & SISR_TRB_REPLY) {
1028 			/* Wake up anything that is waiting for the trb response */
1029 			if (olympic_priv->trb_queued) {
1030 				wake_up_interruptible(&olympic_priv->trb_wait);
1032 			olympic_priv->trb_queued = 0 ;
1033 		} /* SISR_TRB_REPLY */
1035 		if (sisr & SISR_RX_NOBUF) {
1036 			/* According to the documentation, we don't have to do anything, but trapping it keeps it out of
1037 				/var/log/messages. */
1038 		} /* SISR_RX_NOBUF */
1040 		printk(KERN_WARNING "%s: Unexpected interrupt: %x\n",dev->name, sisr);
1041 		printk(KERN_WARNING "%s: SISR_MASK: %x\n",dev->name, readl(olympic_mmio+SISR_MASK)) ;
1042 	} /* One if the interrupts we want */
/* Re-enable the master interrupt before returning. */
1043 	writel(SISR_MI,olympic_mmio+SISR_MASK_SUM);
1045 	spin_unlock(&olympic_priv->olympic_lock) ;
/*
 * olympic_xmit - hard_start_xmit handler: queue one skb on the tx ring.
 *
 * Stops the netif queue up front, then — if a free tx descriptor exists —
 * maps skb->data for DMA, stores length with bit 31 set in status_length
 * (presumably the "descriptor valid" bit; confirm against the hardware
 * docs), kicks TXENQ_1 with a toggled bit 15, and re-wakes the queue.
 * All ring manipulation happens under olympic_lock with IRQs saved.
 */
1049 static int olympic_xmit(struct sk_buff *skb, struct net_device *dev)
1051 	struct olympic_private *olympic_priv=(struct olympic_private *)dev->priv;
1052 	u8 *olympic_mmio=olympic_priv->olympic_mmio;
1053 	unsigned long flags ;
1055 	spin_lock_irqsave(&olympic_priv->olympic_lock, flags);
1057 	netif_stop_queue(dev);
1059 	if(olympic_priv->free_tx_ring_entries) {
1060 		olympic_priv->olympic_tx_ring[olympic_priv->tx_ring_free].buffer =
1061 			cpu_to_le32(pci_map_single(olympic_priv->pdev, skb->data, skb->len,PCI_DMA_TODEVICE));
1062 		olympic_priv->olympic_tx_ring[olympic_priv->tx_ring_free].status_length = cpu_to_le32(skb->len | (0x80000000));
1063 		olympic_priv->tx_ring_skb[olympic_priv->tx_ring_free]=skb;
1064 		olympic_priv->free_tx_ring_entries--;
1066 		olympic_priv->tx_ring_free++;
1067 		olympic_priv->tx_ring_free &= (OLYMPIC_TX_RING_SIZE-1);
/* Toggle bit 15 and enqueue one frame; register semantics per adapter spec. */
1068 		writew((((readw(olympic_mmio+TXENQ_1)) & 0x8000) ^ 0x8000) | 1,olympic_mmio+TXENQ_1);
1069 		netif_wake_queue(dev);
1070 		spin_unlock_irqrestore(&olympic_priv->olympic_lock,flags);
/* No free descriptors: queue stays stopped; unlock and fall through
 * (return path is on lines not shown in this listing). */
1073 		spin_unlock_irqrestore(&olympic_priv->olympic_lock,flags);
/*
 * olympic_close - net_device stop handler.
 *
 * Issues SRB_CLOSE_ADAPTER through the SRB mailbox, then sleeps on
 * srb_wait (interruptible) until the SRB completion interrupt clears
 * srb_queued, a signal arrives, or 60*HZ jiffies elapse.  Afterwards it
 * frees all ring resources via olympic_freemem(), resets the tx/rx FIFOs
 * and busmaster logic through BCTL bits 13-14, and releases the IRQ.
 */
1080 static int olympic_close(struct net_device *dev)
1082 	struct olympic_private *olympic_priv=(struct olympic_private *)dev->priv;
1083 	u8 *olympic_mmio=olympic_priv->olympic_mmio,*srb;
1084 	unsigned long t,flags;
1086 	DECLARE_WAITQUEUE(wait,current) ;
1088 	netif_stop_queue(dev);
/* Point LAPA at the SRB and compute its CPU-visible address in LAP space. */
1090 	writel(olympic_priv->srb,olympic_mmio+LAPA);
1091 	srb=olympic_priv->olympic_lap + (olympic_priv->srb & (~0xf800));
1093 	writeb(SRB_CLOSE_ADAPTER,srb+0);
1095 	writeb(OLYMPIC_CLEAR_RET_CODE,srb+2);
1097 	spin_lock_irqsave(&olympic_priv->olympic_lock,flags);
/* srb_queued==1 means "a task is sleeping on srb_wait" (vs. 2 = bh mode). */
1098 	olympic_priv->srb_queued=1;
1100 	writel(LISR_SRB_CMD,olympic_mmio+LISR_SUM);
1101 	spin_unlock_irqrestore(&olympic_priv->olympic_lock,flags);
1105 	add_wait_queue(&olympic_priv->srb_wait,&wait) ;
1106 	set_current_state(TASK_INTERRUPTIBLE) ;
1108 	while(olympic_priv->srb_queued) {
1110 		if(signal_pending(current)) {
1111 			printk(KERN_WARNING "%s: SRB timed out.\n",dev->name);
1112 			printk(KERN_WARNING "SISR=%x MISR=%x\n",readl(olympic_mmio+SISR),readl(olympic_mmio+LISR));
1113 			olympic_priv->srb_queued=0;
/* 60-second hard timeout on the close SRB; not treated as fatal. */
1116 		if ((jiffies-t) > 60*HZ) {
1117 			printk(KERN_WARNING "%s: SRB timed out. May not be fatal. \n",dev->name) ;
1118 			olympic_priv->srb_queued=0;
1121 		set_current_state(TASK_INTERRUPTIBLE) ;
1123 	remove_wait_queue(&olympic_priv->srb_wait,&wait) ;
1124 	set_current_state(TASK_RUNNING) ;
1126 	olympic_priv->rx_status_last_received++;
1127 	olympic_priv->rx_status_last_received&=OLYMPIC_RX_RING_SIZE-1;
1129 	olympic_freemem(dev) ;
1131 	/* reset tx/rx fifo's and busmaster logic */
1133 	writel(readl(olympic_mmio+BCTL)|(3<<13),olympic_mmio+BCTL);
1135 	writel(readl(olympic_mmio+BCTL)&~(3<<13),olympic_mmio+BCTL);
/* Debug dump of the SRB reply bytes (loop body visible at +1142). */
1140 	printk("srb(%p): ",srb);
1142 		printk("%x ",readb(srb+i));
1146 	free_irq(dev->irq,dev);
/*
 * olympic_set_rx_mode - set_multicast_list handler.
 *
 * Two independent jobs: (1) if the promiscuous/copy-all option bits have
 * changed, issue SRB_MODIFY_RECEIVE_OPTIONS; (2) OR together bytes 2-5 of
 * every multicast address on dev->mc_list into a 4-byte token-ring
 * functional address mask and program it with SRB_SET_FUNC_ADDRESS.
 * This path cannot sleep, so SRB completion is handled by olympic_srb_bh
 * (srb_queued = 2) rather than by a waiter.
 */
1152 static void olympic_set_rx_mode(struct net_device *dev)
1154 	struct olympic_private *olympic_priv = (struct olympic_private *) dev->priv ;
1155 	u8 *olympic_mmio = olympic_priv->olympic_mmio ;
1158 	struct dev_mc_list *dmi ;
1159 	unsigned char dev_mc_address[4] ;
1162 	writel(olympic_priv->srb,olympic_mmio+LAPA);
1163 	srb=olympic_priv->olympic_lap + (olympic_priv->srb & (~0xf800));
1164 	options = olympic_priv->olympic_copy_all_options;
1166 	if (dev->flags&IFF_PROMISC)
1171 	/* Only issue the srb if there is a change in options */
1173 	if ((options ^ olympic_priv->olympic_copy_all_options)) {
1175 		/* Now to issue the srb command to alter the copy.all.options */
1177 		writeb(SRB_MODIFY_RECEIVE_OPTIONS,srb);
1179 		writeb(OLYMPIC_CLEAR_RET_CODE,srb+2);
1181 		writeb(olympic_priv->olympic_receive_options,srb+4);
1182 		writeb(options,srb+5);
1184 		olympic_priv->srb_queued=2; /* Can't sleep, use srb_bh */
1186 		writel(LISR_SRB_CMD,olympic_mmio+LISR_SUM);
1188 		olympic_priv->olympic_copy_all_options = options ;
1193 	/* Set the functional addresses we need for multicast */
1195 	dev_mc_address[0] = dev_mc_address[1] = dev_mc_address[2] = dev_mc_address[3] = 0 ;
1197 	for (i=0,dmi=dev->mc_list;i < dev->mc_count; i++,dmi = dmi->next) {
1198 		dev_mc_address[0] |= dmi->dmi_addr[2] ;
1199 		dev_mc_address[1] |= dmi->dmi_addr[3] ;
1200 		dev_mc_address[2] |= dmi->dmi_addr[4] ;
1201 		dev_mc_address[3] |= dmi->dmi_addr[5] ;
1204 	writeb(SRB_SET_FUNC_ADDRESS,srb+0);
1206 	writeb(OLYMPIC_CLEAR_RET_CODE,srb+2);
1210 	writeb(dev_mc_address[0],srb+6);
1211 	writeb(dev_mc_address[1],srb+7);
1212 	writeb(dev_mc_address[2],srb+8);
1213 	writeb(dev_mc_address[3],srb+9);
1215 	olympic_priv->srb_queued = 2 ;
1216 	writel(LISR_SRB_CMD,olympic_mmio+LISR_SUM);
/*
 * olympic_srb_bh - SRB completion bottom half (srb_queued == 2 path).
 *
 * Called from the interrupt handler when an SRB command issued from a
 * non-sleeping context completes.  Dispatches on the command byte
 * (srb[0]) and the return code byte (srb[2]), logging the outcome.
 * Pure reporting: no driver state is modified here.
 */
1220 static void olympic_srb_bh(struct net_device *dev)
1222 	struct olympic_private *olympic_priv = (struct olympic_private *) dev->priv ;
1223 	u8 *olympic_mmio = olympic_priv->olympic_mmio ;
1226 	writel(olympic_priv->srb,olympic_mmio+LAPA);
1227 	srb=olympic_priv->olympic_lap + (olympic_priv->srb & (~0xf800));
1229 	switch (readb(srb)) {
1231 		/* SRB_MODIFY_RECEIVE_OPTIONS i.e. set_multicast_list options (promiscuous)
1232 		 * At some point we should do something if we get an error, such as
1233 		 * resetting the IFF_PROMISC flag in dev
1236 		case SRB_MODIFY_RECEIVE_OPTIONS:
1237 			switch (readb(srb+2)) {
1239 					printk(KERN_WARNING "%s: Unrecognized srb command\n",dev->name) ;
1242 					printk(KERN_WARNING "%s: Adapter must be open for this operation, doh!!\n",dev->name);
1245 					if (olympic_priv->olympic_message_level)
1246 						printk(KERN_WARNING "%s: Receive Options Modified to %x,%x\n",dev->name,olympic_priv->olympic_copy_all_options, olympic_priv->olympic_receive_options) ;
1248 			} /* switch srb[2] */
1251 		/* SRB_SET_GROUP_ADDRESS - Multicast group setting
1254 		case SRB_SET_GROUP_ADDRESS:
1255 			switch (readb(srb+2)) {
1259 					printk(KERN_WARNING "%s: Unrecognized srb command \n",dev->name) ;
1262 					printk(KERN_WARNING "%s: Adapter must be open for this operation, doh!!\n",dev->name);
1265 					printk(KERN_WARNING "%s: Group/Functional address indicator bits not set correctly\n",dev->name) ;
1267 				case 0x3e: /* If we ever implement individual multicast addresses, will need to deal with this */
1268 					printk(KERN_WARNING "%s: Group address registers full\n",dev->name) ;
1271 					printk(KERN_INFO "%s: Group Address already set.\n",dev->name) ;
1275 			} /* switch srb[2] */
1278 		/* SRB_RESET_GROUP_ADDRESS - Remove a multicast address from group list
1281 		case SRB_RESET_GROUP_ADDRESS:
1282 			switch (readb(srb+2)) {
1286 					printk(KERN_WARNING "%s: Unrecognized srb command \n",dev->name) ;
1289 					printk(KERN_WARNING "%s: Adapter must be open for this operation, doh!!\n",dev->name) ;
1291 				case 0x39: /* Must deal with this if individual multicast addresses used */
1292 					printk(KERN_INFO "%s: Group address not found \n",dev->name);
1296 			} /* switch srb[2] */
1300 		/* SRB_SET_FUNC_ADDRESS - Called by the set_rx_mode
1303 		case SRB_SET_FUNC_ADDRESS:
1304 			switch (readb(srb+2)) {
1306 					if (olympic_priv->olympic_message_level)
1307 						printk(KERN_INFO "%s: Functional Address Mask Set \n",dev->name) ;
1310 					printk(KERN_WARNING "%s: Unrecognized srb command \n",dev->name) ;
1313 					printk(KERN_WARNING "%s: Adapter must be open for this operation, doh!!\n",dev->name) ;
1317 			} /* switch srb[2] */
1320 		/* SRB_READ_LOG - Read and reset the adapter error counters
1324 			switch (readb(srb+2)) {
1326 					if (olympic_priv->olympic_message_level)
1327 						printk(KERN_INFO "%s: Read Log issued\n",dev->name) ;
1330 					printk(KERN_WARNING "%s: Unrecognized srb command \n",dev->name) ;
1333 					printk(KERN_WARNING "%s: Adapter must be open for this operation, doh!!\n",dev->name) ;
1336 			} /* switch srb[2] */
1339 		/* SRB_READ_SR_COUNTERS - Read and reset the source routing bridge related counters */
1341 		case SRB_READ_SR_COUNTERS:
1342 			switch (readb(srb+2)) {
1344 					if (olympic_priv->olympic_message_level)
1345 						printk(KERN_INFO "%s: Read Source Routing Counters issued\n",dev->name) ;
1348 					printk(KERN_WARNING "%s: Unrecognized srb command \n",dev->name) ;
1351 					printk(KERN_WARNING "%s: Adapter must be open for this operation, doh!!\n",dev->name) ;
1355 			} /* switch srb[2] */
1359 			printk(KERN_WARNING "%s: Unrecognized srb bh return value.\n",dev->name);
1361 	} /* switch srb[0] */
/*
 * olympic_get_stats - get_stats handler; returns the per-adapter
 * net_device_stats kept in olympic_private.  No locking needed: the
 * counters are only read here.
 */
1365 static struct net_device_stats * olympic_get_stats(struct net_device *dev)
1367 	struct olympic_private *olympic_priv ;
1368 	olympic_priv=(struct olympic_private *) dev->priv;
1369 	return (struct net_device_stats *) &olympic_priv->olympic_stats;
/*
 * olympic_set_mac_address - set_mac_address handler (LAA = locally
 * administered address).  Refuses to change the address while the
 * interface is running; otherwise copies sa_data into olympic_laa.
 * The new LAA presumably takes effect on the next open — confirm
 * against olympic_open (not visible in this chunk).
 */
1372 static int olympic_set_mac_address (struct net_device *dev, void *addr)
1374 	struct sockaddr *saddr = addr ;
1375 	struct olympic_private *olympic_priv = (struct olympic_private *)dev->priv ;
1377 	if (netif_running(dev)) {
1378 		printk(KERN_WARNING "%s: Cannot set mac/laa address while card is open\n", dev->name) ;
1382 	memcpy(olympic_priv->olympic_laa, saddr->sa_data,dev->addr_len) ;
1384 	if (olympic_priv->olympic_message_level) {
1385 		printk(KERN_INFO "%s: MAC/LAA Set to  = %x.%x.%x.%x.%x.%x\n",dev->name, olympic_priv->olympic_laa[0],
1386 		olympic_priv->olympic_laa[1], olympic_priv->olympic_laa[2],
1387 		olympic_priv->olympic_laa[3], olympic_priv->olympic_laa[4],
1388 		olympic_priv->olympic_laa[5]);
/*
 * olympic_arb_cmd - handle an adapter-to-host ARB request (from the ISR).
 *
 * Two request types are recognized in arb_block[0]:
 *  - ARB_RECEIVE_DATA: a MAC frame has arrived.  Walk the chained
 *    mac_receive_buffer list in LAP shared memory, copy it into a fresh
 *    skb, hand it to the stack via netif_rx(), then acknowledge through
 *    the ASB (deferring to olympic_asb_bh if the ASB is busy).
 *  - ARB_LAN_CHANGE_STATUS: ring status change.  Log the interesting
 *    bits; on fatal bits (LWF/ARW/FPE/RR) the adapter has closed itself,
 *    so tear down rings, unmap DMA and free the IRQ.  Counter-overflow
 *    bits trigger SRB_READ_LOG / SRB_READ_SR_COUNTERS via the srb_bh path.
 *
 * Multi-byte fields in the arb/parameter areas are big-endian, hence the
 * swab16() on readw() results (see the 5/20/00 note in the file header).
 */
1394 static void olympic_arb_cmd(struct net_device *dev)
1396 	struct olympic_private *olympic_priv = (struct olympic_private *) dev->priv;
1397 	u8 *olympic_mmio=olympic_priv->olympic_mmio;
1398 	u8 *arb_block, *asb_block, *srb ;
1400 	u16 frame_len, buffer_len ;
1401 	struct sk_buff *mac_frame ;
1405 	u16 lan_status = 0, lan_status_diff ; /* Initialize to stop compiler warning */
1410 	arb_block = (u8 *)(olympic_priv->olympic_lap + olympic_priv->arb) ;
1411 	asb_block = (u8 *)(olympic_priv->olympic_lap + olympic_priv->asb) ;
1412 	srb = (u8 *)(olympic_priv->olympic_lap + olympic_priv->srb) ;
1414 	if (readb(arb_block+0) == ARB_RECEIVE_DATA) { /* Receive.data, MAC frames */
1416 		header_len = readb(arb_block+8) ; /* 802.5 Token-Ring Header Length */
1417 		frame_len = swab16(readw(arb_block + 10)) ;
1419 		buff_off = swab16(readw(arb_block + 6)) ;
1421 		buf_ptr = olympic_priv->olympic_lap + buff_off ;
/* Debug-only dump of the first buffer (guard lines not shown here). */
1426 		frame_data = buf_ptr+offsetof(struct mac_receive_buffer,frame_data) ;
1428 		for (i=0 ;  i < 14 ; i++) {
1429 			printk("Loc %d = %02x\n",i,readb(frame_data + i));
1432 		printk("next %04x, fs %02x, len %04x \n",readw(buf_ptr+offsetof(struct mac_receive_buffer,next)), readb(buf_ptr+offsetof(struct mac_receive_buffer,frame_status)), readw(buf_ptr+offsetof(struct mac_receive_buffer,buffer_length)));
1435 		mac_frame = dev_alloc_skb(frame_len) ;
1437 			printk(KERN_WARNING "%s: Memory squeeze, dropping frame.\n", dev->name);
1441 		/* Walk the buffer chain, creating the frame */
1444 			frame_data = buf_ptr+offsetof(struct mac_receive_buffer,frame_data) ;
1445 			buffer_len = swab16(readw(buf_ptr+offsetof(struct mac_receive_buffer,buffer_length)));
1446 			memcpy_fromio(skb_put(mac_frame, buffer_len), frame_data , buffer_len ) ;
1447 			next_ptr=readw(buf_ptr+offsetof(struct mac_receive_buffer,next));
1448 		} while (next_ptr && (buf_ptr=olympic_priv->olympic_lap + ntohs(next_ptr)));
1450 		if (olympic_priv->olympic_network_monitor) {
1451 			struct trh_hdr *mac_hdr ;
1452 			printk(KERN_WARNING "%s: Received MAC Frame, details: \n",dev->name) ;
1453 			mac_hdr = (struct trh_hdr *)mac_frame->data ;
1454 			printk(KERN_WARNING "%s: MAC Frame Dest. Addr: %02x:%02x:%02x:%02x:%02x:%02x \n", dev->name , mac_hdr->daddr[0], mac_hdr->daddr[1], mac_hdr->daddr[2], mac_hdr->daddr[3], mac_hdr->daddr[4], mac_hdr->daddr[5]) ;
1455 			printk(KERN_WARNING "%s: MAC Frame Srce. Addr: %02x:%02x:%02x:%02x:%02x:%02x \n", dev->name , mac_hdr->saddr[0], mac_hdr->saddr[1], mac_hdr->saddr[2], mac_hdr->saddr[3], mac_hdr->saddr[4], mac_hdr->saddr[5]) ;
1457 		mac_frame->dev = dev ;
1458 		mac_frame->protocol = tr_type_trans(mac_frame,dev);
1459 		netif_rx(mac_frame) ;
1460 		dev->last_rx = jiffies;
1463 		/* Now tell the card we have dealt with the received frame */
1465 		/* Set LISR Bit 1 */
1466 		writel(LISR_ARB_FREE,olympic_priv->olympic_mmio + LISR_SUM);
1468 		/* Is the ASB free ? */
1470 		if (readb(asb_block + 2) != 0xff) {
/* ASB busy: remember phase 1 and ask for an ASB-free interrupt;
 * olympic_asb_bh completes the acknowledgement later. */
1471 			olympic_priv->asb_queued = 1 ;
1472 			writel(LISR_ASB_FREE_REQ,olympic_priv->olympic_mmio+LISR_SUM);
1474 			/* Drop out and wait for the bottom half to be run */
1477 		writeb(ASB_RECEIVE_DATA,asb_block); /* Receive data */
1478 		writeb(OLYMPIC_CLEAR_RET_CODE,asb_block+2); /* Necessary ?? */
1479 		writeb(readb(arb_block+6),asb_block+6); /* Must send the address back to the adapter */
1480 		writeb(readb(arb_block+7),asb_block+7); /* To let it know we have dealt with the data */
1482 		writel(LISR_ASB_REPLY | LISR_ASB_FREE_REQ,olympic_priv->olympic_mmio+LISR_SUM);
1484 		olympic_priv->asb_queued = 2 ;
1488 	} else if (readb(arb_block) == ARB_LAN_CHANGE_STATUS) { /* Lan.change.status */
1489 		lan_status = swab16(readw(arb_block+6));
1490 		fdx_prot_error = readb(arb_block+8) ;
1492 		/* Issue ARB Free */
1493 		writel(LISR_ARB_FREE,olympic_priv->olympic_mmio+LISR_SUM);
/* XOR old vs. new status so each change is reported exactly once. */
1495 		lan_status_diff = olympic_priv->olympic_lan_status ^ lan_status ;
1497 		if (lan_status_diff & (LSC_LWF | LSC_ARW | LSC_FPE | LSC_RR) ) {
1498 			if (lan_status_diff & LSC_LWF)
1499 					printk(KERN_WARNING "%s: Short circuit detected on the lobe\n",dev->name);
1500 			if (lan_status_diff & LSC_ARW)
1501 					printk(KERN_WARNING "%s: Auto removal error\n",dev->name);
1502 			if (lan_status_diff & LSC_FPE)
1503 					printk(KERN_WARNING "%s: FDX Protocol Error\n",dev->name);
1504 			if (lan_status_diff & LSC_RR)
1505 					printk(KERN_WARNING "%s: Force remove MAC frame received\n",dev->name);
1507 			/* Adapter has been closed by the hardware */
1509 			/* reset tx/rx fifo's and busmaster logic */
1511 			writel(readl(olympic_mmio+BCTL)|(3<<13),olympic_mmio+BCTL);
1513 			writel(readl(olympic_mmio+BCTL)&~(3<<13),olympic_mmio+BCTL);
1514 			netif_stop_queue(dev);
1515 			olympic_priv->srb = readw(olympic_priv->olympic_lap + LAPWWO) ;
/* Inline duplicate of olympic_freemem(): free rx skbs and unmap all
 * ring DMA areas (same 0xdeadbeef "already unmapped" sentinel). */
1516 			for(i=0;i<OLYMPIC_RX_RING_SIZE;i++) {
1517 				dev_kfree_skb_irq(olympic_priv->rx_ring_skb[olympic_priv->rx_status_last_received]);
1518 				if (olympic_priv->olympic_rx_ring[olympic_priv->rx_status_last_received].buffer != 0xdeadbeef) {
1519 					pci_unmap_single(olympic_priv->pdev,
1520 						le32_to_cpu(olympic_priv->olympic_rx_ring[olympic_priv->rx_status_last_received].buffer),
1521 						olympic_priv->pkt_buf_sz, PCI_DMA_FROMDEVICE);
1523 				olympic_priv->rx_status_last_received++;
1524 				olympic_priv->rx_status_last_received&=OLYMPIC_RX_RING_SIZE-1;
1527 			pci_unmap_single(olympic_priv->pdev, olympic_priv->rx_status_ring_dma_addr,
1528 				sizeof(struct olympic_rx_status) * OLYMPIC_RX_RING_SIZE, PCI_DMA_FROMDEVICE);
1529 			pci_unmap_single(olympic_priv->pdev, olympic_priv->rx_ring_dma_addr,
1530 				sizeof(struct olympic_rx_desc) * OLYMPIC_RX_RING_SIZE, PCI_DMA_TODEVICE);
1532 			pci_unmap_single(olympic_priv->pdev, olympic_priv->tx_status_ring_dma_addr,
1533 				sizeof(struct olympic_tx_status) * OLYMPIC_TX_RING_SIZE, PCI_DMA_FROMDEVICE);
1534 			pci_unmap_single(olympic_priv->pdev, olympic_priv->tx_ring_dma_addr,
1535 				sizeof(struct olympic_tx_desc) * OLYMPIC_TX_RING_SIZE, PCI_DMA_TODEVICE);
1537 			free_irq(dev->irq,dev);
1539 			printk(KERN_WARNING "%s: Adapter has been closed \n", dev->name) ;
1540 		} /* If serious error */
1542 		if (olympic_priv->olympic_message_level) {
1543 			if (lan_status_diff & LSC_SIG_LOSS)
1544 					printk(KERN_WARNING "%s: No receive signal detected \n", dev->name) ;
1545 			if (lan_status_diff & LSC_HARD_ERR)
1546 					printk(KERN_INFO "%s: Beaconing \n",dev->name);
1547 			if (lan_status_diff & LSC_SOFT_ERR)
1548 					printk(KERN_WARNING "%s: Adapter transmitted Soft Error Report Mac Frame \n",dev->name);
1549 			if (lan_status_diff & LSC_TRAN_BCN)
1550 					printk(KERN_INFO "%s: We are tranmitting the beacon, aaah\n",dev->name);
1551 			if (lan_status_diff & LSC_SS)
1552 					printk(KERN_INFO "%s: Single Station on the ring \n", dev->name);
1553 			if (lan_status_diff & LSC_RING_REC)
1554 					printk(KERN_INFO "%s: Ring recovery ongoing\n",dev->name);
1555 			if (lan_status_diff & LSC_FDX_MODE)
1556 					printk(KERN_INFO "%s: Operating in FDX mode\n",dev->name);
/* Counter overflow: ask the adapter to read+reset its error log. */
1559 		if (lan_status_diff & LSC_CO) {
1561 				if (olympic_priv->olympic_message_level)
1562 					printk(KERN_INFO "%s: Counter Overflow \n", dev->name);
1564 				/* Issue READ.LOG command */
1566 				writeb(SRB_READ_LOG, srb);
1568 				writeb(OLYMPIC_CLEAR_RET_CODE,srb+2);
1573 				olympic_priv->srb_queued=2; /* Can't sleep, use srb_bh */
1575 				writel(LISR_SRB_CMD,olympic_mmio+LISR_SUM);
1579 		if (lan_status_diff & LSC_SR_CO) {
1581 				if (olympic_priv->olympic_message_level)
1582 					printk(KERN_INFO "%s: Source routing counters overflow\n", dev->name);
1584 				/* Issue a READ.SR.COUNTERS */
1586 				writeb(SRB_READ_SR_COUNTERS,srb);
1588 				writeb(OLYMPIC_CLEAR_RET_CODE,srb+2);
1591 				olympic_priv->srb_queued=2; /* Can't sleep, use srb_bh */
1593 				writel(LISR_SRB_CMD,olympic_mmio+LISR_SUM);
1597 		olympic_priv->olympic_lan_status = lan_status ;
1599 	} /* Lan.change.status */
1601 		printk(KERN_WARNING "%s: Unknown arb command \n", dev->name);
/*
 * olympic_asb_bh - two-phase ASB acknowledgement bottom half.
 *
 * Phase 1 (asb_queued == 1): the ASB was busy when olympic_arb_cmd wanted
 * to acknowledge a received MAC frame; now that it is free, write the
 * ASB_RECEIVE_DATA response and advance to phase 2.
 * Phase 2 (asb_queued == 2): the adapter has processed the response;
 * check the return code byte (asb_block[2]) and clear asb_queued.
 */
1604 static void olympic_asb_bh(struct net_device *dev)
1606 	struct olympic_private *olympic_priv = (struct olympic_private *) dev->priv ;
1607 	u8 *arb_block, *asb_block ;
1609 	arb_block = (u8 *)(olympic_priv->olympic_lap + olympic_priv->arb) ;
1610 	asb_block = (u8 *)(olympic_priv->olympic_lap + olympic_priv->asb) ;
1612 	if (olympic_priv->asb_queued == 1) {   /* Dropped through the first time */
1614 		writeb(ASB_RECEIVE_DATA,asb_block); /* Receive data */
1615 		writeb(OLYMPIC_CLEAR_RET_CODE,asb_block+2); /* Necessary ?? */
1616 		writeb(readb(arb_block+6),asb_block+6); /* Must send the address back to the adapter */
1617 		writeb(readb(arb_block+7),asb_block+7); /* To let it know we have dealt with the data */
1619 		writel(LISR_ASB_REPLY | LISR_ASB_FREE_REQ,olympic_priv->olympic_mmio+LISR_SUM);
1620 		olympic_priv->asb_queued = 2 ;
1625 	if (olympic_priv->asb_queued == 2) {
1626 		switch (readb(asb_block+2)) {
1628 				printk(KERN_WARNING "%s: Unrecognized command code \n", dev->name);
1631 				printk(KERN_WARNING "%s: Unrecognized buffer address \n", dev->name);
1634 				/* Valid response, everything should be ok again */
1637 				printk(KERN_WARNING "%s: Invalid return code in asb\n",dev->name);
1641 	olympic_priv->asb_queued = 0 ;
/*
 * olympic_change_mtu - change_mtu handler.  Validates the requested MTU
 * against the ring speed (the 4 Mbps branch is on lines not shown here),
 * then sizes future receive buffers as mtu + TR_HLEN.  Takes effect for
 * buffers allocated after the change.
 */
1644 static int olympic_change_mtu(struct net_device *dev, int mtu)
1646 	struct olympic_private *olympic_priv = (struct olympic_private *) dev->priv;
1649 	if (olympic_priv->olympic_ring_speed == 4)
1660 	olympic_priv->pkt_buf_sz = mtu + TR_HLEN ;
/*
 * olympic_proc_info - /proc read handler (registered when network
 * monitoring is enabled; see olympic_remove_one's remove_proc_entry).
 *
 * Formats the adapter address table and the token-ring parameters table,
 * both read live from adapter LAP shared memory via readb()/readw().
 * 16-bit fields are big-endian in LAP memory, hence swab16() on every
 * readw().  Uses the classic proc_info start/offset/length protocol.
 */
1665 static int olympic_proc_info(char *buffer, char **start, off_t offset, int length, int *eof, void *data)
1667 	struct net_device *dev = (struct net_device *)data ;
1668 	struct olympic_private *olympic_priv=(struct olympic_private *)dev->priv;
1669 	u8 *oat = (u8 *)(olympic_priv->olympic_lap + olympic_priv->olympic_addr_table_addr) ;
1670 	u8 *opt = (u8 *)(olympic_priv->olympic_lap + olympic_priv->olympic_parms_addr) ;
1676 	size = sprintf(buffer,
1677 		"IBM Pit/Pit-Phy/Olympic Chipset Token Ring Adapter %s\n",dev->name);
1678 	size += sprintf(buffer+size, "\n%6s: Adapter Address   : Node Address      : Functional Addr\n",
1681 	size += sprintf(buffer+size, "%6s: %02x:%02x:%02x:%02x:%02x:%02x : %02x:%02x:%02x:%02x:%02x:%02x : %02x:%02x:%02x:%02x\n",
1689 	   readb(oat+offsetof(struct olympic_adapter_addr_table,node_addr)),
1690 	   readb(oat+offsetof(struct olympic_adapter_addr_table,node_addr)+1),
1691 	   readb(oat+offsetof(struct olympic_adapter_addr_table,node_addr)+2),
1692 	   readb(oat+offsetof(struct olympic_adapter_addr_table,node_addr)+3),
1693 	   readb(oat+offsetof(struct olympic_adapter_addr_table,node_addr)+4),
1694 	   readb(oat+offsetof(struct olympic_adapter_addr_table,node_addr)+5),
1695 	   readb(oat+offsetof(struct olympic_adapter_addr_table,func_addr)),
1696 	   readb(oat+offsetof(struct olympic_adapter_addr_table,func_addr)+1),
1697 	   readb(oat+offsetof(struct olympic_adapter_addr_table,func_addr)+2),
1698 	   readb(oat+offsetof(struct olympic_adapter_addr_table,func_addr)+3));
1700 	size += sprintf(buffer+size, "\n%6s: Token Ring Parameters Table:\n", dev->name);
1702 	size += sprintf(buffer+size, "%6s: Physical Addr : Up Node Address   : Poll Address      : AccPri : Auth Src : Att Code :\n",
1705 	size += sprintf(buffer+size, "%6s: %02x:%02x:%02x:%02x   : %02x:%02x:%02x:%02x:%02x:%02x : %02x:%02x:%02x:%02x:%02x:%02x :  %04x  :  %04x    :  %04x    :\n",
1707 	   readb(opt+offsetof(struct olympic_parameters_table, phys_addr)),
1708 	   readb(opt+offsetof(struct olympic_parameters_table, phys_addr)+1),
1709 	   readb(opt+offsetof(struct olympic_parameters_table, phys_addr)+2),
1710 	   readb(opt+offsetof(struct olympic_parameters_table, phys_addr)+3),
1711 	   readb(opt+offsetof(struct olympic_parameters_table, up_node_addr)),
1712 	   readb(opt+offsetof(struct olympic_parameters_table, up_node_addr)+1),
1713 	   readb(opt+offsetof(struct olympic_parameters_table, up_node_addr)+2),
1714 	   readb(opt+offsetof(struct olympic_parameters_table, up_node_addr)+3),
1715 	   readb(opt+offsetof(struct olympic_parameters_table, up_node_addr)+4),
1716 	   readb(opt+offsetof(struct olympic_parameters_table, up_node_addr)+5),
1717 	   readb(opt+offsetof(struct olympic_parameters_table, poll_addr)),
1718 	   readb(opt+offsetof(struct olympic_parameters_table, poll_addr)+1),
1719 	   readb(opt+offsetof(struct olympic_parameters_table, poll_addr)+2),
1720 	   readb(opt+offsetof(struct olympic_parameters_table, poll_addr)+3),
1721 	   readb(opt+offsetof(struct olympic_parameters_table, poll_addr)+4),
1722 	   readb(opt+offsetof(struct olympic_parameters_table, poll_addr)+5),
1723 	   swab16(readw(opt+offsetof(struct olympic_parameters_table, acc_priority))),
1724 	   swab16(readw(opt+offsetof(struct olympic_parameters_table, auth_source_class))),
1725 	   swab16(readw(opt+offsetof(struct olympic_parameters_table, att_code))));
1727 	size += sprintf(buffer+size, "%6s: Source Address    : Bcn T : Maj. V : Lan St : Lcl Rg : Mon Err : Frame Correl : \n",
1730 	size += sprintf(buffer+size, "%6s: %02x:%02x:%02x:%02x:%02x:%02x : %04x  :  %04x  :  %04x  :  %04x  :  %04x   :     %04x     : \n",
1732 	   readb(opt+offsetof(struct olympic_parameters_table, source_addr)),
1733 	   readb(opt+offsetof(struct olympic_parameters_table, source_addr)+1),
1734 	   readb(opt+offsetof(struct olympic_parameters_table, source_addr)+2),
1735 	   readb(opt+offsetof(struct olympic_parameters_table, source_addr)+3),
1736 	   readb(opt+offsetof(struct olympic_parameters_table, source_addr)+4),
1737 	   readb(opt+offsetof(struct olympic_parameters_table, source_addr)+5),
1738 	   swab16(readw(opt+offsetof(struct olympic_parameters_table, beacon_type))),
1739 	   swab16(readw(opt+offsetof(struct olympic_parameters_table, major_vector))),
1740 	   swab16(readw(opt+offsetof(struct olympic_parameters_table, lan_status))),
1741 	   swab16(readw(opt+offsetof(struct olympic_parameters_table, local_ring))),
1742 	   swab16(readw(opt+offsetof(struct olympic_parameters_table, mon_error))),
1743 	   swab16(readw(opt+offsetof(struct olympic_parameters_table, frame_correl))));
1745 	size += sprintf(buffer+size, "%6s: Beacon Details :  Tx  :  Rx  : NAUN Node Address : NAUN Node Phys : \n",
1748 	size += sprintf(buffer+size, "%6s:                :  %02x  :  %02x  : %02x:%02x:%02x:%02x:%02x:%02x : %02x:%02x:%02x:%02x    : \n",
1750 	   swab16(readw(opt+offsetof(struct olympic_parameters_table, beacon_transmit))),
1751 	   swab16(readw(opt+offsetof(struct olympic_parameters_table, beacon_receive))),
1752 	   readb(opt+offsetof(struct olympic_parameters_table, beacon_naun)),
1753 	   readb(opt+offsetof(struct olympic_parameters_table, beacon_naun)+1),
1754 	   readb(opt+offsetof(struct olympic_parameters_table, beacon_naun)+2),
1755 	   readb(opt+offsetof(struct olympic_parameters_table, beacon_naun)+3),
1756 	   readb(opt+offsetof(struct olympic_parameters_table, beacon_naun)+4),
1757 	   readb(opt+offsetof(struct olympic_parameters_table, beacon_naun)+5),
1758 	   readb(opt+offsetof(struct olympic_parameters_table, beacon_phys)),
1759 	   readb(opt+offsetof(struct olympic_parameters_table, beacon_phys)+1),
1760 	   readb(opt+offsetof(struct olympic_parameters_table, beacon_phys)+2),
1761 	   readb(opt+offsetof(struct olympic_parameters_table, beacon_phys)+3));
/* Standard proc_info windowing: return only the [offset, offset+length)
 * slice of what was formatted. */
1769 	*start=buffer+(offset-begin);	/* Start of wanted data */
1770 	len-=(offset-begin);		/* Start slop */
1772 		len=length;		/* Ending slop */
/*
 * olympic_remove_one - PCI driver .remove callback.
 *
 * Tears down in reverse order of probe: removes the per-adapter
 * /proc/net/olympic_<dev> entry (only created in network-monitor mode),
 * unregisters the net device, unmaps both MMIO regions, releases the
 * PCI regions and clears the drvdata pointer.
 */
1776 static void __devexit olympic_remove_one(struct pci_dev *pdev)
1778 	struct net_device *dev = pci_get_drvdata(pdev) ;
1779 	struct olympic_private *olympic_priv=(struct olympic_private *)dev->priv;
1781 	if (olympic_priv->olympic_network_monitor) {
1782 		char proc_name[20] ;
1783 		strcpy(proc_name,"net/olympic_") ;
1784 		strcat(proc_name,dev->name) ;
1785 		remove_proc_entry(proc_name,NULL);
1787 	unregister_netdev(dev) ;
1788 	iounmap(olympic_priv->olympic_mmio) ;
1789 	iounmap(olympic_priv->olympic_lap) ;
1790 	pci_release_regions(pdev) ;
1791 	pci_set_drvdata(pdev,NULL) ;
/* PCI driver glue: binds olympic_pci_tbl IDs to the probe/remove pair. */
1795 static struct pci_driver olympic_driver = {
1797 	.id_table	= olympic_pci_tbl,
1798 	.probe		= olympic_probe,
1799 	.remove		= __devexit_p(olympic_remove_one),
/* Module entry: register the PCI driver (pre-2.6.10 pci_module_init API). */
1802 static int __init olympic_pci_init(void)
1804 	return pci_module_init (&olympic_driver) ;
/* Module exit: unregister the PCI driver; .remove runs for bound devices. */
1807 static void __exit olympic_pci_cleanup(void)
1809 	pci_unregister_driver(&olympic_driver) ;
/* Module registration and license (GPL) declarations. */
1813 module_init(olympic_pci_init) ;
1814 module_exit(olympic_pci_cleanup) ;
1816 MODULE_LICENSE("GPL");