2 * olympic.c (c) 1999 Peter De Schrijver All Rights Reserved
3 * 1999/2000 Mike Phillips (mikep@linuxtr.net)
5 * Linux driver for IBM PCI tokenring cards based on the Pit/Pit-Phy/Olympic
8 * Base Driver Skeleton:
9 * Written 1993-94 by Donald Becker.
11 * Copyright 1993 United States Government as represented by the
12 * Director, National Security Agency.
14 * Thanks to Erik De Cock, Adrian Bridgett and Frank Fiene for their
15 * assistance and perseverance with the testing of this driver.
17 * This software may be used and distributed according to the terms
18 * of the GNU General Public License, incorporated herein by reference.
20 * 4/27/99 - Alpha Release 0.1.0
21 * First release to the public
23 * 6/8/99 - Official Release 0.2.0
24 * Merged into the kernel code
25 * 8/18/99 - Updated driver for 2.3.13 kernel to use new pci
26 * resource. Driver also reports the card name returned by
28 * 1/11/00 - Added spinlocks for smp
29 * 2/23/00 - Updated to dev_kfree_irq
30 * 3/10/00 - Fixed FDX enable which triggered other bugs also
32 * 5/20/00 - Changes to handle Olympic on LinuxPPC. Endian changes.
33 * The odd thing about the changes is that the fix for
34 * endian issues with the big-endian data in the arb, asb...
35 * was to always swab() the bytes, no matter what CPU.
36 * That's because the read[wl]() functions always swap the
37 * bytes on the way in on PPC.
38 * Fixing the hardware descriptors was another matter,
39 * because they weren't going through read[wl](), there all
40 * the results had to be in memory in le32 values. kdaaker
42 * 12/23/00 - Added minimal Cardbus support (Thanks Donald).
44 * 03/09/01 - Add new pci api, dev_base_lock, general clean up.
46 * 03/27/01 - Add new dma pci (Thanks to Kyle Lucke) and alloc_trdev
47 * Change proc_fs behaviour, now one entry per adapter.
49 * 04/09/01 - Couple of bug fixes to the dma unmaps and ejecting the
50 * adapter when live does not take the system down with it.
52 * 06/02/01 - Clean up, copy skb for small packets
54 * 06/22/01 - Add EISR error handling routines
56 * 07/19/01 - Improve bad LAA reporting, strip out freemem
57 * into a separate function, its called from 3
58 * different places now.
59 * 02/09/02 - Replaced sleep_on.
60 * 03/01/02 - Replace access to several registers from 32 bit to
61 * 16 bit. Fixes alignment errors on PPC 64 bit machines.
62 * Thanks to Al Trautman for this one.
63 * 03/10/02 - Fix BUG in arb_cmd. Bug was there all along but was
64 * silently ignored until the error checking code
65 * went into version 1.0.0
66 * 06/04/02 - Add correct start up sequence for the cardbus adapters.
67 * Required for strict compliance with pci power mgmt specs.
72 * If Problems do Occur
73 * Most problems can be rectified by either closing and opening the interface
74 * (ifconfig down and up) or rmmod and insmod'ing the driver (a bit difficult
75 * if compiled into the kernel).
78 /* Change OLYMPIC_DEBUG to 1 to get verbose, and I mean really verbose, messages */
/* Compile-time debug switch: 0 = quiet, 1 = dump registers, SRBs and ring state. */
80 #define OLYMPIC_DEBUG 0
83 #include <linux/config.h>
84 #include <linux/module.h>
85 #include <linux/kernel.h>
86 #include <linux/errno.h>
87 #include <linux/timer.h>
89 #include <linux/ioport.h>
90 #include <linux/string.h>
91 #include <linux/proc_fs.h>
92 #include <linux/ptrace.h>
93 #include <linux/skbuff.h>
94 #include <linux/interrupt.h>
95 #include <linux/delay.h>
96 #include <linux/netdevice.h>
97 #include <linux/trdevice.h>
98 #include <linux/stddef.h>
99 #include <linux/init.h>
100 #include <linux/pci.h>
101 #include <linux/spinlock.h>
103 #include <net/checksum.h>
106 #include <asm/system.h>
107 #include <asm/bitops.h>
111 /* I've got to put some intelligence into the version number so that Peter and I know
112 * which version of the code somebody has got.
113 * Version Number = a.b.c.d where a.b.c is the level of code and d is the latest author.
114 * So 0.0.1.pds = Peter, 0.0.1.mlp = Mike
116 * Official releases will only have an a.b.c version number format.
/* Version banner printed once from olympic_init(); a.b.c[.author] scheme
 * described in the comment above. */
119 static char version[] __devinitdata =
120 "Olympic.c v1.0.5 6/04/02 - Peter De Schrijver & Mike Phillips" ;
/* Open MAJOR error strings, indexed by the high nibble of the SRB open
 * response error byte (see olympic_open: readb(init_srb+7) >> 4). */
122 static char *open_maj_error[] = {"No error", "Lobe Media Test", "Physical Insertion",
123 "Address Verification", "Neighbor Notification (Ring Poll)",
124 "Request Parameters","FDX Registration Request",
125 "FDX Duplicate Address Check", "Station registration Query Wait",
/* Open MINOR error strings, indexed by the low nibble of the same byte.
 * NOTE(review): "failer" in the RPL entry is a typo in a runtime string
 * ("failure"); left unchanged here since this edit only adds comments. */
128 static char *open_min_error[] = {"No error", "Function Failure", "Signal Lost", "Wire Fault",
129 "Ring Speed Mismatch", "Timeout","Ring Failure","Ring Beaconing",
130 "Duplicate Node Address","Request Parameters","Remove Received",
131 "Reserved", "Reserved", "No Monitor Detected for RPL",
132 "Monitor Contention failer for RPL", "FDX Protocol Error"};
134 /* Module parameters */
136 MODULE_AUTHOR("Mike Phillips <mikep@linuxtr.net>") ;
137 MODULE_DESCRIPTION("Olympic PCI/Cardbus Chipset Driver") ;
139 /* Ring Speed 0,4,16,100
141 * 4,16 = Selected speed only, no autosense
142 * This allows the card to be the first on the ring
143 * and become the active monitor.
144 * 100 = Nothing at present, 100mbps is autodetected
145 * if FDX is turned on. May be implemented in the future to
146 * fail if 100mbps is not detected.
148 * WARNING: Some hubs will allow you to insert
/* Per-adapter ring speed (0 = autosense, 4/16 = fixed Mbps). */
152 static int ringspeed[OLYMPIC_MAX_ADAPTERS] = {0,} ;
153 MODULE_PARM(ringspeed, "1-" __MODULE_STRING(OLYMPIC_MAX_ADAPTERS) "i");
/* Per-adapter packet buffer size in bytes; olympic_probe clamps values
 * outside 100..18000 to the compiled-in PKT_BUF_SZ default. */
155 /* Packet buffer size */
157 static int pkt_buf_sz[OLYMPIC_MAX_ADAPTERS] = {0,} ;
158 MODULE_PARM(pkt_buf_sz, "1-" __MODULE_STRING(OLYMPIC_MAX_ADAPTERS) "i") ;
/* Per-adapter verbosity of informational printk output. */
162 static int message_level[OLYMPIC_MAX_ADAPTERS] = {0,} ;
163 MODULE_PARM(message_level, "1-" __MODULE_STRING(OLYMPIC_MAX_ADAPTERS) "i") ;
165 /* Change network_monitor to receive mac frames through the arb channel.
166 * Will also create a /proc/net/olympic_tr%d entry, where %d is the tr
167 * device, i.e. tr0, tr1 etc.
168 * Intended to be used to create a ring-error reporting network module
169 * i.e. it will give you the source address of beaconers on the ring
171 static int network_monitor[OLYMPIC_MAX_ADAPTERS] = {0,};
172 MODULE_PARM(network_monitor, "1-" __MODULE_STRING(OLYMPIC_MAX_ADAPTERS) "i");
/* PCI IDs this driver binds to (IBM Pit/Pit-Phy/Olympic token ring). */
174 static struct pci_device_id olympic_pci_tbl[] = {
175 {PCI_VENDOR_ID_IBM,PCI_DEVICE_ID_IBM_TR_WAKE,PCI_ANY_ID,PCI_ANY_ID,},
176 { } /* Terminating Entry */
178 MODULE_DEVICE_TABLE(pci,olympic_pci_tbl) ;
/* Forward declarations for the driver entry points and helpers below. */
181 static int olympic_probe(struct pci_dev *pdev, const struct pci_device_id *ent);
182 static int olympic_init(struct net_device *dev);
183 static int olympic_open(struct net_device *dev);
184 static int olympic_xmit(struct sk_buff *skb, struct net_device *dev);
185 static int olympic_close(struct net_device *dev);
186 static void olympic_set_rx_mode(struct net_device *dev);
187 static void olympic_freemem(struct net_device *dev) ;
188 static irqreturn_t olympic_interrupt(int irq, void *dev_id, struct pt_regs *regs);
189 static struct net_device_stats * olympic_get_stats(struct net_device *dev);
190 static int olympic_set_mac_address(struct net_device *dev, void *addr) ;
191 static void olympic_arb_cmd(struct net_device *dev);
192 static int olympic_change_mtu(struct net_device *dev, int mtu);
193 static void olympic_srb_bh(struct net_device *dev) ;
194 static void olympic_asb_bh(struct net_device *dev) ;
195 static int olympic_proc_info(char *buffer, char **start, off_t offset, int length, int *eof, void *data) ;
/* olympic_probe: PCI probe callback -- attach one Olympic adapter.
 * Enables the device, claims its regions, ioremaps the MMIO (BAR 1,
 * 256 bytes) and LAP shared-memory (BAR 2, 2K) windows, applies the
 * per-card module parameters, runs olympic_init() and registers the
 * token-ring netdev.  Error paths at the bottom unwind the mappings,
 * regions and PCI enable. */
197 static int __devinit olympic_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
199 struct net_device *dev ;
200 struct olympic_private *olympic_priv;
/* Static counter indexing the per-card module parameter arrays. */
201 static int card_no = -1 ;
206 if ((i = pci_enable_device(pdev))) {
210 pci_set_master(pdev);
212 if ((i = pci_request_regions(pdev,"olympic"))) {
216 dev = alloc_trdev(sizeof(struct olympic_private)) ;
222 olympic_priv = dev->priv ;
224 spin_lock_init(&olympic_priv->olympic_lock) ;
226 init_waitqueue_head(&olympic_priv->srb_wait);
227 init_waitqueue_head(&olympic_priv->trb_wait);
229 printk(KERN_INFO "pci_device: %p, dev:%p, dev->priv: %p\n", pdev, dev, dev->priv);
/* BAR 0 = I/O ports, BAR 1 = MMIO registers, BAR 2 = LAP window. */
232 dev->base_addr=pci_resource_start(pdev, 0);
233 olympic_priv->olympic_card_name = pci_name(pdev);
234 olympic_priv->pdev = pdev;
235 olympic_priv->olympic_mmio = ioremap(pci_resource_start(pdev,1),256);
236 olympic_priv->olympic_lap = ioremap(pci_resource_start(pdev,2),2048);
237 if (!olympic_priv->olympic_mmio || !olympic_priv->olympic_lap) {
/* Clamp nonsensical buffer sizes to the compiled-in default. */
241 if ((pkt_buf_sz[card_no] < 100) || (pkt_buf_sz[card_no] > 18000) )
242 olympic_priv->pkt_buf_sz = PKT_BUF_SZ ;
244 olympic_priv->pkt_buf_sz = pkt_buf_sz[card_no] ;
246 dev->mtu = olympic_priv->pkt_buf_sz - TR_HLEN ;
247 olympic_priv->olympic_ring_speed = ringspeed[card_no] ;
248 olympic_priv->olympic_message_level = message_level[card_no] ;
249 olympic_priv->olympic_network_monitor = network_monitor[card_no];
251 if ((i = olympic_init(dev))) {
/* Wire up the netdev entry points. */
255 dev->open=&olympic_open;
256 dev->hard_start_xmit=&olympic_xmit;
257 dev->change_mtu=&olympic_change_mtu;
258 dev->stop=&olympic_close;
260 dev->set_multicast_list=&olympic_set_rx_mode;
261 dev->get_stats=&olympic_get_stats ;
262 dev->set_mac_address=&olympic_set_mac_address ;
263 SET_MODULE_OWNER(dev) ;
264 SET_NETDEV_DEV(dev, &pdev->dev);
266 pci_set_drvdata(pdev,dev) ;
267 register_netdev(dev) ;
268 printk("Olympic: %s registered as: %s\n",olympic_priv->olympic_card_name,dev->name);
269 if (olympic_priv->olympic_network_monitor) { /* Must go after register_netdev as we need the device name */
271 strcpy(proc_name,"net/olympic_") ;
272 strcat(proc_name,dev->name) ;
273 create_proc_read_entry(proc_name,0,NULL,olympic_proc_info,(void *)dev) ;
274 printk("Olympic: Network Monitor information: /proc/%s\n",proc_name);
/* Error unwind: drop whichever mappings were established, then the
 * regions and the PCI enable. */
279 if (olympic_priv->olympic_mmio)
280 iounmap(olympic_priv->olympic_mmio);
281 if (olympic_priv->olympic_lap)
282 iounmap(olympic_priv->olympic_lap);
286 pci_release_regions(pdev);
289 pci_disable_device(pdev);
/* olympic_init: hardware bring-up for one adapter.
 * Soft-resets the chip, handles the Cardbus-specific FERMASK/CLKCTL
 * pause/unpause sequence, programs the requested ring speed into GPR,
 * waits for the on-chip "solo" firmware init to post its SRB reply,
 * then reads the burned-in (universal) address and the address/parms
 * table offsets out of the LAP shared-memory window.
 *
 * Fixes applied in this revision:
 *  - CLKCTL pause wait: "!readl(...) & CLKCTL_PAUSE" parsed as
 *    "(!readl(...)) & CLKCTL_PAUSE" because ! binds tighter than &,
 *    so the wait loop effectively never ran; now correctly tests
 *    !(readl(...) & CLKCTL_PAUSE).
 *  - Typo in the user-visible timeout message ("responsing"). */
293 static int __devinit olympic_init(struct net_device *dev)
295 struct olympic_private *olympic_priv;
296 u8 __iomem *olympic_mmio, *init_srb,*adapter_addr;
298 unsigned int uaa_addr;
300 olympic_priv=(struct olympic_private *)dev->priv;
301 olympic_mmio=olympic_priv->olympic_mmio;
303 printk("%s \n", version);
304 printk("%s. I/O at %hx, MMIO at %p, LAP at %p, using irq %d\n", olympic_priv->olympic_card_name, (unsigned int) dev->base_addr,olympic_priv->olympic_mmio, olympic_priv->olympic_lap, dev->irq);
/* Soft reset and wait (up to 40s) for the chip to clear the bit.
 * NOTE(review): jiffies-t comparisons here would be wrap-safe as
 * time_after(jiffies, t + 40*HZ). */
306 writel(readl(olympic_mmio+BCTL) | BCTL_SOFTRESET,olympic_mmio+BCTL);
308 while((readl(olympic_mmio+BCTL)) & BCTL_SOFTRESET) {
310 if(jiffies-t > 40*HZ) {
311 printk(KERN_ERR "IBM PCI tokenring card not responding.\n");
317 /* Needed for cardbus */
318 if(!(readl(olympic_mmio+BCTL) & BCTL_MODE_INDICATOR)) {
319 writel(readl(olympic_priv->olympic_mmio+FERMASK)|FERMASK_INT_BIT, olympic_mmio+FERMASK);
323 printk("BCTL: %x\n",readl(olympic_mmio+BCTL));
324 printk("GPR: %x\n",readw(olympic_mmio+GPR));
325 printk("SISRMASK: %x\n",readl(olympic_mmio+SISR_MASK));
327 /* Aaaahhh, You have got to be real careful setting GPR, the card
328 holds the previous values from flash memory, including autosense
331 writel(readl(olympic_mmio+BCTL)|BCTL_MIMREB,olympic_mmio+BCTL);
333 if (olympic_priv->olympic_ring_speed == 0) { /* Autosense */
334 writew(readw(olympic_mmio+GPR)|GPR_AUTOSENSE,olympic_mmio+GPR);
335 if (olympic_priv->olympic_message_level)
336 printk(KERN_INFO "%s: Ringspeed autosense mode on\n",olympic_priv->olympic_card_name);
337 } else if (olympic_priv->olympic_ring_speed == 16) {
338 if (olympic_priv->olympic_message_level)
339 printk(KERN_INFO "%s: Trying to open at 16 Mbps as requested\n", olympic_priv->olympic_card_name);
340 writew(GPR_16MBPS, olympic_mmio+GPR);
341 } else if (olympic_priv->olympic_ring_speed == 4) {
342 if (olympic_priv->olympic_message_level)
343 printk(KERN_INFO "%s: Trying to open at 4 Mbps as requested\n", olympic_priv->olympic_card_name) ;
344 writew(0, olympic_mmio+GPR);
347 writew(readw(olympic_mmio+GPR)|GPR_NEPTUNE_BF,olympic_mmio+GPR);
350 printk("GPR = %x\n",readw(olympic_mmio + GPR) ) ;
352 /* Solo has been paused to meet the Cardbus power
353 * specs if the adapter is cardbus. Check to
354 * see its been paused and then restart solo. The
355 * adapter should set the pause bit within 1 second.
358 if(!(readl(olympic_mmio+BCTL) & BCTL_MODE_INDICATOR)) {
/* BUGFIX: was "!readl(...) & CLKCTL_PAUSE" -- ! binds before &,
 * which evaluated (!readl(...)) & CLKCTL_PAUSE and skipped the
 * wait whenever the register was nonzero.  Parenthesized so we
 * genuinely poll until the firmware sets the pause bit. */
360 while (!(readl(olympic_mmio+CLKCTL) & CLKCTL_PAUSE)) {
362 if(jiffies-t > 2*HZ) {
363 printk(KERN_ERR "IBM Cardbus tokenring adapter not responding.\n") ;
367 writel(readl(olympic_mmio+CLKCTL) & ~CLKCTL_PAUSE, olympic_mmio+CLKCTL) ;
370 /* start solo init */
371 writel((1<<15),olympic_mmio+SISR_MASK_SUM);
/* Wait (up to 15s) for the firmware's SRB reply signalling init done. */
374 while(!((readl(olympic_mmio+SISR_RR)) & SISR_SRB_REPLY)) {
376 if(jiffies-t > 15*HZ) {
377 printk(KERN_ERR "IBM PCI tokenring card not responding.\n");
/* Point LAPA at the working window and locate the init SRB inside it. */
382 writel(readw(olympic_mmio+LAPWWO),olympic_mmio+LAPA);
385 printk("LAPWWO: %x, LAPA: %x\n",readl(olympic_mmio+LAPWWO), readl(olympic_mmio+LAPA));
388 init_srb=olympic_priv->olympic_lap + ((readw(olympic_mmio+LAPWWO)) & (~0xf800));
393 printk("init_srb(%p): ",init_srb);
395 printk("%x ",readb(init_srb+i));
/* Nonzero word at srb+6 is the firmware's init error code. */
399 if(readw(init_srb+6)) {
400 printk(KERN_INFO "tokenring card initialization failed. errorcode : %x\n",readw(init_srb+6));
404 if (olympic_priv->olympic_message_level) {
405 if ( readb(init_srb +2) & 0x40) {
406 printk(KERN_INFO "Olympic: Adapter is FDX capable.\n") ;
408 printk(KERN_INFO "Olympic: Adapter cannot do FDX.\n");
/* Fetch the universal (burned-in) address; adapter fields are
 * big-endian, hence the swab16 on every readw of SRB data. */
412 uaa_addr=swab16(readw(init_srb+8));
415 printk("UAA resides at %x\n",uaa_addr);
418 writel(uaa_addr,olympic_mmio+LAPA);
419 adapter_addr=olympic_priv->olympic_lap + (uaa_addr & (~0xf800));
422 printk("adapter address: %02x:%02x:%02x:%02x:%02x:%02x\n",
423 readb(adapter_addr), readb(adapter_addr+1),readb(adapter_addr+2),
424 readb(adapter_addr+3),readb(adapter_addr+4),readb(adapter_addr+5));
427 memcpy_fromio(&dev->dev_addr[0], adapter_addr,6);
/* Cache the adapter address-table and parameter-table offsets for
 * later use by olympic_open()/proc reporting. */
429 olympic_priv->olympic_addr_table_addr = swab16(readw(init_srb + 12));
430 olympic_priv->olympic_parms_addr = swab16(readw(init_srb + 14));
/* olympic_open: bring the interface up.
 * Claims the IRQ, builds and issues the SRB OPEN.ADAPTER command in
 * adapter shared memory, sleeps interruptibly until the SRB-reply
 * interrupt (or a signal / 10s timeout), retries once at a different
 * speed when autosensing hits a speed mismatch, then allocates and
 * enqueues the rx/tx descriptor+status rings, unmasks the run-time
 * interrupt sources and starts the tx queue. */
436 static int olympic_open(struct net_device *dev)
438 struct olympic_private *olympic_priv=(struct olympic_private *)dev->priv;
439 u8 __iomem *olympic_mmio=olympic_priv->olympic_mmio,*init_srb;
440 unsigned long flags, t;
441 char open_error[255] ;
442 int i, open_finished = 1 ;
444 DECLARE_WAITQUEUE(wait,current) ;
448 if(request_irq(dev->irq, &olympic_interrupt, SA_SHIRQ , "olympic", dev)) {
453 printk("BMCTL: %x\n",readl(olympic_mmio+BMCTL_SUM));
454 printk("pending ints: %x\n",readl(olympic_mmio+SISR_RR));
/* Enable only the master interrupt + SRB reply for now; the rest of
 * the sources are unmasked after the rings are set up below. */
457 writel(SISR_MI,olympic_mmio+SISR_MASK_SUM);
459 writel(SISR_MI | SISR_SRB_REPLY, olympic_mmio+SISR_MASK); /* more ints later, doesn't stop arb cmd interrupt */
461 writel(LISR_LIE,olympic_mmio+LISR); /* more ints later */
463 /* adapter is closed, so SRB is pointed to by LAPWWO */
465 writel(readw(olympic_mmio+LAPWWO),olympic_mmio+LAPA);
466 init_srb=olympic_priv->olympic_lap + ((readw(olympic_mmio+LAPWWO)) & (~0xf800));
469 printk("LAPWWO: %x, LAPA: %x\n",readw(olympic_mmio+LAPWWO), readl(olympic_mmio+LAPA));
470 printk("SISR Mask = %04x\n", readl(olympic_mmio+SISR_MASK));
471 printk("Before the open command \n");
/* Build the OPEN.ADAPTER SRB in shared adapter memory. */
474 memset_io(init_srb,0,SRB_COMMAND_SIZE);
476 writeb(SRB_OPEN_ADAPTER,init_srb) ; /* open */
477 writeb(OLYMPIC_CLEAR_RET_CODE,init_srb+2);
479 /* If Network Monitor, instruct card to copy MAC frames through the ARB */
480 if (olympic_priv->olympic_network_monitor)
481 writew(swab16(OPEN_ADAPTER_ENABLE_FDX | OPEN_ADAPTER_PASS_ADC_MAC | OPEN_ADAPTER_PASS_ATT_MAC | OPEN_ADAPTER_PASS_BEACON), init_srb+8);
483 writew(swab16(OPEN_ADAPTER_ENABLE_FDX), init_srb+8);
485 /* Test OR of first 3 bytes as its totally possible for
486 * someone to set the first 2 bytes to be zero, although this
487 * is an error, the first byte must have bit 6 set to 1 */
489 if (olympic_priv->olympic_laa[0] | olympic_priv->olympic_laa[1] | olympic_priv->olympic_laa[2]) {
490 writeb(olympic_priv->olympic_laa[0],init_srb+12);
491 writeb(olympic_priv->olympic_laa[1],init_srb+13);
492 writeb(olympic_priv->olympic_laa[2],init_srb+14);
493 writeb(olympic_priv->olympic_laa[3],init_srb+15);
494 writeb(olympic_priv->olympic_laa[4],init_srb+16);
495 writeb(olympic_priv->olympic_laa[5],init_srb+17);
496 memcpy(dev->dev_addr,olympic_priv->olympic_laa,dev->addr_len) ;
498 writeb(1,init_srb+30);
/* Kick the command, then sleep until the SRB-reply interrupt handler
 * clears srb_queued, a signal arrives, or the 10s timeout fires. */
500 spin_lock_irqsave(&olympic_priv->olympic_lock,flags);
501 olympic_priv->srb_queued=1;
503 writel(LISR_SRB_CMD,olympic_mmio+LISR_SUM);
504 spin_unlock_irqrestore(&olympic_priv->olympic_lock,flags);
508 add_wait_queue(&olympic_priv->srb_wait,&wait) ;
509 set_current_state(TASK_INTERRUPTIBLE) ;
511 while(olympic_priv->srb_queued) {
513 if(signal_pending(current)) {
514 printk(KERN_WARNING "%s: Signal received in open.\n",
516 printk(KERN_WARNING "SISR=%x LISR=%x\n",
517 readl(olympic_mmio+SISR),
518 readl(olympic_mmio+LISR));
519 olympic_priv->srb_queued=0;
/* NOTE(review): time_after(jiffies, t + 10*HZ) is the wrap-safe form. */
522 if ((jiffies-t) > 10*HZ) {
523 printk(KERN_WARNING "%s: SRB timed out. \n",dev->name) ;
524 olympic_priv->srb_queued=0;
527 set_current_state(TASK_INTERRUPTIBLE) ;
529 remove_wait_queue(&olympic_priv->srb_wait,&wait) ;
530 set_current_state(TASK_RUNNING) ;
531 olympic_priv->srb_queued = 0 ;
533 printk("init_srb(%p): ",init_srb);
535 printk("%02x ",readb(init_srb+i));
539 /* If we get the same return response as we set, the interrupt wasn't raised and the open
543 if(readb(init_srb+2)== OLYMPIC_CLEAR_RET_CODE) {
544 printk(KERN_WARNING "%s: Adapter Open time out or error.\n", dev->name) ;
548 if(readb(init_srb+2)!=0) {
/* Return code 0x07 = open failure; decode major/minor nibbles. */
549 if (readb(init_srb+2) == 0x07) {
550 if (!olympic_priv->olympic_ring_speed && open_finished) { /* Autosense , first time around */
551 printk(KERN_WARNING "%s: Retrying at different ring speed \n", dev->name);
555 strcpy(open_error, open_maj_error[(readb(init_srb+7) & 0xf0) >> 4]) ;
556 strcat(open_error," - ") ;
557 strcat(open_error, open_min_error[(readb(init_srb+7) & 0x0f)]) ;
/* Minor code 0x0d = no active monitor: autosense cannot work. */
559 if (!olympic_priv->olympic_ring_speed && ((readb(init_srb+7) & 0x0f) == 0x0d)) {
560 printk(KERN_WARNING "%s: Tried to autosense ring speed with no monitors present\n",dev->name);
561 printk(KERN_WARNING "%s: Please try again with a specified ring speed \n",dev->name);
562 free_irq(dev->irq, dev);
566 printk(KERN_WARNING "%s: %s\n",dev->name,open_error);
567 free_irq(dev->irq,dev) ;
570 } /* if autosense && open_finished */
/* NOTE(review): init_srb points into ioremap'd adapter memory, so
 * the plain init_srb[2] dereferences (here and in the printk below)
 * bypass the readb() accessor used everywhere else in this function
 * -- looks like a bug; confirm and convert to readb(init_srb+2). */
571 } else if (init_srb[2] == 0x32) {
572 printk(KERN_WARNING "%s: Invalid LAA: %02x:%02x:%02x:%02x:%02x:%02x\n",
574 olympic_priv->olympic_laa[0],
575 olympic_priv->olympic_laa[1],
576 olympic_priv->olympic_laa[2],
577 olympic_priv->olympic_laa[3],
578 olympic_priv->olympic_laa[4],
579 olympic_priv->olympic_laa[5]) ;
580 free_irq(dev->irq,dev) ;
583 printk(KERN_WARNING "%s: Bad OPEN response: %x\n", dev->name,init_srb[2]);
584 free_irq(dev->irq, dev);
589 } while (!(open_finished)) ; /* Will only loop if ring speed mismatch re-open attempted && autosense is on */
/* Report negotiated mode/speed from the open-response flag byte. */
591 if (readb(init_srb+18) & (1<<3))
592 if (olympic_priv->olympic_message_level)
593 printk(KERN_INFO "%s: Opened in FDX Mode\n",dev->name);
595 if (readb(init_srb+18) & (1<<1))
596 olympic_priv->olympic_ring_speed = 100 ;
597 else if (readb(init_srb+18) & 1)
598 olympic_priv->olympic_ring_speed = 16 ;
600 olympic_priv->olympic_ring_speed = 4 ;
602 if (olympic_priv->olympic_message_level)
603 printk(KERN_INFO "%s: Opened in %d Mbps mode\n",dev->name, olympic_priv->olympic_ring_speed);
/* Cache the adapter-memory offsets of the ASB/SRB/ARB/TRB channels
 * (fields are big-endian in adapter memory, hence swab16). */
605 olympic_priv->asb = swab16(readw(init_srb+8));
606 olympic_priv->srb = swab16(readw(init_srb+10));
607 olympic_priv->arb = swab16(readw(init_srb+12));
608 olympic_priv->trb = swab16(readw(init_srb+16));
610 olympic_priv->olympic_receive_options = 0x01 ;
611 olympic_priv->olympic_copy_all_options = 0 ;
/* --- Receive ring setup: allocate skbs, map them for DMA, program
 * the descriptor/status queue registers, enqueue the buffers. --- */
615 writel((3<<16),olympic_mmio+BMCTL_RWM); /* Ensure end of frame generated interrupts */
617 writel(BMCTL_RX_DIS|3,olympic_mmio+BMCTL_RWM); /* Yes, this enables the RX channel */
619 for(i=0;i<OLYMPIC_RX_RING_SIZE;i++) {
623 skb=dev_alloc_skb(olympic_priv->pkt_buf_sz);
629 olympic_priv->olympic_rx_ring[i].buffer = cpu_to_le32(pci_map_single(olympic_priv->pdev,
630 skb->data,olympic_priv->pkt_buf_sz, PCI_DMA_FROMDEVICE)) ;
631 olympic_priv->olympic_rx_ring[i].res_length = cpu_to_le32(olympic_priv->pkt_buf_sz);
632 olympic_priv->rx_ring_skb[i]=skb;
636 printk(KERN_WARNING "%s: Not enough memory to allocate rx buffers. Adapter disabled\n",dev->name);
637 free_irq(dev->irq, dev);
641 olympic_priv->rx_ring_dma_addr = pci_map_single(olympic_priv->pdev,olympic_priv->olympic_rx_ring,
642 sizeof(struct olympic_rx_desc) * OLYMPIC_RX_RING_SIZE, PCI_DMA_TODEVICE);
643 writel(olympic_priv->rx_ring_dma_addr, olympic_mmio+RXDESCQ);
644 writel(olympic_priv->rx_ring_dma_addr, olympic_mmio+RXCDA);
645 writew(i, olympic_mmio+RXDESCQCNT);
647 olympic_priv->rx_status_ring_dma_addr = pci_map_single(olympic_priv->pdev, olympic_priv->olympic_rx_status_ring,
648 sizeof(struct olympic_rx_status) * OLYMPIC_RX_RING_SIZE, PCI_DMA_FROMDEVICE);
649 writel(olympic_priv->rx_status_ring_dma_addr, olympic_mmio+RXSTATQ);
650 writel(olympic_priv->rx_status_ring_dma_addr, olympic_mmio+RXCSA);
652 olympic_priv->rx_ring_last_received = OLYMPIC_RX_RING_SIZE - 1; /* last processed rx status */
653 olympic_priv->rx_status_last_received = OLYMPIC_RX_RING_SIZE - 1;
655 writew(i, olympic_mmio+RXSTATQCNT);
658 printk("# of rx buffers: %d, RXENQ: %x\n",i, readw(olympic_mmio+RXENQ));
659 printk("RXCSA: %x, rx_status_ring[0]: %p\n",readl(olympic_mmio+RXCSA),&olympic_priv->olympic_rx_status_ring[0]);
660 printk(" stat_ring[1]: %p, stat_ring[2]: %p, stat_ring[3]: %p\n", &(olympic_priv->olympic_rx_status_ring[1]), &(olympic_priv->olympic_rx_status_ring[2]), &(olympic_priv->olympic_rx_status_ring[3]) );
661 printk(" stat_ring[4]: %p, stat_ring[5]: %p, stat_ring[6]: %p\n", &(olympic_priv->olympic_rx_status_ring[4]), &(olympic_priv->olympic_rx_status_ring[5]), &(olympic_priv->olympic_rx_status_ring[6]) );
662 printk(" stat_ring[7]: %p\n", &(olympic_priv->olympic_rx_status_ring[7]) );
664 printk("RXCDA: %x, rx_ring[0]: %p\n",readl(olympic_mmio+RXCDA),&olympic_priv->olympic_rx_ring[0]);
665 printk("Rx_ring_dma_addr = %08x, rx_status_dma_addr = %08x\n",
666 olympic_priv->rx_ring_dma_addr,olympic_priv->rx_status_ring_dma_addr) ;
/* Hand the filled buffers to the adapter; RXENQ's top bit toggles
 * on each enqueue write. */
669 writew((((readw(olympic_mmio+RXENQ)) & 0x8000) ^ 0x8000) | i,olympic_mmio+RXENQ);
672 printk("# of rx buffers: %d, RXENQ: %x\n",i, readw(olympic_mmio+RXENQ));
673 printk("RXCSA: %x, rx_ring[0]: %p\n",readl(olympic_mmio+RXCSA),&olympic_priv->olympic_rx_status_ring[0]);
674 printk("RXCDA: %x, rx_ring[0]: %p\n",readl(olympic_mmio+RXCDA),&olympic_priv->olympic_rx_ring[0]);
677 writel(SISR_RX_STATUS | SISR_RX_NOBUF,olympic_mmio+SISR_MASK_SUM);
/* --- Transmit ring setup (channel 1). --- */
681 writel(BMCTL_TX1_DIS,olympic_mmio+BMCTL_RWM); /* Yes, this enables TX channel 1 */
682 for(i=0;i<OLYMPIC_TX_RING_SIZE;i++)
683 olympic_priv->olympic_tx_ring[i].buffer=0xdeadbeef;
685 olympic_priv->free_tx_ring_entries=OLYMPIC_TX_RING_SIZE;
686 olympic_priv->tx_ring_dma_addr = pci_map_single(olympic_priv->pdev,olympic_priv->olympic_tx_ring,
687 sizeof(struct olympic_tx_desc) * OLYMPIC_TX_RING_SIZE,PCI_DMA_TODEVICE) ;
688 writel(olympic_priv->tx_ring_dma_addr, olympic_mmio+TXDESCQ_1);
689 writel(olympic_priv->tx_ring_dma_addr, olympic_mmio+TXCDA_1);
690 writew(OLYMPIC_TX_RING_SIZE, olympic_mmio+TXDESCQCNT_1);
692 olympic_priv->tx_status_ring_dma_addr = pci_map_single(olympic_priv->pdev, olympic_priv->olympic_tx_status_ring,
693 sizeof(struct olympic_tx_status) * OLYMPIC_TX_RING_SIZE, PCI_DMA_FROMDEVICE);
694 writel(olympic_priv->tx_status_ring_dma_addr,olympic_mmio+TXSTATQ_1);
695 writel(olympic_priv->tx_status_ring_dma_addr,olympic_mmio+TXCSA_1);
696 writew(OLYMPIC_TX_RING_SIZE,olympic_mmio+TXSTATQCNT_1);
698 olympic_priv->tx_ring_free=0; /* next entry in tx ring to use */
699 olympic_priv->tx_ring_last_status=OLYMPIC_TX_RING_SIZE-1; /* last processed tx status */
/* Clear any stale error state, then unmask the remaining run-time
 * interrupt sources. */
701 writel(0xffffffff, olympic_mmio+EISR_RWM) ; /* clean the eisr */
702 writel(0,olympic_mmio+EISR) ;
703 writel(EISR_MASK_OPTIONS,olympic_mmio+EISR_MASK) ; /* enables most of the TX error interrupts */
704 writel(SISR_TX1_EOF | SISR_ADAPTER_CHECK | SISR_ARB_CMD | SISR_TRB_REPLY | SISR_ASB_FREE | SISR_ERR,olympic_mmio+SISR_MASK_SUM);
707 printk("BMCTL: %x\n",readl(olympic_mmio+BMCTL_SUM));
708 printk("SISR MASK: %x\n",readl(olympic_mmio+SISR_MASK));
/* In network-monitor mode, dump the adapter address and parameter
 * tables from the LAP shared-memory window. */
711 if (olympic_priv->olympic_network_monitor) {
714 oat = (olympic_priv->olympic_lap + olympic_priv->olympic_addr_table_addr) ;
715 opt = (olympic_priv->olympic_lap + olympic_priv->olympic_parms_addr) ;
717 printk("%s: Node Address: %02x:%02x:%02x:%02x:%02x:%02x\n",dev->name,
718 readb(oat+offsetof(struct olympic_adapter_addr_table,node_addr)),
719 readb(oat+offsetof(struct olympic_adapter_addr_table,node_addr)+1),
720 readb(oat+offsetof(struct olympic_adapter_addr_table,node_addr)+2),
721 readb(oat+offsetof(struct olympic_adapter_addr_table,node_addr)+3),
722 readb(oat+offsetof(struct olympic_adapter_addr_table,node_addr)+4),
723 readb(oat+offsetof(struct olympic_adapter_addr_table,node_addr)+5));
724 printk("%s: Functional Address: %02x:%02x:%02x:%02x\n",dev->name,
725 readb(oat+offsetof(struct olympic_adapter_addr_table,func_addr)),
726 readb(oat+offsetof(struct olympic_adapter_addr_table,func_addr)+1),
727 readb(oat+offsetof(struct olympic_adapter_addr_table,func_addr)+2),
728 readb(oat+offsetof(struct olympic_adapter_addr_table,func_addr)+3));
729 printk("%s: NAUN Address: %02x:%02x:%02x:%02x:%02x:%02x\n",dev->name,
730 readb(opt+offsetof(struct olympic_parameters_table, up_node_addr)),
731 readb(opt+offsetof(struct olympic_parameters_table, up_node_addr)+1),
732 readb(opt+offsetof(struct olympic_parameters_table, up_node_addr)+2),
733 readb(opt+offsetof(struct olympic_parameters_table, up_node_addr)+3),
734 readb(opt+offsetof(struct olympic_parameters_table, up_node_addr)+4),
735 readb(opt+offsetof(struct olympic_parameters_table, up_node_addr)+5));
738 netif_start_queue(dev);
744 * When we enter the rx routine we do not know how many frames have been
745 * queued on the rx channel. Therefore we start at the next rx status
746 * position and travel around the receive ring until we have completed
749 * This means that we may process the frame before we receive the end
750 * of frame interrupt. This is why we always test the status instead
751 * of blindly processing the next frame.
753 * We also remove the last 4 bytes from the packet as well, these are
754 * just token ring trailer info and upset protocols that don't check
755 * their own length, i.e. SNA.
/* olympic_rx: receive-path handler (called from the interrupt path).
 * Walks the rx status ring from the last processed entry, and for each
 * completed frame either swaps a fresh skb into the descriptor (single
 * buffer, large frame), copies out of the DMA buffer (small frame), or
 * gathers a multi-buffer frame into one skb.  The 4-byte token-ring
 * trailer is stripped before handing the skb to the stack (see the
 * comment block above this function). */
758 static void olympic_rx(struct net_device *dev)
760 struct olympic_private *olympic_priv=(struct olympic_private *)dev->priv;
761 u8 __iomem *olympic_mmio=olympic_priv->olympic_mmio;
762 struct olympic_rx_status *rx_status;
763 struct olympic_rx_desc *rx_desc ;
764 int rx_ring_last_received,length, buffer_cnt, cpy_length, frag_len;
765 struct sk_buff *skb, *skb2;
/* Start at the entry after the last status we processed; a zero
 * status word means the adapter has not completed that entry yet. */
768 rx_status=&(olympic_priv->olympic_rx_status_ring[(olympic_priv->rx_status_last_received + 1) & (OLYMPIC_RX_RING_SIZE - 1)]) ;
770 while (rx_status->status_buffercnt) {
771 u32 l_status_buffercnt;
773 olympic_priv->rx_status_last_received++ ;
774 olympic_priv->rx_status_last_received &= (OLYMPIC_RX_RING_SIZE -1);
776 printk("rx status: %x rx len: %x \n", le32_to_cpu(rx_status->status_buffercnt), le32_to_cpu(rx_status->fragmentcnt_framelen));
/* Status word layout: high half = status/flags, low half = buffer
 * count; frame word: high half = fragment count, low = frame length. */
778 length = le32_to_cpu(rx_status->fragmentcnt_framelen) & 0xffff;
779 buffer_cnt = le32_to_cpu(rx_status->status_buffercnt) & 0xffff;
780 i = buffer_cnt ; /* Need buffer_cnt later for rxenq update */
781 frag_len = le32_to_cpu(rx_status->fragmentcnt_framelen) >> 16;
784 printk("length: %x, frag_len: %x, buffer_cnt: %x\n", length, frag_len, buffer_cnt);
786 l_status_buffercnt = le32_to_cpu(rx_status->status_buffercnt);
/* Top two bits set = frame complete; 0x3B000000 = any error bits. */
787 if(l_status_buffercnt & 0xC0000000) {
788 if (l_status_buffercnt & 0x3B000000) {
789 if (olympic_priv->olympic_message_level) {
790 if (l_status_buffercnt & (1<<29)) /* Rx Frame Truncated */
791 printk(KERN_WARNING "%s: Rx Frame Truncated \n",dev->name);
792 if (l_status_buffercnt & (1<<28)) /*Rx receive overrun */
793 printk(KERN_WARNING "%s: Rx Frame Receive overrun \n",dev->name);
794 if (l_status_buffercnt & (1<<27)) /* No receive buffers */
795 printk(KERN_WARNING "%s: No receive buffers \n",dev->name);
796 if (l_status_buffercnt & (1<<25)) /* Receive frame error detect */
797 printk(KERN_WARNING "%s: Receive frame error detect \n",dev->name);
798 if (l_status_buffercnt & (1<<24)) /* Received Error Detect */
799 printk(KERN_WARNING "%s: Received Error Detect \n",dev->name);
/* Errored frame: skip its buffers and count the error. */
801 olympic_priv->rx_ring_last_received += i ;
802 olympic_priv->rx_ring_last_received &= (OLYMPIC_RX_RING_SIZE -1) ;
803 olympic_priv->olympic_stats.rx_errors++;
/* Good frame: size the new skb -- for a single-buffer frame we
 * may swap buffers, so allocate at least pkt_buf_sz. */
806 if (buffer_cnt == 1) {
807 skb = dev_alloc_skb(max_t(int, olympic_priv->pkt_buf_sz,length)) ;
809 skb = dev_alloc_skb(length) ;
813 printk(KERN_WARNING "%s: Not enough memory to copy packet to upper layers. \n",dev->name) ;
814 olympic_priv->olympic_stats.rx_dropped++ ;
815 /* Update counters even though we don't transfer the frame */
816 olympic_priv->rx_ring_last_received += i ;
817 olympic_priv->rx_ring_last_received &= (OLYMPIC_RX_RING_SIZE -1) ;
821 /* Optimise based upon number of buffers used.
822 If only one buffer is used we can simply swap the buffers around.
823 If more than one then we must use the new buffer and copy the information
824 first. Ideally all frames would be in a single buffer, this can be tuned by
825 altering the buffer size. If the length of the packet is less than
826 1500 bytes we're going to copy it over anyway to stop packets getting
827 dropped from sockets with buffers smaller than our pkt_buf_sz. */
830 olympic_priv->rx_ring_last_received++ ;
831 olympic_priv->rx_ring_last_received &= (OLYMPIC_RX_RING_SIZE -1);
832 rx_ring_last_received = olympic_priv->rx_ring_last_received ;
/* Case 1: single large buffer -- unmap the filled skb, hand it up,
 * and map the freshly allocated skb into the descriptor instead. */
834 skb2=olympic_priv->rx_ring_skb[rx_ring_last_received] ;
836 pci_unmap_single(olympic_priv->pdev,
837 le32_to_cpu(olympic_priv->olympic_rx_ring[rx_ring_last_received].buffer),
838 olympic_priv->pkt_buf_sz,PCI_DMA_FROMDEVICE) ;
839 skb_put(skb2,length-4);
840 skb2->protocol = tr_type_trans(skb2,dev);
841 olympic_priv->olympic_rx_ring[rx_ring_last_received].buffer =
842 cpu_to_le32(pci_map_single(olympic_priv->pdev, skb->data,
843 olympic_priv->pkt_buf_sz, PCI_DMA_FROMDEVICE));
844 olympic_priv->olympic_rx_ring[rx_ring_last_received].res_length =
845 cpu_to_le32(olympic_priv->pkt_buf_sz);
846 olympic_priv->rx_ring_skb[rx_ring_last_received] = skb ;
/* Case 2: small single-buffer frame -- sync the DMA buffer, copy
 * the payload (minus the 4-byte trailer) and leave it mapped. */
849 pci_dma_sync_single_for_cpu(olympic_priv->pdev,
850 le32_to_cpu(olympic_priv->olympic_rx_ring[rx_ring_last_received].buffer),
851 olympic_priv->pkt_buf_sz,PCI_DMA_FROMDEVICE) ;
852 memcpy(skb_put(skb,length-4),olympic_priv->rx_ring_skb[rx_ring_last_received]->data,length-4) ;
853 pci_dma_sync_single_for_device(olympic_priv->pdev,
854 le32_to_cpu(olympic_priv->olympic_rx_ring[rx_ring_last_received].buffer),
855 olympic_priv->pkt_buf_sz,PCI_DMA_FROMDEVICE) ;
856 skb->protocol = tr_type_trans(skb,dev) ;
/* Case 3: multi-buffer frame -- gather each fragment into the new
 * skb; the final fragment's length is frag_len, earlier ones use
 * the descriptor's residual length. */
860 do { /* Walk the buffers */
861 olympic_priv->rx_ring_last_received++ ;
862 olympic_priv->rx_ring_last_received &= (OLYMPIC_RX_RING_SIZE -1);
863 rx_ring_last_received = olympic_priv->rx_ring_last_received ;
864 pci_dma_sync_single_for_cpu(olympic_priv->pdev,
865 le32_to_cpu(olympic_priv->olympic_rx_ring[rx_ring_last_received].buffer),
866 olympic_priv->pkt_buf_sz,PCI_DMA_FROMDEVICE) ;
867 rx_desc = &(olympic_priv->olympic_rx_ring[rx_ring_last_received]);
868 cpy_length = (i == 1 ? frag_len : le32_to_cpu(rx_desc->res_length));
869 memcpy(skb_put(skb, cpy_length), olympic_priv->rx_ring_skb[rx_ring_last_received]->data, cpy_length) ;
870 pci_dma_sync_single_for_device(olympic_priv->pdev,
871 le32_to_cpu(olympic_priv->olympic_rx_ring[rx_ring_last_received].buffer),
872 olympic_priv->pkt_buf_sz,PCI_DMA_FROMDEVICE) ;
874 skb_trim(skb,skb->len-4) ;
875 skb->protocol = tr_type_trans(skb,dev);
878 dev->last_rx = jiffies ;
879 olympic_priv->olympic_stats.rx_packets++ ;
880 olympic_priv->olympic_stats.rx_bytes += length ;
881 } /* if skb == null */
882 } /* If status & 0x3b */
/* Frame-incomplete status: just advance past its buffers. */
884 } else { /*if buffercnt & 0xC */
885 olympic_priv->rx_ring_last_received += i ;
886 olympic_priv->rx_ring_last_received &= (OLYMPIC_RX_RING_SIZE - 1) ;
/* Clear the consumed status entry, advance, and re-enqueue the
 * freed buffers (RXENQ's top bit toggles on each write). */
889 rx_status->fragmentcnt_framelen = 0 ;
890 rx_status->status_buffercnt = 0 ;
891 rx_status = &(olympic_priv->olympic_rx_status_ring[(olympic_priv->rx_status_last_received+1) & (OLYMPIC_RX_RING_SIZE -1) ]);
893 writew((((readw(olympic_mmio+RXENQ)) & 0x8000) ^ 0x8000) | buffer_cnt , olympic_mmio+RXENQ);
/*
 * olympic_freemem - release the driver's RX/TX DMA resources.
 *
 * Walks the RX ring starting at rx_status_last_received, freeing any skb
 * still attached to a slot and unmapping its streaming DMA buffer, then
 * unmaps the four ring/status areas themselves.  dev_kfree_skb_irq() is
 * used, so this is safe to call with interrupts disabled (called from
 * olympic_close()).
 */
898 static void olympic_freemem(struct net_device *dev)
900 	struct olympic_private *olympic_priv=(struct olympic_private *)dev->priv;
	/* Visit every RX ring slot exactly once. */
903 	for(i=0;i<OLYMPIC_RX_RING_SIZE;i++) {
904 		if (olympic_priv->rx_ring_skb[olympic_priv->rx_status_last_received] != NULL) {
905 			dev_kfree_skb_irq(olympic_priv->rx_ring_skb[olympic_priv->rx_status_last_received]);
906 			olympic_priv->rx_ring_skb[olympic_priv->rx_status_last_received] = NULL;
	/*
	 * 0xdeadbeef marks a descriptor with no live DMA mapping.
	 * NOTE(review): .buffer is stored little-endian elsewhere in this
	 * file (cpu_to_le32) but compared here against host-order
	 * 0xdeadbeef; on big-endian CPUs this test looks wrong -- confirm
	 * (upstream later changed this to cpu_to_le32(0xdeadbeef)).
	 */
908 		if (olympic_priv->olympic_rx_ring[olympic_priv->rx_status_last_received].buffer != 0xdeadbeef) {
909 			pci_unmap_single(olympic_priv->pdev,
910 				le32_to_cpu(olympic_priv->olympic_rx_ring[olympic_priv->rx_status_last_received].buffer),
911 				olympic_priv->pkt_buf_sz, PCI_DMA_FROMDEVICE);
	/* Advance and wrap the ring index (ring size is a power of two). */
913 		olympic_priv->rx_status_last_received++;
914 		olympic_priv->rx_status_last_received&=OLYMPIC_RX_RING_SIZE-1;
	/* Unmap the consistent ring areas: RX status, RX desc, TX status, TX desc. */
917 	pci_unmap_single(olympic_priv->pdev, olympic_priv->rx_status_ring_dma_addr,
918 		sizeof(struct olympic_rx_status) * OLYMPIC_RX_RING_SIZE, PCI_DMA_FROMDEVICE);
919 	pci_unmap_single(olympic_priv->pdev, olympic_priv->rx_ring_dma_addr,
920 		sizeof(struct olympic_rx_desc) * OLYMPIC_RX_RING_SIZE, PCI_DMA_TODEVICE);
922 	pci_unmap_single(olympic_priv->pdev, olympic_priv->tx_status_ring_dma_addr,
923 		sizeof(struct olympic_tx_status) * OLYMPIC_TX_RING_SIZE, PCI_DMA_FROMDEVICE);
924 	pci_unmap_single(olympic_priv->pdev, olympic_priv->tx_ring_dma_addr,
925 		sizeof(struct olympic_tx_desc) * OLYMPIC_TX_RING_SIZE, PCI_DMA_TODEVICE);
/*
 * olympic_interrupt - top-half interrupt handler for the adapter.
 *
 * Reads SISR to decide whether the interrupt is ours, takes the adapter
 * spinlock, and then dispatches on the individual SISR bits: SRB replies,
 * TX completions, RX status, adapter-check, ASB-free, ARB commands, TRB
 * replies, RX-no-buffer and bus errors.  Re-enables the master interrupt
 * (SISR_MI) via SISR_MASK_SUM before returning.
 */
930 static irqreturn_t olympic_interrupt(int irq, void *dev_id, struct pt_regs *regs)
932 	struct net_device *dev= (struct net_device *)dev_id;
933 	struct olympic_private *olympic_priv=(struct olympic_private *)dev->priv;
934 	u8 __iomem *olympic_mmio=olympic_priv->olympic_mmio;
936 	u8 __iomem *adapter_check_area ;
939 	 *  Read sisr but don't reset it yet.
940 	 *  The indication bit may have been set but the interrupt latch
941 	 *  bit may not be set, so we'd lose the interrupt later.
943 	sisr=readl(olympic_mmio+SISR) ;
944 	if (!(sisr & SISR_MI)) /* Interrupt isn't for us */
946 	sisr=readl(olympic_mmio+SISR_RR) ;  /* Read & Reset sisr */
948 	spin_lock(&olympic_priv->olympic_lock);
950 	/* Hotswap gives us this on removal */
951 	if (sisr == 0xffffffff) {
952 		printk(KERN_WARNING "%s: Hotswap adapter removal.\n",dev->name) ;
953 		spin_unlock(&olympic_priv->olympic_lock) ;
957 	if (sisr & (SISR_SRB_REPLY | SISR_TX1_EOF | SISR_RX_STATUS | SISR_ADAPTER_CHECK |
958 			SISR_ASB_FREE | SISR_ARB_CMD | SISR_TRB_REPLY | SISR_RX_NOBUF | SISR_ERR)) {
960 		/* If we ever get this the adapter is seriously dead. Only a reset is going to
961 		 * bring it back to life. We're talking pci bus errors and such like :( */
962 		if((sisr & SISR_ERR) && (readl(olympic_mmio+EISR) & EISR_MASK_OPTIONS)) {
963 			printk(KERN_ERR "Olympic: EISR Error, EISR=%08x\n",readl(olympic_mmio+EISR)) ;
964 			printk(KERN_ERR "The adapter must be reset to clear this condition.\n") ;
965 			printk(KERN_ERR "Please report this error to the driver maintainer and/\n") ;
966 			printk(KERN_ERR "or the linux-tr mailing list.\n") ;
967 			wake_up_interruptible(&olympic_priv->srb_wait);
968 			spin_unlock(&olympic_priv->olympic_lock) ;
		/* SRB reply: srb_queued==1 means a sleeper is waiting on srb_wait,
		 * srb_queued==2 means the reply is handled in the srb bottom half. */
972 		if(sisr & SISR_SRB_REPLY) {
973 			if(olympic_priv->srb_queued==1) {
974 				wake_up_interruptible(&olympic_priv->srb_wait);
975 			} else if (olympic_priv->srb_queued==2) {
976 				olympic_srb_bh(dev) ;
978 			olympic_priv->srb_queued=0;
979 		} /* SISR_SRB_REPLY */
981 		/* We shouldn't ever miss the Tx interrupt, but you never know, hence the loop to ensure
982 			we get all tx completions. */
983 		if (sisr & SISR_TX1_EOF) {
			/* Reap every completed TX descriptor: bump stats, unmap the
			 * streaming DMA buffer, free the skb and clear the slot. */
984 			while(olympic_priv->olympic_tx_status_ring[(olympic_priv->tx_ring_last_status + 1) & (OLYMPIC_TX_RING_SIZE-1)].status) {
985 				olympic_priv->tx_ring_last_status++;
986 				olympic_priv->tx_ring_last_status &= (OLYMPIC_TX_RING_SIZE-1);
987 				olympic_priv->free_tx_ring_entries++;
988 				olympic_priv->olympic_stats.tx_bytes += olympic_priv->tx_ring_skb[olympic_priv->tx_ring_last_status]->len;
989 				olympic_priv->olympic_stats.tx_packets++ ;
990 				pci_unmap_single(olympic_priv->pdev,
991 					le32_to_cpu(olympic_priv->olympic_tx_ring[olympic_priv->tx_ring_last_status].buffer),
992 					olympic_priv->tx_ring_skb[olympic_priv->tx_ring_last_status]->len,PCI_DMA_TODEVICE);
993 				dev_kfree_skb_irq(olympic_priv->tx_ring_skb[olympic_priv->tx_ring_last_status]);
			/* Mark the slot as having no live mapping.
			 * NOTE(review): stored host-order here, but olympic_freemem()
			 * compares the le32 field -- endianness looks inconsistent on
			 * big-endian CPUs; confirm against upstream fixes. */
994 				olympic_priv->olympic_tx_ring[olympic_priv->tx_ring_last_status].buffer=0xdeadbeef;
995 				olympic_priv->olympic_tx_status_ring[olympic_priv->tx_ring_last_status].status=0;
997 			netif_wake_queue(dev);
1000 		if (sisr & SISR_RX_STATUS) {
1002 		} /* SISR_RX_STATUS */
		/* Adapter check: fatal hardware state.  Dump the 8-byte check
		 * area from LAP memory and stop the queue; adapter needs reset. */
1004 		if (sisr & SISR_ADAPTER_CHECK) {
1005 			netif_stop_queue(dev);
1006 			printk(KERN_WARNING "%s: Adapter Check Interrupt Raised, 8 bytes of information follow:\n", dev->name);
1007 			writel(readl(olympic_mmio+LAPWWC),olympic_mmio+LAPA);
1008 			adapter_check_area = olympic_priv->olympic_lap + ((readl(olympic_mmio+LAPWWC)) & (~0xf800)) ;
1009 			printk(KERN_WARNING "%s: Bytes %02x:%02x:%02x:%02x:%02x:%02x:%02x:%02x\n",dev->name, readb(adapter_check_area+0), readb(adapter_check_area+1), readb(adapter_check_area+2), readb(adapter_check_area+3), readb(adapter_check_area+4), readb(adapter_check_area+5), readb(adapter_check_area+6), readb(adapter_check_area+7)) ;
1010 			spin_unlock(&olympic_priv->olympic_lock) ;
1012 		} /* SISR_ADAPTER_CHECK */
1014 		if (sisr & SISR_ASB_FREE) {
1015 			/* Wake up anything that is waiting for the asb response */
1016 			if (olympic_priv->asb_queued) {
1017 				olympic_asb_bh(dev) ;
1019 		} /* SISR_ASB_FREE */
1021 		if (sisr & SISR_ARB_CMD) {
1022 			olympic_arb_cmd(dev) ;
1023 		} /* SISR_ARB_CMD */
1025 		if (sisr & SISR_TRB_REPLY) {
1026 			/* Wake up anything that is waiting for the trb response */
1027 			if (olympic_priv->trb_queued) {
1028 				wake_up_interruptible(&olympic_priv->trb_wait);
1030 			olympic_priv->trb_queued = 0 ;
1031 		} /* SISR_TRB_REPLY */
1033 		if (sisr & SISR_RX_NOBUF) {
1034 			/* According to the documentation, we don't have to do anything, but trapping it keeps it out of
1035 				/var/log/messages. */
1036 		} /* SISR_RX_NOBUF */
1038 		printk(KERN_WARNING "%s: Unexpected interrupt: %x\n",dev->name, sisr);
1039 		printk(KERN_WARNING "%s: SISR_MASK: %x\n",dev->name, readl(olympic_mmio+SISR_MASK)) ;
1040 	} /* One of the interrupts we want */
	/* Re-arm the master interrupt before dropping the lock. */
1041 	writel(SISR_MI,olympic_mmio+SISR_MASK_SUM);
1043 	spin_unlock(&olympic_priv->olympic_lock) ;
/*
 * olympic_xmit - hard_start_xmit entry point: queue one skb for transmit.
 *
 * Under the adapter spinlock (IRQ-save), claims a free TX descriptor, maps
 * the skb for DMA, writes the little-endian descriptor fields (bit 31 of
 * status_length appears to arm the descriptor -- confirm against the chip
 * docs), and kicks the adapter via the TXENQ_1 toggle register.  The queue
 * is stopped on entry and only re-woken when a slot was actually consumed.
 */
1047 static int olympic_xmit(struct sk_buff *skb, struct net_device *dev)
1049 	struct olympic_private *olympic_priv=(struct olympic_private *)dev->priv;
1050 	u8 __iomem *olympic_mmio=olympic_priv->olympic_mmio;
1051 	unsigned long flags ;
1053 	spin_lock_irqsave(&olympic_priv->olympic_lock, flags);
1055 	netif_stop_queue(dev);
1057 	if(olympic_priv->free_tx_ring_entries) {
		/* Map the frame and fill in the descriptor (fields are le32). */
1058 		olympic_priv->olympic_tx_ring[olympic_priv->tx_ring_free].buffer =
1059 			cpu_to_le32(pci_map_single(olympic_priv->pdev, skb->data, skb->len,PCI_DMA_TODEVICE));
1060 		olympic_priv->olympic_tx_ring[olympic_priv->tx_ring_free].status_length = cpu_to_le32(skb->len | (0x80000000));
1061 		olympic_priv->tx_ring_skb[olympic_priv->tx_ring_free]=skb;
1062 		olympic_priv->free_tx_ring_entries--;
1064 		olympic_priv->tx_ring_free++;
1065 		olympic_priv->tx_ring_free &= (OLYMPIC_TX_RING_SIZE-1);
		/* Toggle bit 15 and enqueue one frame on TX channel 1. */
1066 		writew((((readw(olympic_mmio+TXENQ_1)) & 0x8000) ^ 0x8000) | 1,olympic_mmio+TXENQ_1);
1067 		netif_wake_queue(dev);
1068 		spin_unlock_irqrestore(&olympic_priv->olympic_lock,flags);
	/* No free descriptor: leave the queue stopped and drop the lock. */
1071 		spin_unlock_irqrestore(&olympic_priv->olympic_lock,flags);
/*
 * olympic_close - shut the adapter down.
 *
 * Issues an SRB_CLOSE_ADAPTER command to the adapter's SRB area, then
 * sleeps (interruptibly, up to 60s) on srb_wait for the interrupt handler
 * to acknowledge the reply.  On timeout or signal it gives up and logs a
 * warning.  Afterwards it frees all DMA resources via olympic_freemem(),
 * pulses BCTL bits 13-14 to reset the tx/rx fifos and busmaster logic,
 * optionally dumps the SRB bytes, and releases the IRQ.
 */
1078 static int olympic_close(struct net_device *dev)
1080 	struct olympic_private *olympic_priv=(struct olympic_private *)dev->priv;
1081 	u8 __iomem *olympic_mmio=olympic_priv->olympic_mmio,*srb;
1082 	unsigned long t,flags;
1084 	DECLARE_WAITQUEUE(wait,current) ;
1086 	netif_stop_queue(dev);
	/* Point LAPA at the SRB and compute its ioremapped address. */
1088 	writel(olympic_priv->srb,olympic_mmio+LAPA);
1089 	srb=olympic_priv->olympic_lap + (olympic_priv->srb & (~0xf800));
1091 	writeb(SRB_CLOSE_ADAPTER,srb+0);
1093 	writeb(OLYMPIC_CLEAR_RET_CODE,srb+2);
	/* Register on the SRB waitqueue before issuing the command so the
	 * wakeup from the interrupt handler cannot be missed. */
1095 	add_wait_queue(&olympic_priv->srb_wait,&wait) ;
1096 	set_current_state(TASK_INTERRUPTIBLE) ;
1098 	spin_lock_irqsave(&olympic_priv->olympic_lock,flags);
1099 	olympic_priv->srb_queued=1;
1101 	writel(LISR_SRB_CMD,olympic_mmio+LISR_SUM);
1102 	spin_unlock_irqrestore(&olympic_priv->olympic_lock,flags);
1104 	while(olympic_priv->srb_queued) {
1106 		t = schedule_timeout(60*HZ);
1108 		if(signal_pending(current)) {
1109 			printk(KERN_WARNING "%s: SRB timed out.\n",dev->name);
1110 			printk(KERN_WARNING "SISR=%x MISR=%x\n",readl(olympic_mmio+SISR),readl(olympic_mmio+LISR));
1111 			olympic_priv->srb_queued=0;
1116 			printk(KERN_WARNING "%s: SRB timed out. May not be fatal. \n",dev->name) ;
1118 		olympic_priv->srb_queued=0;
1120 	remove_wait_queue(&olympic_priv->srb_wait,&wait) ;
1122 	olympic_priv->rx_status_last_received++;
1123 	olympic_priv->rx_status_last_received&=OLYMPIC_RX_RING_SIZE-1;
1125 	olympic_freemem(dev) ;
1127 	/* reset tx/rx fifo's and busmaster logic */
1129 	writel(readl(olympic_mmio+BCTL)|(3<<13),olympic_mmio+BCTL);
1131 	writel(readl(olympic_mmio+BCTL)&~(3<<13),olympic_mmio+BCTL);
	/* Debug dump of the SRB reply bytes. */
1136 		printk("srb(%p): ",srb);
1138 			printk("%x ",readb(srb+i));
1142 	free_irq(dev->irq,dev);
/*
 * olympic_set_rx_mode - set_multicast_list handler.
 *
 * Two duties: (1) if IFF_PROMISC changed the copy.all options, issue an
 * SRB_MODIFY_RECEIVE_OPTIONS command; (2) OR together bytes 2-5 of every
 * multicast address into a 4-byte functional-address mask and program it
 * with SRB_SET_FUNC_ADDRESS.  Both commands complete asynchronously in
 * olympic_srb_bh() (srb_queued == 2, cannot sleep here).
 */
1148 static void olympic_set_rx_mode(struct net_device *dev)
1150 	struct olympic_private *olympic_priv = (struct olympic_private *) dev->priv ;
1151 	u8 __iomem *olympic_mmio = olympic_priv->olympic_mmio ;
1154 	struct dev_mc_list *dmi ;
1155 	unsigned char dev_mc_address[4] ;
1158 	writel(olympic_priv->srb,olympic_mmio+LAPA);
1159 	srb=olympic_priv->olympic_lap + (olympic_priv->srb & (~0xf800));
1160 	options = olympic_priv->olympic_copy_all_options;
1162 	if (dev->flags&IFF_PROMISC)
1167 	/* Only issue the srb if there is a change in options */
1169 	if ((options ^ olympic_priv->olympic_copy_all_options)) {
1171 	/* Now to issue the srb command to alter the copy.all.options */
1173 		writeb(SRB_MODIFY_RECEIVE_OPTIONS,srb);
1175 		writeb(OLYMPIC_CLEAR_RET_CODE,srb+2);
1177 		writeb(olympic_priv->olympic_receive_options,srb+4);
1178 		writeb(options,srb+5);
1180 		olympic_priv->srb_queued=2; /* Can't sleep, use srb_bh */
1182 		writel(LISR_SRB_CMD,olympic_mmio+LISR_SUM);
1184 		olympic_priv->olympic_copy_all_options = options ;
1189 	/* Set the functional addresses we need for multicast */
1191 	dev_mc_address[0] = dev_mc_address[1] = dev_mc_address[2] = dev_mc_address[3] = 0 ;
	/* Token Ring functional addresses: fold bytes 2-5 of each group
	 * address into the 4-byte mask (bytes 0-1 are the fixed C0:00). */
1193 	for (i=0,dmi=dev->mc_list;i < dev->mc_count; i++,dmi = dmi->next) {
1194 		dev_mc_address[0] |= dmi->dmi_addr[2] ;
1195 		dev_mc_address[1] |= dmi->dmi_addr[3] ;
1196 		dev_mc_address[2] |= dmi->dmi_addr[4] ;
1197 		dev_mc_address[3] |= dmi->dmi_addr[5] ;
1200 	writeb(SRB_SET_FUNC_ADDRESS,srb+0);
1202 	writeb(OLYMPIC_CLEAR_RET_CODE,srb+2);
1206 	writeb(dev_mc_address[0],srb+6);
1207 	writeb(dev_mc_address[1],srb+7);
1208 	writeb(dev_mc_address[2],srb+8);
1209 	writeb(dev_mc_address[3],srb+9);
1211 	olympic_priv->srb_queued = 2 ;
1212 	writel(LISR_SRB_CMD,olympic_mmio+LISR_SUM);
/*
 * olympic_srb_bh - bottom half for asynchronous SRB replies.
 *
 * Called from the interrupt handler when srb_queued == 2.  Dispatches on
 * the SRB command byte (srb[0]) and then on the return code (srb[2]),
 * logging success or failure for each of the commands this driver issues
 * without sleeping: modify-receive-options, set/reset group address,
 * set functional address, read-log and read-SR-counters.
 */
1216 static void olympic_srb_bh(struct net_device *dev)
1218 	struct olympic_private *olympic_priv = (struct olympic_private *) dev->priv ;
1219 	u8 __iomem *olympic_mmio = olympic_priv->olympic_mmio ;
1222 	writel(olympic_priv->srb,olympic_mmio+LAPA);
1223 	srb=olympic_priv->olympic_lap + (olympic_priv->srb & (~0xf800));
1225 	switch (readb(srb)) {
1227 		/* SRB_MODIFY_RECEIVE_OPTIONS i.e. set_multicast_list options (promiscuous)
1228 		 * At some point we should do something if we get an error, such as
1229 		 * resetting the IFF_PROMISC flag in dev
1232 		case SRB_MODIFY_RECEIVE_OPTIONS:
1233 			switch (readb(srb+2)) {
1235 					printk(KERN_WARNING "%s: Unrecognized srb command\n",dev->name) ;
1238 					printk(KERN_WARNING "%s: Adapter must be open for this operation, doh!!\n",dev->name);
1241 					if (olympic_priv->olympic_message_level)
1242 						printk(KERN_WARNING "%s: Receive Options Modified to %x,%x\n",dev->name,olympic_priv->olympic_copy_all_options, olympic_priv->olympic_receive_options) ;
1244 			} /* switch srb[2] */
1247 		/* SRB_SET_GROUP_ADDRESS - Multicast group setting
1250 		case SRB_SET_GROUP_ADDRESS:
1251 			switch (readb(srb+2)) {
1255 					printk(KERN_WARNING "%s: Unrecognized srb command \n",dev->name) ;
1258 					printk(KERN_WARNING "%s: Adapter must be open for this operation, doh!!\n",dev->name);
1261 					printk(KERN_WARNING "%s: Group/Functional address indicator bits not set correctly\n",dev->name) ;
1263 				case 0x3e: /* If we ever implement individual multicast addresses, will need to deal with this */
1264 					printk(KERN_WARNING "%s: Group address registers full\n",dev->name) ;
1267 					printk(KERN_INFO "%s: Group Address already set.\n",dev->name) ;
1271 			} /* switch srb[2] */
1274 		/* SRB_RESET_GROUP_ADDRESS - Remove a multicast address from group list
1277 		case SRB_RESET_GROUP_ADDRESS:
1278 			switch (readb(srb+2)) {
1282 					printk(KERN_WARNING "%s: Unrecognized srb command \n",dev->name) ;
1285 					printk(KERN_WARNING "%s: Adapter must be open for this operation, doh!!\n",dev->name) ;
1287 				case 0x39: /* Must deal with this if individual multicast addresses used */
1288 					printk(KERN_INFO "%s: Group address not found \n",dev->name);
1292 			} /* switch srb[2] */
1296 		/* SRB_SET_FUNC_ADDRESS - Called by the set_rx_mode
1299 		case SRB_SET_FUNC_ADDRESS:
1300 			switch (readb(srb+2)) {
1302 					if (olympic_priv->olympic_message_level)
1303 						printk(KERN_INFO "%s: Functional Address Mask Set \n",dev->name) ;
1306 					printk(KERN_WARNING "%s: Unrecognized srb command \n",dev->name) ;
1309 					printk(KERN_WARNING "%s: Adapter must be open for this operation, doh!!\n",dev->name) ;
1313 			} /* switch srb[2] */
1316 		/* SRB_READ_LOG - Read and reset the adapter error counters
1320 			switch (readb(srb+2)) {
1322 					if (olympic_priv->olympic_message_level)
1323 						printk(KERN_INFO "%s: Read Log issued\n",dev->name) ;
1326 					printk(KERN_WARNING "%s: Unrecognized srb command \n",dev->name) ;
1329 					printk(KERN_WARNING "%s: Adapter must be open for this operation, doh!!\n",dev->name) ;
1332 			} /* switch srb[2] */
1335 		/* SRB_READ_SR_COUNTERS - Read and reset the source routing bridge related counters */
1337 		case SRB_READ_SR_COUNTERS:
1338 			switch (readb(srb+2)) {
1340 					if (olympic_priv->olympic_message_level)
1341 						printk(KERN_INFO "%s: Read Source Routing Counters issued\n",dev->name) ;
1344 					printk(KERN_WARNING "%s: Unrecognized srb command \n",dev->name) ;
1347 					printk(KERN_WARNING "%s: Adapter must be open for this operation, doh!!\n",dev->name) ;
1351 			} /* switch srb[2] */
1355 			printk(KERN_WARNING "%s: Unrecognized srb bh return value.\n",dev->name);
1357 	} /* switch srb[0] */
/*
 * olympic_get_stats - return the driver-maintained net_device_stats.
 * The counters are updated in the interrupt handler (tx) and rx path.
 */
1361 static struct net_device_stats * olympic_get_stats(struct net_device *dev)
1363 	struct olympic_private *olympic_priv ;
1364 	olympic_priv=(struct olympic_private *) dev->priv;
1365 	return (struct net_device_stats *) &olympic_priv->olympic_stats;
/*
 * olympic_set_mac_address - store a locally administered address (LAA).
 *
 * Refuses while the interface is running; otherwise copies the new
 * address into olympic_laa (presumably programmed into the adapter at
 * the next open -- not visible here, confirm) and optionally logs it.
 */
1368 static int olympic_set_mac_address (struct net_device *dev, void *addr)
1370 	struct sockaddr *saddr = addr ;
1371 	struct olympic_private *olympic_priv = (struct olympic_private *)dev->priv ;
1373 	if (netif_running(dev)) {
1374 		printk(KERN_WARNING "%s: Cannot set mac/laa address while card is open\n", dev->name) ;
1378 	memcpy(olympic_priv->olympic_laa, saddr->sa_data,dev->addr_len) ;
1380 	if (olympic_priv->olympic_message_level) {
1381 		printk(KERN_INFO "%s: MAC/LAA Set to  = %x.%x.%x.%x.%x.%x\n",dev->name, olympic_priv->olympic_laa[0],
1382 		olympic_priv->olympic_laa[1], olympic_priv->olympic_laa[2],
1383 		olympic_priv->olympic_laa[3], olympic_priv->olympic_laa[4],
1384 		olympic_priv->olympic_laa[5]);
/*
 * olympic_arb_cmd - handle an adapter-to-host ARB command.
 *
 * Two ARB commands are handled: ARB_RECEIVE_DATA delivers a MAC frame
 * from adapter LAP memory (the buffer chain is walked and copied into a
 * fresh skb, then acknowledged through the ASB), and ARB_LAN_CHANGE_STATUS
 * reports ring status changes (errors are logged; counter overflows
 * trigger READ.LOG / READ.SR.COUNTERS SRB commands).  Multi-byte fields in
 * the ARB are big-endian, hence the swab16() on readw() values.
 */
1390 static void olympic_arb_cmd(struct net_device *dev)
1392 	struct olympic_private *olympic_priv = (struct olympic_private *) dev->priv;
1393 	u8 __iomem *olympic_mmio=olympic_priv->olympic_mmio;
1394 	u8 __iomem *arb_block, *asb_block, *srb  ;
1396 	u16 frame_len, buffer_len ;
1397 	struct sk_buff *mac_frame ;
1398 	u8 __iomem *buf_ptr ;
1399 	u8 __iomem *frame_data ;
1401 	u16 lan_status = 0, lan_status_diff ; /* Initialize to stop compiler warning */
1406 	arb_block = (olympic_priv->olympic_lap + olympic_priv->arb) ;
1407 	asb_block = (olympic_priv->olympic_lap + olympic_priv->asb) ;
1408 	srb = (olympic_priv->olympic_lap + olympic_priv->srb) ;
1410 	if (readb(arb_block+0) == ARB_RECEIVE_DATA) { /* Receive.data, MAC frames */
		/* ARB layout: byte 8 = header length, bytes 10-11 = frame length,
		 * bytes 6-7 = offset of the first receive buffer in LAP space. */
1412 		header_len = readb(arb_block+8) ; /* 802.5 Token-Ring Header Length */
1413 		frame_len = swab16(readw(arb_block + 10)) ;
1415 		buff_off = swab16(readw(arb_block + 6)) ;
1417 		buf_ptr = olympic_priv->olympic_lap + buff_off ;
		/* Debug dump of the first buffer's frame bytes. */
1422 		frame_data = buf_ptr+offsetof(struct mac_receive_buffer,frame_data) ;
1424 		for (i=0 ;  i < 14 ; i++) {
1425 			printk("Loc %d = %02x\n",i,readb(frame_data + i));
1428 		printk("next %04x, fs %02x, len %04x \n",readw(buf_ptr+offsetof(struct mac_receive_buffer,next)), readb(buf_ptr+offsetof(struct mac_receive_buffer,frame_status)), readw(buf_ptr+offsetof(struct mac_receive_buffer,buffer_length)));
1431 		mac_frame = dev_alloc_skb(frame_len) ;
1433 			printk(KERN_WARNING "%s: Memory squeeze, dropping frame.\n", dev->name);
1437 		/* Walk the buffer chain, creating the frame */
1440 			frame_data = buf_ptr+offsetof(struct mac_receive_buffer,frame_data) ;
1441 			buffer_len = swab16(readw(buf_ptr+offsetof(struct mac_receive_buffer,buffer_length)));
1442 			memcpy_fromio(skb_put(mac_frame, buffer_len), frame_data , buffer_len ) ;
1443 			next_ptr=readw(buf_ptr+offsetof(struct mac_receive_buffer,next));
1444 		} while (next_ptr && (buf_ptr=olympic_priv->olympic_lap + ntohs(next_ptr)));
1446 		if (olympic_priv->olympic_network_monitor) {
1447 			struct trh_hdr *mac_hdr ;
1448 			printk(KERN_WARNING "%s: Received MAC Frame, details: \n",dev->name) ;
1449 			mac_hdr = (struct trh_hdr *)mac_frame->data ;
1450 			printk(KERN_WARNING "%s: MAC Frame Dest. Addr: %02x:%02x:%02x:%02x:%02x:%02x \n", dev->name , mac_hdr->daddr[0], mac_hdr->daddr[1], mac_hdr->daddr[2], mac_hdr->daddr[3], mac_hdr->daddr[4], mac_hdr->daddr[5]) ;
1451 			printk(KERN_WARNING "%s: MAC Frame Srce. Addr: %02x:%02x:%02x:%02x:%02x:%02x \n", dev->name , mac_hdr->saddr[0], mac_hdr->saddr[1], mac_hdr->saddr[2], mac_hdr->saddr[3], mac_hdr->saddr[4], mac_hdr->saddr[5]) ;
1453 		mac_frame->dev = dev ;
1454 		mac_frame->protocol = tr_type_trans(mac_frame,dev);
1455 		netif_rx(mac_frame) ;
1456 		dev->last_rx = jiffies;
1459 		/* Now tell the card we have dealt with the received frame */
1461 		/* Set LISR Bit 1 */
1462 		writel(LISR_ARB_FREE,olympic_priv->olympic_mmio + LISR_SUM);
1464 		/* Is the ASB free ? */
1466 		if (readb(asb_block + 2) != 0xff) {
			/* ASB busy: defer the acknowledgement to olympic_asb_bh(). */
1467 			olympic_priv->asb_queued = 1 ;
1468 			writel(LISR_ASB_FREE_REQ,olympic_priv->olympic_mmio+LISR_SUM);
1470 			/* Drop out and wait for the bottom half to be run */
		/* ASB free: acknowledge the frame immediately. */
1473 		writeb(ASB_RECEIVE_DATA,asb_block); /* Receive data */
1474 		writeb(OLYMPIC_CLEAR_RET_CODE,asb_block+2); /* Necessary ?? */
1475 		writeb(readb(arb_block+6),asb_block+6); /* Must send the address back to the adapter */
1476 		writeb(readb(arb_block+7),asb_block+7); /* To let it know we have dealt with the data */
1478 		writel(LISR_ASB_REPLY | LISR_ASB_FREE_REQ,olympic_priv->olympic_mmio+LISR_SUM);
1480 		olympic_priv->asb_queued = 2 ;
1484 	} else if (readb(arb_block) == ARB_LAN_CHANGE_STATUS) { /* Lan.change.status */
1485 		lan_status = swab16(readw(arb_block+6));
1486 		fdx_prot_error = readb(arb_block+8) ;
1488 		/* Issue ARB Free */
1489 		writel(LISR_ARB_FREE,olympic_priv->olympic_mmio+LISR_SUM);
		/* Only the bits that changed since the last report matter. */
1491 		lan_status_diff = olympic_priv->olympic_lan_status ^ lan_status ;
1493 		if (lan_status_diff & (LSC_LWF | LSC_ARW | LSC_FPE | LSC_RR) ) {
1494 			if (lan_status_diff & LSC_LWF)
1495 					printk(KERN_WARNING "%s: Short circuit detected on the lobe\n",dev->name);
1496 			if (lan_status_diff & LSC_ARW)
1497 					printk(KERN_WARNING "%s: Auto removal error\n",dev->name);
1498 			if (lan_status_diff & LSC_FPE)
1499 					printk(KERN_WARNING "%s: FDX Protocol Error\n",dev->name);
1500 			if (lan_status_diff & LSC_RR)
1501 					printk(KERN_WARNING "%s: Force remove MAC frame received\n",dev->name);
1503 			/* Adapter has been closed by the hardware */
1505 			/* reset tx/rx fifo's and busmaster logic */
1507 			writel(readl(olympic_mmio+BCTL)|(3<<13),olympic_mmio+BCTL);
1509 			writel(readl(olympic_mmio+BCTL)&~(3<<13),olympic_mmio+BCTL);
1510 			netif_stop_queue(dev);
1511 			olympic_priv->srb = readw(olympic_priv->olympic_lap + LAPWWO) ;
1512 			printk(KERN_WARNING "%s: Adapter has been closed \n", dev->name) ;
1513 		} /* If serious error */
1515 		if (olympic_priv->olympic_message_level) {
1516 			if (lan_status_diff & LSC_SIG_LOSS)
1517 					printk(KERN_WARNING "%s: No receive signal detected \n", dev->name) ;
1518 			if (lan_status_diff & LSC_HARD_ERR)
1519 					printk(KERN_INFO "%s: Beaconing \n",dev->name);
1520 			if (lan_status_diff & LSC_SOFT_ERR)
1521 					printk(KERN_WARNING "%s: Adapter transmitted Soft Error Report Mac Frame \n",dev->name);
1522 			if (lan_status_diff & LSC_TRAN_BCN)
1523 					printk(KERN_INFO "%s: We are tranmitting the beacon, aaah\n",dev->name);
1524 			if (lan_status_diff & LSC_SS)
1525 					printk(KERN_INFO "%s: Single Station on the ring \n", dev->name);
1526 			if (lan_status_diff & LSC_RING_REC)
1527 					printk(KERN_INFO "%s: Ring recovery ongoing\n",dev->name);
1528 			if (lan_status_diff & LSC_FDX_MODE)
1529 					printk(KERN_INFO "%s: Operating in FDX mode\n",dev->name);
		/* Counter overflow: ask the adapter for its error log. */
1532 		if (lan_status_diff & LSC_CO) {
1534 				if (olympic_priv->olympic_message_level)
1535 					printk(KERN_INFO "%s: Counter Overflow \n", dev->name);
1537 				/* Issue READ.LOG command */
1539 				writeb(SRB_READ_LOG, srb);
1541 				writeb(OLYMPIC_CLEAR_RET_CODE,srb+2);
1546 				olympic_priv->srb_queued=2; /* Can't sleep, use srb_bh */
1548 				writel(LISR_SRB_CMD,olympic_mmio+LISR_SUM);
1552 		if (lan_status_diff & LSC_SR_CO) {
1554 			if (olympic_priv->olympic_message_level)
1555 				printk(KERN_INFO "%s: Source routing counters overflow\n", dev->name);
1557 			/* Issue a READ.SR.COUNTERS */
1559 			writeb(SRB_READ_SR_COUNTERS,srb);
1561 			writeb(OLYMPIC_CLEAR_RET_CODE,srb+2);
1564 			olympic_priv->srb_queued=2; /* Can't sleep, use srb_bh */
1566 			writel(LISR_SRB_CMD,olympic_mmio+LISR_SUM);
1570 		olympic_priv->olympic_lan_status = lan_status ;
1572 	}  /* Lan.change.status */
1574 		printk(KERN_WARNING "%s: Unknown arb command \n", dev->name);
/*
 * olympic_asb_bh - bottom half for the ASB (adapter status block).
 *
 * Runs in two phases keyed off asb_queued: phase 1 (ASB was busy when
 * olympic_arb_cmd() wanted it) sends the deferred receive-data
 * acknowledgement; phase 2 checks the adapter's return code for that
 * acknowledgement and logs any error.
 */
1577 static void olympic_asb_bh(struct net_device *dev)
1579 	struct olympic_private *olympic_priv = (struct olympic_private *) dev->priv ;
1580 	u8 __iomem *arb_block, *asb_block ;
1582 	arb_block = (olympic_priv->olympic_lap + olympic_priv->arb) ;
1583 	asb_block = (olympic_priv->olympic_lap + olympic_priv->asb) ;
1585 	if (olympic_priv->asb_queued == 1) {   /* Dropped through the first time */
1587 		writeb(ASB_RECEIVE_DATA,asb_block); /* Receive data */
1588 		writeb(OLYMPIC_CLEAR_RET_CODE,asb_block+2); /* Necessary ?? */
1589 		writeb(readb(arb_block+6),asb_block+6); /* Must send the address back to the adapter */
1590 		writeb(readb(arb_block+7),asb_block+7); /* To let it know we have dealt with the data */
1592 		writel(LISR_ASB_REPLY | LISR_ASB_FREE_REQ,olympic_priv->olympic_mmio+LISR_SUM);
1593 		olympic_priv->asb_queued = 2 ;
1598 	if (olympic_priv->asb_queued == 2) {
		/* Inspect the adapter's return code for the acknowledgement. */
1599 		switch (readb(asb_block+2)) {
1601 				printk(KERN_WARNING "%s: Unrecognized command code \n", dev->name);
1604 				printk(KERN_WARNING "%s: Unrecognized buffer address \n", dev->name);
1607 				/* Valid response, everything should be ok again */
1610 				printk(KERN_WARNING "%s: Invalid return code in asb\n",dev->name);
1614 	olympic_priv->asb_queued = 0 ;
/*
 * olympic_change_mtu - validate and apply a new MTU.
 * Recomputes pkt_buf_sz (MTU plus Token Ring header) used when mapping
 * RX buffers; ring-speed-specific limits are checked (4 Mbps case
 * visible here, the bounds themselves are elided -- see full source).
 */
1617 static int olympic_change_mtu(struct net_device *dev, int mtu)
1619 	struct olympic_private *olympic_priv = (struct olympic_private *) dev->priv;
1622 	if (olympic_priv->olympic_ring_speed == 4)
1633 	olympic_priv->pkt_buf_sz = mtu + TR_HLEN ;
/*
 * olympic_proc_info - /proc read handler for the per-adapter entry.
 *
 * Formats the adapter address table and the Token Ring parameters table
 * (read live from the adapter's LAP window via readb/readw) into the
 * caller-supplied buffer, then applies the classic proc_info start/offset
 * windowing.  16-bit adapter fields are big-endian, hence swab16().
 */
1638 static int olympic_proc_info(char *buffer, char **start, off_t offset, int length, int *eof, void *data)
1640 	struct net_device *dev = (struct net_device *)data ;
1641 	struct olympic_private *olympic_priv=(struct olympic_private *)dev->priv;
1642 	u8 __iomem *oat = (olympic_priv->olympic_lap + olympic_priv->olympic_addr_table_addr) ;
1643 	u8 __iomem *opt = (olympic_priv->olympic_lap + olympic_priv->olympic_parms_addr) ;
	/* Section 1: adapter address table (node + functional addresses). */
1649 	size = sprintf(buffer,
1650 		"IBM Pit/Pit-Phy/Olympic Chipset Token Ring Adapter %s\n",dev->name);
1651 	size += sprintf(buffer+size, "\n%6s: Adapter Address   : Node Address      : Functional Addr\n",
1654 	size += sprintf(buffer+size, "%6s: %02x:%02x:%02x:%02x:%02x:%02x : %02x:%02x:%02x:%02x:%02x:%02x : %02x:%02x:%02x:%02x\n",
1662 	   readb(oat+offsetof(struct olympic_adapter_addr_table,node_addr)),
1663 	   readb(oat+offsetof(struct olympic_adapter_addr_table,node_addr)+1),
1664 	   readb(oat+offsetof(struct olympic_adapter_addr_table,node_addr)+2),
1665 	   readb(oat+offsetof(struct olympic_adapter_addr_table,node_addr)+3),
1666 	   readb(oat+offsetof(struct olympic_adapter_addr_table,node_addr)+4),
1667 	   readb(oat+offsetof(struct olympic_adapter_addr_table,node_addr)+5),
1668 	   readb(oat+offsetof(struct olympic_adapter_addr_table,func_addr)),
1669 	   readb(oat+offsetof(struct olympic_adapter_addr_table,func_addr)+1),
1670 	   readb(oat+offsetof(struct olympic_adapter_addr_table,func_addr)+2),
1671 	   readb(oat+offsetof(struct olympic_adapter_addr_table,func_addr)+3));
	/* Section 2: Token Ring parameters table. */
1673 	size += sprintf(buffer+size, "\n%6s: Token Ring Parameters Table:\n", dev->name);
1675 	size += sprintf(buffer+size, "%6s: Physical Addr : Up Node Address   : Poll Address      : AccPri : Auth Src : Att Code :\n",
1678 	size += sprintf(buffer+size, "%6s: %02x:%02x:%02x:%02x   : %02x:%02x:%02x:%02x:%02x:%02x : %02x:%02x:%02x:%02x:%02x:%02x :  %04x  :  %04x    :  %04x    :\n",
1680 	   readb(opt+offsetof(struct olympic_parameters_table, phys_addr)),
1681 	   readb(opt+offsetof(struct olympic_parameters_table, phys_addr)+1),
1682 	   readb(opt+offsetof(struct olympic_parameters_table, phys_addr)+2),
1683 	   readb(opt+offsetof(struct olympic_parameters_table, phys_addr)+3),
1684 	   readb(opt+offsetof(struct olympic_parameters_table, up_node_addr)),
1685 	   readb(opt+offsetof(struct olympic_parameters_table, up_node_addr)+1),
1686 	   readb(opt+offsetof(struct olympic_parameters_table, up_node_addr)+2),
1687 	   readb(opt+offsetof(struct olympic_parameters_table, up_node_addr)+3),
1688 	   readb(opt+offsetof(struct olympic_parameters_table, up_node_addr)+4),
1689 	   readb(opt+offsetof(struct olympic_parameters_table, up_node_addr)+5),
1690 	   readb(opt+offsetof(struct olympic_parameters_table, poll_addr)),
1691 	   readb(opt+offsetof(struct olympic_parameters_table, poll_addr)+1),
1692 	   readb(opt+offsetof(struct olympic_parameters_table, poll_addr)+2),
1693 	   readb(opt+offsetof(struct olympic_parameters_table, poll_addr)+3),
1694 	   readb(opt+offsetof(struct olympic_parameters_table, poll_addr)+4),
1695 	   readb(opt+offsetof(struct olympic_parameters_table, poll_addr)+5),
1696 	   swab16(readw(opt+offsetof(struct olympic_parameters_table, acc_priority))),
1697 	   swab16(readw(opt+offsetof(struct olympic_parameters_table, auth_source_class))),
1698 	   swab16(readw(opt+offsetof(struct olympic_parameters_table, att_code))));
	/* Section 3: ring status / error correlation fields. */
1700 	size += sprintf(buffer+size, "%6s: Source Address    : Bcn T : Maj. V : Lan St : Lcl Rg : Mon Err : Frame Correl : \n",
1703 	size += sprintf(buffer+size, "%6s: %02x:%02x:%02x:%02x:%02x:%02x : %04x  : %04x   : %04x   : %04x   : %04x    :     %04x     : \n",
1705 	   readb(opt+offsetof(struct olympic_parameters_table, source_addr)),
1706 	   readb(opt+offsetof(struct olympic_parameters_table, source_addr)+1),
1707 	   readb(opt+offsetof(struct olympic_parameters_table, source_addr)+2),
1708 	   readb(opt+offsetof(struct olympic_parameters_table, source_addr)+3),
1709 	   readb(opt+offsetof(struct olympic_parameters_table, source_addr)+4),
1710 	   readb(opt+offsetof(struct olympic_parameters_table, source_addr)+5),
1711 	   swab16(readw(opt+offsetof(struct olympic_parameters_table, beacon_type))),
1712 	   swab16(readw(opt+offsetof(struct olympic_parameters_table, major_vector))),
1713 	   swab16(readw(opt+offsetof(struct olympic_parameters_table, lan_status))),
1714 	   swab16(readw(opt+offsetof(struct olympic_parameters_table, local_ring))),
1715 	   swab16(readw(opt+offsetof(struct olympic_parameters_table, mon_error))),
1716 	   swab16(readw(opt+offsetof(struct olympic_parameters_table, frame_correl))));
	/* Section 4: beacon details and NAUN addresses. */
1718 	size += sprintf(buffer+size, "%6s: Beacon Details :  Tx  :  Rx  : NAUN Node Address : NAUN Node Phys : \n",
1721 	size += sprintf(buffer+size, "%6s:                :  %02x  :  %02x  : %02x:%02x:%02x:%02x:%02x:%02x : %02x:%02x:%02x:%02x    : \n",
1723 	   swab16(readw(opt+offsetof(struct olympic_parameters_table, beacon_transmit))),
1724 	   swab16(readw(opt+offsetof(struct olympic_parameters_table, beacon_receive))),
1725 	   readb(opt+offsetof(struct olympic_parameters_table, beacon_naun)),
1726 	   readb(opt+offsetof(struct olympic_parameters_table, beacon_naun)+1),
1727 	   readb(opt+offsetof(struct olympic_parameters_table, beacon_naun)+2),
1728 	   readb(opt+offsetof(struct olympic_parameters_table, beacon_naun)+3),
1729 	   readb(opt+offsetof(struct olympic_parameters_table, beacon_naun)+4),
1730 	   readb(opt+offsetof(struct olympic_parameters_table, beacon_naun)+5),
1731 	   readb(opt+offsetof(struct olympic_parameters_table, beacon_phys)),
1732 	   readb(opt+offsetof(struct olympic_parameters_table, beacon_phys)+1),
1733 	   readb(opt+offsetof(struct olympic_parameters_table, beacon_phys)+2),
1734 	   readb(opt+offsetof(struct olympic_parameters_table, beacon_phys)+3));
	/* Standard proc_info windowing: return only the requested slice. */
1742 	*start=buffer+(offset-begin);	/* Start of wanted data */
1743 	len-=(offset-begin);		/* Start slop */
1745 		len=length;		/* Ending slop */
/*
 * olympic_remove_one - PCI removal hook.
 *
 * Deletes the per-adapter /proc entry (when network monitoring was on),
 * unregisters the net device, unmaps both MMIO regions, releases the PCI
 * regions and clears the drvdata pointer.
 */
1749 static void __devexit olympic_remove_one(struct pci_dev *pdev)
1751 	struct net_device *dev = pci_get_drvdata(pdev) ;
1752 	struct olympic_private *olympic_priv=(struct olympic_private *)dev->priv;
1754 	if (olympic_priv->olympic_network_monitor) {
1755 		char proc_name[20] ;
1756 		strcpy(proc_name,"net/olympic_") ;
1757 		strcat(proc_name,dev->name) ;
1758 		remove_proc_entry(proc_name,NULL);
1760 	unregister_netdev(dev) ;
1761 	iounmap(olympic_priv->olympic_mmio) ;
1762 	iounmap(olympic_priv->olympic_lap) ;
1763 	pci_release_regions(pdev) ;
1764 	pci_set_drvdata(pdev,NULL) ;
/* PCI driver descriptor: binds olympic_probe/olympic_remove_one to the
 * device IDs in olympic_pci_tbl. */
1768 static struct pci_driver olympic_driver = {
1770 	.id_table = 	olympic_pci_tbl,
1771 	.probe = 	olympic_probe,
1772 	.remove = 	__devexit_p(olympic_remove_one),
/* Module init: register the PCI driver (pci_module_init is the pre-2.6.x
 * wrapper for pci_register_driver). */
1775 static int __init olympic_pci_init(void)
1777 	return pci_module_init (&olympic_driver) ;
/* Module exit: unregister the PCI driver; per-device teardown happens in
 * olympic_remove_one(). */
1780 static void __exit olympic_pci_cleanup(void)
1782 	pci_unregister_driver(&olympic_driver) ;
1786 module_init(olympic_pci_init) ;
1787 module_exit(olympic_pci_cleanup) ;
1789 MODULE_LICENSE("GPL");