1 /* starfire.c: Linux device driver for the Adaptec Starfire network adapter. */
3 Written 1998-2000 by Donald Becker.
5 Current maintainer is Ion Badulescu <ionut@cs.columbia.edu>. Please
6 send all bug reports to me, and not to Donald Becker, as this code
7 has been heavily modified from Donald's original version.
9 This software may be used and distributed according to the terms of
10 the GNU General Public License (GPL), incorporated herein by reference.
11 Drivers based on or derived from this code fall under the GPL and must
12 retain the authorship, copyright and license notice. This file is not
13 a complete program and may only be used when the entire operating
14 system is licensed under the GPL.
16 The information below comes from Donald Becker's original driver:
18 The author may be reached as becker@scyld.com, or C/O
19 Scyld Computing Corporation
20 410 Severn Ave., Suite 210
23 Support and updates available at
24 http://www.scyld.com/network/starfire.html
26 -----------------------------------------------------------
28 Linux kernel-specific changes:
31 - Use PCI driver interface
36 - Merge Becker version 0.15
38 LK1.1.3 (Andrew Morton)
42 - Merge Becker version 1.03
44 LK1.2.1 (Ion Badulescu <ionut@cs.columbia.edu>)
45 - Support hardware Rx/Tx checksumming
46 - Use the GFP firmware taken from Adaptec's Netware driver
48 LK1.2.2 (Ion Badulescu)
51 LK1.2.3 (Ion Badulescu)
52 - Fix the flaky mdio interface
53 - More compat clean-ups
55 LK1.2.4 (Ion Badulescu)
56 - More 2.2.x initialization fixes
58 LK1.2.5 (Ion Badulescu)
59 - Several fixes from Manfred Spraul
61 LK1.2.6 (Ion Badulescu)
62 - Fixed ifup/ifdown/ifup problem in 2.4.x
64 LK1.2.7 (Ion Badulescu)
66 - Made more functions static and __init
68 LK1.2.8 (Ion Badulescu)
69 - Quell bogus error messages, inform about the Tx threshold
70 - Removed #ifdef CONFIG_PCI, this driver is PCI only
72 LK1.2.9 (Ion Badulescu)
73 - Merged Jeff Garzik's changes from 2.4.4-pre5
74 - Added 2.2.x compatibility stuff required by the above changes
76 LK1.2.9a (Ion Badulescu)
77 - More updates from Jeff Garzik
79 LK1.3.0 (Ion Badulescu)
80 - Merged zerocopy support
82 LK1.3.1 (Ion Badulescu)
83 - Added ethtool support
84 - Added GPIO (media change) interrupt support
86 LK1.3.2 (Ion Badulescu)
87 - Fixed 2.2.x compatibility issues introduced in 1.3.1
88 - Fixed ethtool ioctl returning uninitialized memory
90 LK1.3.3 (Ion Badulescu)
91 - Initialize the TxMode register properly
92 - Don't dereference dev->priv after freeing it
94 LK1.3.4 (Ion Badulescu)
95 - Fixed initialization timing problems
96 - Fixed interrupt mask definitions
99 - ethtool NWAY_RST, GLINK, [GS]MSGLVL support
102 - Sparc64 support and fixes (Ion Badulescu)
103 - Better stats and error handling (Ion Badulescu)
104 - Use new pci_set_mwi() PCI API function (jgarzik)
106 LK1.3.7 (Ion Badulescu)
107 - minimal implementation of tx_timeout()
108 - correctly shutdown the Rx/Tx engines in netdev_close()
109 - added calls to netif_carrier_on/off
110 (patch from Stefan Rompf <srompf@isg.de>)
113 LK1.3.8 (Ion Badulescu)
114 - adjust DMA burst size on sparc64
116 - reworked zerocopy support for 64-bit buffers
117 - working and usable interrupt mitigation/latency
118 - reduced Tx interrupt frequency for lower interrupt overhead
120 LK1.3.9 (Ion Badulescu)
121 - bugfix for mcast filter
122 - enable the right kind of Tx interrupts (TxDMADone, not TxDone)
124 LK1.4.0 (Ion Badulescu)
127 LK1.4.1 (Ion Badulescu)
128 - flush PCI posting buffers after disabling Rx interrupts
129 - put the chip to a D3 slumber on driver unload
130 - added config option to enable/disable NAPI
132 TODO: bugfixes (no bugs known as of right now)
135 #define DRV_NAME "starfire"
136 #define DRV_VERSION "1.03+LK1.4.1"
137 #define DRV_RELDATE "February 10, 2002"
139 #include <linux/config.h>
140 #include <linux/version.h>
141 #include <linux/module.h>
142 #include <linux/kernel.h>
143 #include <linux/pci.h>
144 #include <linux/netdevice.h>
145 #include <linux/etherdevice.h>
146 #include <linux/init.h>
147 #include <linux/delay.h>
148 #include <asm/processor.h> /* Processor type for cache alignment. */
149 #include <asm/uaccess.h>
153 * Adaptec's license for their drivers (which is where I got the
154 * firmware files) does not allow one to redistribute them. Thus, we can't
155 * include the firmware with this driver.
157 * However, should a legal-to-distribute firmware become available,
158 * the driver developer would need only to obtain the firmware in the
159 * form of a C header file.
160 * Once that's done, the #undef below must be changed into a #define
161 * for this driver to really use the firmware. Note that Rx/Tx
162 * hardware TCP checksumming is not possible without the firmware.
164 * WANTED: legal firmware to include with this GPL'd driver.
168 * The current frame processor firmware fails to checksum a fragment
169 * of length 1. If and when this is fixed, the #define below can be removed.
171 #define HAS_BROKEN_FIRMWARE
173 * Define this if using the driver with the zero-copy patch
175 #if defined(HAS_FIRMWARE) && defined(MAX_SKB_FRAGS)
180 #include "starfire_firmware.h"
181 #endif /* HAS_FIRMWARE */
183 #if defined(CONFIG_VLAN_8021Q) || defined(CONFIG_VLAN_8021Q_MODULE)
187 #ifndef CONFIG_ADAPTEC_STARFIRE_NAPI
188 #undef HAVE_NETDEV_POLL
191 /* The user-configurable values.
192 These may be modified when a driver module is loaded.*/
194 /* Used for tuning interrupt latency vs. overhead. */
195 static int intr_latency;
196 static int small_frames;
198 static int debug = 1; /* 1 normal messages, 0 quiet .. 7 verbose. */
199 static int max_interrupt_work = 20;
201 /* Maximum number of multicast addresses to filter (vs. rx-all-multicast).
202 The Starfire has a 512 element hash table based on the Ethernet CRC. */
203 static int multicast_filter_limit = 512;
204 /* Whether to do TCP/UDP checksums in hardware */
206 static int enable_hw_cksum = 1;
208 static int enable_hw_cksum = 0;
211 #define PKT_BUF_SZ 1536 /* Size of each temporary Rx buffer.*/
213 * Set the copy breakpoint for the copy-only-tiny-frames scheme.
214 * Setting to > 1518 effectively disables this feature.
217 * The ia64 doesn't allow for unaligned loads even of integers being
218 * misaligned on a 2 byte boundary. Thus always force copying of
219 * packets as the starfire doesn't allow for misaligned DMAs ;-(
222 * The Alpha and the Sparc don't like unaligned loads, either. On Sparc64,
223 * at least, having unaligned frames leads to a rather serious performance
226 #if defined(__ia64__) || defined(__alpha__) || defined(__sparc__)
227 static int rx_copybreak = PKT_BUF_SZ;
229 static int rx_copybreak /* = 0 */;
232 /* PCI DMA burst size -- on sparc64 we want to force it to 64 bytes, on the others the default of 128 is fine. */
234 #define DMA_BURST_SIZE 64
236 #define DMA_BURST_SIZE 128
239 /* Used to pass the media type, etc.
240 Both 'options[]' and 'full_duplex[]' exist for driver interoperability.
241 The media type is usually passed in 'options[]'.
242 These variables are deprecated, use ethtool instead. -Ion
244 #define MAX_UNITS 8 /* More are supported, limit only on options */
245 static int options[MAX_UNITS] = {0, };
246 static int full_duplex[MAX_UNITS] = {0, };
248 /* Operational parameters that are set at compile time. */
250 /* The "native" ring sizes are either 256 or 2048.
251 However in some modes a descriptor may be marked to wrap the ring earlier.
253 #define RX_RING_SIZE 256
254 #define TX_RING_SIZE 32
255 /* The completion queues are fixed at 1024 entries i.e. 4K or 8KB. */
256 #define DONE_Q_SIZE 1024
257 /* All queues must be aligned on a 256-byte boundary */
258 #define QUEUE_ALIGN 256
260 #if RX_RING_SIZE > 256
261 #define RX_Q_ENTRIES Rx2048QEntries
263 #define RX_Q_ENTRIES Rx256QEntries
266 /* Operational parameters that usually are not changed. */
267 /* Time in jiffies before concluding the transmitter is hung. */
268 #define TX_TIMEOUT (2 * HZ)
272 * We need a much better method to determine if dma_addr_t is 64-bit.
274 #if (defined(__i386__) && defined(CONFIG_HIGHMEM) && (LINUX_VERSION_CODE > 0x20500 || defined(CONFIG_HIGHMEM64G))) || defined(__x86_64__) || defined (__ia64__) || defined(__mips64__) || (defined(__mips__) && defined(CONFIG_HIGHMEM) && defined(CONFIG_64BIT_PHYS_ADDR))
275 /* 64-bit dma_addr_t */
276 #define ADDR_64BITS /* This chip uses 64 bit addresses. */
277 #define cpu_to_dma(x) cpu_to_le64(x)
278 #define dma_to_cpu(x) le64_to_cpu(x)
279 #define RX_DESC_Q_ADDR_SIZE RxDescQAddr64bit
280 #define TX_DESC_Q_ADDR_SIZE TxDescQAddr64bit
281 #define RX_COMPL_Q_ADDR_SIZE RxComplQAddr64bit
282 #define TX_COMPL_Q_ADDR_SIZE TxComplQAddr64bit
283 #define RX_DESC_ADDR_SIZE RxDescAddr64bit
284 #else /* 32-bit dma_addr_t */
285 #define cpu_to_dma(x) cpu_to_le32(x)
286 #define dma_to_cpu(x) le32_to_cpu(x)
287 #define RX_DESC_Q_ADDR_SIZE RxDescQAddr32bit
288 #define TX_DESC_Q_ADDR_SIZE TxDescQAddr32bit
289 #define RX_COMPL_Q_ADDR_SIZE RxComplQAddr32bit
290 #define TX_COMPL_Q_ADDR_SIZE TxComplQAddr32bit
291 #define RX_DESC_ADDR_SIZE RxDescAddr32bit
295 #define skb_first_frag_len(skb) skb_headlen(skb)
296 #define skb_num_frags(skb) (skb_shinfo(skb)->nr_frags + 1)
297 #else /* not MAX_SKB_FRAGS */
298 #define skb_first_frag_len(skb) (skb->len)
299 #define skb_num_frags(skb) 1
300 #endif /* not MAX_SKB_FRAGS */
302 /* 2.2.x compatibility code */
303 #if LINUX_VERSION_CODE < 0x20300
305 #include "starfire-kcomp22.h"
307 #else /* LINUX_VERSION_CODE > 0x20300 */
309 #include <linux/crc32.h>
310 #include <linux/ethtool.h>
311 #include <linux/mii.h>
313 #include <linux/if_vlan.h>
/*
 * Tx-watchdog compat shims for the LINUX_VERSION_CODE > 0x20300 branch
 * (the 2.2.x variants live in starfire-kcomp22.h).  init_tx_timer()
 * installs the timeout handler and watchdog interval on the net device;
 * kick_tx_timer() is a no-op in this branch.
 *
 * Both macros are wrapped in do { } while (0) so each expands to exactly
 * one statement and stays safe in unbraced if/else bodies -- the previous
 * two-statement form left the second assignment outside the condition.
 */
#define init_tx_timer(dev, func, timeout) \
	do { \
		(dev)->tx_timeout = (func); \
		(dev)->watchdog_timeo = (timeout); \
	} while (0)
#define kick_tx_timer(dev, func, timeout) \
	do { } while (0)
/* More compat shims for the LINUX_VERSION_CODE > 0x20300 branch: the
 * interface start/stop helpers are not needed here, so they expand to
 * nothing (the 2.2.x versions come from starfire-kcomp22.h). */
#define netif_start_if(dev)
#define netif_stop_if(dev)
/* Map the old slot-name lookup onto the modern pci_name() accessor. */
#define PCI_SLOT_NAME(pci_dev) pci_name(pci_dev)
325 #endif /* LINUX_VERSION_CODE > 0x20300 */
327 #ifdef HAVE_NETDEV_POLL
/*
 * Attach the rx polling callback and its weight to the net device
 * (HAVE_NETDEV_POLL builds only; the non-poll build defines this as a
 * no-op).  Wrapped in do { } while (0) so the two assignments expand as
 * a single statement and stay safe in unbraced if/else bodies -- the
 * previous form left the weight assignment outside the condition.
 */
#define init_poll(dev) \
	do { \
		(dev)->poll = &netdev_poll; \
		(dev)->weight = max_interrupt_work; \
	} while (0)
331 #define netdev_rx(dev, ioaddr) \
334 if (netif_rx_schedule_prep(dev)) { \
335 __netif_rx_schedule(dev); \
336 intr_enable = readl(ioaddr + IntrEnable); \
337 intr_enable &= ~(IntrRxDone | IntrRxEmpty); \
338 writel(intr_enable, ioaddr + IntrEnable); \
339 readl(ioaddr + IntrEnable); /* flush PCI posting buffers */ \
341 /* Paranoia check */ \
342 intr_enable = readl(ioaddr + IntrEnable); \
343 if (intr_enable & (IntrRxDone | IntrRxEmpty)) { \
344 printk("%s: interrupt while in polling mode!\n", dev->name); \
345 intr_enable &= ~(IntrRxDone | IntrRxEmpty); \
346 writel(intr_enable, ioaddr + IntrEnable); \
350 #define netdev_receive_skb(skb) netif_receive_skb(skb)
351 #define vlan_netdev_receive_skb(skb, vlgrp, vlid) vlan_hwaccel_receive_skb(skb, vlgrp, vlid)
352 static int netdev_poll(struct net_device *dev, int *budget);
353 #else /* not HAVE_NETDEV_POLL */
354 #define init_poll(dev)
355 #define netdev_receive_skb(skb) netif_rx(skb)
356 #define vlan_netdev_receive_skb(skb, vlgrp, vlid) vlan_hwaccel_rx(skb, vlgrp, vlid)
357 #define netdev_rx(dev, ioaddr) \
359 int quota = np->dirty_rx + RX_RING_SIZE - np->cur_rx; \
360 __netdev_rx(dev, "a);\
362 #endif /* not HAVE_NETDEV_POLL */
363 /* end of compatibility code */
366 /* These identify the driver base version and may not be removed. */
367 static char version[] __devinitdata =
368 KERN_INFO "starfire.c:v1.03 7/26/2000 Written by Donald Becker <becker@scyld.com>\n"
369 KERN_INFO " (unofficial 2.2/2.4 kernel port, version " DRV_VERSION ", " DRV_RELDATE ")\n";
371 MODULE_AUTHOR("Donald Becker <becker@scyld.com>");
372 MODULE_DESCRIPTION("Adaptec Starfire Ethernet driver");
373 MODULE_LICENSE("GPL");
/* Module load-time parameters.  Format strings: "i" = one int,
 * "1-" __MODULE_STRING(MAX_UNITS) "i" = per-card array of up to
 * MAX_UNITS (8) ints. */
MODULE_PARM(max_interrupt_work, "i");
MODULE_PARM(mtu, "i");
MODULE_PARM(debug, "i");
MODULE_PARM(rx_copybreak, "i");
MODULE_PARM(intr_latency, "i");
MODULE_PARM(small_frames, "i");
MODULE_PARM(options, "1-" __MODULE_STRING(MAX_UNITS) "i");
MODULE_PARM(full_duplex, "1-" __MODULE_STRING(MAX_UNITS) "i");
MODULE_PARM(enable_hw_cksum, "i");
/* Human-readable descriptions reported by modinfo. */
MODULE_PARM_DESC(max_interrupt_work, "Maximum events handled per interrupt");
MODULE_PARM_DESC(mtu, "MTU (all boards)");
MODULE_PARM_DESC(debug, "Debug level (0-6)");
MODULE_PARM_DESC(rx_copybreak, "Copy breakpoint for copy-only-tiny-frames");
MODULE_PARM_DESC(intr_latency, "Maximum interrupt latency, in microseconds");
MODULE_PARM_DESC(small_frames, "Maximum size of receive frames that bypass interrupt latency (0,64,128,256,512)");
MODULE_PARM_DESC(options, "Deprecated: Bits 0-3: media type, bit 17: full duplex");
MODULE_PARM_DESC(full_duplex, "Deprecated: Forced full-duplex setting (0/1)");
MODULE_PARM_DESC(enable_hw_cksum, "Enable/disable hardware cksum support (0/1)");
397 I. Board Compatibility
399 This driver is for the Adaptec 6915 "Starfire" 64 bit PCI Ethernet adapter.
401 II. Board-specific settings
403 III. Driver operation
407 The Starfire hardware uses multiple fixed-size descriptor queues/rings. The
408 ring sizes are set fixed by the hardware, but may optionally be wrapped
409 earlier by the END bit in the descriptor.
410 This driver uses that hardware queue size for the Rx ring, where a large
number of entries has no ill effect beyond increasing the potential backlog.
412 The Tx ring is wrapped with the END bit, since a large hardware Tx queue
413 disables the queue layer priority ordering and we have no mechanism to
414 utilize the hardware two-level priority queue. When modifying the
415 RX/TX_RING_SIZE pay close attention to page sizes and the ring-empty warning
418 IIIb/c. Transmit/Receive Structure
420 See the Adaptec manual for the many possible structures, and options for
421 each structure. There are far too many to document all of them here.
423 For transmit this driver uses type 0/1 transmit descriptors (depending
424 on the 32/64 bitness of the architecture), and relies on automatic
425 minimum-length padding. It does not use the completion queue
426 consumer index, but instead checks for non-zero status entries.
428 For receive this driver uses type 0/1/2/3 receive descriptors. The driver
429 allocates full frame size skbuffs for the Rx ring buffers, so all frames
430 should fit in a single descriptor. The driver does not use the completion
431 queue consumer index, but instead checks for non-zero status entries.
433 When an incoming frame is less than RX_COPYBREAK bytes long, a fresh skbuff
434 is allocated and the frame is copied to the new skbuff. When the incoming
435 frame is larger, the skbuff is passed directly up the protocol stack.
436 Buffers consumed this way are replaced by newly allocated skbuffs in a later
439 A notable aspect of operation is that unaligned buffers are not permitted by
440 the Starfire hardware. Thus the IP header at offset 14 in an ethernet frame
isn't longword aligned, which may cause problems on some machines,
442 e.g. Alphas and IA64. For these architectures, the driver is forced to copy
443 the frame into a new skbuff unconditionally. Copied frames are put into the
444 skbuff at an offset of "+2", thus 16-byte aligning the IP header.
446 IIId. Synchronization
448 The driver runs as two independent, single-threaded flows of control. One
449 is the send-packet routine, which enforces single-threaded use by the
450 dev->tbusy flag. The other thread is the interrupt handler, which is single
451 threaded by the hardware and interrupt handling software.
453 The send packet thread has partial control over the Tx ring and the netif_queue
454 status. If the number of free Tx slots in the ring falls below a certain number
455 (currently hardcoded to 4), it signals the upper layer to stop the queue.
457 The interrupt handler has exclusive control over the Rx ring and records stats
458 from the Tx ring. After reaping the stats, it marks the Tx queue entry as
459 empty by incrementing the dirty_tx mark. Iff the netif_queue is stopped and the
number of free Tx slots is above the threshold, it signals the upper layer to
467 The Adaptec Starfire manuals, available only from Adaptec.
468 http://www.scyld.com/expert/100mbps.html
469 http://www.scyld.com/expert/NWay.html
473 - StopOnPerr is broken, don't enable
474 - Hardware ethernet padding exposes random data, perform software padding
475 instead (unverified -- works correctly for all the hardware I have)
481 enum chip_capability_flags {CanHaveMII=1, };
487 static struct pci_device_id starfire_pci_tbl[] = {
488 { 0x9004, 0x6915, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CH_6915 },
491 MODULE_DEVICE_TABLE(pci, starfire_pci_tbl);
493 /* A chip capabilities table, matching the CH_xxx entries in xxx_pci_tbl[] above. */
494 static struct chip_info {
497 } netdrv_tbl[] __devinitdata = {
498 { "Adaptec Starfire 6915", CanHaveMII },
502 /* Offsets to the device registers.
503 Unlike software-only systems, device drivers interact with complex hardware.
504 It's not useful to define symbolic names for every register bit in the
505 device. The name can only partially document the semantics and make
506 the driver longer and more difficult to read.
507 In general, only the important configuration values or bits changed
508 multiple times should be defined symbolically.
510 enum register_offsets {
511 PCIDeviceConfig=0x50040, GenCtrl=0x50070, IntrTimerCtrl=0x50074,
512 IntrClear=0x50080, IntrStatus=0x50084, IntrEnable=0x50088,
513 MIICtrl=0x52000, TxStationAddr=0x50120, EEPROMCtrl=0x51000,
514 GPIOCtrl=0x5008C, TxDescCtrl=0x50090,
515 TxRingPtr=0x50098, HiPriTxRingPtr=0x50094, /* Low and High priority. */
516 TxRingHiAddr=0x5009C, /* 64 bit address extension. */
517 TxProducerIdx=0x500A0, TxConsumerIdx=0x500A4,
519 CompletionHiAddr=0x500B4, TxCompletionAddr=0x500B8,
520 RxCompletionAddr=0x500BC, RxCompletionQ2Addr=0x500C0,
521 CompletionQConsumerIdx=0x500C4, RxDMACtrl=0x500D0,
522 RxDescQCtrl=0x500D4, RxDescQHiAddr=0x500DC, RxDescQAddr=0x500E0,
523 RxDescQIdx=0x500E8, RxDMAStatus=0x500F0, RxFilterMode=0x500F4,
524 TxMode=0x55000, VlanType=0x55064,
525 PerfFilterTable=0x56000, HashTable=0x56100,
526 TxGfpMem=0x58000, RxGfpMem=0x5a000,
530 * Bits in the interrupt status/mask registers.
531 * Warning: setting Intr[Ab]NormalSummary in the IntrEnable register
532 * enables all the interrupt sources that are or'ed into those status bits.
534 enum intr_status_bits {
535 IntrLinkChange=0xf0000000, IntrStatsMax=0x08000000,
536 IntrAbnormalSummary=0x02000000, IntrGeneralTimer=0x01000000,
537 IntrSoftware=0x800000, IntrRxComplQ1Low=0x400000,
538 IntrTxComplQLow=0x200000, IntrPCI=0x100000,
539 IntrDMAErr=0x080000, IntrTxDataLow=0x040000,
540 IntrRxComplQ2Low=0x020000, IntrRxDescQ1Low=0x010000,
541 IntrNormalSummary=0x8000, IntrTxDone=0x4000,
542 IntrTxDMADone=0x2000, IntrTxEmpty=0x1000,
543 IntrEarlyRxQ2=0x0800, IntrEarlyRxQ1=0x0400,
544 IntrRxQ2Done=0x0200, IntrRxQ1Done=0x0100,
545 IntrRxGFPDead=0x80, IntrRxDescQ2Low=0x40,
546 IntrNoTxCsum=0x20, IntrTxBadID=0x10,
547 IntrHiPriTxBadID=0x08, IntrRxGfp=0x04,
548 IntrTxGfp=0x02, IntrPCIPad=0x01,
550 IntrRxDone=IntrRxQ2Done | IntrRxQ1Done,
551 IntrRxEmpty=IntrRxDescQ1Low | IntrRxDescQ2Low,
552 IntrNormalMask=0xff00, IntrAbnormalMask=0x3ff00fe,
555 /* Bits in the RxFilterMode register. */
557 AcceptBroadcast=0x04, AcceptAllMulticast=0x02, AcceptAll=0x01,
558 AcceptMulticast=0x10, PerfectFilter=0x40, HashFilter=0x30,
559 PerfectFilterVlan=0x80, MinVLANPrio=0xE000, VlanMode=0x0200,
563 /* Bits in the TxMode register */
565 MiiSoftReset=0x8000, MIILoopback=0x4000,
566 TxFlowEnable=0x0800, RxFlowEnable=0x0400,
567 PadEnable=0x04, FullDuplex=0x02, HugeFrame=0x01,
570 /* Bits in the TxDescCtrl register. */
572 TxDescSpaceUnlim=0x00, TxDescSpace32=0x10, TxDescSpace64=0x20,
573 TxDescSpace128=0x30, TxDescSpace256=0x40,
574 TxDescType0=0x00, TxDescType1=0x01, TxDescType2=0x02,
575 TxDescType3=0x03, TxDescType4=0x04,
576 TxNoDMACompletion=0x08,
577 TxDescQAddr64bit=0x80, TxDescQAddr32bit=0,
578 TxHiPriFIFOThreshShift=24, TxPadLenShift=16,
579 TxDMABurstSizeShift=8,
582 /* Bits in the RxDescQCtrl register. */
584 RxBufferLenShift=16, RxMinDescrThreshShift=0,
585 RxPrefetchMode=0x8000, RxVariableQ=0x2000,
586 Rx2048QEntries=0x4000, Rx256QEntries=0,
587 RxDescAddr64bit=0x1000, RxDescAddr32bit=0,
588 RxDescQAddr64bit=0x0100, RxDescQAddr32bit=0,
589 RxDescSpace4=0x000, RxDescSpace8=0x100,
590 RxDescSpace16=0x200, RxDescSpace32=0x300,
591 RxDescSpace64=0x400, RxDescSpace128=0x500,
595 /* Bits in the RxDMACtrl register. */
596 enum rx_dmactrl_bits {
597 RxReportBadFrames=0x80000000, RxDMAShortFrames=0x40000000,
598 RxDMABadFrames=0x20000000, RxDMACrcErrorFrames=0x10000000,
599 RxDMAControlFrame=0x08000000, RxDMAPauseFrame=0x04000000,
600 RxChecksumIgnore=0, RxChecksumRejectTCPUDP=0x02000000,
601 RxChecksumRejectTCPOnly=0x01000000,
602 RxCompletionQ2Enable=0x800000,
603 RxDMAQ2Disable=0, RxDMAQ2FPOnly=0x100000,
604 RxDMAQ2SmallPkt=0x200000, RxDMAQ2HighPrio=0x300000,
605 RxDMAQ2NonIP=0x400000,
606 RxUseBackupQueue=0x080000, RxDMACRC=0x040000,
607 RxEarlyIntThreshShift=12, RxHighPrioThreshShift=8,
611 /* Bits in the RxCompletionAddr register */
613 RxComplQAddr64bit=0x80, RxComplQAddr32bit=0,
614 RxComplProducerWrEn=0x40,
615 RxComplType0=0x00, RxComplType1=0x10,
616 RxComplType2=0x20, RxComplType3=0x30,
617 RxComplThreshShift=0,
620 /* Bits in the TxCompletionAddr register */
622 TxComplQAddr64bit=0x80, TxComplQAddr32bit=0,
623 TxComplProducerWrEn=0x40,
624 TxComplIntrStatus=0x20,
625 CommonQueueMode=0x10,
626 TxComplThreshShift=0,
629 /* Bits in the GenCtrl register */
631 RxEnable=0x05, TxEnable=0x0a,
632 RxGFPEnable=0x10, TxGFPEnable=0x20,
635 /* Bits in the IntrTimerCtrl register */
636 enum intr_ctrl_bits {
637 Timer10X=0x800, EnableIntrMasking=0x60, SmallFrameBypass=0x100,
638 SmallFrame64=0, SmallFrame128=0x200, SmallFrame256=0x400, SmallFrame512=0x600,
639 IntrLatencyMask=0x1f,
642 /* The Rx and Tx buffer descriptors. */
643 struct starfire_rx_desc {
647 RxDescValid=1, RxDescEndRing=2,
650 /* Completion queue entry. */
651 struct short_rx_done_desc {
652 u32 status; /* Low 16 bits is length. */
654 struct basic_rx_done_desc {
655 u32 status; /* Low 16 bits is length. */
659 struct csum_rx_done_desc {
660 u32 status; /* Low 16 bits is length. */
661 u16 csum; /* Partial checksum */
664 struct full_rx_done_desc {
665 u32 status; /* Low 16 bits is length. */
669 u16 csum; /* partial checksum */
672 /* XXX: this is ugly and I'm not sure it's worth the trouble -Ion */
675 typedef struct full_rx_done_desc rx_done_desc;
676 #define RxComplType RxComplType3
677 #else /* not VLAN_SUPPORT */
678 typedef struct csum_rx_done_desc rx_done_desc;
679 #define RxComplType RxComplType2
680 #endif /* not VLAN_SUPPORT */
681 #else /* not HAS_FIRMWARE */
683 typedef struct basic_rx_done_desc rx_done_desc;
684 #define RxComplType RxComplType1
685 #else /* not VLAN_SUPPORT */
686 typedef struct short_rx_done_desc rx_done_desc;
687 #define RxComplType RxComplType0
688 #endif /* not VLAN_SUPPORT */
689 #endif /* not HAS_FIRMWARE */
692 RxOK=0x20000000, RxFIFOErr=0x10000000, RxBufQ2=0x08000000,
695 /* Type 1 Tx descriptor. */
696 struct starfire_tx_desc_1 {
697 u32 status; /* Upper bits are status, lower 16 length. */
701 /* Type 2 Tx descriptor. */
702 struct starfire_tx_desc_2 {
703 u32 status; /* Upper bits are status, lower 16 length. */
709 typedef struct starfire_tx_desc_2 starfire_tx_desc;
710 #define TX_DESC_TYPE TxDescType2
711 #else /* not ADDR_64BITS */
712 typedef struct starfire_tx_desc_1 starfire_tx_desc;
713 #define TX_DESC_TYPE TxDescType1
714 #endif /* not ADDR_64BITS */
715 #define TX_DESC_SPACING TxDescSpaceUnlim
719 TxCRCEn=0x01000000, TxDescIntr=0x08000000,
720 TxRingWrap=0x04000000, TxCalTCP=0x02000000,
722 struct tx_done_desc {
723 u32 status; /* timestamp, index. */
725 u32 intrstatus; /* interrupt status */
729 struct rx_ring_info {
733 struct tx_ring_info {
736 unsigned int used_slots;
740 struct netdev_private {
741 /* Descriptor rings first for alignment. */
742 struct starfire_rx_desc *rx_ring;
743 starfire_tx_desc *tx_ring;
744 dma_addr_t rx_ring_dma;
745 dma_addr_t tx_ring_dma;
746 /* The addresses of rx/tx-in-place skbuffs. */
747 struct rx_ring_info rx_info[RX_RING_SIZE];
748 struct tx_ring_info tx_info[TX_RING_SIZE];
749 /* Pointers to completion queues (full pages). */
750 rx_done_desc *rx_done_q;
751 dma_addr_t rx_done_q_dma;
752 unsigned int rx_done;
753 struct tx_done_desc *tx_done_q;
754 dma_addr_t tx_done_q_dma;
755 unsigned int tx_done;
756 struct net_device_stats stats;
757 struct pci_dev *pci_dev;
759 struct vlan_group *vlgrp;
762 dma_addr_t queue_mem_dma;
763 size_t queue_mem_size;
765 /* Frequently used values: keep some adjacent for cache effect. */
767 unsigned int cur_rx, dirty_rx; /* Producer/consumer ring indices */
768 unsigned int cur_tx, dirty_tx, reap_tx;
769 unsigned int rx_buf_sz; /* Based on MTU+slack. */
770 /* These values keep track of the transceiver/media in use. */
771 int speed100; /* Set if speed == 100MBit. */
775 /* MII transceiver section. */
776 struct mii_if_info mii_if; /* MII lib hooks/info */
777 int phy_cnt; /* MII device addresses. */
778 unsigned char phys[PHY_CNT]; /* MII device addresses. */
782 static int mdio_read(struct net_device *dev, int phy_id, int location);
783 static void mdio_write(struct net_device *dev, int phy_id, int location, int value);
784 static int netdev_open(struct net_device *dev);
785 static void check_duplex(struct net_device *dev);
786 static void tx_timeout(struct net_device *dev);
787 static void init_ring(struct net_device *dev);
788 static int start_tx(struct sk_buff *skb, struct net_device *dev);
789 static irqreturn_t intr_handler(int irq, void *dev_instance, struct pt_regs *regs);
790 static void netdev_error(struct net_device *dev, int intr_status);
791 static int __netdev_rx(struct net_device *dev, int *quota);
792 static void refill_rx_ring(struct net_device *dev);
793 static void netdev_error(struct net_device *dev, int intr_status);
794 static void set_rx_mode(struct net_device *dev);
795 static struct net_device_stats *get_stats(struct net_device *dev);
796 static int netdev_ioctl(struct net_device *dev, struct ifreq *rq, int cmd);
797 static int netdev_close(struct net_device *dev);
798 static void netdev_media_change(struct net_device *dev);
799 static struct ethtool_ops ethtool_ops;
803 static void netdev_vlan_rx_register(struct net_device *dev, struct vlan_group *grp)
805 struct netdev_private *np = netdev_priv(dev);
807 spin_lock(&np->lock);
809 printk("%s: Setting vlgrp to %p\n", dev->name, grp);
812 spin_unlock(&np->lock);
815 static void netdev_vlan_rx_add_vid(struct net_device *dev, unsigned short vid)
817 struct netdev_private *np = netdev_priv(dev);
819 spin_lock(&np->lock);
821 printk("%s: Adding vlanid %d to vlan filter\n", dev->name, vid);
823 spin_unlock(&np->lock);
826 static void netdev_vlan_rx_kill_vid(struct net_device *dev, unsigned short vid)
828 struct netdev_private *np = netdev_priv(dev);
830 spin_lock(&np->lock);
832 printk("%s: removing vlanid %d from vlan filter\n", dev->name, vid);
834 np->vlgrp->vlan_devices[vid] = NULL;
836 spin_unlock(&np->lock);
838 #endif /* VLAN_SUPPORT */
841 static int __devinit starfire_init_one(struct pci_dev *pdev,
842 const struct pci_device_id *ent)
844 struct netdev_private *np;
845 int i, irq, option, chip_idx = ent->driver_data;
846 struct net_device *dev;
847 static int card_idx = -1;
849 int drv_flags, io_size;
852 /* when built into the kernel, we only print version if device is found */
854 static int printed_version;
855 if (!printed_version++)
861 if (pci_enable_device (pdev))
864 ioaddr = pci_resource_start(pdev, 0);
865 io_size = pci_resource_len(pdev, 0);
866 if (!ioaddr || ((pci_resource_flags(pdev, 0) & IORESOURCE_MEM) == 0)) {
867 printk(KERN_ERR DRV_NAME " %d: no PCI MEM resources, aborting\n", card_idx);
871 dev = alloc_etherdev(sizeof(*np));
873 printk(KERN_ERR DRV_NAME " %d: cannot alloc etherdev, aborting\n", card_idx);
876 SET_MODULE_OWNER(dev);
877 SET_NETDEV_DEV(dev, &pdev->dev);
881 if (pci_request_regions (pdev, DRV_NAME)) {
882 printk(KERN_ERR DRV_NAME " %d: cannot reserve PCI resources, aborting\n", card_idx);
883 goto err_out_free_netdev;
886 /* ioremap is borken in Linux-2.2.x/sparc64 */
887 #if !defined(CONFIG_SPARC64) || LINUX_VERSION_CODE > 0x20300
888 ioaddr = (long) ioremap(ioaddr, io_size);
890 printk(KERN_ERR DRV_NAME " %d: cannot remap %#x @ %#lx, aborting\n",
891 card_idx, io_size, ioaddr);
892 goto err_out_free_res;
894 #endif /* !CONFIG_SPARC64 || Linux 2.3.0+ */
896 pci_set_master(pdev);
898 /* enable MWI -- it vastly improves Rx performance on sparc64 */
902 dev->features |= NETIF_F_SG;
903 #endif /* MAX_SKB_FRAGS */
905 /* Starfire can do TCP/UDP checksumming */
907 dev->features |= NETIF_F_IP_CSUM;
908 #endif /* ZEROCOPY */
910 dev->features |= NETIF_F_HW_VLAN_RX | NETIF_F_HW_VLAN_FILTER;
911 dev->vlan_rx_register = netdev_vlan_rx_register;
912 dev->vlan_rx_add_vid = netdev_vlan_rx_add_vid;
913 dev->vlan_rx_kill_vid = netdev_vlan_rx_kill_vid;
914 #endif /* VLAN_RX_KILL_VID */
916 dev->features |= NETIF_F_HIGHDMA;
917 #endif /* ADDR_64BITS */
919 /* Serial EEPROM reads are hidden by the hardware. */
920 for (i = 0; i < 6; i++)
921 dev->dev_addr[i] = readb(ioaddr + EEPROMCtrl + 20 - i);
923 #if ! defined(final_version) /* Dump the EEPROM contents during development. */
925 for (i = 0; i < 0x20; i++)
927 (unsigned int)readb(ioaddr + EEPROMCtrl + i),
928 i % 16 != 15 ? " " : "\n");
931 /* Issue soft reset */
932 writel(MiiSoftReset, ioaddr + TxMode);
934 writel(0, ioaddr + TxMode);
936 /* Reset the chip to erase previous misconfiguration. */
937 writel(1, ioaddr + PCIDeviceConfig);
939 while (--boguscnt > 0) {
941 if ((readl(ioaddr + PCIDeviceConfig) & 1) == 0)
945 printk("%s: chipset reset never completed!\n", dev->name);
946 /* wait a little longer */
949 dev->base_addr = ioaddr;
952 np = netdev_priv(dev);
953 spin_lock_init(&np->lock);
954 pci_set_drvdata(pdev, dev);
958 np->mii_if.dev = dev;
959 np->mii_if.mdio_read = mdio_read;
960 np->mii_if.mdio_write = mdio_write;
961 np->mii_if.phy_id_mask = 0x1f;
962 np->mii_if.reg_num_mask = 0x1f;
964 drv_flags = netdrv_tbl[chip_idx].drv_flags;
966 option = card_idx < MAX_UNITS ? options[card_idx] : 0;
968 option = dev->mem_start;
970 /* The lower four bits are the media type. */
972 np->mii_if.full_duplex = 1;
974 if (card_idx < MAX_UNITS && full_duplex[card_idx] > 0)
975 np->mii_if.full_duplex = 1;
977 if (np->mii_if.full_duplex)
978 np->mii_if.force_media = 1;
980 np->mii_if.force_media = 0;
983 /* timer resolution is 128 * 0.8us */
984 np->intr_timer_ctrl = (((intr_latency * 10) / 1024) & IntrLatencyMask) |
985 Timer10X | EnableIntrMasking;
987 if (small_frames > 0) {
988 np->intr_timer_ctrl |= SmallFrameBypass;
989 switch (small_frames) {
991 np->intr_timer_ctrl |= SmallFrame64;
994 np->intr_timer_ctrl |= SmallFrame128;
997 np->intr_timer_ctrl |= SmallFrame256;
1000 np->intr_timer_ctrl |= SmallFrame512;
1001 if (small_frames > 512)
1002 printk("Adjusting small_frames down to 512\n");
1007 /* The chip-specific entries in the device structure. */
1008 dev->open = &netdev_open;
1009 dev->hard_start_xmit = &start_tx;
1010 init_tx_timer(dev, tx_timeout, TX_TIMEOUT);
1012 dev->stop = &netdev_close;
1013 dev->get_stats = &get_stats;
1014 dev->set_multicast_list = &set_rx_mode;
1015 dev->do_ioctl = &netdev_ioctl;
1016 SET_ETHTOOL_OPS(dev, ðtool_ops);
1021 if (register_netdev(dev))
1022 goto err_out_cleardev;
1024 printk(KERN_INFO "%s: %s at %#lx, ",
1025 dev->name, netdrv_tbl[chip_idx].name, ioaddr);
1026 for (i = 0; i < 5; i++)
1027 printk("%2.2x:", dev->dev_addr[i]);
1028 printk("%2.2x, IRQ %d.\n", dev->dev_addr[i], irq);
1030 if (drv_flags & CanHaveMII) {
1031 int phy, phy_idx = 0;
1033 for (phy = 0; phy < 32 && phy_idx < PHY_CNT; phy++) {
1034 mdio_write(dev, phy, MII_BMCR, BMCR_RESET);
1037 while (--boguscnt > 0)
1038 if ((mdio_read(dev, phy, MII_BMCR) & BMCR_RESET) == 0)
1040 if (boguscnt == 0) {
1041 printk("%s: PHY reset never completed!\n", dev->name);
1044 mii_status = mdio_read(dev, phy, MII_BMSR);
1045 if (mii_status != 0) {
1046 np->phys[phy_idx++] = phy;
1047 np->mii_if.advertising = mdio_read(dev, phy, MII_ADVERTISE);
1048 printk(KERN_INFO "%s: MII PHY found at address %d, status "
1049 "%#4.4x advertising %#4.4x.\n",
1050 dev->name, phy, mii_status, np->mii_if.advertising);
1051 /* there can be only one PHY on-board */
1055 np->phy_cnt = phy_idx;
1056 if (np->phy_cnt > 0)
1057 np->mii_if.phy_id = np->phys[0];
1059 memset(&np->mii_if, 0, sizeof(np->mii_if));
1062 printk(KERN_INFO "%s: scatter-gather and hardware TCP cksumming %s.\n",
1063 dev->name, enable_hw_cksum ? "enabled" : "disabled");
1067 pci_set_drvdata(pdev, NULL);
1068 iounmap((void *)ioaddr);
1070 pci_release_regions (pdev);
1071 err_out_free_netdev:
1077 /* Read the MII Management Data I/O (MDIO) interfaces. */
/* Returns the 16-bit value of PHY register 'location' on PHY 'phy_id'.
 * The MII registers are memory-mapped: 128 bytes per PHY, 4 bytes per
 * register, starting at MIICtrl. */
1078 static int mdio_read(struct net_device *dev, int phy_id, int location)
1080 long mdio_addr = dev->base_addr + MIICtrl + (phy_id<<7) + (location<<2);
1081 int result, boguscnt=1000;
1082 /* ??? Should we add a busy-wait here? */
1084 result = readl(mdio_addr);
/* Spin until the top two handshake bits read 10b (data valid) or the
 * bogus counter expires.  NOTE(review): the re-read of 'result' inside
 * the loop is elided in this listing -- confirm against full source. */
1085 while ((result & 0xC0000000) != 0x80000000 && --boguscnt > 0);
/* All-ones data conventionally means no PHY responded at this address. */
1088 if ((result & 0xffff) == 0xffff)
1090 return result & 0xffff;
/* Write 'value' to PHY register 'location' on PHY 'phy_id'.  Fire and
 * forget: completion is not polled here, see the comment below. */
1094 static void mdio_write(struct net_device *dev, int phy_id, int location, int value)
1096 long mdio_addr = dev->base_addr + MIICtrl + (phy_id<<7) + (location<<2);
1097 writel(value, mdio_addr);
1098 /* The busy-wait will occur before a read. */
/* Bring the interface up: grab the IRQ, reset the chip, allocate the
 * DMA descriptor/completion queues (once, they survive ifdown/ifup),
 * program all Rx/Tx control registers, the station address and perfect
 * filter, optionally download GFP firmware, then enable the engines.
 * Register writes below are order-sensitive; do not reorder casually. */
1102 static int netdev_open(struct net_device *dev)
1104 struct netdev_private *np = netdev_priv(dev);
1105 long ioaddr = dev->base_addr;
1107 size_t tx_done_q_size, rx_done_q_size, tx_ring_size, rx_ring_size;
1109 /* Do we ever need to reset the chip??? */
/* Shared IRQ: the handler disambiguates via dev_instance. */
1110 retval = request_irq(dev->irq, &intr_handler, SA_SHIRQ, dev->name, dev);
1114 /* Disable the Rx and Tx, and reset the chip. */
1115 writel(0, ioaddr + GenCtrl);
1116 writel(1, ioaddr + PCIDeviceConfig);
1118 printk(KERN_DEBUG "%s: netdev_open() irq %d.\n",
1119 dev->name, dev->irq);
1121 /* Allocate the various queues. */
/* One consistent-DMA allocation is carved into four regions:
 * tx-done queue, rx-done queue, tx ring, rx ring (in that order),
 * each aligned up to QUEUE_ALIGN. */
1122 if (np->queue_mem == 0) {
1123 tx_done_q_size = ((sizeof(struct tx_done_desc) * DONE_Q_SIZE + QUEUE_ALIGN - 1) / QUEUE_ALIGN) * QUEUE_ALIGN;
1124 rx_done_q_size = ((sizeof(rx_done_desc) * DONE_Q_SIZE + QUEUE_ALIGN - 1) / QUEUE_ALIGN) * QUEUE_ALIGN;
1125 tx_ring_size = ((sizeof(starfire_tx_desc) * TX_RING_SIZE + QUEUE_ALIGN - 1) / QUEUE_ALIGN) * QUEUE_ALIGN;
1126 rx_ring_size = sizeof(struct starfire_rx_desc) * RX_RING_SIZE;
1127 np->queue_mem_size = tx_done_q_size + rx_done_q_size + tx_ring_size + rx_ring_size;
1128 np->queue_mem = pci_alloc_consistent(np->pci_dev, np->queue_mem_size, &np->queue_mem_dma);
1129 if (np->queue_mem == 0)
/* Carve up both the CPU pointers and the matching bus addresses. */
1132 np->tx_done_q = np->queue_mem;
1133 np->tx_done_q_dma = np->queue_mem_dma;
1134 np->rx_done_q = (void *) np->tx_done_q + tx_done_q_size;
1135 np->rx_done_q_dma = np->tx_done_q_dma + tx_done_q_size;
1136 np->tx_ring = (void *) np->rx_done_q + rx_done_q_size;
1137 np->tx_ring_dma = np->rx_done_q_dma + rx_done_q_size;
1138 np->rx_ring = (void *) np->tx_ring + tx_ring_size;
1139 np->rx_ring_dma = np->tx_ring_dma + tx_ring_size;
1142 /* Start with no carrier, it gets adjusted later */
1143 netif_carrier_off(dev);
1145 /* Set the size of the Rx buffers. */
1146 writel((np->rx_buf_sz << RxBufferLenShift) |
1147 (0 << RxMinDescrThreshShift) |
1148 RxPrefetchMode | RxVariableQ |
1150 RX_DESC_Q_ADDR_SIZE | RX_DESC_ADDR_SIZE |
1152 ioaddr + RxDescQCtrl);
1154 /* Set up the Rx DMA controller. */
1155 writel(RxChecksumIgnore |
1156 (0 << RxEarlyIntThreshShift) |
1157 (6 << RxHighPrioThreshShift) |
1158 ((DMA_BURST_SIZE / 32) << RxBurstSizeShift),
1159 ioaddr + RxDMACtrl);
1161 /* Set Tx descriptor */
1162 writel((2 << TxHiPriFIFOThreshShift) |
1163 (0 << TxPadLenShift) |
1164 ((DMA_BURST_SIZE / 32) << TxDMABurstSizeShift) |
1165 TX_DESC_Q_ADDR_SIZE |
1166 TX_DESC_SPACING | TX_DESC_TYPE,
1167 ioaddr + TxDescCtrl);
/* Upper 32 bits of the queue bus addresses; all regions share the same
 * high word since they come from the single allocation above.
 * (x >> 16) >> 16 instead of >> 32 avoids UB when dma_addr_t is 32-bit. */
1169 writel( (np->queue_mem_dma >> 16) >> 16, ioaddr + RxDescQHiAddr);
1170 writel( (np->queue_mem_dma >> 16) >> 16, ioaddr + TxRingHiAddr);
1171 writel( (np->queue_mem_dma >> 16) >> 16, ioaddr + CompletionHiAddr);
1172 writel(np->rx_ring_dma, ioaddr + RxDescQAddr);
1173 writel(np->tx_ring_dma, ioaddr + TxRingPtr);
1175 writel(np->tx_done_q_dma, ioaddr + TxCompletionAddr);
1176 writel(np->rx_done_q_dma |
1178 (0 << RxComplThreshShift),
1179 ioaddr + RxCompletionAddr);
1182 printk(KERN_DEBUG "%s: Filling in the station address.\n", dev->name);
1184 /* Fill both the Tx SA register and the Rx perfect filter. */
/* Station address is written byte-reversed (offset 5 - i). */
1185 for (i = 0; i < 6; i++)
1186 writeb(dev->dev_addr[i], ioaddr + TxStationAddr + 5 - i);
1187 /* The first entry is special because it bypasses the VLAN filter.
1189 writew(0, ioaddr + PerfFilterTable);
1190 writew(0, ioaddr + PerfFilterTable + 4);
1191 writew(0, ioaddr + PerfFilterTable + 8);
/* Fill the remaining 15 perfect-filter slots with our own address,
 * big-endian, high word first (eaddrs[2] .. eaddrs[0]). */
1192 for (i = 1; i < 16; i++) {
1193 u16 *eaddrs = (u16 *)dev->dev_addr;
1194 long setup_frm = ioaddr + PerfFilterTable + i * 16;
1195 writew(cpu_to_be16(eaddrs[2]), setup_frm); setup_frm += 4;
1196 writew(cpu_to_be16(eaddrs[1]), setup_frm); setup_frm += 4;
1197 writew(cpu_to_be16(eaddrs[0]), setup_frm); setup_frm += 8;
1200 /* Initialize other registers. */
1201 /* Configure the PCI bus bursts and FIFO thresholds. */
1202 np->tx_mode = TxFlowEnable|RxFlowEnable|PadEnable; /* modified when link is up. */
/* Pulse MiiSoftReset, then write the mode again without it. */
1203 writel(MiiSoftReset | np->tx_mode, ioaddr + TxMode);
1205 writel(np->tx_mode, ioaddr + TxMode);
/* Tx FIFO threshold in 16-byte units; netdev_error() bumps it on
 * IntrTxDataLow. */
1206 np->tx_threshold = 4;
1207 writel(np->tx_threshold, ioaddr + TxThreshold);
1209 writel(np->intr_timer_ctrl, ioaddr + IntrTimerCtrl);
1211 netif_start_if(dev);
1212 netif_start_queue(dev);
1215 printk(KERN_DEBUG "%s: Setting the Rx and Tx modes.\n", dev->name);
1218 np->mii_if.advertising = mdio_read(dev, np->phys[0], MII_ADVERTISE);
1221 /* Enable GPIO interrupts on link change */
1222 writel(0x0f00ff00, ioaddr + GPIOCtrl);
1224 /* Set the interrupt mask */
1225 writel(IntrRxDone | IntrRxEmpty | IntrDMAErr |
1226 IntrTxDMADone | IntrStatsMax | IntrLinkChange |
1227 IntrRxGFPDead | IntrNoTxCsum | IntrTxBadID,
1228 ioaddr + IntrEnable);
1229 /* Enable PCI interrupts. */
1230 writel(0x00800000 | readl(ioaddr + PCIDeviceConfig),
1231 ioaddr + PCIDeviceConfig);
1234 /* Set VLAN type to 802.1q */
1235 writel(ETH_P_8021Q, ioaddr + VlanType);
1236 #endif /* VLAN_SUPPORT */
1239 /* Load Rx/Tx firmware into the frame processors */
/* firmware_{rx,tx} arrays hold 64-bit words; hence SIZE * 2 longwords. */
1240 for (i = 0; i < FIRMWARE_RX_SIZE * 2; i++)
1241 writel(firmware_rx[i], ioaddr + RxGfpMem + i * 4);
1242 for (i = 0; i < FIRMWARE_TX_SIZE * 2; i++)
1243 writel(firmware_tx[i], ioaddr + TxGfpMem + i * 4);
1244 #endif /* HAS_FIRMWARE */
1245 if (enable_hw_cksum)
1246 /* Enable the Rx and Tx units, and the Rx/Tx frame processors. */
1247 writel(TxEnable|TxGFPEnable|RxEnable|RxGFPEnable, ioaddr + GenCtrl);
1249 /* Enable the Rx and Tx units only. */
1250 writel(TxEnable|RxEnable, ioaddr + GenCtrl);
1253 printk(KERN_DEBUG "%s: Done netdev_open().\n",
/* Re-program the PHY for the configured media: reset it, wait for the
 * reset to self-clear, then either (re)start autonegotiation or force
 * speed/duplex according to np->mii_if settings. */
1260 static void check_duplex(struct net_device *dev)
1262 struct netdev_private *np = netdev_priv(dev);
1264 int silly_count = 1000;
1266 mdio_write(dev, np->phys[0], MII_ADVERTISE, np->mii_if.advertising);
1267 mdio_write(dev, np->phys[0], MII_BMCR, BMCR_RESET);
/* BMCR_RESET is self-clearing; poll with a bounded counter so a dead
 * PHY cannot hang us. */
1269 while (--silly_count && mdio_read(dev, np->phys[0], MII_BMCR) & BMCR_RESET)
1272 printk("%s: MII reset failed!\n", dev->name);
1276 reg0 = mdio_read(dev, np->phys[0], MII_BMCR);
1278 if (!np->mii_if.force_media) {
1279 reg0 |= BMCR_ANENABLE | BMCR_ANRESTART;
/* Forced media: disable autoneg, set speed/duplex bits explicitly. */
1281 reg0 &= ~(BMCR_ANENABLE | BMCR_ANRESTART);
1283 reg0 |= BMCR_SPEED100;
1284 if (np->mii_if.full_duplex)
1285 reg0 |= BMCR_FULLDPLX;
1286 printk(KERN_DEBUG "%s: Link forced to %sMbit %s-duplex\n",
1288 np->speed100 ? "100" : "10",
1289 np->mii_if.full_duplex ? "full" : "half");
1291 mdio_write(dev, np->phys[0], MII_BMCR, reg0);
/* Watchdog hook: the stack saw no Tx completion within TX_TIMEOUT.
 * Log the interrupt status, count the error and wake the queue so
 * transmission can be retried.  NOTE(review): the actual reset/restart
 * steps hinted at by the comments below are elided in this listing. */
1295 static void tx_timeout(struct net_device *dev)
1297 struct netdev_private *np = netdev_priv(dev);
1298 long ioaddr = dev->base_addr;
1301 printk(KERN_WARNING "%s: Transmit timed out, status %#8.8x, "
1302 "resetting...\n", dev->name, (int) readl(ioaddr + IntrStatus));
1304 /* Perhaps we should reinitialize the hardware here. */
1307 * Stop and restart the interface.
1308 * Cheat and increase the debug level temporarily.
1316 /* Trigger an immediate transmit demand. */
/* Reset the watchdog clock so we do not immediately time out again. */
1318 dev->trans_start = jiffies;
1319 np->stats.tx_errors++;
1320 netif_wake_queue(dev);
1324 /* Initialize the Rx and Tx rings, along with various 'dev' bits. */
/* Resets all ring indices, pre-allocates one skb per Rx slot (stopping
 * gracefully on allocation failure), clears the completion queues and
 * the Tx bookkeeping array. */
1325 static void init_ring(struct net_device *dev)
1327 struct netdev_private *np = netdev_priv(dev);
1330 np->cur_rx = np->cur_tx = np->reap_tx = 0;
1331 np->dirty_rx = np->dirty_tx = np->rx_done = np->tx_done = 0;
/* +32 gives headroom for oversized-MTU frames. */
1333 np->rx_buf_sz = (dev->mtu <= 1500 ? PKT_BUF_SZ : dev->mtu + 32);
1335 /* Fill in the Rx buffers.  Handle allocation failure gracefully. */
1336 for (i = 0; i < RX_RING_SIZE; i++) {
1337 struct sk_buff *skb = dev_alloc_skb(np->rx_buf_sz);
1338 np->rx_info[i].skb = skb;
1341 np->rx_info[i].mapping = pci_map_single(np->pci_dev, skb->tail, np->rx_buf_sz, PCI_DMA_FROMDEVICE);
1342 skb->dev = dev; /* Mark as being used by this device. */
1343 /* Grrr, we cannot offset to correctly align the IP header. */
/* Low bits of the bus address double as descriptor flags. */
1344 np->rx_ring[i].rxaddr = cpu_to_dma(np->rx_info[i].mapping | RxDescValid);
/* Tell the chip how far the ring is populated (i - 1 = last valid). */
1346 writew(i - 1, dev->base_addr + RxDescQIdx);
/* If allocation stopped early, dirty_rx goes "negative" (mod ring size)
 * so refill_rx_ring() will top the ring up later. */
1347 np->dirty_rx = (unsigned int)(i - RX_RING_SIZE);
1349 /* Clear the remainder of the Rx buffer ring. */
1350 for ( ; i < RX_RING_SIZE; i++) {
1351 np->rx_ring[i].rxaddr = 0;
1352 np->rx_info[i].skb = NULL;
1353 np->rx_info[i].mapping = 0;
1355 /* Mark the last entry as wrapping the ring. */
1356 np->rx_ring[RX_RING_SIZE - 1].rxaddr |= cpu_to_dma(RxDescEndRing);
1358 /* Clear the completion rings. */
1359 for (i = 0; i < DONE_Q_SIZE; i++) {
1360 np->rx_done_q[i].status = 0;
1361 np->tx_done_q[i].status = 0;
1364 for (i = 0; i < TX_RING_SIZE; i++)
1365 memset(&np->tx_info[i], 0, sizeof(np->tx_info[i]));
/* hard_start_xmit: map the skb (and its page fragments when zerocopy is
 * compiled in) into one or more Tx descriptors, advance the producer
 * index, and stop the queue when the ring gets close to full. */
1371 static int start_tx(struct sk_buff *skb, struct net_device *dev)
1373 struct netdev_private *np = netdev_priv(dev);
1378 kick_tx_timer(dev, tx_timeout, TX_TIMEOUT);
1381 * be cautious here, wrapping the queue has weird semantics
1382 * and we may not have enough slots even when it seems we do.
/* Worst case each fragment needs two slots, hence the * 2. */
1384 if ((np->cur_tx - np->dirty_tx) + skb_num_frags(skb) * 2 > TX_RING_SIZE) {
1385 netif_stop_queue(dev);
1389 #if defined(ZEROCOPY) && defined(HAS_BROKEN_FIRMWARE)
/* Broken GFP firmware mishandles 1-byte fragments; scan for them and
 * fall back to software checksumming when found. */
1391 int has_bad_length = 0;
1393 if (skb_first_frag_len(skb) == 1)
1396 for (i = 0; i < skb_shinfo(skb)->nr_frags; i++)
1397 if (skb_shinfo(skb)->frags[i].size == 1) {
1404 skb_checksum_help(skb);
1406 #endif /* ZEROCOPY && HAS_BROKEN_FIRMWARE */
1408 entry = np->cur_tx % TX_RING_SIZE;
/* One descriptor per fragment; i == 0 is the linear skb head. */
1409 for (i = 0; i < skb_num_frags(skb); i++) {
1414 np->tx_info[entry].skb = skb;
/* Not enough contiguous slots before the ring end: wrap. */
1416 if (entry >= TX_RING_SIZE - skb_num_frags(skb)) {
1417 status |= TxRingWrap;
1421 status |= TxDescIntr;
1424 if (skb->ip_summed == CHECKSUM_HW) {
/* tx_compressed is (ab)used to count hardware-checksummed packets. */
1426 np->stats.tx_compressed++;
1428 status |= skb_first_frag_len(skb) | (skb_num_frags(skb) << 16);
1430 np->tx_info[entry].mapping =
1431 pci_map_single(np->pci_dev, skb->data, skb_first_frag_len(skb), PCI_DMA_TODEVICE);
1433 #ifdef MAX_SKB_FRAGS
/* i >= 1: map the i-th page fragment. */
1434 skb_frag_t *this_frag = &skb_shinfo(skb)->frags[i - 1];
1435 status |= this_frag->size;
1436 np->tx_info[entry].mapping =
1437 pci_map_single(np->pci_dev, page_address(this_frag->page) + this_frag->page_offset, this_frag->size, PCI_DMA_TODEVICE);
1438 #endif /* MAX_SKB_FRAGS */
1441 np->tx_ring[entry].addr = cpu_to_dma(np->tx_info[entry].mapping);
1442 np->tx_ring[entry].status = cpu_to_le32(status);
1444 printk(KERN_DEBUG "%s: Tx #%d/#%d slot %d status %#8.8x.\n",
1445 dev->name, np->cur_tx, np->dirty_tx,
/* used_slots records how far cur_tx advanced for this entry so the
 * completion path can reclaim the right number of descriptors. */
1448 np->tx_info[entry].used_slots = TX_RING_SIZE - entry;
1449 np->cur_tx += np->tx_info[entry].used_slots;
1452 np->tx_info[entry].used_slots = 1;
1453 np->cur_tx += np->tx_info[entry].used_slots;
1456 /* scavenge the tx descriptors twice per TX_RING_SIZE */
1457 if (np->cur_tx % (TX_RING_SIZE / 2) == 0)
1461 /* Non-x86: explicitly flush descriptor cache lines here. */
1462 /* Ensure all descriptors are written back before the transmit is
1466 /* Update the producer index. */
1467 writel(entry * (sizeof(starfire_tx_desc) / 8), dev->base_addr + TxProducerIdx);
1469 /* 4 is arbitrary, but should be ok */
1470 if ((np->cur_tx - np->dirty_tx) + 4 > TX_RING_SIZE)
1471 netif_stop_queue(dev);
1473 dev->trans_start = jiffies;
1479 /* The interrupt handler does all of the Rx thread work and cleans up
1480 after the Tx thread. */
/* Main ISR: loops reading-and-acknowledging IntrClear, dispatching Rx
 * work, reaping completed Tx descriptors from the tx-done queue, and
 * handling stats/link/error events, bounded by max_interrupt_work. */
1481 static irqreturn_t intr_handler(int irq, void *dev_instance, struct pt_regs *rgs)
1483 struct net_device *dev = dev_instance;
1484 struct netdev_private *np;
1486 int boguscnt = max_interrupt_work;
1491 ioaddr = dev->base_addr;
1492 np = netdev_priv(dev);
/* Reading IntrClear both fetches and acknowledges pending sources. */
1495 u32 intr_status = readl(ioaddr + IntrClear);
1498 printk(KERN_DEBUG "%s: Interrupt status %#8.8x.\n",
1499 dev->name, intr_status);
/* 0 = nothing pending; all-ones = card gone (shared-IRQ surprise). */
1501 if (intr_status == 0 || intr_status == (u32) -1)
1506 if (intr_status & (IntrRxDone | IntrRxEmpty))
1507 netdev_rx(dev, ioaddr);
1509 /* Scavenge the skbuff list based on the Tx-done queue.
1510 There are redundant checks here that may be cleaned up
1511 after the driver has proven to be reliable. */
1512 consumer = readl(ioaddr + TxConsumerIdx);
1514 printk(KERN_DEBUG "%s: Tx Consumer index is %d.\n",
1515 dev->name, consumer);
/* A non-zero status word marks a valid tx-done entry. */
1517 while ((tx_status = le32_to_cpu(np->tx_done_q[np->tx_done].status)) != 0) {
1519 printk(KERN_DEBUG "%s: Tx completion #%d entry %d is %#8.8x.\n",
1520 dev->name, np->dirty_tx, np->tx_done, tx_status);
/* 101b in the top three bits: plain "packet sent" notification. */
1521 if ((tx_status & 0xe0000000) == 0xa0000000) {
1522 np->stats.tx_packets++;
/* 100b: descriptor-done; time to unmap and free the skb. */
1523 } else if ((tx_status & 0xe0000000) == 0x80000000) {
/* Low 15 bits hold the byte offset of the descriptor in the ring. */
1524 u16 entry = (tx_status & 0x7fff) / sizeof(starfire_tx_desc);
1525 struct sk_buff *skb = np->tx_info[entry].skb;
1526 np->tx_info[entry].skb = NULL;
1527 pci_unmap_single(np->pci_dev,
1528 np->tx_info[entry].mapping,
1529 skb_first_frag_len(skb),
1531 np->tx_info[entry].mapping = 0;
1532 np->dirty_tx += np->tx_info[entry].used_slots;
1533 entry = (entry + np->tx_info[entry].used_slots) % TX_RING_SIZE;
1534 #ifdef MAX_SKB_FRAGS
/* Unmap each page fragment that followed the head descriptor. */
1537 for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) {
1538 pci_unmap_single(np->pci_dev,
1539 np->tx_info[entry].mapping,
1540 skb_shinfo(skb)->frags[i].size,
1546 #endif /* MAX_SKB_FRAGS */
1547 dev_kfree_skb_irq(skb);
/* Retire this tx-done entry and advance the consumer. */
1549 np->tx_done_q[np->tx_done].status = 0;
1550 np->tx_done = (np->tx_done + 1) % DONE_Q_SIZE;
1552 writew(np->tx_done, ioaddr + CompletionQConsumerIdx + 2);
1554 if (netif_queue_stopped(dev) &&
1555 (np->cur_tx - np->dirty_tx + 4 < TX_RING_SIZE)) {
1556 /* The ring is no longer full, wake the queue. */
1557 netif_wake_queue(dev);
1560 /* Stats overflow */
1561 if (intr_status & IntrStatsMax)
1564 /* Media change interrupt. */
1565 if (intr_status & IntrLinkChange)
1566 netdev_media_change(dev);
1568 /* Abnormal error summary/uncommon events handlers. */
1569 if (intr_status & IntrAbnormalSummary)
1570 netdev_error(dev, intr_status);
/* Bail out after max_interrupt_work iterations to avoid livelock. */
1572 if (--boguscnt < 0) {
1574 printk(KERN_WARNING "%s: Too much work at interrupt, "
1576 dev->name, intr_status);
1582 printk(KERN_DEBUG "%s: exiting interrupt, status=%#8.8x.\n",
1583 dev->name, (int) readl(ioaddr + IntrStatus));
1584 return IRQ_RETVAL(handled);
1588 /* This routine is logically part of the interrupt/poll handler, but separated
1589 for clarity, code sharing between NAPI/non-NAPI, and better register allocation. */
/* Drain the rx-done completion queue, at most *quota packets: for each
 * good frame either copy into a fresh small skb (len < rx_copybreak)
 * or hand up the ring skb directly, set checksum/VLAN metadata, then
 * advance the consumer index and refill the Rx ring. */
1590 static int __netdev_rx(struct net_device *dev, int *quota)
1592 struct netdev_private *np = netdev_priv(dev);
1596 /* If EOP is set on the next entry, it's a new packet.  Send it up. */
1597 while ((desc_status = le32_to_cpu(np->rx_done_q[np->rx_done].status)) != 0) {
1598 struct sk_buff *skb;
1601 rx_done_desc *desc = &np->rx_done_q[np->rx_done];
1604 printk(KERN_DEBUG "  netdev_rx() status of %d was %#8.8x.\n", np->rx_done, desc_status);
1605 if (!(desc_status & RxOK)) {
1606 /* There was a error. */
1608 printk(KERN_DEBUG "  netdev_rx() Rx error was %#8.8x.\n", desc_status);
1609 np->stats.rx_errors++;
1610 if (desc_status & RxFIFOErr)
1611 np->stats.rx_fifo_errors++;
1615 if (*quota <= 0) { /* out of rx quota */
/* Status word packs the length in the low bits and the originating
 * ring entry in bits 16-26. */
1621 pkt_len = desc_status; /* Implicitly Truncate */
1622 entry = (desc_status >> 16) & 0x7ff;
1625 printk(KERN_DEBUG "  netdev_rx() normal Rx pkt length %d, quota %d.\n", pkt_len, *quota);
1626 /* Check if the packet is long enough to accept without copying
1627 to a minimally-sized skbuff. */
1628 if (pkt_len < rx_copybreak
1629 && (skb = dev_alloc_skb(pkt_len + 2)) != NULL) {
1631 skb_reserve(skb, 2); /* 16 byte align the IP header */
/* Sync-for-cpu / copy / sync-for-device keeps the original ring
 * buffer mapped and reusable. */
1632 pci_dma_sync_single_for_cpu(np->pci_dev,
1633 np->rx_info[entry].mapping,
1634 pkt_len, PCI_DMA_FROMDEVICE);
1635 eth_copy_and_sum(skb, np->rx_info[entry].skb->tail, pkt_len, 0);
1636 pci_dma_sync_single_for_device(np->pci_dev,
1637 np->rx_info[entry].mapping,
1638 pkt_len, PCI_DMA_FROMDEVICE);
1639 skb_put(skb, pkt_len);
/* Large packet: give the ring skb itself to the stack and leave the
 * slot empty for refill_rx_ring(). */
1641 pci_unmap_single(np->pci_dev, np->rx_info[entry].mapping, np->rx_buf_sz, PCI_DMA_FROMDEVICE);
1642 skb = np->rx_info[entry].skb;
1643 skb_put(skb, pkt_len);
1644 np->rx_info[entry].skb = NULL;
1645 np->rx_info[entry].mapping = 0;
1647 #ifndef final_version /* Remove after testing. */
1648 /* You will want this info for the initial debug. */
1650 printk(KERN_DEBUG "  Rx data %2.2x:%2.2x:%2.2x:%2.2x:%2.2x:"
1651 "%2.2x %2.2x:%2.2x:%2.2x:%2.2x:%2.2x:%2.2x %2.2x%2.2x.\n",
1652 skb->data[0], skb->data[1], skb->data[2], skb->data[3],
1653 skb->data[4], skb->data[5], skb->data[6], skb->data[7],
1654 skb->data[8], skb->data[9], skb->data[10],
1655 skb->data[11], skb->data[12], skb->data[13]);
1658 skb->protocol = eth_type_trans(skb, dev);
1659 #if defined(HAS_FIRMWARE) || defined(VLAN_SUPPORT)
1661 printk(KERN_DEBUG "  netdev_rx() status2 of %d was %#4.4x.\n", np->rx_done, le16_to_cpu(desc->status2));
/* status2 bit 8: GFP firmware verified the TCP/UDP checksum. */
1664 if (le16_to_cpu(desc->status2) & 0x0100) {
1665 skb->ip_summed = CHECKSUM_UNNECESSARY;
1666 np->stats.rx_compressed++;
1669 * This feature doesn't seem to be working, at least
1670 * with the two firmware versions I have.  If the GFP sees
1671 * an IP fragment, it either ignores it completely, or reports
1672 * "bad checksum" on it.
1674 * Maybe I missed something -- corrections are welcome.
1675 * Until then, the printk stays. :-) -Ion
1677 else if (le16_to_cpu(desc->status2) & 0x0040) {
1678 skb->ip_summed = CHECKSUM_HW;
1679 skb->csum = le16_to_cpu(desc->csum);
1680 printk(KERN_DEBUG "%s: checksum_hw, status2 = %#x\n", dev->name, le16_to_cpu(desc->status2));
1682 #endif /* HAS_FIRMWARE */
/* status2 bit 9: the chip stripped and reported a VLAN tag. */
1684 if (np->vlgrp && le16_to_cpu(desc->status2) & 0x0200) {
1686 printk(KERN_DEBUG "  netdev_rx() vlanid = %d\n", le16_to_cpu(desc->vlanid));
1687 /* vlan_netdev_receive_skb() expects a packet with the VLAN tag stripped out */
1688 vlan_netdev_receive_skb(skb, np->vlgrp, le16_to_cpu(desc->vlanid) & VLAN_VID_MASK);
1690 #endif /* VLAN_SUPPORT */
1691 netdev_receive_skb(skb);
1692 dev->last_rx = jiffies;
1693 np->stats.rx_packets++;
1698 np->rx_done = (np->rx_done + 1) % DONE_Q_SIZE;
/* Publish the new completion-queue consumer position to the chip. */
1700 writew(np->rx_done, dev->base_addr + CompletionQConsumerIdx);
1703 refill_rx_ring(dev);
1705 printk(KERN_DEBUG "  exiting netdev_rx(): %d, status of %d was %#8.8x.\n",
1706 retcode, np->rx_done, desc_status);
1711 #ifdef HAVE_NETDEV_POLL
/* NAPI poll method: repeatedly ack Rx interrupt sources and drain the
 * Rx completion queue within the budget; when no Rx work remains,
 * leave polling mode and re-enable Rx interrupts. */
1712 static int netdev_poll(struct net_device *dev, int *budget)
1715 long ioaddr = dev->base_addr;
1716 int retcode = 0, quota = dev->quota;
/* Ack the Rx sources first so a new event re-triggers the loop. */
1719 writel(IntrRxDone | IntrRxEmpty, ioaddr + IntrClear);
1721 retcode = __netdev_rx(dev, &quota);
1722 *budget -= (dev->quota - quota);
1727 intr_status = readl(ioaddr + IntrStatus);
1728 } while (intr_status & (IntrRxDone | IntrRxEmpty));
1730 netif_rx_complete(dev);
/* Turn the Rx interrupt sources back on in the enable mask. */
1731 intr_status = readl(ioaddr + IntrEnable);
1732 intr_status |= IntrRxDone | IntrRxEmpty;
1733 writel(intr_status, ioaddr + IntrEnable);
1737 printk(KERN_DEBUG "  exiting netdev_poll(): %d.\n", retcode);
1739 /* Restart Rx engine if stopped. */
1742 #endif /* HAVE_NETDEV_POLL */
/* Replenish Rx ring slots consumed by __netdev_rx(): allocate a new skb
 * for every empty slot between dirty_rx and cur_rx, map it for DMA and
 * mark the descriptor valid, then advance the chip's Rx queue index. */
1745 static void refill_rx_ring(struct net_device *dev)
1747 struct netdev_private *np = netdev_priv(dev);
1748 struct sk_buff *skb;
1751 /* Refill the Rx ring buffers. */
1752 for (; np->cur_rx - np->dirty_rx > 0; np->dirty_rx++) {
1753 entry = np->dirty_rx % RX_RING_SIZE;
1754 if (np->rx_info[entry].skb == NULL) {
1755 skb = dev_alloc_skb(np->rx_buf_sz);
1756 np->rx_info[entry].skb = skb;
/* Allocation failed: keep dirty_rx where it is and retry later. */
1758 break; /* Better luck next round. */
1759 np->rx_info[entry].mapping =
1760 pci_map_single(np->pci_dev, skb->tail, np->rx_buf_sz, PCI_DMA_FROMDEVICE);
1761 skb->dev = dev; /* Mark as being used by this device. */
1762 np->rx_ring[entry].rxaddr =
1763 cpu_to_dma(np->rx_info[entry].mapping | RxDescValid);
/* Re-assert the end-of-ring marker on the final descriptor. */
1765 if (entry == RX_RING_SIZE - 1)
1766 np->rx_ring[entry].rxaddr |= cpu_to_dma(RxDescEndRing);
1769 writew(entry, dev->base_addr + RxDescQIdx);
/* Link-change interrupt handler: query the PHY to determine link state,
 * speed and duplex (from autoneg results or forced BMCR bits), update
 * carrier state, and reconcile the chip's TxMode duplex bit and the
 * interrupt-timer prescaler with the new speed. */
1773 static void netdev_media_change(struct net_device *dev)
1775 struct netdev_private *np = netdev_priv(dev);
1776 long ioaddr = dev->base_addr;
1777 u16 reg0, reg1, reg4, reg5;
1779 u32 new_intr_timer_ctrl;
1781 /* reset status first */
/* BMSR latches link-down events; read once to clear stale state. */
1782 mdio_read(dev, np->phys[0], MII_BMCR);
1783 mdio_read(dev, np->phys[0], MII_BMSR);
1785 reg0 = mdio_read(dev, np->phys[0], MII_BMCR);
1786 reg1 = mdio_read(dev, np->phys[0], MII_BMSR);
1788 if (reg1 & BMSR_LSTATUS) {
1790 if (reg0 & BMCR_ANENABLE) {
1791 /* autonegotiation is enabled */
/* Highest common denominator of our advertisement (reg4) and the
 * link partner's abilities (reg5), best first. */
1792 reg4 = mdio_read(dev, np->phys[0], MII_ADVERTISE);
1793 reg5 = mdio_read(dev, np->phys[0], MII_LPA);
1794 if (reg4 & ADVERTISE_100FULL && reg5 & LPA_100FULL) {
1796 np->mii_if.full_duplex = 1;
1797 } else if (reg4 & ADVERTISE_100HALF && reg5 & LPA_100HALF) {
1799 np->mii_if.full_duplex = 0;
1800 } else if (reg4 & ADVERTISE_10FULL && reg5 & LPA_10FULL) {
1802 np->mii_if.full_duplex = 1;
1805 np->mii_if.full_duplex = 0;
1808 /* autonegotiation is disabled */
/* Forced mode: trust the BMCR speed/duplex bits directly. */
1809 if (reg0 & BMCR_SPEED100)
1813 if (reg0 & BMCR_FULLDPLX)
1814 np->mii_if.full_duplex = 1;
1816 np->mii_if.full_duplex = 0;
1818 netif_carrier_on(dev);
1819 printk(KERN_DEBUG "%s: Link is up, running at %sMbit %s-duplex\n",
1821 np->speed100 ? "100" : "10",
1822 np->mii_if.full_duplex ? "full" : "half");
1824 new_tx_mode = np->tx_mode & ~FullDuplex; /* duplex setting */
1825 if (np->mii_if.full_duplex)
1826 new_tx_mode |= FullDuplex;
/* Only touch the hardware when the mode actually changed; a TxMode
 * update requires a MiiSoftReset pulse first. */
1827 if (np->tx_mode != new_tx_mode) {
1828 np->tx_mode = new_tx_mode;
1829 writel(np->tx_mode | MiiSoftReset, ioaddr + TxMode);
1831 writel(np->tx_mode, ioaddr + TxMode);
/* Timer10X scales the interrupt-mitigation timer for 10 Mbit links. */
1834 new_intr_timer_ctrl = np->intr_timer_ctrl & ~Timer10X;
1836 new_intr_timer_ctrl |= Timer10X;
1837 if (np->intr_timer_ctrl != new_intr_timer_ctrl) {
1838 np->intr_timer_ctrl = new_intr_timer_ctrl;
1839 writel(new_intr_timer_ctrl, ioaddr + IntrTimerCtrl);
1842 netif_carrier_off(dev);
1843 printk(KERN_DEBUG "%s: Link is down\n", dev->name);
/* Handle the uncommon/abnormal interrupt sources reported in
 * intr_status: Tx FIFO low-water, dead Rx frame processor, checksum
 * and DMA failures; anything unrecognized is logged when debugging. */
1848 static void netdev_error(struct net_device *dev, int intr_status)
1850 struct netdev_private *np = netdev_priv(dev);
1852 /* Came close to underrunning the Tx FIFO, increase threshold. */
/* Threshold is in 16-byte units; stop growing once it covers a full
 * packet buffer (PKT_BUF_SZ / 16) -- beyond that the bus itself is
 * the problem. */
1853 if (intr_status & IntrTxDataLow) {
1854 if (np->tx_threshold <= PKT_BUF_SZ / 16) {
1855 writel(++np->tx_threshold, dev->base_addr + TxThreshold);
1856 printk(KERN_NOTICE "%s: PCI bus congestion, increasing Tx FIFO threshold to %d bytes\n",
1857 dev->name, np->tx_threshold * 16);
1859 printk(KERN_WARNING "%s: PCI Tx underflow -- adapter is probably malfunctioning\n", dev->name);
1861 if (intr_status & IntrRxGFPDead) {
1862 np->stats.rx_fifo_errors++;
1863 np->stats.rx_errors++;
1865 if (intr_status & (IntrNoTxCsum | IntrDMAErr)) {
1866 np->stats.tx_fifo_errors++;
1867 np->stats.tx_errors++;
/* Mask off everything we already know about; what's left is wicked. */
1869 if ((intr_status & ~(IntrNormalMask | IntrAbnormalSummary | IntrLinkChange | IntrStatsMax | IntrTxDataLow | IntrRxGFPDead | IntrNoTxCsum | IntrPCIPad)) && debug)
1870 printk(KERN_ERR "%s: Something Wicked happened! %#8.8x.\n",
1871 dev->name, intr_status);
/* get_stats method: snapshot the chip's hardware statistics counters
 * (memory-mapped at fixed offsets in the 0x57000 region) into
 * np->stats and return it. */
1875 static struct net_device_stats *get_stats(struct net_device *dev)
1877 long ioaddr = dev->base_addr;
1878 struct netdev_private *np = netdev_priv(dev);
1880 /* This adapter architecture needs no SMP locks. */
1881 np->stats.tx_bytes = readl(ioaddr + 0x57010);
1882 np->stats.rx_bytes = readl(ioaddr + 0x57044);
1883 np->stats.tx_packets = readl(ioaddr + 0x57000);
1884 np->stats.tx_aborted_errors =
1885 readl(ioaddr + 0x57024) + readl(ioaddr + 0x57028);
1886 np->stats.tx_window_errors = readl(ioaddr + 0x57018);
1887 np->stats.collisions =
1888 readl(ioaddr + 0x57004) + readl(ioaddr + 0x57008);
1890 /* The chip only need report frame silently dropped. */
/* RxDMAStatus is a clear-on-write counter: accumulate then reset. */
1891 np->stats.rx_dropped += readw(ioaddr + RxDMAStatus);
1892 writew(0, ioaddr + RxDMAStatus);
1893 np->stats.rx_crc_errors = readl(ioaddr + 0x5703C);
1894 np->stats.rx_frame_errors = readl(ioaddr + 0x57040);
1895 np->stats.rx_length_errors = readl(ioaddr + 0x57058);
1896 np->stats.rx_missed_errors = readl(ioaddr + 0x5707C);
1902 /* Chips may use the upper or lower CRC bits, and may reverse and/or invert
1903 them.  Select the endian-ness that results in minimal calculations.
/* set_multicast_list method: program the VLAN filter (when enabled),
 * then select a receive filtering strategy -- promiscuous, all-multi,
 * the 16-entry perfect filter, or the 512-bit multicast hash table --
 * and commit it to RxFilterMode. */
1905 static void set_rx_mode(struct net_device *dev)
1907 long ioaddr = dev->base_addr;
1908 u32 rx_mode = MinVLANPrio;
1909 struct dev_mc_list *mclist;
1912 struct netdev_private *np = netdev_priv(dev);
1914 rx_mode |= VlanMode;
/* The chip's VLAN filter holds at most 32 VIDs. */
1917 long filter_addr = ioaddr + HashTable + 8;
1918 for (i = 0; i < VLAN_VID_MASK; i++) {
1919 if (np->vlgrp->vlan_devices[i]) {
1920 if (vlan_count >= 32)
1922 writew(cpu_to_be16(i), filter_addr);
/* All configured VLANs fit: use exact VID matching and zero the
 * remaining filter slots. */
1927 if (i == VLAN_VID_MASK) {
1928 rx_mode |= PerfectFilterVlan;
1929 while (vlan_count < 32) {
1930 writew(0, filter_addr);
1936 #endif /* VLAN_SUPPORT */
1938 if (dev->flags & IFF_PROMISC) { /* Set promiscuous. */
1939 rx_mode |= AcceptAll;
1940 } else if ((dev->mc_count > multicast_filter_limit)
1941 || (dev->flags & IFF_ALLMULTI)) {
1942 /* Too many to match, or accept all multicasts. */
1943 rx_mode |= AcceptBroadcast|AcceptAllMulticast|PerfectFilter;
1944 } else if (dev->mc_count <= 14) {
1945 /* Use the 16 element perfect filter, skip first two entries. */
1946 long filter_addr = ioaddr + PerfFilterTable + 2 * 16;
/* Multicast entries: address words written big-endian, high first. */
1948 for (i = 2, mclist = dev->mc_list; mclist && i < dev->mc_count + 2;
1949 i++, mclist = mclist->next) {
1950 eaddrs = (u16 *)mclist->dmi_addr;
1951 writew(cpu_to_be16(eaddrs[2]), filter_addr); filter_addr += 4;
1952 writew(cpu_to_be16(eaddrs[1]), filter_addr); filter_addr += 4;
1953 writew(cpu_to_be16(eaddrs[0]), filter_addr); filter_addr += 8;
/* Pad the unused slots with our own station address.
 * NOTE(review): word order here (0,1,2) differs from the multicast
 * loop above (2,1,0) -- confirm intended against full source. */
1955 eaddrs = (u16 *)dev->dev_addr;
1957 writew(cpu_to_be16(eaddrs[0]), filter_addr); filter_addr += 4;
1958 writew(cpu_to_be16(eaddrs[1]), filter_addr); filter_addr += 4;
1959 writew(cpu_to_be16(eaddrs[2]), filter_addr); filter_addr += 8;
1961 rx_mode |= AcceptBroadcast|PerfectFilter;
1963 /* Must use a multicast hash table. */
1966 u16 mc_filter[32] __attribute__ ((aligned(sizeof(long))));	/* Multicast hash filter */
1968 memset(mc_filter, 0, sizeof(mc_filter));
/* Hash = top 9 bits of the little-endian CRC of the address. */
1969 for (i = 0, mclist = dev->mc_list; mclist && i < dev->mc_count;
1970 i++, mclist = mclist->next) {
1971 int bit_nr = ether_crc_le(ETH_ALEN, mclist->dmi_addr) >> 23;
1972 __u32 *fptr = (__u32 *) &mc_filter[(bit_nr >> 4) & ~1];
1974 *fptr |= cpu_to_le32(1 << (bit_nr & 31));
1976 /* Clear the perfect filter list, skip first two entries. */
1977 filter_addr = ioaddr + PerfFilterTable + 2 * 16;
1978 eaddrs = (u16 *)dev->dev_addr;
1979 for (i = 2; i < 16; i++) {
1980 writew(cpu_to_be16(eaddrs[0]), filter_addr); filter_addr += 4;
1981 writew(cpu_to_be16(eaddrs[1]), filter_addr); filter_addr += 4;
1982 writew(cpu_to_be16(eaddrs[2]), filter_addr); filter_addr += 8;
/* Hash table rows are 16 bytes apart in register space. */
1984 for (filter_addr = ioaddr + HashTable, i = 0; i < 32; filter_addr+= 16, i++)
1985 writew(mc_filter[i], filter_addr);
1986 rx_mode |= AcceptBroadcast|PerfectFilter|HashFilter;
1988 writel(rx_mode, ioaddr + RxFilterMode);
/* ethtool .begin hook: refuse ethtool operations while the interface
 * is down.  NOTE(review): error-return line elided in this listing. */
1991 static int check_if_running(struct net_device *dev)
1993 if (!netif_running(dev))
/* ethtool .get_drvinfo: report driver name, version and PCI bus slot. */
1998 static void get_drvinfo(struct net_device *dev, struct ethtool_drvinfo *info)
2000 struct netdev_private *np = netdev_priv(dev);
2001 strcpy(info->driver, DRV_NAME);
2002 strcpy(info->version, DRV_VERSION);
2003 strcpy(info->bus_info, PCI_SLOT_NAME(np->pci_dev));
/* ethtool .get_settings: delegate to the generic MII helper under the
 * driver lock (serializes against the MDIO users in the ISR paths). */
2006 static int get_settings(struct net_device *dev, struct ethtool_cmd *ecmd)
2008 struct netdev_private *np = netdev_priv(dev);
2009 spin_lock_irq(&np->lock);
2010 mii_ethtool_gset(&np->mii_if, ecmd);
2011 spin_unlock_irq(&np->lock);
/* ethtool .set_settings: apply link settings via the generic MII
 * helper, serialized by the driver lock; returns the helper's result. */
2015 static int set_settings(struct net_device *dev, struct ethtool_cmd *ecmd)
2017 struct netdev_private *np = netdev_priv(dev);
2019 spin_lock_irq(&np->lock);
2020 res = mii_ethtool_sset(&np->mii_if, ecmd);
2021 spin_unlock_irq(&np->lock);
/* ethtool .nway_reset: restart PHY autonegotiation. */
2026 static int nway_reset(struct net_device *dev)
2028 struct netdev_private *np = netdev_priv(dev);
2029 return mii_nway_restart(&np->mii_if);
/* ethtool .get_link: report MII link status (non-zero = link up). */
2032 static u32 get_link(struct net_device *dev)
2034 struct netdev_private *np = netdev_priv(dev);
2035 return mii_link_ok(&np->mii_if);
/* ethtool message-level accessors.  Bodies are elided in this listing;
 * presumably they get/set the module 'debug' level -- confirm against
 * full source. */
2038 static u32 get_msglevel(struct net_device *dev)
2043 static void set_msglevel(struct net_device *dev, u32 val)
/* ethtool operations table wired into the netdev via SET_ETHTOOL_OPS. */
2048 static struct ethtool_ops ethtool_ops = {
2049 .begin = check_if_running,
2050 .get_drvinfo = get_drvinfo,
2051 .get_settings = get_settings,
2052 .set_settings = set_settings,
2053 .nway_reset = nway_reset,
2054 .get_link = get_link,
2055 .get_msglevel = get_msglevel,
2056 .set_msglevel = set_msglevel,
/* do_ioctl method: forward MII ioctls (SIOCGMIIREG etc.) to the generic
 * MII handler under the driver lock; only valid while the interface is
 * running.  After a register write to our PHY, the media configuration
 * is refreshed (tail elided in this listing -- presumably
 * check_duplex(); confirm against full source). */
2059 static int netdev_ioctl(struct net_device *dev, struct ifreq *rq, int cmd)
2061 struct netdev_private *np = netdev_priv(dev);
2062 struct mii_ioctl_data *data = if_mii(rq);
2065 if (!netif_running(dev))
2068 spin_lock_irq(&np->lock);
2069 rc = generic_mii_ioctl(&np->mii_if, data, cmd, NULL);
2070 spin_unlock_irq(&np->lock);
2072 if ((cmd == SIOCSMIIREG) && (data->phy_id == np->phys[0]))
/* Bring the interface down: stop the queue, mask interrupts, halt the
 * Rx/Tx engines, optionally dump ring state for debugging, release the
 * IRQ, and free every skb still held by the Rx and Tx rings. */
2078 static int netdev_close(struct net_device *dev)
2080 long ioaddr = dev->base_addr;
2081 struct netdev_private *np = netdev_priv(dev);
2084 netif_stop_queue(dev);
2088 printk(KERN_DEBUG "%s: Shutting down ethercard, Intr status %#8.8x.\n",
2089 dev->name, (int) readl(ioaddr + IntrStatus));
2090 printk(KERN_DEBUG "%s: Queue pointers were Tx %d / %d, Rx %d / %d.\n",
2091 dev->name, np->cur_tx, np->dirty_tx,
2092 np->cur_rx, np->dirty_rx);
2095 /* Disable interrupts by clearing the interrupt mask. */
2096 writel(0, ioaddr + IntrEnable);
2098 /* Stop the chip's Tx and Rx processes. */
2099 writel(0, ioaddr + GenCtrl);
/* Read back to flush the posted write before proceeding. */
2100 readl(ioaddr + GenCtrl);
/* Debug-only dump of the first few Tx/Rx descriptors. */
2103 printk(KERN_DEBUG"  Tx ring at %#llx:\n",
2104 (long long) np->tx_ring_dma);
2105 for (i = 0; i < 8 /* TX_RING_SIZE is huge! */; i++)
2106 printk(KERN_DEBUG "  #%d desc. %#8.8x %#llx -> %#8.8x.\n",
2107 i, le32_to_cpu(np->tx_ring[i].status),
2108 (long long) dma_to_cpu(np->tx_ring[i].addr),
2109 le32_to_cpu(np->tx_done_q[i].status));
2110 printk(KERN_DEBUG "  Rx ring at %#llx -> %p:\n",
2111 (long long) np->rx_ring_dma, np->rx_done_q);
2113 for (i = 0; i < 8 /* RX_RING_SIZE */; i++) {
2114 printk(KERN_DEBUG "  #%d desc. %#llx -> %#8.8x\n",
2115 i, (long long) dma_to_cpu(np->rx_ring[i].rxaddr), le32_to_cpu(np->rx_done_q[i].status));
2119 free_irq(dev->irq, dev);
2121 /* Free all the skbuffs in the Rx queue. */
2122 for (i = 0; i < RX_RING_SIZE; i++) {
2123 np->rx_ring[i].rxaddr = cpu_to_dma(0xBADF00D0); /* An invalid address. */
2124 if (np->rx_info[i].skb != NULL) {
2125 pci_unmap_single(np->pci_dev, np->rx_info[i].mapping, np->rx_buf_sz, PCI_DMA_FROMDEVICE);
2126 dev_kfree_skb(np->rx_info[i].skb);
2128 np->rx_info[i].skb = NULL;
2129 np->rx_info[i].mapping = 0;
/* Reap any Tx skbs the completion path never collected. */
2131 for (i = 0; i < TX_RING_SIZE; i++) {
2132 struct sk_buff *skb = np->tx_info[i].skb;
2135 pci_unmap_single(np->pci_dev,
2136 np->tx_info[i].mapping,
2137 skb_first_frag_len(skb), PCI_DMA_TODEVICE);
2138 np->tx_info[i].mapping = 0;
2140 np->tx_info[i].skb = NULL;
/* PCI remove hook: unwind starfire_init_one -- unregister the netdev,
 * free the DMA queue block, power the device down to D3, release the
 * mapped BAR and PCI regions, and free the netdev (which also frees
 * the private struct). */
2147 static void __devexit starfire_remove_one (struct pci_dev *pdev)
2149 struct net_device *dev = pci_get_drvdata(pdev);
2150 struct netdev_private *np = netdev_priv(dev);
2155 unregister_netdev(dev);
2158 pci_free_consistent(pdev, np->queue_mem_size, np->queue_mem, np->queue_mem_dma);
2161 /* XXX: add wakeup code -- requires firmware for MagicPacket */
2162 pci_set_power_state(pdev, 3);	/* go to sleep in D3 mode */
2163 pci_disable_device(pdev);
2165 iounmap((char *)dev->base_addr);
2166 pci_release_regions(pdev);
2168 pci_set_drvdata(pdev, NULL);
2169 free_netdev(dev);			/* Will also free np!! */
/* PCI driver registration table: probe/remove callbacks plus the
 * device-ID table that matches supported Starfire boards. */
2173 static struct pci_driver starfire_driver = {
2175 .probe = starfire_init_one,
2176 .remove = __devexit_p(starfire_remove_one),
2177 .id_table = starfire_pci_tbl,
/* Module init: sanity-check the DMA address width (the driver has not
 * been ported to 64-bit dma_addr_t), disable hardware checksumming if
 * the firmware is not compiled in, then register the PCI driver. */
2181 static int __init starfire_init (void)
2183 /* when a module, this is printed whether or not devices are found in probe */
2188 /* we can do this test only at run-time... sigh */
2189 if (sizeof(dma_addr_t) == sizeof(u64)) {
2190 printk("This driver has not been ported to this 64-bit architecture yet\n");
2193 #endif /* not ADDR_64BITS */
2194 #ifndef HAS_FIRMWARE
2195 /* unconditionally disable hw cksums if firmware is not present */
2196 enable_hw_cksum = 0;
2197 #endif /* not HAS_FIRMWARE */
2198 return pci_module_init (&starfire_driver);
/* Module exit: unregister the PCI driver; per-device teardown happens
 * in starfire_remove_one(). */
2202 static void __exit starfire_cleanup (void)
2204 pci_unregister_driver (&starfire_driver);
2208 module_init(starfire_init);
2209 module_exit(starfire_cleanup);