1 /* de2104x.c: A Linux PCI Ethernet driver for Intel/Digital 21040/1 chips. */
3 Copyright 2001,2003 Jeff Garzik <jgarzik@pobox.com>
5 Copyright 1994, 1995 Digital Equipment Corporation. [de4x5.c]
6 Written/copyright 1994-2001 by Donald Becker. [tulip.c]
8 This software may be used and distributed according to the terms of
9 the GNU General Public License (GPL), incorporated herein by reference.
10 Drivers based on or derived from this code fall under the GPL and must
11 retain the authorship, copyright and license notice. This file is not
12 a complete program and may only be used when the entire operating
13 system is licensed under the GPL.
15 See the file COPYING in this distribution for more information.
17 TODO, in rough priority order:
18 * Support forcing media type with a module parameter,
19 like dl2k.c/sundance.c
20 * Constants (module parms?) for Rx work limit
21 * Complete reset on PciErr
22 * Jumbo frames / dev->change_mtu
23 * Adjust Rx FIFO threshold and Max Rx DMA burst on Rx FIFO error
24 * Adjust Tx FIFO threshold and Max Tx DMA burst on Tx FIFO error
25 * Implement Tx software interrupt mitigation via
30 #define DRV_NAME "de2104x"
31 #define DRV_VERSION "0.7"
32 #define DRV_RELDATE "Mar 17, 2004"
34 #include <linux/config.h>
35 #include <linux/module.h>
36 #include <linux/kernel.h>
37 #include <linux/netdevice.h>
38 #include <linux/etherdevice.h>
39 #include <linux/init.h>
40 #include <linux/pci.h>
41 #include <linux/delay.h>
42 #include <linux/ethtool.h>
43 #include <linux/compiler.h>
44 #include <linux/rtnetlink.h>
45 #include <linux/crc32.h>
49 #include <asm/uaccess.h>
50 #include <asm/unaligned.h>
52 /* These identify the driver base version and may not be removed. */
53 static char version[] =
54 KERN_INFO DRV_NAME " PCI Ethernet driver v" DRV_VERSION " (" DRV_RELDATE ")\n";
56 MODULE_AUTHOR("Jeff Garzik <jgarzik@pobox.com>");
57 MODULE_DESCRIPTION("Intel/Digital 21040/1 series PCI Ethernet driver");
58 MODULE_LICENSE("GPL");
/* Bitmapped netif message-enable mask; -1 means "use the driver default"
   (presumably DE_DEF_MSG_ENABLE, resolved at probe time — confirm). */
60 static int debug = -1;
61 MODULE_PARM (debug, "i");
62 MODULE_PARM_DESC (debug, "de2104x bitmapped message enable number");
/* Set the copy breakpoint for the copy-only-tiny-buffer Rx structure.
 *
 * On architectures where unaligned access is illegal or expensive, copy
 * every received frame (<= 1518 bytes, i.e. all of them) into a freshly
 * allocated, properly aligned skb; elsewhere copy only small frames and
 * hand large ones up in the original ring buffer.
 *
 * Fix: the sparc test previously read "__sparc_" (one trailing
 * underscore), which never matches the compiler-predefined "__sparc__"
 * macro, so sparc silently got the 100-byte default.
 */
#if defined(__alpha__) || defined(__arm__) || defined(__hppa__) \
	|| defined(__sparc__) || defined(__ia64__) \
	|| defined(__sh__) || defined(__mips__)
static int rx_copybreak = 1518;
#else
static int rx_copybreak = 100;
#endif
/* rx_copybreak is tunable at module load time. */
72 MODULE_PARM (rx_copybreak, "i");
73 MODULE_PARM_DESC (rx_copybreak, "de2104x Breakpoint at which Rx packets are copied");
75 #define PFX DRV_NAME ": "
77 #define DE_DEF_MSG_ENABLE (NETIF_MSG_DRV | \
85 #define DE_RX_RING_SIZE 64
86 #define DE_TX_RING_SIZE 64
87 #define DE_RING_BYTES \
88 ((sizeof(struct de_desc) * DE_RX_RING_SIZE) + \
89 (sizeof(struct de_desc) * DE_TX_RING_SIZE))
/* Ring sizes are powers of two, so index wrap-around is a cheap mask. */
90 #define NEXT_TX(N) (((N) + 1) & (DE_TX_RING_SIZE - 1))
91 #define NEXT_RX(N) (((N) + 1) & (DE_RX_RING_SIZE - 1))
92 #define TX_BUFFS_AVAIL(CP) \
93 (((CP)->tx_tail <= (CP)->tx_head) ? \
94 (CP)->tx_tail + (DE_TX_RING_SIZE - 1) - (CP)->tx_head : \
95 (CP)->tx_tail - (CP)->tx_head - 1)
97 #define PKT_BUF_SZ 1536 /* Size of each temporary Rx buffer.*/
/* Sentinel "skb" pointers marking Tx ring slots that carry no real skb
   (setup-frame and dummy entries); compared against in de_tx()/de_clean_rings(). */
100 #define DE_SETUP_SKB ((struct sk_buff *) 1)
101 #define DE_DUMMY_SKB ((struct sk_buff *) 2)
102 #define DE_SETUP_FRAME_WORDS 96
103 #define DE_EEPROM_WORDS 256
104 #define DE_EEPROM_SIZE (DE_EEPROM_WORDS * sizeof(u16))
105 #define DE_MAX_MEDIA 5
/* Media type indices; used directly as offsets into the csr13/14/15 tables. */
107 #define DE_MEDIA_TP_AUTO 0
108 #define DE_MEDIA_BNC 1
109 #define DE_MEDIA_AUI 2
110 #define DE_MEDIA_TP 3
111 #define DE_MEDIA_TP_FD 4
112 #define DE_MEDIA_INVALID DE_MAX_MEDIA
113 #define DE_MEDIA_FIRST 0
114 #define DE_MEDIA_LAST (DE_MAX_MEDIA - 1)
115 #define DE_AUI_BNC (SUPPORTED_AUI | SUPPORTED_BNC)
/* Media-poll intervals: slow when link is up, fast while hunting for media. */
117 #define DE_TIMER_LINK (60 * HZ)
118 #define DE_TIMER_NO_LINK (5 * HZ)
120 #define DE_NUM_REGS 16
121 #define DE_REGS_SIZE (DE_NUM_REGS * sizeof(u32))
122 #define DE_REGS_VER 1
124 /* Time in jiffies before concluding the transmitter is hung. */
125 #define TX_TIMEOUT (6*HZ)
127 #define DE_UNALIGNED_16(a) (u16)(get_unaligned((u16 *)(a)))
129 /* This is a mysterious value that can be written to CSR11 in the 21040 (only)
130 to support a pre-NWay full-duplex signaling mechanism using short frames.
131 No one knows what it should be, but if left at its default value some
132 10base2(!) packets trigger a full-duplex-request interrupt. */
133 #define FULL_DUPLEX_MAGIC 0x6969
156 CacheAlign16 = 0x00008000,
157 BurstLen4 = 0x00000400,
160 NormalTxPoll = (1 << 0),
161 NormalRxPoll = (1 << 0),
163 /* Tx/Rx descriptor status bits */
166 RxErrLong = (1 << 7),
168 RxErrFIFO = (1 << 0),
169 RxErrRunt = (1 << 11),
170 RxErrFrame = (1 << 14),
172 FirstFrag = (1 << 29),
173 LastFrag = (1 << 30),
175 TxFIFOUnder = (1 << 1),
176 TxLinkFail = (1 << 2) | (1 << 10) | (1 << 11),
179 TxJabber = (1 << 14),
180 SetupFrame = (1 << 27),
191 TxState = (1 << 22) | (1 << 21) | (1 << 20),
192 RxState = (1 << 19) | (1 << 18) | (1 << 17),
193 LinkFail = (1 << 12),
195 RxStopped = (1 << 8),
196 TxStopped = (1 << 1),
199 TxEnable = (1 << 13),
201 RxTx = TxEnable | RxEnable,
202 FullDuplex = (1 << 9),
203 AcceptAllMulticast = (1 << 7),
204 AcceptAllPhys = (1 << 6),
206 MacModeClear = (1<<12) | (1<<11) | (1<<10) | (1<<8) | (1<<3) |
207 RxTx | BOCnt | AcceptAllPhys | AcceptAllMulticast,
210 EE_SHIFT_CLK = 0x02, /* EEPROM shift clock. */
211 EE_CS = 0x01, /* EEPROM chip select. */
212 EE_DATA_WRITE = 0x04, /* Data from the Tulip to EEPROM. */
215 EE_DATA_READ = 0x08, /* Data from the EEPROM chip. */
216 EE_ENB = (0x4800 | EE_CS),
218 /* The EEPROM commands include the always-set leading bit. */
222 RxMissedOver = (1 << 16),
223 RxMissedMask = 0xffff,
225 /* SROM-related bits */
227 MediaBlockMask = 0x3f,
228 MediaCustomCSRs = (1 << 6),
231 PM_Sleep = (1 << 31),
232 PM_Snooze = (1 << 30),
233 PM_Mask = PM_Sleep | PM_Snooze,
236 NWayState = (1 << 14) | (1 << 13) | (1 << 12),
237 NWayRestart = (1 << 12),
238 NonselPortActive = (1 << 9),
239 LinkFailStatus = (1 << 2),
240 NetCxnErr = (1 << 1),
/* Interrupt sources the driver services; everything else stays masked. */
243 static const u32 de_intr_mask =
244 IntrOK | IntrErr | RxIntr | RxEmpty | TxIntr | TxEmpty |
245 LinkPass | LinkFail | PciErr;
248 * Set the programmable burst length to 4 longwords for all:
249 * DMA errors result without these values. Cache align 16 long.
251 static const u32 de_bus_mode = CacheAlign16 | BurstLen4;
253 struct de_srom_media_block {
258 } __attribute__((packed));
260 struct de_srom_info_leaf {
264 } __attribute__((packed));
274 u16 type; /* DE_MEDIA_xxx */
291 struct net_device *dev;
294 struct de_desc *rx_ring;
295 struct de_desc *tx_ring;
296 struct ring_info tx_skb[DE_TX_RING_SIZE];
297 struct ring_info rx_skb[DE_RX_RING_SIZE];
303 struct net_device_stats net_stats;
305 struct pci_dev *pdev;
307 u16 setup_frame[DE_SETUP_FRAME_WORDS];
312 struct media_info media[DE_MAX_MEDIA];
313 struct timer_list media_timer;
317 unsigned de21040 : 1;
318 unsigned media_lock : 1;
322 static void de_set_rx_mode (struct net_device *dev);
323 static void de_tx (struct de_private *de);
324 static void de_clean_rings (struct de_private *de);
325 static void de_media_interrupt (struct de_private *de, u32 status);
326 static void de21040_media_timer (unsigned long data);
327 static void de21041_media_timer (unsigned long data);
328 static unsigned int de_ok_to_advertise (struct de_private *de, u32 new_media);
331 static struct pci_device_id de_pci_tbl[] = {
332 { PCI_VENDOR_ID_DEC, PCI_DEVICE_ID_DEC_TULIP,
333 PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0 },
334 { PCI_VENDOR_ID_DEC, PCI_DEVICE_ID_DEC_TULIP_PLUS,
335 PCI_ANY_ID, PCI_ANY_ID, 0, 0, 1 },
338 MODULE_DEVICE_TABLE(pci, de_pci_tbl);
340 static const char * const media_name[DE_MAX_MEDIA] = {
348 /* 21040 transceiver register settings:
349 * TP AUTO(unused), BNC(unused), AUI, TP, TP FD*/
350 static u16 t21040_csr13[] = { 0, 0, 0x8F09, 0x8F01, 0x8F01, };
351 static u16 t21040_csr14[] = { 0, 0, 0x0705, 0xFFFF, 0xFFFD, };
352 static u16 t21040_csr15[] = { 0, 0, 0x0006, 0x0000, 0x0000, };
354 /* 21041 transceiver register settings: TP AUTO, BNC, AUI, TP, TP FD*/
355 static u16 t21041_csr13[] = { 0xEF01, 0xEF09, 0xEF09, 0xEF01, 0xEF09, };
356 static u16 t21041_csr14[] = { 0xFFFF, 0xF7FD, 0xF7FD, 0x6F3F, 0x6F3D, };
357 static u16 t21041_csr15[] = { 0x0008, 0x0006, 0x000E, 0x0008, 0x0008, };
360 static inline unsigned long
361 msec_to_jiffies(unsigned long ms)
363 return (((ms)*HZ+999)/1000);
367 #define dr32(reg) readl(de->regs + (reg))
368 #define dw32(reg,val) writel((val), de->regs + (reg))
/* Account for a receive error reported in descriptor 'status' for ring
 * slot 'rx_tail': log it (if enabled) and bump the relevant
 * de->net_stats rx error counters.  Called from de_rx() only. */
371 static void de_rx_err_acct (struct de_private *de, unsigned rx_tail,
374 if (netif_msg_rx_err (de))
376 "%s: rx err, slot %d status 0x%x len %d\n",
377 de->dev->name, rx_tail, status, len);
/* NOTE(review): 0x38000300/0x0300 appear to encode the frag/error state
   bits of the 21040 Rx descriptor — confirm against the chip manual. */
379 if ((status & 0x38000300) != 0x0300) {
380 /* Ignore earlier buffers. */
381 if ((status & 0xffff) != 0x7fff) {
382 if (netif_msg_rx_err(de))
383 printk(KERN_WARNING "%s: Oversized Ethernet frame "
384 "spanned multiple buffers, status %8.8x!\n",
385 de->dev->name, status);
386 de->net_stats.rx_length_errors++;
388 } else if (status & RxError) {
389 /* There was a fatal error. */
390 de->net_stats.rx_errors++; /* end of a packet.*/
391 if (status & 0x0890) de->net_stats.rx_length_errors++;
392 if (status & RxErrCRC) de->net_stats.rx_crc_errors++;
393 if (status & RxErrFIFO) de->net_stats.rx_fifo_errors++;
/* Receive path: walk the Rx ring from de->rx_tail, handing completed
 * frames to the stack.  Frames <= rx_copybreak bytes are copied into a
 * fresh skb so the large ring buffer can be reused; larger frames are
 * passed up directly and the ring slot gets a newly mapped buffer.
 * Each slot's descriptor is re-armed (DescOwn) before advancing. */
397 static void de_rx (struct de_private *de)
399 unsigned rx_tail = de->rx_tail;
400 unsigned rx_work = DE_RX_RING_SIZE;
407 struct sk_buff *skb, *copy_skb;
408 unsigned copying_skb, buflen;
410 skb = de->rx_skb[rx_tail].skb;
414 status = le32_to_cpu(de->rx_ring[rx_tail].opts1);
415 if (status & DescOwn)
/* Length field includes the 4-byte CRC; strip it. */
418 len = ((status >> 16) & 0x7ff) - 4;
419 mapping = de->rx_skb[rx_tail].mapping;
421 if (unlikely(drop)) {
422 de->net_stats.rx_dropped++;
426 if (unlikely((status & 0x38008300) != 0x0300)) {
427 de_rx_err_acct(de, rx_tail, status, len);
431 copying_skb = (len <= rx_copybreak);
433 if (unlikely(netif_msg_rx_status(de)))
434 printk(KERN_DEBUG "%s: rx slot %d status 0x%x len %d copying? %d\n",
435 de->dev->name, rx_tail, status, len,
438 buflen = copying_skb ? (len + RX_OFFSET) : de->rx_buf_sz;
439 copy_skb = dev_alloc_skb (buflen);
440 if (unlikely(!copy_skb)) {
441 de->net_stats.rx_dropped++;
446 copy_skb->dev = de->dev;
/* Not copying: unmap the old buffer, give the slot the new one. */
449 pci_unmap_single(de->pdev, mapping,
450 buflen, PCI_DMA_FROMDEVICE);
454 de->rx_skb[rx_tail].mapping =
455 pci_map_single(de->pdev, copy_skb->tail,
456 buflen, PCI_DMA_FROMDEVICE);
457 de->rx_skb[rx_tail].skb = copy_skb;
/* Copying: sync for CPU, memcpy into the small skb, resync for device. */
459 pci_dma_sync_single_for_cpu(de->pdev, mapping, len, PCI_DMA_FROMDEVICE);
460 skb_reserve(copy_skb, RX_OFFSET);
461 memcpy(skb_put(copy_skb, len), skb->tail, len);
463 pci_dma_sync_single_for_device(de->pdev, mapping, len, PCI_DMA_FROMDEVICE);
465 /* We'll reuse the original ring buffer. */
469 skb->protocol = eth_type_trans (skb, de->dev);
471 de->net_stats.rx_packets++;
472 de->net_stats.rx_bytes += skb->len;
473 de->dev->last_rx = jiffies;
475 if (rc == NET_RX_DROP)
/* Hand the descriptor back to the chip and advance. */
479 de->rx_ring[rx_tail].opts1 = cpu_to_le32(DescOwn);
480 if (rx_tail == (DE_RX_RING_SIZE - 1))
481 de->rx_ring[rx_tail].opts2 =
482 cpu_to_le32(RingEnd | de->rx_buf_sz);
484 de->rx_ring[rx_tail].opts2 = cpu_to_le32(de->rx_buf_sz);
485 de->rx_ring[rx_tail].addr1 = cpu_to_le32(mapping);
486 rx_tail = NEXT_RX(rx_tail);
490 printk(KERN_WARNING "%s: rx work limit reached\n", de->dev->name);
492 de->rx_tail = rx_tail;
/* Shared interrupt handler.  Acks the status register, then dispatches:
 * Rx work, Tx completion + media events (under de->lock), and PCI bus
 * errors.  The 0xFFFF check guards against reading a removed card. */
495 static irqreturn_t de_interrupt (int irq, void *dev_instance, struct pt_regs *regs)
497 struct net_device *dev = dev_instance;
498 struct de_private *de = dev->priv;
501 status = dr32(MacStatus);
502 if ((!(status & (IntrOK|IntrErr))) || (status == 0xFFFF))
505 if (netif_msg_intr(de))
506 printk(KERN_DEBUG "%s: intr, status %08x mode %08x desc %u/%u/%u\n",
507 dev->name, status, dr32(MacMode), de->rx_tail, de->tx_head, de->tx_tail);
/* Write-1-to-clear: ack exactly the bits we observed. */
509 dw32(MacStatus, status);
511 if (status & (RxIntr | RxEmpty)) {
513 if (status & RxEmpty)
514 dw32(RxPoll, NormalRxPoll);
517 spin_lock(&de->lock);
519 if (status & (TxIntr | TxEmpty))
522 if (status & (LinkPass | LinkFail))
523 de_media_interrupt(de, status);
525 spin_unlock(&de->lock);
527 if (status & PciErr) {
/* Read-then-write PCI_STATUS clears the latched error bits. */
530 pci_read_config_word(de->pdev, PCI_STATUS, &pci_status);
531 pci_write_config_word(de->pdev, PCI_STATUS, pci_status);
532 printk(KERN_ERR "%s: PCI bus error, status=%08x, PCI status=%04x\n",
533 dev->name, status, pci_status);
/* Tx completion: reap finished descriptors from tx_tail up to tx_head,
 * unmapping buffers, updating stats, and freeing skbs.  Slots holding
 * DE_DUMMY_SKB/DE_SETUP_SKB sentinels carry no real packet.  Wakes the
 * queue when enough slots free up.  Caller holds de->lock (called from
 * the interrupt handler). */
539 static void de_tx (struct de_private *de)
541 unsigned tx_head = de->tx_head;
542 unsigned tx_tail = de->tx_tail;
544 while (tx_tail != tx_head) {
549 status = le32_to_cpu(de->tx_ring[tx_tail].opts1);
550 if (status & DescOwn)
553 skb = de->tx_skb[tx_tail].skb;
556 if (unlikely(skb == DE_DUMMY_SKB))
559 if (unlikely(skb == DE_SETUP_SKB)) {
560 pci_unmap_single(de->pdev, de->tx_skb[tx_tail].mapping,
561 sizeof(de->setup_frame), PCI_DMA_TODEVICE);
565 pci_unmap_single(de->pdev, de->tx_skb[tx_tail].mapping,
566 skb->len, PCI_DMA_TODEVICE);
/* Stats are accounted only on the descriptor carrying the last fragment. */
568 if (status & LastFrag) {
569 if (status & TxError) {
570 if (netif_msg_tx_err(de))
571 printk(KERN_DEBUG "%s: tx err, status 0x%x\n",
572 de->dev->name, status);
573 de->net_stats.tx_errors++;
575 de->net_stats.tx_window_errors++;
576 if (status & TxMaxCol)
577 de->net_stats.tx_aborted_errors++;
578 if (status & TxLinkFail)
579 de->net_stats.tx_carrier_errors++;
580 if (status & TxFIFOUnder)
581 de->net_stats.tx_fifo_errors++;
583 de->net_stats.tx_packets++;
584 de->net_stats.tx_bytes += skb->len;
585 if (netif_msg_tx_done(de))
586 printk(KERN_DEBUG "%s: tx done, slot %d\n", de->dev->name, tx_tail);
588 dev_kfree_skb_irq(skb);
592 de->tx_skb[tx_tail].skb = NULL;
594 tx_tail = NEXT_TX(tx_tail);
597 de->tx_tail = tx_tail;
599 if (netif_queue_stopped(de->dev) && (TX_BUFFS_AVAIL(de) > (DE_TX_RING_SIZE / 4)))
600 netif_wake_queue(de->dev);
/* hard_start_xmit: queue one skb as a single FirstFrag|LastFrag
 * descriptor at de->tx_head, then poke TxPoll so the chip fetches it.
 * Stops the queue when the ring fills.  Serialized with de_tx() via
 * de->lock. */
603 static int de_start_xmit (struct sk_buff *skb, struct net_device *dev)
605 struct de_private *de = dev->priv;
606 unsigned int entry, tx_free;
607 u32 mapping, len, flags = FirstFrag | LastFrag;
610 spin_lock_irq(&de->lock);
612 tx_free = TX_BUFFS_AVAIL(de);
614 netif_stop_queue(dev);
615 spin_unlock_irq(&de->lock);
622 txd = &de->tx_ring[entry];
625 mapping = pci_map_single(de->pdev, skb->data, len, PCI_DMA_TODEVICE);
626 if (entry == (DE_TX_RING_SIZE - 1))
/* NOTE(review): interrupt mitigation — TxIntr is apparently requested
   only periodically, not on every packet; confirm against missing lines. */
628 if (!tx_free || (tx_free == (DE_TX_RING_SIZE / 2)))
631 txd->opts2 = cpu_to_le32(flags);
632 txd->addr1 = cpu_to_le32(mapping);
634 de->tx_skb[entry].skb = skb;
635 de->tx_skb[entry].mapping = mapping;
/* Hand ownership to the chip only after the rest of the descriptor is set. */
638 txd->opts1 = cpu_to_le32(DescOwn);
641 de->tx_head = NEXT_TX(entry);
642 if (netif_msg_tx_queued(de))
643 printk(KERN_DEBUG "%s: tx queued, slot %d, skblen %d\n",
644 dev->name, entry, skb->len);
647 netif_stop_queue(dev);
649 spin_unlock_irq(&de->lock);
651 /* Trigger an immediate transmit demand. */
652 dw32(TxPoll, NormalTxPoll);
653 dev->trans_start = jiffies;
658 /* Set or clear the multicast filter for this adaptor.
659 Note that we only use exclusion around actually queueing the
660 new frame, not around filling de->setup_frame. This is non-deterministic
661 when re-entered but still correct. */
/* Set bit i in a little-endian bit array (used for the 512-bit hash table). */
664 #define set_bit_le(i,p) do { ((char *)(p))[(i)/8] |= (1<<((i)%8)); } while(0)
/* Build a 21041 "hash filtering" setup frame: a 512-bit multicast hash
 * table (CRC-indexed) plus our own unicast address in the final slot.
 * Each 16-bit word is stored twice, as the chip's setup-frame layout
 * requires. */
666 static void build_setup_frame_hash(u16 *setup_frm, struct net_device *dev)
668 struct de_private *de = dev->priv;
670 struct dev_mc_list *mclist;
674 memset(hash_table, 0, sizeof(hash_table));
675 set_bit_le(255, hash_table); /* Broadcast entry */
676 /* This should work on big-endian machines as well. */
677 for (i = 0, mclist = dev->mc_list; mclist && i < dev->mc_count;
678 i++, mclist = mclist->next) {
679 int index = ether_crc_le(ETH_ALEN, mclist->dmi_addr) & 0x1ff;
681 set_bit_le(index, hash_table);
683 for (i = 0; i < 32; i++) {
684 *setup_frm++ = hash_table[i];
685 *setup_frm++ = hash_table[i];
687 setup_frm = &de->setup_frame[13*6];
690 /* Fill the final entry with our physical address. */
691 eaddrs = (u16 *)dev->dev_addr;
692 *setup_frm++ = eaddrs[0]; *setup_frm++ = eaddrs[0];
693 *setup_frm++ = eaddrs[1]; *setup_frm++ = eaddrs[1];
694 *setup_frm++ = eaddrs[2]; *setup_frm++ = eaddrs[2];
/* Build a "perfect filtering" setup frame: up to 14 multicast addresses
 * in the chip's 16-entry exact-match table, unused entries padded with
 * the broadcast address, and our own address in the final entry. */
697 static void build_setup_frame_perfect(u16 *setup_frm, struct net_device *dev)
699 struct de_private *de = dev->priv;
700 struct dev_mc_list *mclist;
704 /* We have <= 14 addresses so we can use the wonderful
705 16 address perfect filtering of the Tulip. */
706 for (i = 0, mclist = dev->mc_list; i < dev->mc_count;
707 i++, mclist = mclist->next) {
708 eaddrs = (u16 *)mclist->dmi_addr;
709 *setup_frm++ = *eaddrs; *setup_frm++ = *eaddrs++;
710 *setup_frm++ = *eaddrs; *setup_frm++ = *eaddrs++;
711 *setup_frm++ = *eaddrs; *setup_frm++ = *eaddrs++;
713 /* Fill the unused entries with the broadcast address. */
714 memset(setup_frm, 0xff, (15-i)*12);
715 setup_frm = &de->setup_frame[15*6];
717 /* Fill the final entry with our physical address. */
718 eaddrs = (u16 *)dev->dev_addr;
719 *setup_frm++ = eaddrs[0]; *setup_frm++ = eaddrs[0];
720 *setup_frm++ = eaddrs[1]; *setup_frm++ = eaddrs[1];
721 *setup_frm++ = eaddrs[2]; *setup_frm++ = eaddrs[2];
/* Program the Rx filter.  Promiscuous/allmulti are handled purely via
 * MacMode bits; otherwise a setup frame (hash or perfect, depending on
 * mc_count) is built and queued on the Tx ring, preceded by a dummy
 * descriptor to work around a chip erratum.  Caller holds de->lock. */
725 static void __de_set_rx_mode (struct net_device *dev)
727 struct de_private *de = dev->priv;
732 struct de_desc *dummy_txd = NULL;
734 macmode = dr32(MacMode) & ~(AcceptAllMulticast | AcceptAllPhys);
736 if (dev->flags & IFF_PROMISC) { /* Set promiscuous. */
737 macmode |= AcceptAllMulticast | AcceptAllPhys;
741 if ((dev->mc_count > 1000) || (dev->flags & IFF_ALLMULTI)) {
742 /* Too many to filter well -- accept all multicasts. */
743 macmode |= AcceptAllMulticast;
747 /* Note that only the low-address shortword of setup_frame is valid!
748 The values are doubled for big-endian architectures. */
749 if (dev->mc_count > 14) /* Must use a multicast hash table. */
750 build_setup_frame_hash (de->setup_frame, dev);
752 build_setup_frame_perfect (de->setup_frame, dev);
755 * Now add this frame to the Tx list.
760 /* Avoid a chip errata by prefixing a dummy entry. */
762 de->tx_skb[entry].skb = DE_DUMMY_SKB;
764 dummy_txd = &de->tx_ring[entry];
765 dummy_txd->opts2 = (entry == (DE_TX_RING_SIZE - 1)) ?
766 cpu_to_le32(RingEnd) : 0;
767 dummy_txd->addr1 = 0;
769 /* Must set DescOwned later to avoid race with chip */
771 entry = NEXT_TX(entry);
774 de->tx_skb[entry].skb = DE_SETUP_SKB;
775 de->tx_skb[entry].mapping = mapping =
776 pci_map_single (de->pdev, de->setup_frame,
777 sizeof (de->setup_frame), PCI_DMA_TODEVICE);
779 /* Put the setup frame on the Tx list. */
780 txd = &de->tx_ring[entry];
781 if (entry == (DE_TX_RING_SIZE - 1))
782 txd->opts2 = cpu_to_le32(SetupFrame | RingEnd | sizeof (de->setup_frame));
784 txd->opts2 = cpu_to_le32(SetupFrame | sizeof (de->setup_frame));
785 txd->addr1 = cpu_to_le32(mapping);
/* Give the setup descriptor to the chip first, then the dummy, so the
   chip never sees the dummy owned while the real frame is not ready. */
788 txd->opts1 = cpu_to_le32(DescOwn);
792 dummy_txd->opts1 = cpu_to_le32(DescOwn);
796 de->tx_head = NEXT_TX(entry);
798 if (TX_BUFFS_AVAIL(de) < 0)
800 if (TX_BUFFS_AVAIL(de) == 0)
801 netif_stop_queue(dev);
803 /* Trigger an immediate transmit demand. */
804 dw32(TxPoll, NormalTxPoll);
807 if (macmode != dr32(MacMode))
808 dw32(MacMode, macmode);
/* Locked wrapper around __de_set_rx_mode(). */
811 static void de_set_rx_mode (struct net_device *dev)
814 struct de_private *de = dev->priv;
816 spin_lock_irqsave (&de->lock, flags);
817 __de_set_rx_mode(dev);
818 spin_unlock_irqrestore (&de->lock, flags);
/* Fold the CSR8 missed-frame counter into net_stats; if the overflow
 * bit is set the low 16 bits are saturated, so credit the full mask. */
821 static inline void de_rx_missed(struct de_private *de, u32 rx_missed)
823 if (unlikely(rx_missed & RxMissedOver))
824 de->net_stats.rx_missed_errors += RxMissedMask;
826 de->net_stats.rx_missed_errors += (rx_missed & RxMissedMask);
829 static void __de_get_stats(struct de_private *de)
831 u32 tmp = dr32(RxMissed); /* self-clearing */
833 de_rx_missed(de, tmp);
/* get_stats hook: refresh hardware-maintained counters (only while the
 * device is running and present), then return the software stats. */
836 static struct net_device_stats *de_get_stats(struct net_device *dev)
838 struct de_private *de = dev->priv;
840 /* The chip reports only silently-dropped (missed) frames itself. */
841 spin_lock_irq(&de->lock);
842 if (netif_running(dev) && netif_device_present(dev))
844 spin_unlock_irq(&de->lock);
846 return &de->net_stats;
/* Nonzero while the Rx or Tx DMA engine state machine is active. */
849 static inline int de_is_running (struct de_private *de)
851 return (dr32(MacStatus) & (RxState | TxState)) ? 1 : 0;
/* Disable the Rx/Tx DMA engines and busy-wait (bounded) until the chip
 * reports both state machines stopped. */
854 static void de_stop_rxtx (struct de_private *de)
857 unsigned int work = 1000;
859 macmode = dr32(MacMode);
860 if (macmode & RxTx) {
861 dw32(MacMode, macmode & ~RxTx);
866 if (!de_is_running(de))
871 printk(KERN_WARNING "%s: timeout expired stopping DMA\n", de->dev->name);
/* Enable the Rx/Tx DMA engines if not already enabled. */
874 static inline void de_start_rxtx (struct de_private *de)
878 macmode = dr32(MacMode);
879 if ((macmode & RxTx) != RxTx) {
880 dw32(MacMode, macmode | RxTx);
/* Full datapath shutdown: stop DMA, ack pending status, reset indices. */
885 static void de_stop_hw (struct de_private *de)
893 dw32(MacStatus, dr32(MacStatus))
898 de->tx_head = de->tx_tail = 0;
/* Carrier-state transitions, with optional logging. */
901 static void de_link_up(struct de_private *de)
903 if (!netif_carrier_ok(de->dev)) {
904 netif_carrier_on(de->dev);
905 if (netif_msg_link(de))
906 printk(KERN_INFO "%s: link up, media %s\n",
907 de->dev->name, media_name[de->media_type]);
911 static void de_link_down(struct de_private *de)
913 if (netif_carrier_ok(de->dev)) {
914 netif_carrier_off(de->dev);
915 if (netif_msg_link(de))
916 printk(KERN_INFO "%s: link down\n", de->dev->name);
/* Program the SIA (CSR13-15) for de->media_type and set/clear the
 * full-duplex bit in MacMode.  The PHY is reset (CSR13 = 0) before the
 * new values are loaded, per the datasheet sequence. */
920 static void de_set_media (struct de_private *de)
922 unsigned media = de->media_type;
923 u32 macmode = dr32(MacMode);
925 if (de_is_running(de))
929 dw32(CSR11, FULL_DUPLEX_MAGIC);
930 dw32(CSR13, 0); /* Reset phy */
931 dw32(CSR14, de->media[media].csr14);
932 dw32(CSR15, de->media[media].csr15);
933 dw32(CSR13, de->media[media].csr13);
935 /* must delay 10ms before writing to other registers,
940 if (media == DE_MEDIA_TP_FD)
941 macmode |= FullDuplex;
943 macmode &= ~FullDuplex;
945 if (netif_msg_link(de)) {
946 printk(KERN_INFO "%s: set link %s\n"
947 KERN_INFO "%s: mode 0x%x, sia 0x%x,0x%x,0x%x,0x%x\n"
948 KERN_INFO "%s: set mode 0x%x, set sia 0x%x,0x%x,0x%x\n",
949 de->dev->name, media_name[media],
950 de->dev->name, dr32(MacMode), dr32(SIAStatus),
951 dr32(CSR13), dr32(CSR14), dr32(CSR15),
952 de->dev->name, macmode, de->media[media].csr13,
953 de->media[media].csr14, de->media[media].csr15);
955 if (macmode != dr32(MacMode))
956 dw32(MacMode, macmode);
/* Pick the first entry of 'media' that we are allowed to advertise and
 * make it the current media type. */
959 static void de_next_media (struct de_private *de, u32 *media,
960 unsigned int n_media)
964 for (i = 0; i < n_media; i++) {
965 if (de_ok_to_advertise(de, media[i])) {
966 de->media_type = media[i];
/* 21040 media poll: with link, just re-arm the slow timer; without
 * link, toggle between AUI and TP (the 21040 has no BNC) and reload the
 * media settings under the lock. */
972 static void de21040_media_timer (unsigned long data)
974 struct de_private *de = (struct de_private *) data;
975 struct net_device *dev = de->dev;
976 u32 status = dr32(SIAStatus);
977 unsigned int carrier;
980 carrier = (status & NetCxnErr) ? 0 : 1;
983 if (de->media_type != DE_MEDIA_AUI && (status & LinkFailStatus))
986 de->media_timer.expires = jiffies + DE_TIMER_LINK;
987 add_timer(&de->media_timer);
988 if (!netif_carrier_ok(dev))
991 if (netif_msg_timer(de))
992 printk(KERN_INFO "%s: %s link ok, status %x\n",
993 dev->name, media_name[de->media_type],
1003 if (de->media_type == DE_MEDIA_AUI) {
1004 u32 next_state = DE_MEDIA_TP;
1005 de_next_media(de, &next_state, 1);
1007 u32 next_state = DE_MEDIA_AUI;
1008 de_next_media(de, &next_state, 1);
1011 spin_lock_irqsave(&de->lock, flags);
1013 spin_unlock_irqrestore(&de->lock, flags);
1018 de->media_timer.expires = jiffies + DE_TIMER_NO_LINK;
1019 add_timer(&de->media_timer);
1021 if (netif_msg_timer(de))
1022 printk(KERN_INFO "%s: no link, trying media %s, status %x\n",
1023 dev->name, media_name[de->media_type], status);
/* Return whether 'new_media' is permitted by the current advertise
 * mask (de->media_advertise).  Used to constrain media auto-switching. */
1026 static unsigned int de_ok_to_advertise (struct de_private *de, u32 new_media)
1028 switch (new_media) {
1029 case DE_MEDIA_TP_AUTO:
1030 if (!(de->media_advertise & ADVERTISED_Autoneg))
1032 if (!(de->media_advertise & (ADVERTISED_10baseT_Half | ADVERTISED_10baseT_Full)))
1036 if (!(de->media_advertise & ADVERTISED_BNC))
1040 if (!(de->media_advertise & ADVERTISED_AUI))
1044 if (!(de->media_advertise & ADVERTISED_10baseT_Half))
1047 case DE_MEDIA_TP_FD:
1048 if (!(de->media_advertise & ADVERTISED_10baseT_Full))
/* 21041 media poll: with link, re-arm the slow timer; without link, use
 * the chip's "non-selected port activity" hint to jump straight to the
 * likely media, otherwise rotate through the advertised media types.
 * Media reload happens under de->lock. */
1056 static void de21041_media_timer (unsigned long data)
1058 struct de_private *de = (struct de_private *) data;
1059 struct net_device *dev = de->dev;
1060 u32 status = dr32(SIAStatus);
1061 unsigned int carrier;
1062 unsigned long flags;
1064 carrier = (status & NetCxnErr) ? 0 : 1;
1067 if ((de->media_type == DE_MEDIA_TP_AUTO ||
1068 de->media_type == DE_MEDIA_TP ||
1069 de->media_type == DE_MEDIA_TP_FD) &&
1070 (status & LinkFailStatus))
1073 de->media_timer.expires = jiffies + DE_TIMER_LINK;
1074 add_timer(&de->media_timer);
1075 if (!netif_carrier_ok(dev))
1078 if (netif_msg_timer(de))
1079 printk(KERN_INFO "%s: %s link ok, mode %x status %x\n",
1080 dev->name, media_name[de->media_type],
1081 dr32(MacMode), status);
1087 /* if media type locked, don't switch media */
1091 /* if activity detected, use that as hint for new media type */
1092 if (status & NonselPortActive) {
1093 unsigned int have_media = 1;
1095 /* if AUI/BNC selected, then activity is on TP port */
1096 if (de->media_type == DE_MEDIA_AUI ||
1097 de->media_type == DE_MEDIA_BNC) {
1098 if (de_ok_to_advertise(de, DE_MEDIA_TP_AUTO))
1099 de->media_type = DE_MEDIA_TP_AUTO;
1104 /* TP selected. If there is only TP and BNC, then it's BNC */
1105 else if (((de->media_supported & DE_AUI_BNC) == SUPPORTED_BNC) &&
1106 de_ok_to_advertise(de, DE_MEDIA_BNC))
1107 de->media_type = DE_MEDIA_BNC;
1109 /* TP selected. If there is only TP and AUI, then it's AUI */
1110 else if (((de->media_supported & DE_AUI_BNC) == SUPPORTED_AUI) &&
1111 de_ok_to_advertise(de, DE_MEDIA_AUI))
1112 de->media_type = DE_MEDIA_AUI;
1114 /* otherwise, ignore the hint */
1123 * Absent or ambiguous activity hint, move to next advertised
1124 * media state. If de->media_type is left unchanged, this
1125 * simply resets the PHY and reloads the current media settings.
1127 if (de->media_type == DE_MEDIA_AUI) {
1128 u32 next_states[] = { DE_MEDIA_BNC, DE_MEDIA_TP_AUTO };
1129 de_next_media(de, next_states, ARRAY_SIZE(next_states));
1130 } else if (de->media_type == DE_MEDIA_BNC) {
1131 u32 next_states[] = { DE_MEDIA_TP_AUTO, DE_MEDIA_AUI };
1132 de_next_media(de, next_states, ARRAY_SIZE(next_states));
1134 u32 next_states[] = { DE_MEDIA_AUI, DE_MEDIA_BNC, DE_MEDIA_TP_AUTO };
1135 de_next_media(de, next_states, ARRAY_SIZE(next_states));
1139 spin_lock_irqsave(&de->lock, flags);
1141 spin_unlock_irqrestore(&de->lock, flags);
1146 de->media_timer.expires = jiffies + DE_TIMER_NO_LINK;
1147 add_timer(&de->media_timer);
1149 if (netif_msg_timer(de))
1150 printk(KERN_INFO "%s: no link, trying media %s, status %x\n",
1151 dev->name, media_name[de->media_type], status);
/* Handle LinkPass/LinkFail interrupt bits: update carrier state and
 * re-arm the media timer at the appropriate rate. */
1154 static void de_media_interrupt (struct de_private *de, u32 status)
1156 if (status & LinkPass) {
1158 mod_timer(&de->media_timer, jiffies + DE_TIMER_LINK);
1162 if (!(status & LinkFail))
1165 if (netif_carrier_ok(de->dev)) {
1167 mod_timer(&de->media_timer, jiffies + DE_TIMER_NO_LINK);
/* Software-reset the MAC via CmdReset in BusMode, then reprogram the
 * bus mode.  Returns nonzero on failure (e.g. card gone: reads as all
 * ones).  Sequencing follows de4x5.c / tulip.c precedent. */
1171 static int de_reset_mac (struct de_private *de)
1176 * Reset MAC. de4x5.c and tulip.c examined for "advice"
1180 if (dr32(BusMode) == 0xffffffff)
1183 /* Reset the chip, holding bit 0 set at least 50 PCI cycles. */
1184 dw32 (BusMode, CmdReset);
1187 dw32 (BusMode, de_bus_mode);
1190 for (tmp = 0; tmp < 5; tmp++) {
1197 status = dr32(MacStatus);
1198 if (status & (RxState | TxState))
1200 if (status == 0xffffffff)
/* Wake the chip out of PCI power-management sleep/snooze states. */
1205 static void de_adapter_wake (struct de_private *de)
1212 pci_read_config_dword(de->pdev, PCIPM, &pmctl);
1213 if (pmctl & PM_Mask) {
1215 pci_write_config_dword(de->pdev, PCIPM, pmctl);
1217 /* de4x5.c delays, so we do too */
1218 current->state = TASK_UNINTERRUPTIBLE;
1219 schedule_timeout(msec_to_jiffies(10));
/* Put the chip into its low-power state via the PCIPM config register. */
1223 static void de_adapter_sleep (struct de_private *de)
1230 pci_read_config_dword(de->pdev, PCIPM, &pmctl);
1232 pci_write_config_dword(de->pdev, PCIPM, pmctl);
/* Bring the hardware up: wake, reset MAC, program media, load ring base
 * addresses, enable Rx/Tx, unmask interrupts and set the Rx filter.
 * Returns 0 or a negative errno from de_reset_mac(). */
1235 static int de_init_hw (struct de_private *de)
1237 struct net_device *dev = de->dev;
1241 de_adapter_wake(de);
1243 macmode = dr32(MacMode) & ~MacModeClear;
1245 rc = de_reset_mac(de);
1249 de_set_media(de); /* reset phy */
/* Tx ring lives immediately after the Rx ring in the same DMA block. */
1251 dw32(RxRingAddr, de->ring_dma);
1252 dw32(TxRingAddr, de->ring_dma + (sizeof(struct de_desc) * DE_RX_RING_SIZE));
1254 dw32(MacMode, RxTx | macmode);
1256 dr32(RxMissed); /* self-clearing */
1258 dw32(IntrMask, de_intr_mask);
1260 de_set_rx_mode(dev);
1265 static int de_refill_rx (struct de_private *de)
1269 for (i = 0; i < DE_RX_RING_SIZE; i++) {
1270 struct sk_buff *skb;
1272 skb = dev_alloc_skb(de->rx_buf_sz);
1278 de->rx_skb[i].mapping = pci_map_single(de->pdev,
1279 skb->tail, de->rx_buf_sz, PCI_DMA_FROMDEVICE);
1280 de->rx_skb[i].skb = skb;
1282 de->rx_ring[i].opts1 = cpu_to_le32(DescOwn);
1283 if (i == (DE_RX_RING_SIZE - 1))
1284 de->rx_ring[i].opts2 =
1285 cpu_to_le32(RingEnd | de->rx_buf_sz);
1287 de->rx_ring[i].opts2 = cpu_to_le32(de->rx_buf_sz);
1288 de->rx_ring[i].addr1 = cpu_to_le32(de->rx_skb[i].mapping);
1289 de->rx_ring[i].addr2 = 0;
1299 static int de_init_rings (struct de_private *de)
1301 memset(de->tx_ring, 0, sizeof(struct de_desc) * DE_TX_RING_SIZE);
1302 de->tx_ring[DE_TX_RING_SIZE - 1].opts2 = cpu_to_le32(RingEnd);
1305 de->tx_head = de->tx_tail = 0;
1307 return de_refill_rx (de);
1310 static int de_alloc_rings (struct de_private *de)
1312 de->rx_ring = pci_alloc_consistent(de->pdev, DE_RING_BYTES, &de->ring_dma);
1315 de->tx_ring = &de->rx_ring[DE_RX_RING_SIZE];
1316 return de_init_rings(de);
/* Tear down ring state: zero the descriptors, unmap and free every Rx
 * skb, and unmap/free every real Tx skb (sentinel DE_DUMMY_SKB /
 * DE_SETUP_SKB slots carry no skb to free). */
1319 static void de_clean_rings (struct de_private *de)
1323 memset(de->rx_ring, 0, sizeof(struct de_desc) * DE_RX_RING_SIZE);
1324 de->rx_ring[DE_RX_RING_SIZE - 1].opts2 = cpu_to_le32(RingEnd);
1326 memset(de->tx_ring, 0, sizeof(struct de_desc) * DE_TX_RING_SIZE);
1327 de->tx_ring[DE_TX_RING_SIZE - 1].opts2 = cpu_to_le32(RingEnd);
1330 for (i = 0; i < DE_RX_RING_SIZE; i++) {
1331 if (de->rx_skb[i].skb) {
1332 pci_unmap_single(de->pdev, de->rx_skb[i].mapping,
1333 de->rx_buf_sz, PCI_DMA_FROMDEVICE);
1334 dev_kfree_skb(de->rx_skb[i].skb);
1338 for (i = 0; i < DE_TX_RING_SIZE; i++) {
1339 struct sk_buff *skb = de->tx_skb[i].skb;
1340 if ((skb) && (skb != DE_DUMMY_SKB)) {
1341 if (skb != DE_SETUP_SKB) {
1343 de->net_stats.tx_dropped++;
1344 pci_unmap_single(de->pdev,
1345 de->tx_skb[i].mapping,
1346 skb->len, PCI_DMA_TODEVICE);
1348 pci_unmap_single(de->pdev,
1349 de->tx_skb[i].mapping,
1350 sizeof(de->setup_frame),
1356 memset(&de->rx_skb, 0, sizeof(struct ring_info) * DE_RX_RING_SIZE);
1357 memset(&de->tx_skb, 0, sizeof(struct ring_info) * DE_TX_RING_SIZE);
/* Release the shared ring allocation (after de_clean_rings). */
1360 static void de_free_rings (struct de_private *de)
1363 pci_free_consistent(de->pdev, DE_RING_BYTES, de->rx_ring, de->ring_dma);
/* net_device open hook: size Rx buffers for the MTU, allocate rings,
 * init hardware, install the (shared) IRQ handler, start the queue and
 * kick off the media timer.  Returns 0 or a negative errno. */
1368 static int de_open (struct net_device *dev)
1370 struct de_private *de = dev->priv;
1372 unsigned long flags;
1374 if (netif_msg_ifup(de))
1375 printk(KERN_DEBUG "%s: enabling interface\n", dev->name);
1377 de->rx_buf_sz = (dev->mtu <= 1500 ? PKT_BUF_SZ : dev->mtu + 32);
1379 rc = de_alloc_rings(de);
1381 printk(KERN_ERR "%s: ring allocation failure, err=%d\n",
1386 rc = de_init_hw(de);
1388 printk(KERN_ERR "%s: h/w init failure, err=%d\n",
1393 rc = request_irq(dev->irq, de_interrupt, SA_SHIRQ, dev->name, dev);
1395 printk(KERN_ERR "%s: IRQ %d request failure, err=%d\n",
1396 dev->name, dev->irq, rc);
1400 netif_start_queue(dev);
1401 mod_timer(&de->media_timer, jiffies + DE_TIMER_NO_LINK);
1406 spin_lock_irqsave(&de->lock, flags);
1408 spin_unlock_irqrestore(&de->lock, flags);
/* net_device stop hook: kill the media timer, quiesce the queue and
 * carrier under the lock, free the IRQ, sleep the adapter and disable
 * the PCI device. */
1415 static int de_close (struct net_device *dev)
1417 struct de_private *de = dev->priv;
1418 unsigned long flags;
1420 if (netif_msg_ifdown(de))
1421 printk(KERN_DEBUG "%s: disabling interface\n", dev->name);
1423 del_timer_sync(&de->media_timer);
1425 spin_lock_irqsave(&de->lock, flags);
1427 netif_stop_queue(dev);
1428 netif_carrier_off(dev);
1429 spin_unlock_irqrestore(&de->lock, flags);
1431 free_irq(dev->irq, dev);
1434 de_adapter_sleep(de);
1435 pci_disable_device(de->pdev);
/* Tx watchdog (fires after TX_TIMEOUT with no completions): dump chip
 * state, quiesce with the IRQ disabled, then (in elided code) reset and
 * restart the interface before re-waking the queue. */
1439 static void de_tx_timeout (struct net_device *dev)
1441 struct de_private *de = dev->priv;
1443 printk(KERN_DEBUG "%s: NIC status %08x mode %08x sia %08x desc %u/%u/%u\n",
1444 dev->name, dr32(MacStatus), dr32(MacMode), dr32(SIAStatus),
1445 de->rx_tail, de->tx_head, de->tx_tail);
1447 del_timer_sync(&de->media_timer);
1449 disable_irq(dev->irq);
1450 spin_lock_irq(&de->lock);
1453 netif_stop_queue(dev);
1454 netif_carrier_off(dev);
1456 spin_unlock_irq(&de->lock);
1457 enable_irq(dev->irq);
1459 /* Update the error counts. */
1462 synchronize_irq(dev->irq);
1467 netif_wake_queue(dev);
/* __de_get_regs: snapshot all CSRs into buf for ethtool get_regs.
 * Caller must hold de->lock.  CSRs are spaced 8 bytes apart in MMIO
 * space, hence dr32(i * 8). */
1470 static void __de_get_regs(struct de_private *de, u8 *buf)
1473 u32 *rbuf = (u32 *)buf;
1476 for (i = 0; i < DE_NUM_REGS; i++)
1477 rbuf[i] = dr32(i * 8);
1479 /* handle self-clearing RxMissed counter, CSR8 */
1480 de_rx_missed(de, rbuf[8]);
/* __de_get_settings: fill an ethtool_cmd from the current media state.
 * Caller must hold de->lock.  Switch arms for AUI/BNC/TP are visible;
 * their case labels and breaks are elided in this listing. */
1483 static int __de_get_settings(struct de_private *de, struct ethtool_cmd *ecmd)
1485 ecmd->supported = de->media_supported;
1486 ecmd->transceiver = XCVR_INTERNAL;
1487 ecmd->phy_address = 0;
1488 ecmd->advertising = de->media_advertise;
1490 switch (de->media_type) {
1492 ecmd->port = PORT_AUI;
1496 ecmd->port = PORT_BNC;
1500 ecmd->port = PORT_TP;
/* These chips are 10 Mbit only. */
1501 ecmd->speed = SPEED_10;
/* Duplex is read back from the live MacMode register, not cached. */
1505 if (dr32(MacMode) & FullDuplex)
1506 ecmd->duplex = DUPLEX_FULL;
1508 ecmd->duplex = DUPLEX_HALF;
1511 ecmd->autoneg = AUTONEG_DISABLE;
1513 ecmd->autoneg = AUTONEG_ENABLE;
1515 /* ignore maxtxpkt, maxrxpkt for now */
/* __de_set_settings: validate and apply an ethtool_cmd.  Caller must
 * hold de->lock.  Rejects anything the hardware cannot do, maps the
 * requested port/duplex/autoneg to a DE_MEDIA_* type, and returns 0
 * without touching hardware when nothing changed.  The -EINVAL
 * returns after each check are elided in this listing. */
1520 static int __de_set_settings(struct de_private *de, struct ethtool_cmd *ecmd)
1523 unsigned int media_lock;
/* Speeds 5 and 2 look like legacy AUI/BNC pseudo-speed encodings
 * inherited from older tulip tools — TODO confirm; only SPEED_10 is a
 * standard value here. */
1525 if (ecmd->speed != SPEED_10 && ecmd->speed != 5 && ecmd->speed != 2)
1527 if (de->de21040 && ecmd->speed == 2)
1529 if (ecmd->duplex != DUPLEX_HALF && ecmd->duplex != DUPLEX_FULL)
1531 if (ecmd->port != PORT_TP && ecmd->port != PORT_AUI && ecmd->port != PORT_BNC)
/* 21040 has no BNC port. */
1533 if (de->de21040 && ecmd->port == PORT_BNC)
1535 if (ecmd->transceiver != XCVR_INTERNAL)
1537 if (ecmd->autoneg != AUTONEG_DISABLE && ecmd->autoneg != AUTONEG_ENABLE)
1539 if (ecmd->advertising & ~de->media_supported)
1541 if (ecmd->autoneg == AUTONEG_ENABLE &&
1542 (!(ecmd->advertising & ADVERTISED_Autoneg)))
/* Map the requested port to our internal media type; each arm also
 * cross-checks the advertising mask for consistency. */
1545 switch (ecmd->port) {
1547 new_media = DE_MEDIA_AUI;
1548 if (!(ecmd->advertising & ADVERTISED_AUI))
1552 new_media = DE_MEDIA_BNC;
1553 if (!(ecmd->advertising & ADVERTISED_BNC))
1557 if (ecmd->autoneg == AUTONEG_ENABLE)
1558 new_media = DE_MEDIA_TP_AUTO;
1559 else if (ecmd->duplex == DUPLEX_FULL)
1560 new_media = DE_MEDIA_TP_FD;
1562 new_media = DE_MEDIA_TP;
1563 if (!(ecmd->advertising & ADVERTISED_TP))
1565 if (!(ecmd->advertising & (ADVERTISED_10baseT_Full | ADVERTISED_10baseT_Half)))
/* media_lock: user forced a fixed media type (autoneg off). */
1570 media_lock = (ecmd->autoneg == AUTONEG_ENABLE) ? 0 : 1;
1572 if ((new_media == de->media_type) &&
1573 (media_lock == de->media_lock) &&
1574 (ecmd->advertising == de->media_advertise))
1575 return 0; /* nothing to change */
1580 de->media_type = new_media;
1581 de->media_lock = media_lock;
1582 de->media_advertise = ecmd->advertising;
/* de_get_drvinfo: ethtool driver-identification strings plus the
 * EEPROM dump length.  Destination fields are fixed-size arrays in
 * struct ethtool_drvinfo; the constants copied here fit them. */
1588 static void de_get_drvinfo (struct net_device *dev,struct ethtool_drvinfo *info)
1590 struct de_private *de = dev->priv;
1592 strcpy (info->driver, DRV_NAME);
1593 strcpy (info->version, DRV_VERSION);
1594 strcpy (info->bus_info, pci_name(de->pdev));
1595 info->eedump_len = DE_EEPROM_SIZE;
/* de_get_regs_len: size in bytes of the register dump produced by
 * de_get_regs (ethtool get_regs_len hook). */
1598 static int de_get_regs_len(struct net_device *dev)
1600 return DE_REGS_SIZE;
/* de_get_settings: locked wrapper around __de_get_settings. */
1603 static int de_get_settings(struct net_device *dev, struct ethtool_cmd *ecmd)
1605 struct de_private *de = dev->priv;
1608 spin_lock_irq(&de->lock);
1609 rc = __de_get_settings(de, ecmd);
1610 spin_unlock_irq(&de->lock);
/* de_set_settings: locked wrapper around __de_set_settings. */
1615 static int de_set_settings(struct net_device *dev, struct ethtool_cmd *ecmd)
1617 struct de_private *de = dev->priv;
1620 spin_lock_irq(&de->lock);
1621 rc = __de_set_settings(de, ecmd);
1622 spin_unlock_irq(&de->lock);
/* de_get_msglevel: report the driver's netif_msg_* verbosity mask. */
1627 static u32 de_get_msglevel(struct net_device *dev)
1629 struct de_private *de = dev->priv;
1631 return de->msg_enable;
/* de_set_msglevel: set the driver's netif_msg_* verbosity mask. */
1634 static void de_set_msglevel(struct net_device *dev, u32 msglvl)
1636 struct de_private *de = dev->priv;
1638 de->msg_enable = msglvl;
/* de_get_eeprom: return the EEPROM image cached at probe time
 * (de->ee_data).  Only a full-size, zero-offset, zero-magic request is
 * honored; the -EINVAL/null-check lines are elided in this listing. */
1641 static int de_get_eeprom(struct net_device *dev,
1642 struct ethtool_eeprom *eeprom, u8 *data)
1644 struct de_private *de = dev->priv;
1648 if ((eeprom->offset != 0) || (eeprom->magic != 0) ||
1649 (eeprom->len != DE_EEPROM_SIZE))
1651 memcpy(data, de->ee_data, eeprom->len);
/* de_nway_reset: restart TP autonegotiation (ethtool nway_reset).
 * Only valid in DE_MEDIA_TP_AUTO mode; the early-return lines after
 * the media-type and carrier checks are elided in this listing. */
1656 static int de_nway_reset(struct net_device *dev)
1658 struct de_private *de = dev->priv;
1661 if (de->media_type != DE_MEDIA_TP_AUTO)
1663 if (netif_carrier_ok(de->dev))
1666 status = dr32(SIAStatus);
/* Clear the NWay state bits and pulse restart in SIA status. */
1667 dw32(SIAStatus, (status & ~NWayState) | NWayRestart);
1668 if (netif_msg_link(de))
1669 printk(KERN_INFO "%s: link nway restart, status %x,%x\n",
1670 de->dev->name, status, dr32(SIAStatus));
/* de_get_regs: ethtool register-dump hook.  Version encodes the dump
 * layout revision plus a chip flag (low bit set => 21040), then the
 * CSRs are copied out under the lock. */
1674 static void de_get_regs(struct net_device *dev, struct ethtool_regs *regs,
1677 struct de_private *de = dev->priv;
1679 regs->version = (DE_REGS_VER << 2) | de->de21040;
1681 spin_lock_irq(&de->lock);
1682 __de_get_regs(de, data);
1683 spin_unlock_irq(&de->lock);
/* ethtool operations table.  get_link/get_tx_csum/get_sg use generic
 * kernel helpers; the rest are implemented above.  The closing brace
 * of the initializer is elided in this listing. */
1686 static struct ethtool_ops de_ethtool_ops = {
1687 .get_link = ethtool_op_get_link,
1688 .get_tx_csum = ethtool_op_get_tx_csum,
1689 .get_sg = ethtool_op_get_sg,
1690 .get_drvinfo = de_get_drvinfo,
1691 .get_regs_len = de_get_regs_len,
1692 .get_settings = de_get_settings,
1693 .set_settings = de_set_settings,
1694 .get_msglevel = de_get_msglevel,
1695 .set_msglevel = de_set_msglevel,
1696 .get_eeprom = de_get_eeprom,
1697 .nway_reset = de_nway_reset,
1698 .get_regs = de_get_regs,
/* de21040_get_mac_address: read the 6-byte station address from the
 * 21040's serial ROM, one byte per ROMCmd read.  The register reads
 * back negative until a byte is ready, hence the bounded busy-wait.
 * NOTE(review): as listed, the wait loop body does not re-read the
 * register — the surrounding do/while lines appear elided; verify
 * against the full source before assuming a bug. */
1701 static void __init de21040_get_mac_address (struct de_private *de)
1705 dw32 (ROMCmd, 0); /* Reset the pointer with a dummy write. */
1707 for (i = 0; i < 6; i++) {
1708 int value, boguscnt = 100000;
1710 value = dr32(ROMCmd);
1711 while (value < 0 && --boguscnt > 0);
1712 de->dev->dev_addr[i] = value;
1714 printk(KERN_WARNING PFX "timeout reading 21040 MAC address byte %u\n", i);
/* de21040_get_media_info: the 21040 has no SROM media table, so the
 * supported media set is hard-coded (TP half/full duplex plus AUI)
 * and per-media SIA CSR values come from the static t21040_csr1x[]
 * tables.  Some switch-case labels are elided in this listing. */
1718 static void __init de21040_get_media_info(struct de_private *de)
1722 de->media_type = DE_MEDIA_TP;
1723 de->media_supported |= SUPPORTED_TP | SUPPORTED_10baseT_Full |
1724 SUPPORTED_10baseT_Half | SUPPORTED_AUI;
1725 de->media_advertise = de->media_supported;
1727 for (i = 0; i < DE_MAX_MEDIA; i++) {
1731 case DE_MEDIA_TP_FD:
1732 de->media[i].type = i;
1733 de->media[i].csr13 = t21040_csr13[i];
1734 de->media[i].csr14 = t21040_csr14[i];
1735 de->media[i].csr15 = t21040_csr15[i];
/* Media types the 21040 lacks are marked invalid. */
1738 de->media[i].type = DE_MEDIA_INVALID;
1744 /* Note: this routine returns extra data bits for size detection. */
/* tulip_read_eeprom: bit-bang one 16-bit word out of the 93C46-style
 * serial EEPROM behind the ROMCmd register.  Clocks the read opcode +
 * address out MSB-first, then clocks 16 data bits back in.  Delay
 * lines between clock edges are elided in this listing. */
1745 static unsigned __init tulip_read_eeprom(void *regs, int location, int addr_len)
1748 unsigned retval = 0;
1749 void *ee_addr = regs + ROMCmd;
1750 int read_cmd = location | (EE_READ_CMD << addr_len);
/* Select the chip: CS low then high with enable asserted. */
1752 writel(EE_ENB & ~EE_CS, ee_addr);
1753 writel(EE_ENB, ee_addr);
1755 /* Shift the read command bits out. */
1756 for (i = 4 + addr_len; i >= 0; i--) {
1757 short dataval = (read_cmd & (1 << i)) ? EE_DATA_WRITE : 0;
1758 writel(EE_ENB | dataval, ee_addr);
1760 writel(EE_ENB | dataval | EE_SHIFT_CLK, ee_addr);
/* Sample DO on each clock; these bits feed the caller's size probe. */
1762 retval = (retval << 1) | ((readl(ee_addr) & EE_DATA_READ) ? 1 : 0);
1764 writel(EE_ENB, ee_addr);
/* Clock in the 16 data bits of the addressed word. */
1767 for (i = 16; i > 0; i--) {
1768 writel(EE_ENB | EE_SHIFT_CLK, ee_addr);
1770 retval = (retval << 1) | ((readl(ee_addr) & EE_DATA_READ) ? 1 : 0);
1771 writel(EE_ENB, ee_addr);
1775 /* Terminate the EEPROM access. */
1776 writel(EE_ENB & ~EE_CS, ee_addr);
/* de21041_get_srom_info: probe-time SROM parse for the 21041.
 * Downloads the whole EEPROM, extracts the MAC address, locates the
 * controller-0 info leaf, derives the default media type, and walks
 * the media blocks to build de->media[] (types + optional custom SIA
 * CSR13/14/15 values).  Falls back to sane defaults on a bad SROM.
 * NOTE(review): many goto/error/brace lines are elided in this
 * listing; comments below describe only what is visible. */
1780 static void __init de21041_get_srom_info (struct de_private *de)
1782 unsigned i, sa_offset = 0, ofs;
1783 u8 ee_data[DE_EEPROM_SIZE + 6] = {};
/* Probe address width: a dummy read at a wide address reveals whether
 * the part uses 8- or 6-bit addressing. */
1784 unsigned ee_addr_size = tulip_read_eeprom(de->regs, 0xff, 8) & 0x40000 ? 8 : 6;
1785 struct de_srom_info_leaf *il;
1788 /* download entire eeprom */
1789 for (i = 0; i < DE_EEPROM_WORDS; i++)
1790 ((u16 *)ee_data)[i] =
1791 le16_to_cpu(tulip_read_eeprom(de->regs, i, ee_addr_size));
1793 /* DEC now has a specification but early board makers
1794 just put the address in the first EEPROM locations. */
1795 /* This does memcmp(eedata, eedata+16, 8) */
1796 for (i = 0; i < 8; i ++)
1797 if (ee_data[i] != ee_data[16+i])
1800 /* store MAC address */
1801 for (i = 0; i < 6; i ++)
1802 de->dev->dev_addr[i] = ee_data[i + sa_offset];
1804 /* get offset of controller 0 info leaf. ignore 2nd byte. */
1805 ofs = ee_data[SROMC0InfoLeaf];
/* Bounds check: leaf plus at least one media block must fit. */
1806 if (ofs >= (sizeof(ee_data) - sizeof(struct de_srom_info_leaf) - sizeof(struct de_srom_media_block)))
1809 /* get pointer to info leaf */
1810 il = (struct de_srom_info_leaf *) &ee_data[ofs];
1812 /* paranoia checks */
1813 if (il->n_blocks == 0)
1815 if ((sizeof(ee_data) - ofs) <
1816 (sizeof(struct de_srom_info_leaf) + (sizeof(struct de_srom_media_block) * il->n_blocks)))
1819 /* get default media type */
/* DE_UNALIGNED_16: the leaf is byte-packed, so 16-bit fields may be
 * misaligned and need a safe unaligned read. */
1820 switch (DE_UNALIGNED_16(&il->default_media)) {
1821 case 0x0001: de->media_type = DE_MEDIA_BNC; break;
1822 case 0x0002: de->media_type = DE_MEDIA_AUI; break;
1823 case 0x0204: de->media_type = DE_MEDIA_TP_FD; break;
1824 default: de->media_type = DE_MEDIA_TP_AUTO; break;
1827 if (netif_msg_probe(de))
1828 printk(KERN_INFO "de%d: SROM leaf offset %u, default media %s\n",
1830 media_name[de->media_type]);
1832 /* init SIA register values to defaults */
/* 0xffff acts as a "no custom value" sentinel, overwritten either by
 * a media block below or by the t21041 defaults at the end. */
1833 for (i = 0; i < DE_MAX_MEDIA; i++) {
1834 de->media[i].type = DE_MEDIA_INVALID;
1835 de->media[i].csr13 = 0xffff;
1836 de->media[i].csr14 = 0xffff;
1837 de->media[i].csr15 = 0xffff;
1840 /* parse media blocks to see what medias are supported,
1841 * and if any custom CSR values are provided
1843 bufp = ((void *)il) + sizeof(*il);
1844 for (i = 0; i < il->n_blocks; i++) {
1845 struct de_srom_media_block *ib = bufp;
1848 /* index based on media type in media block */
1849 switch(ib->opts & MediaBlockMask) {
1850 case 0: /* 10baseT */
1851 de->media_supported |= SUPPORTED_TP | SUPPORTED_10baseT_Half
1852 | SUPPORTED_Autoneg;
1854 de->media[DE_MEDIA_TP_AUTO].type = DE_MEDIA_TP_AUTO;
1857 de->media_supported |= SUPPORTED_BNC;
1861 de->media_supported |= SUPPORTED_AUI;
1864 case 4: /* 10baseT-FD */
1865 de->media_supported |= SUPPORTED_TP | SUPPORTED_10baseT_Full
1866 | SUPPORTED_Autoneg;
1867 idx = DE_MEDIA_TP_FD;
1868 de->media[DE_MEDIA_TP_AUTO].type = DE_MEDIA_TP_AUTO;
1874 de->media[idx].type = idx;
1876 if (netif_msg_probe(de))
1877 printk(KERN_INFO "de%d: media block #%u: %s",
1879 media_name[de->media[idx].type]);
/* Advance past the opts byte; custom-CSR blocks carry three extra
 * 16-bit values that are consumed (and stepped over) below. */
1881 bufp += sizeof (ib->opts);
1883 if (ib->opts & MediaCustomCSRs) {
1884 de->media[idx].csr13 = DE_UNALIGNED_16(&ib->csr13);
1885 de->media[idx].csr14 = DE_UNALIGNED_16(&ib->csr14);
1886 de->media[idx].csr15 = DE_UNALIGNED_16(&ib->csr15);
1887 bufp += sizeof(ib->csr13) + sizeof(ib->csr14) +
1890 if (netif_msg_probe(de))
1891 printk(" (%x,%x,%x)\n",
1892 de->media[idx].csr13,
1893 de->media[idx].csr14,
1894 de->media[idx].csr15);
1896 } else if (netif_msg_probe(de))
/* Stop if the walk would run off the end of the EEPROM image. */
1899 if (bufp > ((void *)&ee_data[DE_EEPROM_SIZE - 3]))
1903 de->media_advertise = de->media_supported;
1906 /* fill in defaults, for cases where custom CSRs not used */
1907 for (i = 0; i < DE_MAX_MEDIA; i++) {
1908 if (de->media[i].csr13 == 0xffff)
1909 de->media[i].csr13 = t21041_csr13[i];
1910 if (de->media[i].csr14 == 0xffff)
1911 de->media[i].csr14 = t21041_csr14[i];
1912 if (de->media[i].csr15 == 0xffff)
1913 de->media[i].csr15 = t21041_csr15[i];
/* Cache the raw EEPROM image for the ethtool get_eeprom hook; the
 * kmalloc NULL check appears elided in this listing. */
1916 de->ee_data = kmalloc(DE_EEPROM_SIZE, GFP_KERNEL);
1918 memcpy(de->ee_data, &ee_data[0], DE_EEPROM_SIZE);
1923 /* for error cases, it's ok to assume we support all these */
1924 for (i = 0; i < DE_MAX_MEDIA; i++)
1925 de->media[i].type = i;
1926 de->media_supported =
1927 SUPPORTED_10baseT_Half |
1928 SUPPORTED_10baseT_Full |
/* de_init_one: PCI probe entry point.  Allocates and wires up the
 * net_device, enables/claims the PCI device, maps the CSR BAR, resets
 * the MAC, reads MAC address + media info (per chip variant), then
 * registers the interface and puts the adapter back to sleep until
 * open.  Error-unwind labels/gotos are elided in this listing. */
1936 static int __devinit de_init_one (struct pci_dev *pdev,
1937 const struct pci_device_id *ent)
1939 struct net_device *dev;
1940 struct de_private *de;
/* Static probe counter: -1 so the first board is index 0. */
1944 static int board_idx = -1;
1950 printk("%s", version);
1953 /* allocate a new ethernet device structure, and fill in defaults */
1954 dev = alloc_etherdev(sizeof(struct de_private));
1958 SET_MODULE_OWNER(dev);
1959 SET_NETDEV_DEV(dev, &pdev->dev);
1960 dev->open = de_open;
1961 dev->stop = de_close;
1962 dev->set_multicast_list = de_set_rx_mode;
1963 dev->hard_start_xmit = de_start_xmit;
1964 dev->get_stats = de_get_stats;
1965 dev->ethtool_ops = &de_ethtool_ops;
1966 dev->tx_timeout = de_tx_timeout;
1967 dev->watchdog_timeo = TX_TIMEOUT;
1969 dev->irq = pdev->irq;
/* driver_data from the PCI id table distinguishes 21040 from 21041. */
1972 de->de21040 = ent->driver_data == 0 ? 1 : 0;
1975 de->msg_enable = (debug < 0 ? DE_DEF_MSG_ENABLE : debug);
1976 de->board_idx = board_idx;
1977 spin_lock_init (&de->lock);
1978 init_timer(&de->media_timer);
1980 de->media_timer.function = de21040_media_timer;
1982 de->media_timer.function = de21041_media_timer;
1983 de->media_timer.data = (unsigned long) de;
1985 netif_carrier_off(dev);
1986 netif_stop_queue(dev);
1988 /* wake up device, assign resources */
1989 rc = pci_enable_device(pdev);
1993 /* reserve PCI resources to ensure driver atomicity */
1994 rc = pci_request_regions(pdev, DRV_NAME);
1996 goto err_out_disable;
1998 /* check for invalid IRQ value */
1999 if (pdev->irq < 2) {
2001 printk(KERN_ERR PFX "invalid irq (%d) for pci dev %s\n",
2002 pdev->irq, pci_name(pdev));
2006 /* obtain and check validity of PCI I/O address */
/* BAR 1 is the memory-mapped CSR region on these chips. */
2007 pciaddr = pci_resource_start(pdev, 1);
2010 printk(KERN_ERR PFX "no MMIO resource for pci dev %s\n",
2014 if (pci_resource_len(pdev, 1) < DE_REGS_SIZE) {
2016 printk(KERN_ERR PFX "MMIO resource (%lx) too small on pci dev %s\n",
2017 pci_resource_len(pdev, 1), pci_name(pdev));
2021 /* remap CSR registers */
2022 regs = ioremap_nocache(pciaddr, DE_REGS_SIZE);
2025 printk(KERN_ERR PFX "Cannot map PCI MMIO (%lx@%lx) on pci dev %s\n",
2026 pci_resource_len(pdev, 1), pciaddr, pci_name(pdev));
2029 dev->base_addr = (unsigned long) regs;
2032 de_adapter_wake(de);
2034 /* make sure hardware is not running */
2035 rc = de_reset_mac(de);
2037 printk(KERN_ERR PFX "Cannot reset MAC, pci dev %s\n",
2042 /* get MAC address, initialize default media type and
2043 * get list of supported media
2046 de21040_get_mac_address(de);
2047 de21040_get_media_info(de);
2049 de21041_get_srom_info(de);
2052 /* register new network interface with kernel */
2053 rc = register_netdev(dev);
2057 /* print info about board and interface just registered */
2058 printk (KERN_INFO "%s: %s at 0x%lx, "
2059 "%02x:%02x:%02x:%02x:%02x:%02x, "
2062 de->de21040 ? "21040" : "21041",
2064 dev->dev_addr[0], dev->dev_addr[1],
2065 dev->dev_addr[2], dev->dev_addr[3],
2066 dev->dev_addr[4], dev->dev_addr[5],
2069 pci_set_drvdata(pdev, dev);
2071 /* enable busmastering */
2072 pci_set_master(pdev);
2074 /* put adapter to sleep */
2075 de_adapter_sleep(de);
/* Error-unwind tail (labels elided): release regions, disable device. */
2084 pci_release_regions(pdev);
2086 pci_disable_device(pdev);
/* de_remove_one: PCI remove hook — unregister the interface and give
 * back PCI resources (iounmap/free_netdev lines elided in this
 * listing).
 * NOTE(review): marking a .remove hook __exit (paired with __exit_p
 * in de_driver) is suspect — for hot-unpluggable devices the
 * conventional annotation is __devexit/__devexit_p so the function
 * survives in built-in kernels; verify against the target kernel. */
2092 static void __exit de_remove_one (struct pci_dev *pdev)
2094 struct net_device *dev = pci_get_drvdata(pdev);
2095 struct de_private *de = dev->priv;
2099 unregister_netdev(dev);
2103 pci_release_regions(pdev);
2104 pci_disable_device(pdev);
2105 pci_set_drvdata(pdev, NULL);
/* de_suspend: PM suspend hook (old u32-state API).  If the interface
 * is running, quiesce it exactly like tx_timeout/close (timer off,
 * queue stopped, device detached, hardware asleep); otherwise just
 * detach.  Enclosing #ifdef CONFIG_PM and interior stop/cleanup lines
 * are elided in this listing. */
2111 static int de_suspend (struct pci_dev *pdev, u32 state)
2113 struct net_device *dev = pci_get_drvdata (pdev);
2114 struct de_private *de = dev->priv;
2117 if (netif_running (dev)) {
2118 del_timer_sync(&de->media_timer);
2120 disable_irq(dev->irq);
2121 spin_lock_irq(&de->lock);
2124 netif_stop_queue(dev);
/* Detach so the core won't call our hooks while suspended. */
2125 netif_device_detach(dev);
2126 netif_carrier_off(dev);
2128 spin_unlock_irq(&de->lock);
2129 enable_irq(dev->irq);
2131 /* Update the error counts. */
2134 synchronize_irq(dev->irq);
2137 de_adapter_sleep(de);
2138 pci_disable_device(pdev);
/* Interface was down: nothing to quiesce, just detach. */
2140 netif_device_detach(dev);
/* de_resume: PM resume hook — re-enable the PCI device and reattach
 * the interface; if it was not running, only reattach.  Early-exit
 * and hardware re-init lines are elided in this listing. */
2146 static int de_resume (struct pci_dev *pdev)
2148 struct net_device *dev = pci_get_drvdata (pdev);
2149 struct de_private *de = dev->priv;
/* Already attached => nothing to do (goto target elided). */
2152 if (netif_device_present(dev))
2154 if (netif_running(dev)) {
2155 pci_enable_device(pdev);
2157 netif_device_attach(dev);
2159 netif_device_attach(dev);
2166 #endif /* CONFIG_PM */
/* PCI driver descriptor.  .remove is wrapped in __exit_p to match the
 * __exit annotation on de_remove_one; suspend/resume are compiled in
 * under CONFIG_PM (the #ifdef and .name line are elided here). */
2168 static struct pci_driver de_driver = {
2170 .id_table = de_pci_tbl,
2171 .probe = de_init_one,
2172 .remove = __exit_p(de_remove_one),
2174 .suspend = de_suspend,
2175 .resume = de_resume,
/* Module init: print the version banner and register the PCI driver. */
2179 static int __init de_init (void)
2182 printk("%s", version);
2184 return pci_module_init (&de_driver);
/* Module exit: unregister the PCI driver (removes all bound devices). */
2187 static void __exit de_exit (void)
2189 pci_unregister_driver (&de_driver);
/* Register module entry/exit points with the kernel module loader. */
2192 module_init(de_init);
2193 module_exit(de_exit);