1 /* de2104x.c: A Linux PCI Ethernet driver for Intel/Digital 21040/1 chips. */
3 Copyright 2001,2003 Jeff Garzik <jgarzik@pobox.com>
5 Copyright 1994, 1995 Digital Equipment Corporation. [de4x5.c]
6 Written/copyright 1994-2001 by Donald Becker. [tulip.c]
8 This software may be used and distributed according to the terms of
9 the GNU General Public License (GPL), incorporated herein by reference.
10 Drivers based on or derived from this code fall under the GPL and must
11 retain the authorship, copyright and license notice. This file is not
12 a complete program and may only be used when the entire operating
13 system is licensed under the GPL.
15 See the file COPYING in this distribution for more information.
17 TODO, in rough priority order:
18 * Support forcing media type with a module parameter,
19 like dl2k.c/sundance.c
20 * Constants (module parms?) for Rx work limit
21 * Complete reset on PciErr
22 * Jumbo frames / dev->change_mtu
23 * Adjust Rx FIFO threshold and Max Rx DMA burst on Rx FIFO error
24 * Adjust Tx FIFO threshold and Max Tx DMA burst on Tx FIFO error
25 * Implement Tx software interrupt mitigation via
30 #define DRV_NAME "de2104x"
31 #define DRV_VERSION "0.7"
32 #define DRV_RELDATE "Mar 17, 2004"
34 #include <linux/config.h>
35 #include <linux/module.h>
36 #include <linux/kernel.h>
37 #include <linux/netdevice.h>
38 #include <linux/etherdevice.h>
39 #include <linux/init.h>
40 #include <linux/pci.h>
41 #include <linux/delay.h>
42 #include <linux/ethtool.h>
43 #include <linux/compiler.h>
44 #include <linux/rtnetlink.h>
45 #include <linux/crc32.h>
49 #include <asm/uaccess.h>
50 #include <asm/unaligned.h>
52 /* These identify the driver base version and may not be removed. */
53 static char version[] =
54 KERN_INFO DRV_NAME " PCI Ethernet driver v" DRV_VERSION " (" DRV_RELDATE ")\n";
56 MODULE_AUTHOR("Jeff Garzik <jgarzik@pobox.com>");
57 MODULE_DESCRIPTION("Intel/Digital 21040/1 series PCI Ethernet driver");
58 MODULE_LICENSE("GPL");
59 MODULE_VERSION(DRV_VERSION);
61 static int debug = -1;
62 MODULE_PARM (debug, "i");
63 MODULE_PARM_DESC (debug, "de2104x bitmapped message enable number");
65 /* Set the copy breakpoint for the copy-only-tiny-buffer Rx structure. */
/* Platforms in this list pay a high cost for unaligned DMA buffers, so
 * every received frame is copied (threshold = max Ethernet frame size);
 * elsewhere only small frames (<= 100 bytes) are copied.
 * Fixes: "__sparc_" (single trailing underscore) never matched the SPARC
 * predefined macro "__sparc__"; also restores the #else/#endif so
 * rx_copybreak is defined exactly once. */
#if defined(__alpha__) || defined(__arm__) || defined(__hppa__) \
	|| defined(__sparc__) || defined(__ia64__) \
	|| defined(__sh__) || defined(__mips__)
static int rx_copybreak = 1518;
#else
static int rx_copybreak = 100;
#endif
73 MODULE_PARM (rx_copybreak, "i");
74 MODULE_PARM_DESC (rx_copybreak, "de2104x Breakpoint at which Rx packets are copied");
76 #define PFX DRV_NAME ": "
78 #define DE_DEF_MSG_ENABLE (NETIF_MSG_DRV | \
86 #define DE_RX_RING_SIZE 64
87 #define DE_TX_RING_SIZE 64
88 #define DE_RING_BYTES \
89 ((sizeof(struct de_desc) * DE_RX_RING_SIZE) + \
90 (sizeof(struct de_desc) * DE_TX_RING_SIZE))
91 #define NEXT_TX(N) (((N) + 1) & (DE_TX_RING_SIZE - 1))
92 #define NEXT_RX(N) (((N) + 1) & (DE_RX_RING_SIZE - 1))
93 #define TX_BUFFS_AVAIL(CP) \
94 (((CP)->tx_tail <= (CP)->tx_head) ? \
95 (CP)->tx_tail + (DE_TX_RING_SIZE - 1) - (CP)->tx_head : \
96 (CP)->tx_tail - (CP)->tx_head - 1)
98 #define PKT_BUF_SZ 1536 /* Size of each temporary Rx buffer.*/
101 #define DE_SETUP_SKB ((struct sk_buff *) 1)
102 #define DE_DUMMY_SKB ((struct sk_buff *) 2)
103 #define DE_SETUP_FRAME_WORDS 96
104 #define DE_EEPROM_WORDS 256
105 #define DE_EEPROM_SIZE (DE_EEPROM_WORDS * sizeof(u16))
106 #define DE_MAX_MEDIA 5
108 #define DE_MEDIA_TP_AUTO 0
109 #define DE_MEDIA_BNC 1
110 #define DE_MEDIA_AUI 2
111 #define DE_MEDIA_TP 3
112 #define DE_MEDIA_TP_FD 4
113 #define DE_MEDIA_INVALID DE_MAX_MEDIA
114 #define DE_MEDIA_FIRST 0
115 #define DE_MEDIA_LAST (DE_MAX_MEDIA - 1)
116 #define DE_AUI_BNC (SUPPORTED_AUI | SUPPORTED_BNC)
118 #define DE_TIMER_LINK (60 * HZ)
119 #define DE_TIMER_NO_LINK (5 * HZ)
121 #define DE_NUM_REGS 16
122 #define DE_REGS_SIZE (DE_NUM_REGS * sizeof(u32))
123 #define DE_REGS_VER 1
125 /* Time in jiffies before concluding the transmitter is hung. */
126 #define TX_TIMEOUT (6*HZ)
128 #define DE_UNALIGNED_16(a) (u16)(get_unaligned((u16 *)(a)))
130 /* This is a mysterious value that can be written to CSR11 in the 21040 (only)
131 to support a pre-NWay full-duplex signaling mechanism using short frames.
132 No one knows what it should be, but if left at its default value some
133 10base2(!) packets trigger a full-duplex-request interrupt. */
134 #define FULL_DUPLEX_MAGIC 0x6969
157 CacheAlign16 = 0x00008000,
158 BurstLen4 = 0x00000400,
161 NormalTxPoll = (1 << 0),
162 NormalRxPoll = (1 << 0),
164 /* Tx/Rx descriptor status bits */
167 RxErrLong = (1 << 7),
169 RxErrFIFO = (1 << 0),
170 RxErrRunt = (1 << 11),
171 RxErrFrame = (1 << 14),
173 FirstFrag = (1 << 29),
174 LastFrag = (1 << 30),
176 TxFIFOUnder = (1 << 1),
177 TxLinkFail = (1 << 2) | (1 << 10) | (1 << 11),
180 TxJabber = (1 << 14),
181 SetupFrame = (1 << 27),
192 TxState = (1 << 22) | (1 << 21) | (1 << 20),
193 RxState = (1 << 19) | (1 << 18) | (1 << 17),
194 LinkFail = (1 << 12),
196 RxStopped = (1 << 8),
197 TxStopped = (1 << 1),
200 TxEnable = (1 << 13),
202 RxTx = TxEnable | RxEnable,
203 FullDuplex = (1 << 9),
204 AcceptAllMulticast = (1 << 7),
205 AcceptAllPhys = (1 << 6),
207 MacModeClear = (1<<12) | (1<<11) | (1<<10) | (1<<8) | (1<<3) |
208 RxTx | BOCnt | AcceptAllPhys | AcceptAllMulticast,
211 EE_SHIFT_CLK = 0x02, /* EEPROM shift clock. */
212 EE_CS = 0x01, /* EEPROM chip select. */
213 EE_DATA_WRITE = 0x04, /* Data from the Tulip to EEPROM. */
216 EE_DATA_READ = 0x08, /* Data from the EEPROM chip. */
217 EE_ENB = (0x4800 | EE_CS),
219 /* The EEPROM commands include the always-set leading bit. */
223 RxMissedOver = (1 << 16),
224 RxMissedMask = 0xffff,
226 /* SROM-related bits */
228 MediaBlockMask = 0x3f,
229 MediaCustomCSRs = (1 << 6),
232 PM_Sleep = (1 << 31),
233 PM_Snooze = (1 << 30),
234 PM_Mask = PM_Sleep | PM_Snooze,
237 NWayState = (1 << 14) | (1 << 13) | (1 << 12),
238 NWayRestart = (1 << 12),
239 NonselPortActive = (1 << 9),
240 LinkFailStatus = (1 << 2),
241 NetCxnErr = (1 << 1),
244 static const u32 de_intr_mask =
245 IntrOK | IntrErr | RxIntr | RxEmpty | TxIntr | TxEmpty |
246 LinkPass | LinkFail | PciErr;
249 * Set the programmable burst length to 4 longwords for all:
250 * DMA errors result without these values. Cache align 16 long.
252 static const u32 de_bus_mode = CacheAlign16 | BurstLen4;
254 struct de_srom_media_block {
259 } __attribute__((packed));
261 struct de_srom_info_leaf {
265 } __attribute__((packed));
275 u16 type; /* DE_MEDIA_xxx */
292 struct net_device *dev;
295 struct de_desc *rx_ring;
296 struct de_desc *tx_ring;
297 struct ring_info tx_skb[DE_TX_RING_SIZE];
298 struct ring_info rx_skb[DE_RX_RING_SIZE];
304 struct net_device_stats net_stats;
306 struct pci_dev *pdev;
308 u16 setup_frame[DE_SETUP_FRAME_WORDS];
313 struct media_info media[DE_MAX_MEDIA];
314 struct timer_list media_timer;
318 unsigned de21040 : 1;
319 unsigned media_lock : 1;
323 static void de_set_rx_mode (struct net_device *dev);
324 static void de_tx (struct de_private *de);
325 static void de_clean_rings (struct de_private *de);
326 static void de_media_interrupt (struct de_private *de, u32 status);
327 static void de21040_media_timer (unsigned long data);
328 static void de21041_media_timer (unsigned long data);
329 static unsigned int de_ok_to_advertise (struct de_private *de, u32 new_media);
332 static struct pci_device_id de_pci_tbl[] = {
333 { PCI_VENDOR_ID_DEC, PCI_DEVICE_ID_DEC_TULIP,
334 PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0 },
335 { PCI_VENDOR_ID_DEC, PCI_DEVICE_ID_DEC_TULIP_PLUS,
336 PCI_ANY_ID, PCI_ANY_ID, 0, 0, 1 },
339 MODULE_DEVICE_TABLE(pci, de_pci_tbl);
341 static const char * const media_name[DE_MAX_MEDIA] = {
349 /* 21040 transceiver register settings:
350 * TP AUTO(unused), BNC(unused), AUI, TP, TP FD*/
351 static u16 t21040_csr13[] = { 0, 0, 0x8F09, 0x8F01, 0x8F01, };
352 static u16 t21040_csr14[] = { 0, 0, 0x0705, 0xFFFF, 0xFFFD, };
353 static u16 t21040_csr15[] = { 0, 0, 0x0006, 0x0000, 0x0000, };
355 /* 21041 transceiver register settings: TP AUTO, BNC, AUI, TP, TP FD*/
356 static u16 t21041_csr13[] = { 0xEF01, 0xEF09, 0xEF09, 0xEF01, 0xEF09, };
357 static u16 t21041_csr14[] = { 0xFFFF, 0xF7FD, 0xF7FD, 0x6F3F, 0x6F3D, };
358 static u16 t21041_csr15[] = { 0x0008, 0x0006, 0x000E, 0x0008, 0x0008, };
361 #define dr32(reg) readl(de->regs + (reg))
362 #define dw32(reg,val) writel((val), de->regs + (reg))
/* Account for one bad Rx descriptor: decode the error bits in @status
 * and bump the matching de->net_stats error counters.  Called from
 * de_rx() when the status word indicates an error or a frame that
 * spans multiple buffers. */
365 static void de_rx_err_acct (struct de_private *de, unsigned rx_tail,
368 if (netif_msg_rx_err (de))
370 "%s: rx err, slot %d status 0x%x len %d\n",
371 de->dev->name, rx_tail, status, len);
/* 0x38000300 masks the error-summary/first/last-descriptor bits; a value
 * other than 0x0300 (first+last set, no error summary) means the frame
 * did not fit cleanly in a single buffer. */
373 if ((status & 0x38000300) != 0x0300) {
374 /* Ignore earlier buffers. */
375 if ((status & 0xffff) != 0x7fff) {
376 if (netif_msg_rx_err(de))
377 printk(KERN_WARNING "%s: Oversized Ethernet frame "
378 "spanned multiple buffers, status %8.8x!\n",
379 de->dev->name, status);
380 de->net_stats.rx_length_errors++;
382 } else if (status & RxError) {
383 /* There was a fatal error. */
384 de->net_stats.rx_errors++; /* end of a packet.*/
/* 0x0890 covers the length-related error bits (includes RxErrLong bit 7
 * and RxErrRunt bit 11) — NOTE(review): verify remaining bit against the
 * 21040/21041 descriptor spec. */
385 if (status & 0x0890) de->net_stats.rx_length_errors++;
386 if (status & RxErrCRC) de->net_stats.rx_crc_errors++;
387 if (status & RxErrFIFO) de->net_stats.rx_fifo_errors++;
/* Receive path: walk the Rx ring from de->rx_tail, handing completed
 * frames to the network stack.  Small frames (<= rx_copybreak) are
 * copied into a fresh skb so the original DMA buffer can be reused;
 * larger frames are passed up directly and a new buffer is mapped in
 * their place.  Processes at most DE_RX_RING_SIZE descriptors per call. */
391 static void de_rx (struct de_private *de)
393 unsigned rx_tail = de->rx_tail;
394 unsigned rx_work = DE_RX_RING_SIZE;
401 struct sk_buff *skb, *copy_skb;
402 unsigned copying_skb, buflen;
404 skb = de->rx_skb[rx_tail].skb;
408 status = le32_to_cpu(de->rx_ring[rx_tail].opts1);
409 if (status & DescOwn)
/* Frame length lives in bits 16..26; subtract 4 to drop the FCS. */
412 len = ((status >> 16) & 0x7ff) - 4;
413 mapping = de->rx_skb[rx_tail].mapping;
415 if (unlikely(drop)) {
416 de->net_stats.rx_dropped++;
/* Anything other than clean first+last descriptor with no error bits
 * goes through the error-accounting slow path. */
420 if (unlikely((status & 0x38008300) != 0x0300)) {
421 de_rx_err_acct(de, rx_tail, status, len);
425 copying_skb = (len <= rx_copybreak);
427 if (unlikely(netif_msg_rx_status(de)))
428 printk(KERN_DEBUG "%s: rx slot %d status 0x%x len %d copying? %d\n",
429 de->dev->name, rx_tail, status, len,
432 buflen = copying_skb ? (len + RX_OFFSET) : de->rx_buf_sz;
433 copy_skb = dev_alloc_skb (buflen);
434 if (unlikely(!copy_skb)) {
435 de->net_stats.rx_dropped++;
440 copy_skb->dev = de->dev;
/* Not copying: unmap the full-size buffer and replace it in the ring. */
443 pci_unmap_single(de->pdev, mapping,
444 buflen, PCI_DMA_FROMDEVICE);
448 de->rx_skb[rx_tail].mapping =
449 pci_map_single(de->pdev, copy_skb->tail,
450 buflen, PCI_DMA_FROMDEVICE);
451 de->rx_skb[rx_tail].skb = copy_skb;
/* Copying: sync for CPU, memcpy into the small skb, hand buffer back. */
453 pci_dma_sync_single_for_cpu(de->pdev, mapping, len, PCI_DMA_FROMDEVICE);
454 skb_reserve(copy_skb, RX_OFFSET);
455 memcpy(skb_put(copy_skb, len), skb->tail, len);
457 pci_dma_sync_single_for_device(de->pdev, mapping, len, PCI_DMA_FROMDEVICE);
459 /* We'll reuse the original ring buffer. */
463 skb->protocol = eth_type_trans (skb, de->dev);
465 de->net_stats.rx_packets++;
466 de->net_stats.rx_bytes += skb->len;
467 de->dev->last_rx = jiffies;
469 if (rc == NET_RX_DROP)
/* Re-arm the descriptor: give it back to the chip, restore the buffer
 * size (with RingEnd on the last slot), and advance the tail. */
473 de->rx_ring[rx_tail].opts1 = cpu_to_le32(DescOwn);
474 if (rx_tail == (DE_RX_RING_SIZE - 1))
475 de->rx_ring[rx_tail].opts2 =
476 cpu_to_le32(RingEnd | de->rx_buf_sz);
478 de->rx_ring[rx_tail].opts2 = cpu_to_le32(de->rx_buf_sz);
479 de->rx_ring[rx_tail].addr1 = cpu_to_le32(mapping);
480 rx_tail = NEXT_RX(rx_tail);
484 printk(KERN_WARNING "%s: rx work limit reached\n", de->dev->name);
486 de->rx_tail = rx_tail;
/* Shared interrupt handler: ack the status word, then dispatch to the
 * Rx, Tx, link-change, and PCI-error paths as indicated by the bits
 * set in MacStatus (CSR5). */
489 static irqreturn_t de_interrupt (int irq, void *dev_instance, struct pt_regs *regs)
491 struct net_device *dev = dev_instance;
492 struct de_private *de = dev->priv;
495 status = dr32(MacStatus);
/* Not our interrupt (no summary bits), or chip is gone (all-ones read). */
496 if ((!(status & (IntrOK|IntrErr))) || (status == 0xFFFF))
499 if (netif_msg_intr(de))
500 printk(KERN_DEBUG "%s: intr, status %08x mode %08x desc %u/%u/%u\n",
501 dev->name, status, dr32(MacMode), de->rx_tail, de->tx_head, de->tx_tail);
/* Writing the status bits back acknowledges (clears) them. */
503 dw32(MacStatus, status);
505 if (status & (RxIntr | RxEmpty)) {
507 if (status & RxEmpty)
508 dw32(RxPoll, NormalRxPoll);
511 spin_lock(&de->lock);
513 if (status & (TxIntr | TxEmpty))
516 if (status & (LinkPass | LinkFail))
517 de_media_interrupt(de, status);
519 spin_unlock(&de->lock);
/* PCI bus error: read-then-write PCI_STATUS to clear the error bits. */
521 if (status & PciErr) {
524 pci_read_config_word(de->pdev, PCI_STATUS, &pci_status);
525 pci_write_config_word(de->pdev, PCI_STATUS, pci_status);
526 printk(KERN_ERR "%s: PCI bus error, status=%08x, PCI status=%04x\n",
527 dev->name, status, pci_status);
/* Reclaim completed Tx descriptors between tx_tail and tx_head: unmap
 * DMA buffers, update error/packet statistics on the last fragment,
 * free the skbs, and wake the queue once enough slots are free.
 * Called with de->lock held from the interrupt path. */
533 static void de_tx (struct de_private *de)
535 unsigned tx_head = de->tx_head;
536 unsigned tx_tail = de->tx_tail;
538 while (tx_tail != tx_head) {
543 status = le32_to_cpu(de->tx_ring[tx_tail].opts1);
/* Chip still owns this descriptor — stop reclaiming. */
544 if (status & DescOwn)
547 skb = de->tx_skb[tx_tail].skb;
/* DE_DUMMY_SKB slots carry no buffer (chip-errata placeholder). */
550 if (unlikely(skb == DE_DUMMY_SKB))
/* DE_SETUP_SKB slots carry the setup frame, not a real packet. */
553 if (unlikely(skb == DE_SETUP_SKB)) {
554 pci_unmap_single(de->pdev, de->tx_skb[tx_tail].mapping,
555 sizeof(de->setup_frame), PCI_DMA_TODEVICE);
559 pci_unmap_single(de->pdev, de->tx_skb[tx_tail].mapping,
560 skb->len, PCI_DMA_TODEVICE);
/* Statistics are accumulated only on the final fragment of a frame. */
562 if (status & LastFrag) {
563 if (status & TxError) {
564 if (netif_msg_tx_err(de))
565 printk(KERN_DEBUG "%s: tx err, status 0x%x\n",
566 de->dev->name, status);
567 de->net_stats.tx_errors++;
569 de->net_stats.tx_window_errors++;
570 if (status & TxMaxCol)
571 de->net_stats.tx_aborted_errors++;
572 if (status & TxLinkFail)
573 de->net_stats.tx_carrier_errors++;
574 if (status & TxFIFOUnder)
575 de->net_stats.tx_fifo_errors++;
577 de->net_stats.tx_packets++;
578 de->net_stats.tx_bytes += skb->len;
579 if (netif_msg_tx_done(de))
580 printk(KERN_DEBUG "%s: tx done, slot %d\n", de->dev->name, tx_tail);
582 dev_kfree_skb_irq(skb);
586 de->tx_skb[tx_tail].skb = NULL;
588 tx_tail = NEXT_TX(tx_tail);
591 de->tx_tail = tx_tail;
/* Restart a stopped queue once at least a quarter of the ring is free. */
593 if (netif_queue_stopped(de->dev) && (TX_BUFFS_AVAIL(de) > (DE_TX_RING_SIZE / 4)))
594 netif_wake_queue(de->dev);
/* hard_start_xmit hook: map the skb for DMA, fill the next Tx
 * descriptor (single-fragment: FirstFrag|LastFrag), hand ownership to
 * the chip and kick the Tx poll demand register. */
597 static int de_start_xmit (struct sk_buff *skb, struct net_device *dev)
599 struct de_private *de = dev->priv;
600 unsigned int entry, tx_free;
601 u32 mapping, len, flags = FirstFrag | LastFrag;
604 spin_lock_irq(&de->lock);
606 tx_free = TX_BUFFS_AVAIL(de);
/* Ring full: stop the queue and tell the stack to retry later. */
608 netif_stop_queue(dev);
609 spin_unlock_irq(&de->lock);
616 txd = &de->tx_ring[entry];
619 mapping = pci_map_single(de->pdev, skb->data, len, PCI_DMA_TODEVICE);
620 if (entry == (DE_TX_RING_SIZE - 1))
/* Request a Tx-complete interrupt when the ring is empty or half full
 * (simple interrupt mitigation). */
622 if (!tx_free || (tx_free == (DE_TX_RING_SIZE / 2)))
625 txd->opts2 = cpu_to_le32(flags);
626 txd->addr1 = cpu_to_le32(mapping);
628 de->tx_skb[entry].skb = skb;
629 de->tx_skb[entry].mapping = mapping;
/* Transfer descriptor ownership to the chip last, after all other
 * fields are filled in. */
632 txd->opts1 = cpu_to_le32(DescOwn);
635 de->tx_head = NEXT_TX(entry);
636 if (netif_msg_tx_queued(de))
637 printk(KERN_DEBUG "%s: tx queued, slot %d, skblen %d\n",
638 dev->name, entry, skb->len);
641 netif_stop_queue(dev);
643 spin_unlock_irq(&de->lock);
645 /* Trigger an immediate transmit demand. */
646 dw32(TxPoll, NormalTxPoll);
647 dev->trans_start = jiffies;
652 /* Set or clear the multicast filter for this adaptor.
653 Note that we only use exclusion around actually queueing the
654 new frame, not around filling de->setup_frame. This is non-deterministic
655 when re-entered but still correct. */
/* Set bit @i (little-endian bit order) in byte array @p.  NOTE: evaluates
 * both arguments more than once — only safe with side-effect-free args. */
658 #define set_bit_le(i,p) do { ((char *)(p))[(i)/8] |= (1<<((i)%8)); } while(0)
/* Build a setup frame using the chip's 512-bit multicast hash filter.
 * Used when there are too many multicast addresses for perfect
 * filtering.  Each 16-bit hash word is written twice because only the
 * low shortword of each 32-bit setup-frame entry is significant. */
660 static void build_setup_frame_hash(u16 *setup_frm, struct net_device *dev)
662 struct de_private *de = dev->priv;
664 struct dev_mc_list *mclist;
668 memset(hash_table, 0, sizeof(hash_table));
669 set_bit_le(255, hash_table); /* Broadcast entry */
670 /* This should work on big-endian machines as well. */
671 for (i = 0, mclist = dev->mc_list; mclist && i < dev->mc_count;
672 i++, mclist = mclist->next) {
673 int index = ether_crc_le(ETH_ALEN, mclist->dmi_addr) & 0x1ff;
675 set_bit_le(index, hash_table);
677 for (i = 0; i < 32; i++) {
678 *setup_frm++ = hash_table[i];
679 *setup_frm++ = hash_table[i];
681 setup_frm = &de->setup_frame[13*6];
684 /* Fill the final entry with our physical address. */
685 eaddrs = (u16 *)dev->dev_addr;
686 *setup_frm++ = eaddrs[0]; *setup_frm++ = eaddrs[0];
687 *setup_frm++ = eaddrs[1]; *setup_frm++ = eaddrs[1];
688 *setup_frm++ = eaddrs[2]; *setup_frm++ = eaddrs[2];
/* Build a setup frame using the chip's 16-entry perfect address filter.
 * Used when there are at most 14 multicast addresses (slot 16 holds our
 * own station address; unused slots get the broadcast address). */
691 static void build_setup_frame_perfect(u16 *setup_frm, struct net_device *dev)
693 struct de_private *de = dev->priv;
694 struct dev_mc_list *mclist;
698 /* We have <= 14 addresses so we can use the wonderful
699 16 address perfect filtering of the Tulip. */
700 for (i = 0, mclist = dev->mc_list; i < dev->mc_count;
701 i++, mclist = mclist->next) {
702 eaddrs = (u16 *)mclist->dmi_addr;
/* Each 16-bit word is stored twice: only the low shortword of each
 * 32-bit setup-frame entry is used by the hardware. */
703 *setup_frm++ = *eaddrs; *setup_frm++ = *eaddrs++;
704 *setup_frm++ = *eaddrs; *setup_frm++ = *eaddrs++;
705 *setup_frm++ = *eaddrs; *setup_frm++ = *eaddrs++;
707 /* Fill the unused entries with the broadcast address. */
708 memset(setup_frm, 0xff, (15-i)*12);
709 setup_frm = &de->setup_frame[15*6];
711 /* Fill the final entry with our physical address. */
712 eaddrs = (u16 *)dev->dev_addr;
713 *setup_frm++ = eaddrs[0]; *setup_frm++ = eaddrs[0];
714 *setup_frm++ = eaddrs[1]; *setup_frm++ = eaddrs[1];
715 *setup_frm++ = eaddrs[2]; *setup_frm++ = eaddrs[2];
/* Program the receive filter.  Promiscuous/all-multi modes are handled
 * purely via MacMode bits; otherwise a setup frame (hash or perfect
 * filter) is built and queued on the Tx ring for the chip to consume.
 * Caller must hold de->lock (see de_set_rx_mode below). */
719 static void __de_set_rx_mode (struct net_device *dev)
721 struct de_private *de = dev->priv;
726 struct de_desc *dummy_txd = NULL;
728 macmode = dr32(MacMode) & ~(AcceptAllMulticast | AcceptAllPhys);
730 if (dev->flags & IFF_PROMISC) { /* Set promiscuous. */
731 macmode |= AcceptAllMulticast | AcceptAllPhys;
735 if ((dev->mc_count > 1000) || (dev->flags & IFF_ALLMULTI)) {
736 /* Too many to filter well -- accept all multicasts. */
737 macmode |= AcceptAllMulticast;
741 /* Note that only the low-address shortword of setup_frame is valid!
742 The values are doubled for big-endian architectures. */
743 if (dev->mc_count > 14) /* Must use a multicast hash table. */
744 build_setup_frame_hash (de->setup_frame, dev);
746 build_setup_frame_perfect (de->setup_frame, dev);
749 * Now add this frame to the Tx list.
754 /* Avoid a chip errata by prefixing a dummy entry. */
756 de->tx_skb[entry].skb = DE_DUMMY_SKB;
758 dummy_txd = &de->tx_ring[entry];
759 dummy_txd->opts2 = (entry == (DE_TX_RING_SIZE - 1)) ?
760 cpu_to_le32(RingEnd) : 0;
761 dummy_txd->addr1 = 0;
763 /* Must set DescOwned later to avoid race with chip */
765 entry = NEXT_TX(entry);
768 de->tx_skb[entry].skb = DE_SETUP_SKB;
769 de->tx_skb[entry].mapping = mapping =
770 pci_map_single (de->pdev, de->setup_frame,
771 sizeof (de->setup_frame), PCI_DMA_TODEVICE);
773 /* Put the setup frame on the Tx list. */
774 txd = &de->tx_ring[entry];
775 if (entry == (DE_TX_RING_SIZE - 1))
776 txd->opts2 = cpu_to_le32(SetupFrame | RingEnd | sizeof (de->setup_frame));
778 txd->opts2 = cpu_to_le32(SetupFrame | sizeof (de->setup_frame));
779 txd->addr1 = cpu_to_le32(mapping);
/* Hand the setup descriptor to the chip first, then the dummy one, so
 * the chip never sees a half-initialized pair. */
782 txd->opts1 = cpu_to_le32(DescOwn);
786 dummy_txd->opts1 = cpu_to_le32(DescOwn);
790 de->tx_head = NEXT_TX(entry);
792 if (TX_BUFFS_AVAIL(de) < 0)
794 if (TX_BUFFS_AVAIL(de) == 0)
795 netif_stop_queue(dev);
797 /* Trigger an immediate transmit demand. */
798 dw32(TxPoll, NormalTxPoll);
801 if (macmode != dr32(MacMode))
802 dw32(MacMode, macmode);
/* Locked wrapper around __de_set_rx_mode(); this is the
 * dev->set_multicast_list entry point. */
805 static void de_set_rx_mode (struct net_device *dev)
808 struct de_private *de = dev->priv;
810 spin_lock_irqsave (&de->lock, flags);
811 __de_set_rx_mode(dev);
812 spin_unlock_irqrestore (&de->lock, flags);
/* Fold the chip's self-clearing RxMissed counter (CSR8) into
 * net_stats; if the overflow bit is set the exact count is lost, so
 * credit the full 16-bit counter range. */
815 static inline void de_rx_missed(struct de_private *de, u32 rx_missed)
817 if (unlikely(rx_missed & RxMissedOver))
818 de->net_stats.rx_missed_errors += RxMissedMask;
820 de->net_stats.rx_missed_errors += (rx_missed & RxMissedMask);
/* Read-and-clear the hardware missed-frame counter into net_stats. */
823 static void __de_get_stats(struct de_private *de)
825 u32 tmp = dr32(RxMissed); /* self-clearing */
827 de_rx_missed(de, tmp);
/* dev->get_stats entry point: refresh hardware counters (only while the
 * device is up and present) and return the accumulated statistics. */
830 static struct net_device_stats *de_get_stats(struct net_device *dev)
832 struct de_private *de = dev->priv;
834 /* The chip only need report frame silently dropped. */
835 spin_lock_irq(&de->lock);
836 if (netif_running(dev) && netif_device_present(dev))
838 spin_unlock_irq(&de->lock);
840 return &de->net_stats;
/* Nonzero while either the Rx or Tx DMA state machine is active. */
843 static inline int de_is_running (struct de_private *de)
845 return (dr32(MacStatus) & (RxState | TxState)) ? 1 : 0;
/* Clear the Rx/Tx enable bits in MacMode and poll (bounded by ~1000
 * iterations) until both DMA engines report stopped; warn on timeout. */
848 static void de_stop_rxtx (struct de_private *de)
851 unsigned int work = 1000;
853 macmode = dr32(MacMode);
854 if (macmode & RxTx) {
855 dw32(MacMode, macmode & ~RxTx);
860 if (!de_is_running(de))
865 printk(KERN_WARNING "%s: timeout expired stopping DMA\n", de->dev->name);
/* Set both Rx and Tx enable bits if not already set. */
868 static inline void de_start_rxtx (struct de_private *de)
872 macmode = dr32(MacMode);
873 if ((macmode & RxTx) != RxTx) {
874 dw32(MacMode, macmode | RxTx);
/* Quiesce the hardware: ack pending status and reset the Tx ring
 * indices.  Caller is responsible for any required locking. */
879 static void de_stop_hw (struct de_private *de)
887 dw32(MacStatus, dr32(MacStatus));
892 de->tx_head = de->tx_tail = 0;
/* Mark carrier up (idempotent) and log the active media type. */
895 static void de_link_up(struct de_private *de)
897 if (!netif_carrier_ok(de->dev)) {
898 netif_carrier_on(de->dev);
899 if (netif_msg_link(de))
900 printk(KERN_INFO "%s: link up, media %s\n",
901 de->dev->name, media_name[de->media_type]);
/* Mark carrier down (idempotent) and log the transition. */
905 static void de_link_down(struct de_private *de)
907 if (netif_carrier_ok(de->dev)) {
908 netif_carrier_off(de->dev);
909 if (netif_msg_link(de))
910 printk(KERN_INFO "%s: link down\n", de->dev->name);
/* Program the SIA (CSR13-15) for de->media_type and set/clear the
 * full-duplex bit in MacMode.  CSR13 is written 0 first to reset the
 * PHY before loading the per-media register values. */
914 static void de_set_media (struct de_private *de)
916 unsigned media = de->media_type;
917 u32 macmode = dr32(MacMode);
919 if (de_is_running(de))
/* See FULL_DUPLEX_MAGIC: 21040 pre-NWay full-duplex workaround. */
923 dw32(CSR11, FULL_DUPLEX_MAGIC);
924 dw32(CSR13, 0); /* Reset phy */
925 dw32(CSR14, de->media[media].csr14);
926 dw32(CSR15, de->media[media].csr15);
927 dw32(CSR13, de->media[media].csr13);
929 /* must delay 10ms before writing to other registers,
934 if (media == DE_MEDIA_TP_FD)
935 macmode |= FullDuplex;
937 macmode &= ~FullDuplex;
939 if (netif_msg_link(de)) {
940 printk(KERN_INFO "%s: set link %s\n"
941 KERN_INFO "%s: mode 0x%x, sia 0x%x,0x%x,0x%x,0x%x\n"
942 KERN_INFO "%s: set mode 0x%x, set sia 0x%x,0x%x,0x%x\n",
943 de->dev->name, media_name[media],
944 de->dev->name, dr32(MacMode), dr32(SIAStatus),
945 dr32(CSR13), dr32(CSR14), dr32(CSR15),
946 de->dev->name, macmode, de->media[media].csr13,
947 de->media[media].csr14, de->media[media].csr15);
949 if (macmode != dr32(MacMode))
950 dw32(MacMode, macmode);
/* Pick the first entry of @media that we are allowed to advertise and
 * make it the current media type; leaves media_type unchanged if none
 * of the candidates qualify. */
953 static void de_next_media (struct de_private *de, u32 *media,
954 unsigned int n_media)
958 for (i = 0; i < n_media; i++) {
959 if (de_ok_to_advertise(de, media[i])) {
960 de->media_type = media[i];
966 static void de21040_media_timer (unsigned long data)
968 struct de_private *de = (struct de_private *) data;
969 struct net_device *dev = de->dev;
970 u32 status = dr32(SIAStatus);
971 unsigned int carrier;
974 carrier = (status & NetCxnErr) ? 0 : 1;
977 if (de->media_type != DE_MEDIA_AUI && (status & LinkFailStatus))
980 de->media_timer.expires = jiffies + DE_TIMER_LINK;
981 add_timer(&de->media_timer);
982 if (!netif_carrier_ok(dev))
985 if (netif_msg_timer(de))
986 printk(KERN_INFO "%s: %s link ok, status %x\n",
987 dev->name, media_name[de->media_type],
997 if (de->media_type == DE_MEDIA_AUI) {
998 u32 next_state = DE_MEDIA_TP;
999 de_next_media(de, &next_state, 1);
1001 u32 next_state = DE_MEDIA_AUI;
1002 de_next_media(de, &next_state, 1);
1005 spin_lock_irqsave(&de->lock, flags);
1007 spin_unlock_irqrestore(&de->lock, flags);
1012 de->media_timer.expires = jiffies + DE_TIMER_NO_LINK;
1013 add_timer(&de->media_timer);
1015 if (netif_msg_timer(de))
1016 printk(KERN_INFO "%s: no link, trying media %s, status %x\n",
1017 dev->name, media_name[de->media_type], status);
1020 static unsigned int de_ok_to_advertise (struct de_private *de, u32 new_media)
1022 switch (new_media) {
1023 case DE_MEDIA_TP_AUTO:
1024 if (!(de->media_advertise & ADVERTISED_Autoneg))
1026 if (!(de->media_advertise & (ADVERTISED_10baseT_Half | ADVERTISED_10baseT_Full)))
1030 if (!(de->media_advertise & ADVERTISED_BNC))
1034 if (!(de->media_advertise & ADVERTISED_AUI))
1038 if (!(de->media_advertise & ADVERTISED_10baseT_Half))
1041 case DE_MEDIA_TP_FD:
1042 if (!(de->media_advertise & ADVERTISED_10baseT_Full))
/* 21041 link watchdog: like the 21040 variant but with three ports
 * (TP/AUI/BNC).  Uses the chip's "non-selected port activity" hint to
 * jump straight to the likely-correct media before falling back to
 * round-robin cycling through the advertised media types. */
1050 static void de21041_media_timer (unsigned long data)
1052 struct de_private *de = (struct de_private *) data;
1053 struct net_device *dev = de->dev;
1054 u32 status = dr32(SIAStatus);
1055 unsigned int carrier;
1056 unsigned long flags;
1058 carrier = (status & NetCxnErr) ? 0 : 1;
/* LinkFailStatus is only meaningful on the twisted-pair media types. */
1061 if ((de->media_type == DE_MEDIA_TP_AUTO ||
1062 de->media_type == DE_MEDIA_TP ||
1063 de->media_type == DE_MEDIA_TP_FD) &&
1064 (status & LinkFailStatus))
1067 de->media_timer.expires = jiffies + DE_TIMER_LINK;
1068 add_timer(&de->media_timer);
1069 if (!netif_carrier_ok(dev))
1072 if (netif_msg_timer(de))
1073 printk(KERN_INFO "%s: %s link ok, mode %x status %x\n",
1074 dev->name, media_name[de->media_type],
1075 dr32(MacMode), status);
1081 /* if media type locked, don't switch media */
1085 /* if activity detected, use that as hint for new media type */
1086 if (status & NonselPortActive) {
1087 unsigned int have_media = 1;
1089 /* if AUI/BNC selected, then activity is on TP port */
1090 if (de->media_type == DE_MEDIA_AUI ||
1091 de->media_type == DE_MEDIA_BNC) {
1092 if (de_ok_to_advertise(de, DE_MEDIA_TP_AUTO))
1093 de->media_type = DE_MEDIA_TP_AUTO;
1098 /* TP selected. If there is only TP and BNC, then it's BNC */
1099 else if (((de->media_supported & DE_AUI_BNC) == SUPPORTED_BNC) &&
1100 de_ok_to_advertise(de, DE_MEDIA_BNC))
1101 de->media_type = DE_MEDIA_BNC;
1103 /* TP selected. If there is only TP and AUI, then it's AUI */
1104 else if (((de->media_supported & DE_AUI_BNC) == SUPPORTED_AUI) &&
1105 de_ok_to_advertise(de, DE_MEDIA_AUI))
1106 de->media_type = DE_MEDIA_AUI;
1108 /* otherwise, ignore the hint */
1117 * Absent or ambiguous activity hint, move to next advertised
1118 * media state. If de->media_type is left unchanged, this
1119 * simply resets the PHY and reloads the current media settings.
1121 if (de->media_type == DE_MEDIA_AUI) {
1122 u32 next_states[] = { DE_MEDIA_BNC, DE_MEDIA_TP_AUTO };
1123 de_next_media(de, next_states, ARRAY_SIZE(next_states));
1124 } else if (de->media_type == DE_MEDIA_BNC) {
1125 u32 next_states[] = { DE_MEDIA_TP_AUTO, DE_MEDIA_AUI };
1126 de_next_media(de, next_states, ARRAY_SIZE(next_states));
1128 u32 next_states[] = { DE_MEDIA_AUI, DE_MEDIA_BNC, DE_MEDIA_TP_AUTO };
1129 de_next_media(de, next_states, ARRAY_SIZE(next_states));
1133 spin_lock_irqsave(&de->lock, flags);
1135 spin_unlock_irqrestore(&de->lock, flags);
1140 de->media_timer.expires = jiffies + DE_TIMER_NO_LINK;
1141 add_timer(&de->media_timer);
1143 if (netif_msg_timer(de))
1144 printk(KERN_INFO "%s: no link, trying media %s, status %x\n",
1145 dev->name, media_name[de->media_type], status);
/* Link-change interrupt: on LinkPass declare link up and slow the
 * watchdog; on LinkFail (only if carrier was up) declare link down and
 * speed the watchdog back up.  Called with de->lock held. */
1148 static void de_media_interrupt (struct de_private *de, u32 status)
1150 if (status & LinkPass) {
1152 mod_timer(&de->media_timer, jiffies + DE_TIMER_LINK);
1156 if (!(status & LinkFail))
1159 if (netif_carrier_ok(de->dev)) {
1161 mod_timer(&de->media_timer, jiffies + DE_TIMER_NO_LINK);
/* Soft-reset the MAC via the BusMode (CSR0) reset bit, then restore
 * our bus-mode settings and verify the chip came back (DMA engines
 * idle, register reads not all-ones).  Returns 0 on success, negative
 * errno on failure. */
1165 static int de_reset_mac (struct de_private *de)
1170 * Reset MAC. de4x5.c and tulip.c examined for "advice"
/* All-ones read means the chip is absent or hung on the bus. */
1174 if (dr32(BusMode) == 0xffffffff)
1177 /* Reset the chip, holding bit 0 set at least 50 PCI cycles. */
1178 dw32 (BusMode, CmdReset);
1181 dw32 (BusMode, de_bus_mode);
1184 for (tmp = 0; tmp < 5; tmp++) {
1191 status = dr32(MacStatus);
1192 if (status & (RxState | TxState))
1194 if (status == 0xffffffff)
/* Bring the chip out of D1/D2-style sleep/snooze (clear the power
 * management bits in the PCIPM config register), then wait ~10ms as
 * de4x5.c does before touching other registers. */
1199 static void de_adapter_wake (struct de_private *de)
1206 pci_read_config_dword(de->pdev, PCIPM, &pmctl);
1207 if (pmctl & PM_Mask) {
1209 pci_write_config_dword(de->pdev, PCIPM, pmctl);
1211 /* de4x5.c delays, so we do too */
1212 current->state = TASK_UNINTERRUPTIBLE;
1213 schedule_timeout(msecs_to_jiffies(10));
/* Put the adapter into its low-power sleep state via PCIPM. */
1217 static void de_adapter_sleep (struct de_private *de)
1224 pci_read_config_dword(de->pdev, PCIPM, &pmctl);
1226 pci_write_config_dword(de->pdev, PCIPM, pmctl);
/* Full hardware bring-up: wake the adapter, reset the MAC, program the
 * media/SIA, load the ring base addresses, enable Rx/Tx and unmask
 * interrupts, then program the receive filter.  Returns 0 or negative
 * errno from the reset step. */
1229 static int de_init_hw (struct de_private *de)
1231 struct net_device *dev = de->dev;
1235 de_adapter_wake(de);
1237 macmode = dr32(MacMode) & ~MacModeClear;
1239 rc = de_reset_mac(de);
1243 de_set_media(de); /* reset phy */
/* Rx ring sits first in the DMA block, Tx ring immediately after. */
1245 dw32(RxRingAddr, de->ring_dma);
1246 dw32(TxRingAddr, de->ring_dma + (sizeof(struct de_desc) * DE_RX_RING_SIZE));
1248 dw32(MacMode, RxTx | macmode);
1250 dr32(RxMissed); /* self-clearing */
1252 dw32(IntrMask, de_intr_mask);
1254 de_set_rx_mode(dev);
/* Allocate and DMA-map an skb for every Rx ring slot, handing each
 * descriptor to the chip (DescOwn) with RingEnd on the last slot.
 * Returns 0, or negative errno on allocation failure. */
1259 static int de_refill_rx (struct de_private *de)
1263 for (i = 0; i < DE_RX_RING_SIZE; i++) {
1264 struct sk_buff *skb;
1266 skb = dev_alloc_skb(de->rx_buf_sz);
1272 de->rx_skb[i].mapping = pci_map_single(de->pdev,
1273 skb->tail, de->rx_buf_sz, PCI_DMA_FROMDEVICE);
1274 de->rx_skb[i].skb = skb;
1276 de->rx_ring[i].opts1 = cpu_to_le32(DescOwn);
1277 if (i == (DE_RX_RING_SIZE - 1))
1278 de->rx_ring[i].opts2 =
1279 cpu_to_le32(RingEnd | de->rx_buf_sz);
1281 de->rx_ring[i].opts2 = cpu_to_le32(de->rx_buf_sz);
1282 de->rx_ring[i].addr1 = cpu_to_le32(de->rx_skb[i].mapping);
1283 de->rx_ring[i].addr2 = 0;
/* Zero the Tx ring, reset the head/tail indices and fill the Rx ring. */
1293 static int de_init_rings (struct de_private *de)
1295 memset(de->tx_ring, 0, sizeof(struct de_desc) * DE_TX_RING_SIZE);
1296 de->tx_ring[DE_TX_RING_SIZE - 1].opts2 = cpu_to_le32(RingEnd);
1299 de->tx_head = de->tx_tail = 0;
1301 return de_refill_rx (de);
/* Allocate the shared Rx+Tx descriptor block (coherent DMA memory) and
 * initialize both rings. */
1304 static int de_alloc_rings (struct de_private *de)
1306 de->rx_ring = pci_alloc_consistent(de->pdev, DE_RING_BYTES, &de->ring_dma);
1309 de->tx_ring = &de->rx_ring[DE_RX_RING_SIZE];
1310 return de_init_rings(de);
/* Tear down both rings: clear the descriptors (keeping RingEnd on the
 * last slot of each), unmap and free every Rx skb, and unmap/count
 * every real Tx skb (dummy and setup-frame placeholders carry no
 * packet buffer). */
1313 static void de_clean_rings (struct de_private *de)
1317 memset(de->rx_ring, 0, sizeof(struct de_desc) * DE_RX_RING_SIZE);
1318 de->rx_ring[DE_RX_RING_SIZE - 1].opts2 = cpu_to_le32(RingEnd);
1320 memset(de->tx_ring, 0, sizeof(struct de_desc) * DE_TX_RING_SIZE);
1321 de->tx_ring[DE_TX_RING_SIZE - 1].opts2 = cpu_to_le32(RingEnd);
1324 for (i = 0; i < DE_RX_RING_SIZE; i++) {
1325 if (de->rx_skb[i].skb) {
1326 pci_unmap_single(de->pdev, de->rx_skb[i].mapping,
1327 de->rx_buf_sz, PCI_DMA_FROMDEVICE);
1328 dev_kfree_skb(de->rx_skb[i].skb);
1332 for (i = 0; i < DE_TX_RING_SIZE; i++) {
1333 struct sk_buff *skb = de->tx_skb[i].skb;
1334 if ((skb) && (skb != DE_DUMMY_SKB)) {
1335 if (skb != DE_SETUP_SKB) {
/* Unsent packets are dropped, not transmitted. */
1337 de->net_stats.tx_dropped++;
1338 pci_unmap_single(de->pdev,
1339 de->tx_skb[i].mapping,
1340 skb->len, PCI_DMA_TODEVICE);
1342 pci_unmap_single(de->pdev,
1343 de->tx_skb[i].mapping,
1344 sizeof(de->setup_frame),
1350 memset(&de->rx_skb, 0, sizeof(struct ring_info) * DE_RX_RING_SIZE);
1351 memset(&de->tx_skb, 0, sizeof(struct ring_info) * DE_TX_RING_SIZE);
/* Clean both rings, then release the coherent descriptor block. */
1354 static void de_free_rings (struct de_private *de)
1357 pci_free_consistent(de->pdev, DE_RING_BYTES, de->rx_ring, de->ring_dma);
/* dev->open entry point: size the Rx buffers from the MTU, allocate
 * the rings, bring up the hardware, claim the (shared) IRQ, start the
 * queue and arm the media watchdog.  Returns 0 or negative errno,
 * unwinding on each failure. */
1362 static int de_open (struct net_device *dev)
1364 struct de_private *de = dev->priv;
1366 unsigned long flags;
1368 if (netif_msg_ifup(de))
1369 printk(KERN_DEBUG "%s: enabling interface\n", dev->name);
/* Standard MTU fits in PKT_BUF_SZ; larger MTUs add header slack. */
1371 de->rx_buf_sz = (dev->mtu <= 1500 ? PKT_BUF_SZ : dev->mtu + 32);
1373 rc = de_alloc_rings(de);
1375 printk(KERN_ERR "%s: ring allocation failure, err=%d\n",
1380 rc = de_init_hw(de);
1382 printk(KERN_ERR "%s: h/w init failure, err=%d\n",
1387 rc = request_irq(dev->irq, de_interrupt, SA_SHIRQ, dev->name, dev);
1389 printk(KERN_ERR "%s: IRQ %d request failure, err=%d\n",
1390 dev->name, dev->irq, rc);
1394 netif_start_queue(dev);
1395 mod_timer(&de->media_timer, jiffies + DE_TIMER_NO_LINK);
1400 spin_lock_irqsave(&de->lock, flags);
1402 spin_unlock_irqrestore(&de->lock, flags);
/* dev->stop entry point: kill the watchdog, stop the queue and
 * hardware under the lock, release the IRQ, then put the adapter to
 * sleep and disable the PCI device. */
1409 static int de_close (struct net_device *dev)
1411 struct de_private *de = dev->priv;
1412 unsigned long flags;
1414 if (netif_msg_ifdown(de))
1415 printk(KERN_DEBUG "%s: disabling interface\n", dev->name);
1417 del_timer_sync(&de->media_timer);
1419 spin_lock_irqsave(&de->lock, flags);
1421 netif_stop_queue(dev);
1422 netif_carrier_off(dev);
1423 spin_unlock_irqrestore(&de->lock, flags);
1425 free_irq(dev->irq, dev);
1428 de_adapter_sleep(de);
1429 pci_disable_device(de->pdev);
/* dev->tx_timeout entry point: dump diagnostic state, quiesce the chip
 * with the IRQ disabled, collect final error counts, then reinitialize
 * and restart the queue. */
1433 static void de_tx_timeout (struct net_device *dev)
1435 struct de_private *de = dev->priv;
1437 printk(KERN_DEBUG "%s: NIC status %08x mode %08x sia %08x desc %u/%u/%u\n",
1438 dev->name, dr32(MacStatus), dr32(MacMode), dr32(SIAStatus),
1439 de->rx_tail, de->tx_head, de->tx_tail);
1441 del_timer_sync(&de->media_timer);
/* Block the interrupt handler while we tear the rings down. */
1443 disable_irq(dev->irq);
1444 spin_lock_irq(&de->lock);
1447 netif_stop_queue(dev);
1448 netif_carrier_off(dev);
1450 spin_unlock_irq(&de->lock);
1451 enable_irq(dev->irq);
1453 /* Update the error counts. */
1456 synchronize_irq(dev->irq);
1461 netif_wake_queue(dev);
/* Snapshot all DE_NUM_REGS CSRs into buf, one u32 each; CSRs sit 8 bytes
 * apart in MMIO space, hence the i * 8 register offset.  Caller holds
 * de->lock (see de_get_regs). */
1464 static void __de_get_regs(struct de_private *de, u8 *buf)
1467 u32 *rbuf = (u32 *)buf;
1470 for (i = 0; i < DE_NUM_REGS; i++)
1471 rbuf[i] = dr32(i * 8);
1473 /* handle self-clearing RxMissed counter, CSR8 */
/* Reading CSR8 above cleared it; fold the read value into the stats. */
1474 de_rx_missed(de, rbuf[8]);
/* Fill *ecmd with the current link configuration for ETHTOOL_GSET.
 * Caller holds de->lock.  Only 10 Mbit media exist on these chips, so
 * speed is always SPEED_10; port/duplex/autoneg follow de->media_type. */
1477 static int __de_get_settings(struct de_private *de, struct ethtool_cmd *ecmd)
1479 ecmd->supported = de->media_supported;
1480 ecmd->transceiver = XCVR_INTERNAL;
1481 ecmd->phy_address = 0;
1482 ecmd->advertising = de->media_advertise;
1484 switch (de->media_type) {
1486 ecmd->port = PORT_AUI;
1490 ecmd->port = PORT_BNC;
1494 ecmd->port = PORT_TP;
1495 ecmd->speed = SPEED_10;
/* Duplex is read back from the live MAC mode register, not cached state. */
1499 if (dr32(MacMode) & FullDuplex)
1500 ecmd->duplex = DUPLEX_FULL;
1502 ecmd->duplex = DUPLEX_HALF;
1505 ecmd->autoneg = AUTONEG_DISABLE;
1507 ecmd->autoneg = AUTONEG_ENABLE;
1509 /* ignore maxtxpkt, maxrxpkt for now */
/* Validate and apply an ETHTOOL_SSET request.  Caller holds de->lock.
 * Rejects anything the hardware cannot do (non-10Mbit speed, BNC on a
 * 21040, external transceiver, advertising bits outside the supported
 * set), maps the requested port/duplex/autoneg to a DE_MEDIA_* type, and
 * commits it only if something actually changed.  Media restart lines
 * are elided from this listing. */
1514 static int __de_set_settings(struct de_private *de, struct ethtool_cmd *ecmd)
1517 unsigned int media_lock;
/* NOTE(review): speeds 5 and 2 are accepted alongside SPEED_10 — presumably
 * historical magic values from older ethtool users; confirm before touching. */
1519 if (ecmd->speed != SPEED_10 && ecmd->speed != 5 && ecmd->speed != 2)
1521 if (de->de21040 && ecmd->speed == 2)
1523 if (ecmd->duplex != DUPLEX_HALF && ecmd->duplex != DUPLEX_FULL)
1525 if (ecmd->port != PORT_TP && ecmd->port != PORT_AUI && ecmd->port != PORT_BNC)
/* 21040 has no BNC port. */
1527 if (de->de21040 && ecmd->port == PORT_BNC)
1529 if (ecmd->transceiver != XCVR_INTERNAL)
1531 if (ecmd->autoneg != AUTONEG_DISABLE && ecmd->autoneg != AUTONEG_ENABLE)
1533 if (ecmd->advertising & ~de->media_supported)
1535 if (ecmd->autoneg == AUTONEG_ENABLE &&
1536 (!(ecmd->advertising & ADVERTISED_Autoneg)))
/* Map the requested port to an internal media type, cross-checking that
 * the advertising mask is consistent with the chosen port. */
1539 switch (ecmd->port) {
1541 new_media = DE_MEDIA_AUI;
1542 if (!(ecmd->advertising & ADVERTISED_AUI))
1546 new_media = DE_MEDIA_BNC;
1547 if (!(ecmd->advertising & ADVERTISED_BNC))
1551 if (ecmd->autoneg == AUTONEG_ENABLE)
1552 new_media = DE_MEDIA_TP_AUTO;
1553 else if (ecmd->duplex == DUPLEX_FULL)
1554 new_media = DE_MEDIA_TP_FD;
1556 new_media = DE_MEDIA_TP;
1557 if (!(ecmd->advertising & ADVERTISED_TP))
1559 if (!(ecmd->advertising & (ADVERTISED_10baseT_Full | ADVERTISED_10baseT_Half)))
/* media_lock pins the media type when autoneg is off. */
1564 media_lock = (ecmd->autoneg == AUTONEG_ENABLE) ? 0 : 1;
1566 if ((new_media == de->media_type) &&
1567 (media_lock == de->media_lock) &&
1568 (ecmd->advertising == de->media_advertise))
1569 return 0; /* nothing to change */
1574 de->media_type = new_media;
1575 de->media_lock = media_lock;
1576 de->media_advertise = ecmd->advertising;
/* ethtool get_drvinfo: report driver name/version, PCI bus address, and
 * the EEPROM dump size.
 * NOTE(review): unchecked strcpy() into the fixed-size ethtool_drvinfo
 * fields — safe for these short constant strings, but strlcpy would be
 * the defensive choice. */
1582 static void de_get_drvinfo (struct net_device *dev,struct ethtool_drvinfo *info)
1584 struct de_private *de = dev->priv;
1586 strcpy (info->driver, DRV_NAME);
1587 strcpy (info->version, DRV_VERSION);
1588 strcpy (info->bus_info, pci_name(de->pdev));
1589 info->eedump_len = DE_EEPROM_SIZE;
/* ethtool get_regs_len: size of the register dump produced by de_get_regs. */
1592 static int de_get_regs_len(struct net_device *dev)
1594 return DE_REGS_SIZE;
/* ethtool get_settings: lock wrapper around __de_get_settings. */
1597 static int de_get_settings(struct net_device *dev, struct ethtool_cmd *ecmd)
1599 struct de_private *de = dev->priv;
1602 spin_lock_irq(&de->lock);
1603 rc = __de_get_settings(de, ecmd);
1604 spin_unlock_irq(&de->lock);
/* ethtool set_settings: lock wrapper around __de_set_settings. */
1609 static int de_set_settings(struct net_device *dev, struct ethtool_cmd *ecmd)
1611 struct de_private *de = dev->priv;
1614 spin_lock_irq(&de->lock);
1615 rc = __de_set_settings(de, ecmd);
1616 spin_unlock_irq(&de->lock);
/* ethtool get_msglevel: return the driver's message-enable bitmask. */
1621 static u32 de_get_msglevel(struct net_device *dev)
1623 struct de_private *de = dev->priv;
1625 return de->msg_enable;
/* ethtool set_msglevel: update the driver's message-enable bitmask. */
1628 static void de_set_msglevel(struct net_device *dev, u32 msglvl)
1630 struct de_private *de = dev->priv;
1632 de->msg_enable = msglvl;
/* ethtool get_eeprom: serve the cached EEPROM image (de->ee_data).  Only
 * a whole-image read is supported: offset and magic must be 0 and len
 * must equal DE_EEPROM_SIZE, otherwise the request is rejected. */
1635 static int de_get_eeprom(struct net_device *dev,
1636 struct ethtool_eeprom *eeprom, u8 *data)
1638 struct de_private *de = dev->priv;
1642 if ((eeprom->offset != 0) || (eeprom->magic != 0) ||
1643 (eeprom->len != DE_EEPROM_SIZE))
1645 memcpy(data, de->ee_data, eeprom->len);
/* ethtool nway_reset: restart N-Way autonegotiation by clearing the NWay
 * state bits and setting NWayRestart in the SIA status register.  Only
 * meaningful in TP-autoneg mode, and skipped while the link is up. */
1650 static int de_nway_reset(struct net_device *dev)
1652 struct de_private *de = dev->priv;
1655 if (de->media_type != DE_MEDIA_TP_AUTO)
1657 if (netif_carrier_ok(de->dev))
1660 status = dr32(SIAStatus);
1661 dw32(SIAStatus, (status & ~NWayState) | NWayRestart);
1662 if (netif_msg_link(de))
1663 printk(KERN_INFO "%s: link nway restart, status %x,%x\n",
1664 de->dev->name, status, dr32(SIAStatus));
/* ethtool get_regs: dump all CSRs under the driver lock.  regs->version
 * encodes the dump layout version in the high bits and the chip flavor
 * (1 = 21040, 0 = 21041) in bit 0. */
1668 static void de_get_regs(struct net_device *dev, struct ethtool_regs *regs,
1671 struct de_private *de = dev->priv;
1673 regs->version = (DE_REGS_VER << 2) | de->de21040;
1675 spin_lock_irq(&de->lock);
1676 __de_get_regs(de, data);
1677 spin_unlock_irq(&de->lock);
/* ethtool operations table wired into dev->ethtool_ops at probe time.
 * Generic ethtool_op_* helpers serve link/tx-csum/sg queries; the rest
 * are the driver-specific handlers defined above. */
1680 static struct ethtool_ops de_ethtool_ops = {
1681 .get_link = ethtool_op_get_link,
1682 .get_tx_csum = ethtool_op_get_tx_csum,
1683 .get_sg = ethtool_op_get_sg,
1684 .get_drvinfo = de_get_drvinfo,
1685 .get_regs_len = de_get_regs_len,
1686 .get_settings = de_get_settings,
1687 .set_settings = de_set_settings,
1688 .get_msglevel = de_get_msglevel,
1689 .set_msglevel = de_set_msglevel,
1690 .get_eeprom = de_get_eeprom,
1691 .nway_reset = de_nway_reset,
1692 .get_regs = de_get_regs,
/* Read the 6-byte MAC address from the 21040's serial ROM, one byte per
 * ROMCmd read.  A dummy write first resets the chip's internal ROM
 * pointer; subsequent reads auto-advance it. */
1695 static void __init de21040_get_mac_address (struct de_private *de)
1699 dw32 (ROMCmd, 0); /* Reset the pointer with a dummy write. */
1701 for (i = 0; i < 6; i++) {
1702 int value, boguscnt = 100000;
/* value < 0 tests the "data not ready" bit (bit 31 of the u32 read into
 * a signed int); poll with a bogus counter so a dead ROM cannot hang us.
 * NOTE(review): this is presumably a do/while re-read loop in the full
 * source — the intervening line is elided from this listing. */
1704 value = dr32(ROMCmd);
1705 while (value < 0 && --boguscnt > 0);
1706 de->dev->dev_addr[i] = value;
1708 printk(KERN_WARNING PFX "timeout reading 21040 MAC address byte %u\n", i);
/* Initialize media state for the 21040, which has no SROM: default to TP,
 * advertise everything supported (TP half/full duplex plus AUI), and load
 * the per-media SIA CSR values from the fixed t21040_* tables.  Media
 * types outside the handled cases are marked invalid. */
1712 static void __init de21040_get_media_info(struct de_private *de)
1716 de->media_type = DE_MEDIA_TP;
1717 de->media_supported |= SUPPORTED_TP | SUPPORTED_10baseT_Full |
1718 SUPPORTED_10baseT_Half | SUPPORTED_AUI;
1719 de->media_advertise = de->media_supported;
1721 for (i = 0; i < DE_MAX_MEDIA; i++) {
1725 case DE_MEDIA_TP_FD:
1726 de->media[i].type = i;
1727 de->media[i].csr13 = t21040_csr13[i];
1728 de->media[i].csr14 = t21040_csr14[i];
1729 de->media[i].csr15 = t21040_csr15[i];
1732 de->media[i].type = DE_MEDIA_INVALID;
1738 /* Note: this routine returns extra data bits for size detection. */
/* Bit-bang one word out of the serial (93CXX-style) EEPROM behind the
 * ROMCmd register: raise chip select, clock out the read opcode plus the
 * addr_len-bit address MSB-first, clock in 16 data bits, then deselect.
 * Delay lines between clock edges are elided from this listing. */
1739 static unsigned __init tulip_read_eeprom(void *regs, int location, int addr_len)
1742 unsigned retval = 0;
1743 void *ee_addr = regs + ROMCmd;
1744 int read_cmd = location | (EE_READ_CMD << addr_len);
/* Select the EEPROM: CS low pulse, then enable with CS high. */
1746 writel(EE_ENB & ~EE_CS, ee_addr);
1747 writel(EE_ENB, ee_addr);
1749 /* Shift the read command bits out. */
1750 for (i = 4 + addr_len; i >= 0; i--) {
1751 short dataval = (read_cmd & (1 << i)) ? EE_DATA_WRITE : 0;
1752 writel(EE_ENB | dataval, ee_addr);
1754 writel(EE_ENB | dataval | EE_SHIFT_CLK, ee_addr);
/* Sample DO on each clock; collects the "dummy"/size-detect bits too. */
1756 retval = (retval << 1) | ((readl(ee_addr) & EE_DATA_READ) ? 1 : 0);
1758 writel(EE_ENB, ee_addr);
/* Clock in the 16 data bits, MSB first. */
1761 for (i = 16; i > 0; i--) {
1762 writel(EE_ENB | EE_SHIFT_CLK, ee_addr);
1764 retval = (retval << 1) | ((readl(ee_addr) & EE_DATA_READ) ? 1 : 0);
1765 writel(EE_ENB, ee_addr);
1769 /* Terminate the EEPROM access. */
1770 writel(EE_ENB & ~EE_CS, ee_addr);
/* Parse the 21041's SROM: download the whole EEPROM, extract the MAC
 * address (honoring the legacy "address at offset 0" boards), locate the
 * controller-0 info leaf, pick the default media type, walk the media
 * blocks to build de->media[] (including any per-media custom SIA CSRs),
 * and cache a copy of the EEPROM for ethtool.  On a malformed SROM the
 * bad-srom tail assumes all media are supported with default CSRs.
 * Several branch/label lines are elided from this listing. */
1774 static void __init de21041_get_srom_info (struct de_private *de)
1776 unsigned i, sa_offset = 0, ofs;
/* +6 slack lets the MAC copy below read past DE_EEPROM_SIZE safely. */
1777 u8 ee_data[DE_EEPROM_SIZE + 6] = {};
/* Probe address width: bit 18 of a dummy wide read distinguishes 8- vs
 * 6-bit-address EEPROM parts. */
1778 unsigned ee_addr_size = tulip_read_eeprom(de->regs, 0xff, 8) & 0x40000 ? 8 : 6;
1779 struct de_srom_info_leaf *il;
1782 /* download entire eeprom */
1783 for (i = 0; i < DE_EEPROM_WORDS; i++)
1784 ((u16 *)ee_data)[i] =
1785 le16_to_cpu(tulip_read_eeprom(de->regs, i, ee_addr_size));
1787 /* DEC now has a specification but early board makers
1788 just put the address in the first EEPROM locations. */
1789 /* This does memcmp(eedata, eedata+16, 8) */
1790 for (i = 0; i < 8; i ++)
1791 if (ee_data[i] != ee_data[16+i])
1794 /* store MAC address */
1795 for (i = 0; i < 6; i ++)
1796 de->dev->dev_addr[i] = ee_data[i + sa_offset];
1798 /* get offset of controller 0 info leaf. ignore 2nd byte. */
1799 ofs = ee_data[SROMC0InfoLeaf];
/* Bounds-check the leaf offset so the parse below stays inside ee_data. */
1800 if (ofs >= (sizeof(ee_data) - sizeof(struct de_srom_info_leaf) - sizeof(struct de_srom_media_block)))
1803 /* get pointer to info leaf */
1804 il = (struct de_srom_info_leaf *) &ee_data[ofs];
1806 /* paranoia checks */
1807 if (il->n_blocks == 0)
1809 if ((sizeof(ee_data) - ofs) <
1810 (sizeof(struct de_srom_info_leaf) + (sizeof(struct de_srom_media_block) * il->n_blocks)))
1813 /* get default media type */
/* Unaligned 16-bit read: the leaf is not naturally aligned in ee_data. */
1814 switch (DE_UNALIGNED_16(&il->default_media)) {
1815 case 0x0001: de->media_type = DE_MEDIA_BNC; break;
1816 case 0x0002: de->media_type = DE_MEDIA_AUI; break;
1817 case 0x0204: de->media_type = DE_MEDIA_TP_FD; break;
1818 default: de->media_type = DE_MEDIA_TP_AUTO; break;
1821 if (netif_msg_probe(de))
1822 printk(KERN_INFO "de%d: SROM leaf offset %u, default media %s\n",
1824 media_name[de->media_type]);
1826 /* init SIA register values to defaults */
/* 0xffff marks "no custom CSR"; replaced with table defaults below. */
1827 for (i = 0; i < DE_MAX_MEDIA; i++) {
1828 de->media[i].type = DE_MEDIA_INVALID;
1829 de->media[i].csr13 = 0xffff;
1830 de->media[i].csr14 = 0xffff;
1831 de->media[i].csr15 = 0xffff;
1834 /* parse media blocks to see what medias are supported,
1835 * and if any custom CSR values are provided
1837 bufp = ((void *)il) + sizeof(*il);
1838 for (i = 0; i < il->n_blocks; i++) {
1839 struct de_srom_media_block *ib = bufp;
1842 /* index based on media type in media block */
1843 switch(ib->opts & MediaBlockMask) {
1844 case 0: /* 10baseT */
1845 de->media_supported |= SUPPORTED_TP | SUPPORTED_10baseT_Half
1846 | SUPPORTED_Autoneg;
/* Any TP block also enables the TP-autoneg pseudo-media. */
1848 de->media[DE_MEDIA_TP_AUTO].type = DE_MEDIA_TP_AUTO;
1851 de->media_supported |= SUPPORTED_BNC;
1855 de->media_supported |= SUPPORTED_AUI;
1858 case 4: /* 10baseT-FD */
1859 de->media_supported |= SUPPORTED_TP | SUPPORTED_10baseT_Full
1860 | SUPPORTED_Autoneg;
1861 idx = DE_MEDIA_TP_FD;
1862 de->media[DE_MEDIA_TP_AUTO].type = DE_MEDIA_TP_AUTO;
1868 de->media[idx].type = idx;
1870 if (netif_msg_probe(de))
1871 printk(KERN_INFO "de%d: media block #%u: %s",
1873 media_name[de->media[idx].type]);
/* Media blocks are variable-length: advance past opts, then past the
 * three CSR words only when the block carries custom CSR values. */
1875 bufp += sizeof (ib->opts);
1877 if (ib->opts & MediaCustomCSRs) {
1878 de->media[idx].csr13 = DE_UNALIGNED_16(&ib->csr13);
1879 de->media[idx].csr14 = DE_UNALIGNED_16(&ib->csr14);
1880 de->media[idx].csr15 = DE_UNALIGNED_16(&ib->csr15);
1881 bufp += sizeof(ib->csr13) + sizeof(ib->csr14) +
1884 if (netif_msg_probe(de))
1885 printk(" (%x,%x,%x)\n",
1886 de->media[idx].csr13,
1887 de->media[idx].csr14,
1888 de->media[idx].csr15);
1890 } else if (netif_msg_probe(de))
/* Stop if the walk would run off the end of the EEPROM image. */
1893 if (bufp > ((void *)&ee_data[DE_EEPROM_SIZE - 3]))
1897 de->media_advertise = de->media_supported;
1900 /* fill in defaults, for cases where custom CSRs not used */
1901 for (i = 0; i < DE_MAX_MEDIA; i++) {
1902 if (de->media[i].csr13 == 0xffff)
1903 de->media[i].csr13 = t21041_csr13[i];
1904 if (de->media[i].csr14 == 0xffff)
1905 de->media[i].csr14 = t21041_csr14[i];
1906 if (de->media[i].csr15 == 0xffff)
1907 de->media[i].csr15 = t21041_csr15[i];
/* Cache the raw EEPROM image for the ethtool get_eeprom handler.
 * NOTE(review): the kmalloc NULL-check line is elided from this listing
 * — confirm the copy is guarded in the full source. */
1910 de->ee_data = kmalloc(DE_EEPROM_SIZE, GFP_KERNEL);
1912 memcpy(de->ee_data, &ee_data[0], DE_EEPROM_SIZE);
1917 /* for error cases, it's ok to assume we support all these */
1918 for (i = 0; i < DE_MAX_MEDIA; i++)
1919 de->media[i].type = i;
1920 de->media_supported =
1921 SUPPORTED_10baseT_Half |
1922 SUPPORTED_10baseT_Full |
/* PCI probe: allocate and populate the net_device, wire up the driver
 * entry points, claim PCI resources, map the MMIO CSR window, wake and
 * reset the chip, read the MAC address and media tables (21040 vs 21041
 * paths), register the interface, and leave the adapter asleep until
 * open.  Error-unwind labels are elided from this listing. */
1930 static int __devinit de_init_one (struct pci_dev *pdev,
1931 const struct pci_device_id *ent)
1933 struct net_device *dev;
1934 struct de_private *de;
/* Static: numbers boards in probe order (increment elided from listing). */
1938 static int board_idx = -1;
/* Print the version banner once, on first probe. */
1944 printk("%s", version);
1947 /* allocate a new ethernet device structure, and fill in defaults */
1948 dev = alloc_etherdev(sizeof(struct de_private));
1952 SET_MODULE_OWNER(dev);
1953 SET_NETDEV_DEV(dev, &pdev->dev);
1954 dev->open = de_open;
1955 dev->stop = de_close;
1956 dev->set_multicast_list = de_set_rx_mode;
1957 dev->hard_start_xmit = de_start_xmit;
1958 dev->get_stats = de_get_stats;
1959 dev->ethtool_ops = &de_ethtool_ops;
1960 dev->tx_timeout = de_tx_timeout;
1961 dev->watchdog_timeo = TX_TIMEOUT;
1963 dev->irq = pdev->irq;
/* driver_data from the PCI id table distinguishes 21040 (0) from 21041. */
1966 de->de21040 = ent->driver_data == 0 ? 1 : 0;
1969 de->msg_enable = (debug < 0 ? DE_DEF_MSG_ENABLE : debug);
1970 de->board_idx = board_idx;
1971 spin_lock_init (&de->lock);
/* Media-poll timer; handler differs per chip flavor. */
1972 init_timer(&de->media_timer);
1974 de->media_timer.function = de21040_media_timer;
1976 de->media_timer.function = de21041_media_timer;
1977 de->media_timer.data = (unsigned long) de;
1979 netif_carrier_off(dev);
1980 netif_stop_queue(dev);
1982 /* wake up device, assign resources */
1983 rc = pci_enable_device(pdev);
1987 /* reserve PCI resources to ensure driver atomicity */
1988 rc = pci_request_regions(pdev, DRV_NAME);
1990 goto err_out_disable;
1992 /* check for invalid IRQ value */
/* IRQs 0 and 1 are never valid device interrupts on PC hardware. */
1993 if (pdev->irq < 2) {
1995 printk(KERN_ERR PFX "invalid irq (%d) for pci dev %s\n",
1996 pdev->irq, pci_name(pdev));
2000 /* obtain and check validity of PCI I/O address */
/* BAR 1 is the chip's memory-mapped CSR window. */
2001 pciaddr = pci_resource_start(pdev, 1);
2004 printk(KERN_ERR PFX "no MMIO resource for pci dev %s\n",
2008 if (pci_resource_len(pdev, 1) < DE_REGS_SIZE) {
2010 printk(KERN_ERR PFX "MMIO resource (%lx) too small on pci dev %s\n",
2011 pci_resource_len(pdev, 1), pci_name(pdev));
2015 /* remap CSR registers */
2016 regs = ioremap_nocache(pciaddr, DE_REGS_SIZE);
2019 printk(KERN_ERR PFX "Cannot map PCI MMIO (%lx@%lx) on pci dev %s\n",
2020 pci_resource_len(pdev, 1), pciaddr, pci_name(pdev));
2023 dev->base_addr = (unsigned long) regs;
2026 de_adapter_wake(de);
2028 /* make sure hardware is not running */
2029 rc = de_reset_mac(de);
2031 printk(KERN_ERR PFX "Cannot reset MAC, pci dev %s\n",
2036 /* get MAC address, initialize default media type and
2037 * get list of supported media
2040 de21040_get_mac_address(de);
2041 de21040_get_media_info(de);
2043 de21041_get_srom_info(de);
2046 /* register new network interface with kernel */
2047 rc = register_netdev(dev);
2051 /* print info about board and interface just registered */
2052 printk (KERN_INFO "%s: %s at 0x%lx, "
2053 "%02x:%02x:%02x:%02x:%02x:%02x, "
2056 de->de21040 ? "21040" : "21041",
2058 dev->dev_addr[0], dev->dev_addr[1],
2059 dev->dev_addr[2], dev->dev_addr[3],
2060 dev->dev_addr[4], dev->dev_addr[5],
2063 pci_set_drvdata(pdev, dev);
2065 /* enable busmastering */
2066 pci_set_master(pdev);
2068 /* put adapter to sleep */
2069 de_adapter_sleep(de);
2078 pci_release_regions(pdev);
2080 pci_disable_device(pdev);
/* PCI remove: unregister the interface, release PCI resources, and clear
 * the drvdata pointer.  (Unmap/free lines are elided from this listing.)
 * NOTE(review): marked __exit and registered via __exit_p — a hot-unplug
 * remove handler would conventionally be __devexit/__devexit_p; confirm
 * this is intentional for this driver. */
2086 static void __exit de_remove_one (struct pci_dev *pdev)
2088 struct net_device *dev = pci_get_drvdata(pdev);
2089 struct de_private *de = dev->priv;
2093 unregister_netdev(dev);
2097 pci_release_regions(pdev);
2098 pci_disable_device(pdev);
2099 pci_set_drvdata(pdev, NULL);
/* PM suspend: if the interface is running, quiesce it exactly as
 * de_tx_timeout does (timer off, IRQ masked, queue stopped, device
 * detached), flush error counters, then sleep the chip and disable the
 * PCI device.  A non-running interface is merely detached.
 * NOTE(review): the u32 state parameter is the legacy pre-pm_message_t
 * PM API signature. */
2105 static int de_suspend (struct pci_dev *pdev, u32 state)
2107 struct net_device *dev = pci_get_drvdata (pdev);
2108 struct de_private *de = dev->priv;
2111 if (netif_running (dev)) {
2112 del_timer_sync(&de->media_timer);
2114 disable_irq(dev->irq);
2115 spin_lock_irq(&de->lock);
2118 netif_stop_queue(dev);
/* Detach so the core stops submitting work while suspended. */
2119 netif_device_detach(dev);
2120 netif_carrier_off(dev);
2122 spin_unlock_irq(&de->lock);
2123 enable_irq(dev->irq);
2125 /* Update the error counts. */
2128 synchronize_irq(dev->irq);
2131 de_adapter_sleep(de);
2132 pci_disable_device(pdev);
2134 netif_device_detach(dev);
/* PM resume: no-op if the device was never detached; otherwise re-enable
 * the PCI device (for a running interface) and re-attach.  Hardware
 * re-init lines are elided from this listing.
 * NOTE(review): the pci_enable_device() return value is not checked here
 * — confirm whether the full source handles its failure. */
2140 static int de_resume (struct pci_dev *pdev)
2142 struct net_device *dev = pci_get_drvdata (pdev);
2143 struct de_private *de = dev->priv;
2146 if (netif_device_present(dev))
2148 if (netif_running(dev)) {
2149 pci_enable_device(pdev);
2151 netif_device_attach(dev);
2153 netif_device_attach(dev);
2160 #endif /* CONFIG_PM */
/* PCI driver descriptor: matches de_pci_tbl IDs and routes probe/remove
 * and (under CONFIG_PM) suspend/resume to the handlers above. */
2162 static struct pci_driver de_driver = {
2164 .id_table = de_pci_tbl,
2165 .probe = de_init_one,
/* __exit_p compiles to NULL when the remove section is discarded. */
2166 .remove = __exit_p(de_remove_one),
2168 .suspend = de_suspend,
2169 .resume = de_resume,
/* Module entry point: print the version banner and register the PCI
 * driver with the legacy pci_module_init() helper. */
2173 static int __init de_init (void)
2176 printk("%s", version);
2178 return pci_module_init (&de_driver);
/* Module exit point: unregister the PCI driver. */
2181 static void __exit de_exit (void)
2183 pci_unregister_driver (&de_driver);
/* Register module load/unload hooks. */
2186 module_init(de_init);
2187 module_exit(de_exit);