Written 1998-2000 by Donald Becker.

This software may be used and distributed according to the terms of
the GNU General Public License (GPL), incorporated herein by reference.
Drivers based on or derived from this code fall under the GPL and must
retain the authorship, copyright and license notice.  This file is not
a complete program and may only be used when the entire operating
system is licensed under the GPL.

The author may be reached as becker@scyld.com, or C/O
Scyld Computing Corporation
410 Severn Ave., Suite 210

Support information and updates available at
http://www.scyld.com/network/pci-skeleton.html
Version 2.51, Nov 17, 2001 (jgarzik):
- Replace some MII-related magic numbers with constants

#define DRV_NAME        "fealnx"
#define DRV_VERSION     "2.51"
#define DRV_RELDATE     "Nov-17-2001"
static int debug;                       /* 1 -> print debug messages */
static int max_interrupt_work = 20;

/* Maximum number of multicast addresses to filter (vs. Rx-all-multicast). */
static int multicast_filter_limit = 32;

/* Set the copy breakpoint for the copy-only-tiny-frames scheme.
   Setting to > 1518 effectively disables this feature. */
static int rx_copybreak;
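/* Illustrative numbers only (not driver defaults): with rx_copybreak=200, a
   60-byte ACK is copied into a freshly allocated skb so the full-sized ring
   buffer goes straight back to the NIC, while a 1514-byte frame is passed up
   in its original buffer with no copy. */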
/* Used to pass the media type, etc.
   Both 'options[]' and 'full_duplex[]' should exist for driver
   interoperability.
   The media type is usually passed in 'options[]'. */
#define MAX_UNITS 8             /* More are supported, limit only on options */
static int options[MAX_UNITS] = { -1, -1, -1, -1, -1, -1, -1, -1 };
static int full_duplex[MAX_UNITS] = { -1, -1, -1, -1, -1, -1, -1, -1 };
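/* Usage sketch (hypothetical values): "insmod fealnx full_duplex=1,1 debug=1"
   forces the first two cards to full duplex and enables debug output; entry N
   of each array configures card N, and entries left at -1 mean autodetect. */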
/* Operational parameters that are set at compile time.
   Keep the ring sizes a power of two for compile efficiency:
   the compiler will convert <unsigned>'%'<2^N> into a bit mask.
   Making the Tx ring too large decreases the effectiveness of channel
   bonding and packet priority.
   There are no ill effects from too-large receive rings. */
// #define TX_RING_SIZE 16
// #define RX_RING_SIZE 32
#define TX_RING_SIZE 6
#define RX_RING_SIZE 12
#define TX_TOTAL_SIZE   (TX_RING_SIZE * sizeof(struct fealnx_desc))
#define RX_TOTAL_SIZE   (RX_RING_SIZE * sizeof(struct fealnx_desc))
/* Operational parameters that usually are not changed. */
/* Time in jiffies before concluding the transmitter is hung. */
#define TX_TIMEOUT      (2*HZ)

#define PKT_BUF_SZ      1536    /* Size of each temporary Rx buffer. */
/* Include files, designed to support most kernel versions 2.0.0 and later. */
#include <linux/module.h>
#include <linux/kernel.h>
#include <linux/string.h>
#include <linux/timer.h>
#include <linux/errno.h>
#include <linux/ioport.h>
#include <linux/slab.h>
#include <linux/interrupt.h>
#include <linux/pci.h>
#include <linux/netdevice.h>
#include <linux/etherdevice.h>
#include <linux/skbuff.h>
#include <linux/init.h>
#include <linux/mii.h>
#include <linux/ethtool.h>
#include <linux/crc32.h>

#include <asm/processor.h>      /* Processor type for cache alignment. */
#include <asm/bitops.h>
#include <asm/uaccess.h>
/* These identify the driver base version and may not be removed. */
static char version[] __devinitdata =
    KERN_INFO DRV_NAME ".c:v" DRV_VERSION " " DRV_RELDATE "\n";
/* This driver was written to use PCI memory space, however some x86 systems
   work only with I/O space accesses. */

/* Kernel compatibility defines, some common to David Hinds' PCMCIA package. */
/* This is only in the support-all-kernels source code. */

#define RUN_AT(x) (jiffies + (x))
MODULE_AUTHOR("Myson or whoever");
MODULE_DESCRIPTION("Myson MTD-8xx 100/10M Ethernet PCI Adapter Driver");
MODULE_LICENSE("GPL");
MODULE_PARM(max_interrupt_work, "i");
//MODULE_PARM(min_pci_latency, "i");
MODULE_PARM(debug, "i");
MODULE_PARM(rx_copybreak, "i");
MODULE_PARM(multicast_filter_limit, "i");
MODULE_PARM(options, "1-" __MODULE_STRING(MAX_UNITS) "i");
MODULE_PARM(full_duplex, "1-" __MODULE_STRING(MAX_UNITS) "i");
MODULE_PARM_DESC(max_interrupt_work, "fealnx maximum events handled per interrupt");
MODULE_PARM_DESC(debug, "fealnx enable debugging (0-1)");
MODULE_PARM_DESC(rx_copybreak, "fealnx copy breakpoint for copy-only-tiny-frames");
MODULE_PARM_DESC(multicast_filter_limit, "fealnx maximum number of filtered multicast addresses");
MODULE_PARM_DESC(options, "fealnx: Bits 0-3: media type, bit 17: full duplex");
MODULE_PARM_DESC(full_duplex, "fealnx full duplex setting(s) (1)");
#define MIN_REGION_SIZE 136

    PCI_ADDR0 = 0x10 << 0,
    PCI_ADDR1 = 0x10 << 1,
    PCI_ADDR2 = 0x10 << 2,
    PCI_ADDR3 = 0x10 << 3,
/* A chip capabilities table, matching the entries in pci_tbl[] above. */
enum chip_capability_flags {

/* for different PHY */
enum phy_type_flags {

static struct chip_info skel_netdrv_tbl[] = {
    {"100/10M Ethernet PCI Adapter", 136, HAS_MII_XCVR},
    {"100/10M Ethernet PCI Adapter", 136, HAS_CHIP_XCVR},
    {"1000/100/10M Ethernet PCI Adapter", 136, HAS_MII_XCVR},
/* Offsets to the Command and Status Registers. */
enum fealnx_offsets {
    PAR0 = 0x0,             /* physical address 0-3 */
    PAR1 = 0x04,            /* physical address 4-5 */
    MAR0 = 0x08,            /* multicast address 0-3 */
    MAR1 = 0x0C,            /* multicast address 4-7 */
    FAR0 = 0x10,            /* flow-control address 0-3 */
    FAR1 = 0x14,            /* flow-control address 4-5 */
    TCRRCR = 0x18,          /* receive & transmit configuration */
    BCR = 0x1C,             /* bus command */
    TXPDR = 0x20,           /* transmit polling demand */
    RXPDR = 0x24,           /* receive polling demand */
    RXCWP = 0x28,           /* receive current word pointer */
    TXLBA = 0x2C,           /* transmit list base address */
    RXLBA = 0x30,           /* receive list base address */
    ISR = 0x34,             /* interrupt status */
    IMR = 0x38,             /* interrupt mask */
    FTH = 0x3C,             /* flow control high/low threshold */
    MANAGEMENT = 0x40,      /* bootrom/eeprom and mii management */
    TALLY = 0x44,           /* tally counters for crc and mpa */
    TSR = 0x48,             /* tally counter for transmit status */
    BMCRSR = 0x4c,          /* basic mode control and status */
    PHYIDENTIFIER = 0x50,   /* phy identifier */
    ANARANLPAR = 0x54,      /* auto-negotiation advertisement and link
                               partner ability */
    ANEROCR = 0x58,         /* auto-negotiation expansion and pci conf. */
    BPREMRPSR = 0x5c,       /* bypass & receive error mask and phy status */
/* The bits in the Intr Status/Enable registers, mostly interrupt sources. */
enum intr_status_bits {
    RFCON = 0x00020000,     /* receive flow control xon packet */
    RFCOFF = 0x00010000,    /* receive flow control xoff packet */
    LSCStatus = 0x00008000, /* link status change */
    ANCStatus = 0x00004000, /* autonegotiation completed */
    FBE = 0x00002000,       /* fatal bus error */
    FBEMask = 0x00001800,   /* mask bit12-11 */
    ParityErr = 0x00000000, /* parity error */
    TargetErr = 0x00001000, /* target abort */
    MasterErr = 0x00000800, /* master error */
    TUNF = 0x00000400,      /* transmit underflow */
    ROVF = 0x00000200,      /* receive overflow */
    ETI = 0x00000100,       /* transmit early int */
    ERI = 0x00000080,       /* receive early int */
    CNTOVF = 0x00000040,    /* counter overflow */
    RBU = 0x00000020,       /* receive buffer unavailable */
    TBU = 0x00000010,       /* transmit buffer unavailable */
    TI = 0x00000008,        /* transmit interrupt */
    RI = 0x00000004,        /* receive interrupt */
    RxErr = 0x00000002,     /* receive error */
/* Bits in the NetworkConfig register. */
    PROM = 0x80,            /* promiscuous mode */
    AB = 0x40,              /* accept broadcast */
    AM = 0x20,              /* accept multicast */
    ARP = 0x08,             /* receive runt pkt */
    ALP = 0x04,             /* receive long pkt */
    SEP = 0x02,             /* receive error pkt */
/* The Tulip Rx and Tx buffer descriptors. */
struct fealnx_desc {
    s32 status;
    s32 control;
    u32 buffer;
    u32 next_desc;
    struct fealnx_desc *next_desc_logical;
    struct sk_buff *skbuff;
};
/* Bits in network_desc.status */
enum rx_desc_status_bits {
    RXOWN = 0x80000000,     /* own bit */
    FLNGMASK = 0x0fff0000,  /* frame length */
    MARSTATUS = 0x00004000, /* multicast address received */
    BARSTATUS = 0x00002000, /* broadcast address received */
    PHYSTATUS = 0x00001000, /* physical address received */
    RXFSD = 0x00000800,     /* first descriptor */
    RXLSD = 0x00000400,     /* last descriptor */
    ErrorSummary = 0x80,    /* error summary */
    RUNT = 0x40,            /* runt packet received */
    LONG = 0x20,            /* long packet received */
    FAE = 0x10,             /* frame align error */
    CRC = 0x08,             /* crc error */
    RXER = 0x04,            /* receive error */
enum rx_desc_control_bits {
    RXIC = 0x00800000,      /* interrupt control */

enum tx_desc_status_bits {
    TXOWN = 0x80000000,     /* own bit */
    JABTO = 0x00004000,     /* jabber timeout */
    CSL = 0x00002000,       /* carrier sense lost */
    LC = 0x00001000,        /* late collision */
    EC = 0x00000800,        /* excessive collision */
    UDF = 0x00000400,       /* fifo underflow */
    DFR = 0x00000200,       /* deferred */
    HF = 0x00000100,        /* heartbeat fail */
    NCRMask = 0x000000ff,   /* collision retry count */

enum tx_desc_control_bits {
    TXIC = 0x80000000,      /* interrupt control */
    ETIControl = 0x40000000, /* early transmit interrupt */
    TXLD = 0x20000000,      /* last descriptor */
    TXFD = 0x10000000,      /* first descriptor */
    CRCEnable = 0x08000000, /* crc control */
    PADEnable = 0x04000000, /* padding control */
    RetryTxLC = 0x02000000, /* retry late collision */
    PKTSMask = 0x3ff800,    /* packet size bit21-11 */
    TBSMask = 0x000007ff,   /* transmit buffer bit 10-0 */
/* BootROM/EEPROM/MII Management Register */
#define MASK_MIIR_MII_READ      0x00000000
#define MASK_MIIR_MII_WRITE     0x00000008
#define MASK_MIIR_MII_MDO       0x00000004
#define MASK_MIIR_MII_MDI       0x00000002
#define MASK_MIIR_MII_MDC       0x00000001

/* ST+OP+PHYAD+REGAD+TA */
#define OP_READ     0x6000      /* ST:01+OP:10+PHYAD+REGAD+TA:Z0 */
#define OP_WRITE    0x5002      /* ST:01+OP:01+PHYAD+REGAD+TA:10 */
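/* Worked example (illustration): reading register 2 of the PHY at address 1
   is framed as OP_READ | (phyad << 7) | (regad << 2), i.e.
   0x6000 | (1 << 7) | (2 << 2) = 0x6088; m80x_send_cmd_to_phy() below shifts
   this pattern out MSB-first on the management pins. */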
/* ------------------------------------------------------------------------- */
/*      Constants for Myson PHY                                              */
/* ------------------------------------------------------------------------- */
#define MysonPHYID      0xd0000302
/* 89-7-27 add, (begin) */
#define MysonPHYID0     0x0302
#define StatusRegister  18
#define SPEED100        0x0400  // bit10
#define FULLMODE        0x0800  // bit11
/* 89-7-27 add, (end) */

/* ------------------------------------------------------------------------- */
/*      Constants for Seeq 80225 PHY                                         */
/* ------------------------------------------------------------------------- */
#define SeeqPHYID0      0x0016

#define MIIRegister18   18
#define SPD_DET_100     0x80
#define DPLX_DET_FULL   0x40

/* ------------------------------------------------------------------------- */
/*      Constants for Ahdoc 101 PHY                                          */
/* ------------------------------------------------------------------------- */
#define AhdocPHYID0     0x0022

#define DiagnosticReg   18
#define DPLX_FULL       0x0800
#define Speed_100       0x0400
/* -------------------------------------------------------------------------- */
/* -------------------------------------------------------------------------- */
#define MarvellPHYID0           0x0141
#define LevelOnePHYID0          0x0013

#define MII1000BaseTControlReg  9
#define MII1000BaseTStatusReg   10
#define SpecificReg             17

/* for 1000BaseT Control Register */
#define PHYAbletoPerform1000FullDuplex  0x0200
#define PHYAbletoPerform1000HalfDuplex  0x0100
#define PHY1000AbilityMask              0x300

// for phy specific status register, marvell phy.
#define SpeedMask       0x0c000
#define Speed_1000M     0x08000
#define Speed_100M      0x4000

#define Full_Duplex     0x2000

// 89/12/29 add, for phy specific status register, levelone phy, (begin)
#define LXT1000_100M    0x08000
#define LXT1000_1000M   0x0c000
#define LXT1000_Full    0x200
// 89/12/29 add, for phy specific status register, levelone phy, (end)

/* for 3-in-1 case */
#define PS10        0x00080000
#define FD          0x00100000
#define PS1000      0x00010000
#define LinkIsUp2   0x00040000

#define LinkIsUp    0x0004
struct netdev_private {
    /* Descriptor rings first for alignment. */
    struct fealnx_desc *rx_ring;
    struct fealnx_desc *tx_ring;

    dma_addr_t rx_ring_dma;
    dma_addr_t tx_ring_dma;

    spinlock_t lock;

    struct net_device_stats stats;

    /* Media monitoring timer. */
    struct timer_list timer;

    /* Frequently used values: keep some adjacent for cache effect. */
    int flags;
    struct pci_dev *pci_dev;
    unsigned long crvalue;
    unsigned long bcrvalue;
    unsigned long imrvalue;
    struct fealnx_desc *cur_rx;
    struct fealnx_desc *lack_rxbuf;
    int really_rx_count;

    struct fealnx_desc *cur_tx;
    struct fealnx_desc *cur_tx_copy;
    int really_tx_count;
    int free_tx_count;

    unsigned int rx_buf_sz;     /* Based on MTU+slack. */

    /* These values keep track of the transceiver/media in use. */
    unsigned int linkok;
    unsigned int line_speed;
    unsigned int duplexmode;
    unsigned int default_port:4; /* Last dev->if_port value. */
    unsigned int PHYType;

    /* MII transceiver section. */
    int mii_cnt;                /* Number of MII transceivers found. */
    unsigned char phys[2];      /* MII device addresses. */
    struct mii_if_info mii;
static int mdio_read(struct net_device *dev, int phy_id, int location);
static void mdio_write(struct net_device *dev, int phy_id, int location, int value);
static int netdev_open(struct net_device *dev);
static void getlinktype(struct net_device *dev);
static void getlinkstatus(struct net_device *dev);
static void netdev_timer(unsigned long data);
static void tx_timeout(struct net_device *dev);
static void init_ring(struct net_device *dev);
static int start_tx(struct sk_buff *skb, struct net_device *dev);
static irqreturn_t intr_handler(int irq, void *dev_instance, struct pt_regs *regs);
static int netdev_rx(struct net_device *dev);
static void set_rx_mode(struct net_device *dev);
static struct net_device_stats *get_stats(struct net_device *dev);
static int mii_ioctl(struct net_device *dev, struct ifreq *rq, int cmd);
static struct ethtool_ops netdev_ethtool_ops;
static int netdev_close(struct net_device *dev);
static void reset_rx_descriptors(struct net_device *dev);
void stop_nic_tx(long ioaddr, long crvalue)
{
    writel(crvalue & (~0x40000), ioaddr + TCRRCR);

    /* wait for tx stop */
    {
        int i = 0, delay = 0x1000;
        while ((!(readl(ioaddr + TCRRCR) & 0x04000000)) && (i < delay)) {
            udelay(1);
            ++i;
        }
    }
}

void stop_nic_rx(long ioaddr, long crvalue)
{
    writel(crvalue & (~0x1), ioaddr + TCRRCR);

    /* wait for rx stop */
    {
        int i = 0, delay = 0x1000;
        while ((!(readl(ioaddr + TCRRCR) & 0x00008000)) && (i < delay)) {
            udelay(1);
            ++i;
        }
    }
}
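/* Both helpers take the cached TCRRCR value so the other half of the shared
   receive/transmit configuration register is preserved while one DMA engine
   is stopped; set_rx_mode() below, for example, calls
   stop_nic_rx(ioaddr, np->crvalue & (~0x40000)) before touching the multicast
   registers and then rewrites np->crvalue to restart reception. */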
static int __devinit fealnx_init_one(struct pci_dev *pdev,
                                     const struct pci_device_id *ent)
{
    struct netdev_private *np;
    int i, option, err, irq;
    static int card_idx = -1;
    long ioaddr;
    char boardname[12];     /* "fealnxN" */
    unsigned int chip_id = ent->driver_data;
    struct net_device *dev;
    void *ring_space;
    dma_addr_t ring_dma;

    /* when built into the kernel, we only print version if device is found */
    static int printed_version;
    if (!printed_version++)
        printk(version);

    card_idx++;
    sprintf(boardname, "fealnx%d", card_idx);

    option = card_idx < MAX_UNITS ? options[card_idx] : 0;

    i = pci_enable_device(pdev);
    if (i) return i;
    pci_set_master(pdev);
    irq = pdev->irq;

#ifdef USE_IO_OPS
    ioaddr = pci_resource_len(pdev, 0);
#else
    ioaddr = pci_resource_len(pdev, 1);
#endif
    if (ioaddr < MIN_REGION_SIZE) {
        printk(KERN_ERR "%s: region size %ld too small, aborting\n",
               boardname, ioaddr);
        return -ENODEV;
    }

    i = pci_request_regions(pdev, boardname);
    if (i) return i;

#ifdef USE_IO_OPS
    ioaddr = pci_resource_start(pdev, 0);
#else
    ioaddr = (long) ioremap(pci_resource_start(pdev, 1),
                            pci_resource_len(pdev, 1));
#endif
    dev = alloc_etherdev(sizeof(struct netdev_private));

    SET_MODULE_OWNER(dev);
    SET_NETDEV_DEV(dev, &pdev->dev);

    /* read ethernet id */
    for (i = 0; i < 6; ++i)
        dev->dev_addr[i] = readb(ioaddr + PAR0 + i);

    /* Reset the chip to erase previous misconfiguration. */
    writel(0x00000001, ioaddr + BCR);

    dev->base_addr = ioaddr;

    /* Make certain the descriptor lists are aligned. */
    np = dev->priv;
    spin_lock_init(&np->lock);

    np->flags = skel_netdrv_tbl[chip_id].flags;
    pci_set_drvdata(pdev, dev);

    np->mii.mdio_read = mdio_read;
    np->mii.mdio_write = mdio_write;
    np->mii.phy_id_mask = 0x1f;
    np->mii.reg_num_mask = 0x1f;
    ring_space = pci_alloc_consistent(pdev, RX_TOTAL_SIZE, &ring_dma);
    if (!ring_space)
        goto err_out_free_dev;
    np->rx_ring = (struct fealnx_desc *)ring_space;
    np->rx_ring_dma = ring_dma;

    ring_space = pci_alloc_consistent(pdev, TX_TOTAL_SIZE, &ring_dma);
    if (!ring_space)
        goto err_out_free_rx;
    np->tx_ring = (struct fealnx_desc *)ring_space;
    np->tx_ring_dma = ring_dma;
    /* find the connected MII xcvrs */
    if (np->flags == HAS_MII_XCVR) {
        int phy, phy_idx = 0;

        for (phy = 1; phy < 32 && phy_idx < 4; phy++) {
            int mii_status = mdio_read(dev, phy, 1);

            if (mii_status != 0xffff && mii_status != 0x0000) {
                np->phys[phy_idx++] = phy;
                printk(KERN_INFO
                       "%s: MII PHY found at address %d, status "
                       "0x%4.4x.\n", dev->name, phy, mii_status);

                data = mdio_read(dev, np->phys[0], 2);
                if (data == SeeqPHYID0)
                    np->PHYType = SeeqPHY;
                else if (data == AhdocPHYID0)
                    np->PHYType = AhdocPHY;
                else if (data == MarvellPHYID0)
                    np->PHYType = MarvellPHY;
                else if (data == MysonPHYID0)
                    np->PHYType = Myson981;
                else if (data == LevelOnePHYID0)
                    np->PHYType = LevelOnePHY;
                else
                    np->PHYType = OtherPHY;
            }
        }

        np->mii_cnt = phy_idx;
        if (phy_idx == 0)
            printk(KERN_WARNING "%s: MII PHY not found -- this device may "
                   "not operate correctly.\n", dev->name);
    } else {
        /* 89/6/23 add, (begin) */
        /* get phy type */
        if (readl(dev->base_addr + PHYIDENTIFIER) == MysonPHYID)
            np->PHYType = MysonPHY;
        else
            np->PHYType = OtherPHY;
    }
    np->mii.phy_id = np->phys[0];

    if (dev->mem_start)
        option = dev->mem_start;

    /* The lower four bits are the media type. */
    if (option > 0) {
        if (option & 0x200)
            np->mii.full_duplex = 1;
        np->default_port = option & 15;
    }

    if (card_idx < MAX_UNITS && full_duplex[card_idx] > 0)
        np->mii.full_duplex = full_duplex[card_idx];

    if (np->mii.full_duplex) {
        printk(KERN_INFO "%s: Media type forced to Full Duplex.\n", dev->name);
        /* 89/6/13 add, (begin) */
        // if (np->PHYType==MarvellPHY)
        if ((np->PHYType == MarvellPHY) || (np->PHYType == LevelOnePHY)) {
            data = mdio_read(dev, np->phys[0], 9);
            data = (data & 0xfcff) | 0x0200;
            mdio_write(dev, np->phys[0], 9, data);
        }
        /* 89/6/13 add, (end) */
        if (np->flags == HAS_MII_XCVR)
            mdio_write(dev, np->phys[0], MII_ADVERTISE, ADVERTISE_FULL);
        else
            writel(ADVERTISE_FULL, dev->base_addr + ANARANLPAR);
        np->mii.force_media = 1;
    }
    /* The chip-specific entries in the device structure. */
    dev->open = &netdev_open;
    dev->hard_start_xmit = &start_tx;
    dev->stop = &netdev_close;
    dev->get_stats = &get_stats;
    dev->set_multicast_list = &set_rx_mode;
    dev->do_ioctl = &mii_ioctl;
    dev->ethtool_ops = &netdev_ethtool_ops;
    dev->tx_timeout = tx_timeout;
    dev->watchdog_timeo = TX_TIMEOUT;

    err = register_netdev(dev);
    if (err)
        goto err_out_free_tx;

    printk(KERN_INFO "%s: %s at 0x%lx, ",
           dev->name, skel_netdrv_tbl[chip_id].chip_name, ioaddr);
    for (i = 0; i < 5; i++)
        printk("%2.2x:", dev->dev_addr[i]);
    printk("%2.2x, IRQ %d.\n", dev->dev_addr[i], irq);

    return 0;

err_out_free_tx:
    pci_free_consistent(pdev, TX_TOTAL_SIZE, np->tx_ring, np->tx_ring_dma);
err_out_free_rx:
    pci_free_consistent(pdev, RX_TOTAL_SIZE, np->rx_ring, np->rx_ring_dma);
err_out_free_dev:
    iounmap((void *)ioaddr);
    pci_release_regions(pdev);
    return err;
}
static void __devexit fealnx_remove_one(struct pci_dev *pdev)
{
    struct net_device *dev = pci_get_drvdata(pdev);

    if (dev) {
        struct netdev_private *np = dev->priv;

        pci_free_consistent(pdev, TX_TOTAL_SIZE, np->tx_ring,
                            np->tx_ring_dma);
        pci_free_consistent(pdev, RX_TOTAL_SIZE, np->rx_ring,
                            np->rx_ring_dma);
        unregister_netdev(dev);
        iounmap((void *)dev->base_addr);
        pci_release_regions(pdev);
        pci_set_drvdata(pdev, NULL);
    } else
        printk(KERN_ERR "fealnx: remove for unknown device\n");
}
unsigned int m80x_read_tick(void)
/* function: Reads the timer tick count register, which decrements by 2 from
   65536 to 0 every 1/36.414 of a second; each 2 decrements of the count
   represents 838 nsec. */
{
    unsigned char tmp;
    int value;

    writeb((char) 0x06, 0x43);  // Command 8254 to latch T0's count

    // now read the count.
    tmp = (unsigned char) readb(0x40);
    value = ((int) tmp) << 8;
    tmp = (unsigned char) readb(0x40);
    value |= (((int) tmp) & 0xff);

    return value;
}
void m80x_delay(unsigned int interval)
/* function: to wait for a specified time. */
/* input   : interval ... the specified time. */
{
    unsigned int interval1, interval2, i = 0;

    interval1 = m80x_read_tick();   // get initial value
    do {
        interval2 = m80x_read_tick();
        if (interval1 < interval2)
            interval1 = interval2;
        ++i;
    } while (((interval1 - interval2) < (ushort) interval) && (i < 65535));
}
static ulong m80x_send_cmd_to_phy(long miiport, int opcode, int phyad, int regad)
{
    ulong miir;
    int i;
    unsigned int mask, data;

    /* enable MII output */
    miir = (ulong) readl(miiport);
    miir |= MASK_MIIR_MII_WRITE + MASK_MIIR_MII_MDO;

    /* send 32 1's preamble */
    for (i = 0; i < 32; i++) {
        /* low MDC; MDO is already high (miir) */
        miir &= ~MASK_MIIR_MII_MDC;
        writel(miir, miiport);

        /* high MDC */
        miir |= MASK_MIIR_MII_MDC;
        writel(miir, miiport);
    }

    /* calculate ST+OP+PHYAD+REGAD+TA */
    data = opcode | (phyad << 7) | (regad << 2);

    /* send the frame, MSB first */
    for (mask = 0x8000; mask; mask >>= 1) {
        /* low MDC, prepare MDO */
        miir &= ~(MASK_MIIR_MII_MDC + MASK_MIIR_MII_MDO);
        if (mask & data)
            miir |= MASK_MIIR_MII_MDO;
        writel(miir, miiport);

        /* high MDC */
        miir |= MASK_MIIR_MII_MDC;
        writel(miir, miiport);

        /* tristate MDO for the turnaround bit of a read */
        if (mask == 0x2 && opcode == OP_READ)
            miir &= ~MASK_MIIR_MII_WRITE;
    }
    return miir;
}
static int mdio_read(struct net_device *dev, int phyad, int regad)
{
    long miiport = dev->base_addr + MANAGEMENT;
    ulong miir;
    unsigned int mask, data;

    miir = m80x_send_cmd_to_phy(miiport, OP_READ, phyad, regad);

    /* read data, MSB first */
    data = 0;
    for (mask = 0x8000; mask; mask >>= 1) {
        /* low MDC */
        miir &= ~MASK_MIIR_MII_MDC;
        writel(miir, miiport);

        /* read MDI */
        miir = readl(miiport);
        if (miir & MASK_MIIR_MII_MDI)
            data |= mask;

        /* high MDC, and wait */
        miir |= MASK_MIIR_MII_MDC;
        writel(miir, miiport);
        m80x_delay((int) 30);
    }

    /* low MDC */
    miir &= ~MASK_MIIR_MII_MDC;
    writel(miir, miiport);

    return data & 0xffff;
}
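/* Typical use, as in getlinkstatus() below -- poll the standard MII status
   register for link:
       if (mdio_read(dev, np->phys[0], MII_BMSR) & BMSR_LSTATUS)
           ... link is up ...
*/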
static void mdio_write(struct net_device *dev, int phyad, int regad, int data)
{
    long miiport = dev->base_addr + MANAGEMENT;
    ulong miir;
    unsigned int mask;

    miir = m80x_send_cmd_to_phy(miiport, OP_WRITE, phyad, regad);

    /* write data, MSB first */
    for (mask = 0x8000; mask; mask >>= 1) {
        /* low MDC, prepare MDO */
        miir &= ~(MASK_MIIR_MII_MDC + MASK_MIIR_MII_MDO);
        if (mask & data)
            miir |= MASK_MIIR_MII_MDO;
        writel(miir, miiport);

        /* high MDC */
        miir |= MASK_MIIR_MII_MDC;
        writel(miir, miiport);
    }

    /* low MDC */
    miir &= ~MASK_MIIR_MII_MDC;
    writel(miir, miiport);
}
static int netdev_open(struct net_device *dev)
{
    struct netdev_private *np = dev->priv;
    long ioaddr = dev->base_addr;

    writel(0x00000001, ioaddr + BCR);   /* Reset */

    if (request_irq(dev->irq, &intr_handler, SA_SHIRQ, dev->name, dev))
        return -EAGAIN;

    init_ring(dev);

    writel(np->rx_ring_dma, ioaddr + RXLBA);
    writel(np->tx_ring_dma, ioaddr + TXLBA);

    /* Initialize other registers. */
    /* Configure the PCI bus bursts and FIFO thresholds.
       486: Set 8 longword burst.
       Wait the specified 50 PCI cycles after a reset by initializing
       Tx and Rx queues and the address filter list.
       FIXME (Ueimor): optimistic for alpha + posted writes ? */
#if defined(__powerpc__) || defined(__sparc__)
// np->bcrvalue=0x04 | 0x38;    /* big-endian, 256 burst length */
    np->bcrvalue = 0x04 | 0x10; /* big-endian, tx 8 burst length */
    np->crvalue = 0xe00;        /* rx 128 burst length */
#elif defined(__alpha__) || defined(__x86_64__)
// np->bcrvalue=0x38;           /* little-endian, 256 burst length */
    np->bcrvalue = 0x10;        /* little-endian, 8 burst length */
    np->crvalue = 0xe00;        /* rx 128 burst length */
#elif defined(__i386__)
#if defined(MODULE)
// np->bcrvalue=0x38;           /* little-endian, 256 burst length */
    np->bcrvalue = 0x10;        /* little-endian, 8 burst length */
    np->crvalue = 0xe00;        /* rx 128 burst length */
#else
    /* When not a module we can work around broken '486 PCI boards. */
#define x86 boot_cpu_data.x86
// np->bcrvalue=(x86 <= 4 ? 0x10 : 0x38);
    np->crvalue = (x86 <= 4 ? 0xa00 : 0xe00);
    if (x86 <= 4)
        printk(KERN_INFO "%s: This is a 386/486 PCI system, setting burst "
               "length to %x.\n", dev->name, (x86 <= 4 ? 0x10 : 0x38));
#endif
#else
// np->bcrvalue=0x38;
    np->crvalue = 0xe00;        /* rx 128 burst length */
#warning Processor architecture undefined!
#endif

// np->imrvalue=FBE|TUNF|CNTOVF|RBU|TI|RI;
    np->imrvalue = TUNF | CNTOVF | RBU | TI | RI;
    if (np->pci_dev->device == 0x891) {
        np->bcrvalue |= 0x200;      /* set PROG bit */
        np->crvalue |= 0x02000000;  /* set enhanced bit */
    }
    writel(np->bcrvalue, ioaddr + BCR);

    if (dev->if_port == 0)
        dev->if_port = np->default_port;

    writel(0, dev->base_addr + RXPDR);

// np->crvalue = 0x00e40001;    /* tx store and forward, tx/rx enable */
    np->crvalue |= 0x00e40001;   /* tx store and forward, tx/rx enable */
    np->mii.full_duplex = np->mii.force_media;
    getlinkstatus(dev);
    if (np->linkok)
        getlinktype(dev);
    writel(np->crvalue, ioaddr + TCRRCR);

    netif_start_queue(dev);

    /* Clear and Enable interrupts by setting the interrupt mask. */
    writel(FBE | TUNF | CNTOVF | RBU | TI | RI, ioaddr + ISR);
    writel(np->imrvalue, ioaddr + IMR);

    if (debug)
        printk(KERN_DEBUG "%s: Done netdev_open().\n", dev->name);

    /* Set the timer to check for link beat. */
    init_timer(&np->timer);
    np->timer.expires = RUN_AT(3 * HZ);
    np->timer.data = (unsigned long) dev;
    np->timer.function = &netdev_timer;

    add_timer(&np->timer);

    return 0;
}
static void getlinkstatus(struct net_device *dev)
/* function: Routine will read MII Status Register to get link status.       */
/* input   : dev... pointer to the adapter block.                            */
{
    struct netdev_private *np = dev->priv;
    unsigned int i, DelayTime = 0x1000;

    np->linkok = 0;

    if (np->PHYType == MysonPHY) {
        for (i = 0; i < DelayTime; ++i) {
            if (readl(dev->base_addr + BMCRSR) & LinkIsUp2) {
                np->linkok = 1;
                return;
            }
        }
    } else {
        for (i = 0; i < DelayTime; ++i) {
            if (mdio_read(dev, np->phys[0], MII_BMSR) & BMSR_LSTATUS) {
                np->linkok = 1;
                return;
            }
        }
    }
}
static void getlinktype(struct net_device *dev)
{
    struct netdev_private *np = dev->priv;

    if (np->PHYType == MysonPHY) {      /* 3-in-1 case */
        if (readl(dev->base_addr + TCRRCR) & FD)
            np->duplexmode = 2;         /* full duplex */
        else
            np->duplexmode = 1;         /* half duplex */
        if (readl(dev->base_addr + TCRRCR) & PS10)
            np->line_speed = 1;         /* 10M */
        else
            np->line_speed = 2;         /* 100M */
    } else {
        if (np->PHYType == SeeqPHY) {   /* this PHY is SEEQ 80225 */
            unsigned int data;

            data = mdio_read(dev, np->phys[0], MIIRegister18);
            if (data & SPD_DET_100)
                np->line_speed = 2;     /* 100M */
            else
                np->line_speed = 1;     /* 10M */
            if (data & DPLX_DET_FULL)
                np->duplexmode = 2;     /* full duplex mode */
            else
                np->duplexmode = 1;     /* half duplex mode */
        } else if (np->PHYType == AhdocPHY) {
            unsigned int data;

            data = mdio_read(dev, np->phys[0], DiagnosticReg);
            if (data & Speed_100)
                np->line_speed = 2;     /* 100M */
            else
                np->line_speed = 1;     /* 10M */
            if (data & DPLX_FULL)
                np->duplexmode = 2;     /* full duplex mode */
            else
                np->duplexmode = 1;     /* half duplex mode */
        }
        /* 89/6/13 add, (begin) */
        else if (np->PHYType == MarvellPHY) {
            unsigned int data;

            data = mdio_read(dev, np->phys[0], SpecificReg);
            if (data & Full_Duplex)
                np->duplexmode = 2;     /* full duplex mode */
            else
                np->duplexmode = 1;     /* half duplex mode */
            data &= SpeedMask;
            if (data == Speed_1000M)
                np->line_speed = 3;     /* 1000M */
            else if (data == Speed_100M)
                np->line_speed = 2;     /* 100M */
            else
                np->line_speed = 1;     /* 10M */
        }
        /* 89/6/13 add, (end) */
        /* 89/7/27 add, (begin) */
        else if (np->PHYType == Myson981) {
            unsigned int data;

            data = mdio_read(dev, np->phys[0], StatusRegister);
            if (data & SPEED100)
                np->line_speed = 2;
            else
                np->line_speed = 1;
            if (data & FULLMODE)
                np->duplexmode = 2;
            else
                np->duplexmode = 1;
        }
        /* 89/7/27 add, (end) */
        else if (np->PHYType == LevelOnePHY) {
            unsigned int data;

            data = mdio_read(dev, np->phys[0], SpecificReg);
            if (data & LXT1000_Full)
                np->duplexmode = 2;     /* full duplex mode */
            else
                np->duplexmode = 1;     /* half duplex mode */
            data &= SpeedMask;
            if (data == LXT1000_1000M)
                np->line_speed = 3;     /* 1000M */
            else if (data == LXT1000_100M)
                np->line_speed = 2;     /* 100M */
            else
                np->line_speed = 1;     /* 10M */
        }

        // np->crvalue&=(~PS10)&(~FD);
        np->crvalue &= (~PS10) & (~FD) & (~PS1000);
        if (np->line_speed == 1)
            np->crvalue |= PS10;
        else if (np->line_speed == 3)
            np->crvalue |= PS1000;
        if (np->duplexmode == 2)
            np->crvalue |= FD;
    }
}
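/* Example of the resulting encoding (from the logic above): a PHY that
   negotiated 100Mbps full duplex yields line_speed == 2 and duplexmode == 2,
   so PS10 and PS1000 stay clear and FD is set before crvalue is written back
   to the MAC. */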
static void allocate_rx_buffers(struct net_device *dev)
{
    struct netdev_private *np = dev->priv;

    /* allocate skb for rx buffers */
    while (np->really_rx_count != RX_RING_SIZE) {
        struct sk_buff *skb;

        skb = dev_alloc_skb(np->rx_buf_sz);
        np->lack_rxbuf->skbuff = skb;

        if (skb == NULL)
            break;          /* Better luck next round. */

        skb->dev = dev;     /* Mark as being used by this device. */
        np->lack_rxbuf->buffer = pci_map_single(np->pci_dev, skb->tail,
                                                np->rx_buf_sz, PCI_DMA_FROMDEVICE);
        np->lack_rxbuf = np->lack_rxbuf->next_desc_logical;
        ++np->really_rx_count;
    }
}
static void netdev_timer(unsigned long data)
{
    struct net_device *dev = (struct net_device *) data;
    struct netdev_private *np = dev->priv;
    long ioaddr = dev->base_addr;
    int next_tick = 10 * HZ;
    int old_crvalue = np->crvalue;
    unsigned int old_linkok = np->linkok;

    if (debug)
        printk(KERN_DEBUG "%s: Media selection timer tick, status %8.8x "
               "config %8.8x.\n", dev->name, readl(ioaddr + ISR),
               readl(ioaddr + TCRRCR));

    if (np->flags == HAS_MII_XCVR) {
        getlinkstatus(dev);
        if ((old_linkok == 0) && (np->linkok == 1)) {   /* we need to detect the media type again */
            getlinktype(dev);
            if (np->crvalue != old_crvalue) {
                stop_nic_tx(ioaddr, np->crvalue);
                stop_nic_rx(ioaddr, np->crvalue & (~0x40000));
                writel(np->crvalue, ioaddr + TCRRCR);
            }
        }
    }

    allocate_rx_buffers(dev);

    np->timer.expires = RUN_AT(next_tick);
    add_timer(&np->timer);
}
static void tx_timeout(struct net_device *dev)
{
    struct netdev_private *np = dev->priv;
    long ioaddr = dev->base_addr;
    int i;

    printk(KERN_WARNING "%s: Transmit timed out, status %8.8x,"
           " resetting...\n", dev->name, readl(ioaddr + ISR));

    printk(KERN_DEBUG "  Rx ring %p: ", np->rx_ring);
    for (i = 0; i < RX_RING_SIZE; i++)
        printk(" %8.8x", (unsigned int) np->rx_ring[i].status);
    printk("\n" KERN_DEBUG "  Tx ring %p: ", np->tx_ring);
    for (i = 0; i < TX_RING_SIZE; i++)
        printk(" %4.4x", np->tx_ring[i].status);
    printk("\n");

    /* Reset the chip's Tx and Rx processes. */
    stop_nic_tx(ioaddr, 0);
    reset_rx_descriptors(dev);

    /* Disable interrupts by clearing the interrupt mask. */
    writel(0x0000, ioaddr + IMR);

    /* Reset the chip to erase previous misconfiguration. */
    writel(0x00000001, ioaddr + BCR);

    /* Ueimor: wait for 50 PCI cycles (and flush posted writes btw).
       We surely wait too long (address+data phase). Who cares? */
    for (i = 0; i < 50; i++) {
        readl(ioaddr + BCR);
    }

    writel((np->cur_tx - np->tx_ring) * sizeof(struct fealnx_desc) +
           np->tx_ring_dma, ioaddr + TXLBA);
    writel((np->cur_rx - np->rx_ring) * sizeof(struct fealnx_desc) +
           np->rx_ring_dma, ioaddr + RXLBA);

    writel(np->bcrvalue, ioaddr + BCR);

    writel(0, dev->base_addr + RXPDR);

    /* Clear and Enable interrupts by setting the interrupt mask. */
    writel(FBE | TUNF | CNTOVF | RBU | TI | RI, ioaddr + ISR);
    writel(np->imrvalue, ioaddr + IMR);

    writel(0, dev->base_addr + TXPDR);

    dev->trans_start = jiffies;
    np->stats.tx_errors++;
}
/* Initialize the Rx and Tx rings, along with various 'dev' bits. */
static void init_ring(struct net_device *dev)
{
    struct netdev_private *np = dev->priv;
    int i;

    /* initialize rx variables */
    np->rx_buf_sz = (dev->mtu <= 1500 ? PKT_BUF_SZ : dev->mtu + 32);
    np->cur_rx = &np->rx_ring[0];
    np->lack_rxbuf = NULL;
    np->really_rx_count = 0;

    /* initial rx descriptors. */
    for (i = 0; i < RX_RING_SIZE; i++) {
        np->rx_ring[i].status = 0;
        np->rx_ring[i].control = np->rx_buf_sz << RBSShift;
        np->rx_ring[i].next_desc = np->rx_ring_dma +
            (i + 1) * sizeof(struct fealnx_desc);
        np->rx_ring[i].next_desc_logical = &np->rx_ring[i + 1];
        np->rx_ring[i].skbuff = NULL;
    }

    /* for the last rx descriptor */
    np->rx_ring[i - 1].next_desc = np->rx_ring_dma;
    np->rx_ring[i - 1].next_desc_logical = np->rx_ring;

    /* allocate skb for rx buffers */
    for (i = 0; i < RX_RING_SIZE; i++) {
        struct sk_buff *skb = dev_alloc_skb(np->rx_buf_sz);

        if (skb == NULL) {
            np->lack_rxbuf = &np->rx_ring[i];
            break;
        }

        ++np->really_rx_count;
        np->rx_ring[i].skbuff = skb;
        skb->dev = dev;     /* Mark as being used by this device. */
        np->rx_ring[i].buffer = pci_map_single(np->pci_dev, skb->tail,
                                               np->rx_buf_sz, PCI_DMA_FROMDEVICE);
        np->rx_ring[i].status = RXOWN;
        np->rx_ring[i].control |= RXIC;
    }

    /* initialize tx variables */
    np->cur_tx = &np->tx_ring[0];
    np->cur_tx_copy = &np->tx_ring[0];
    np->really_tx_count = 0;
    np->free_tx_count = TX_RING_SIZE;

    for (i = 0; i < TX_RING_SIZE; i++) {
        np->tx_ring[i].status = 0;
        np->tx_ring[i].next_desc = np->tx_ring_dma +
            (i + 1) * sizeof(struct fealnx_desc);
        np->tx_ring[i].next_desc_logical = &np->tx_ring[i + 1];
        np->tx_ring[i].skbuff = NULL;
    }

    /* for the last tx descriptor */
    np->tx_ring[i - 1].next_desc = np->tx_ring_dma;
    np->tx_ring[i - 1].next_desc_logical = &np->tx_ring[0];
}
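/* Sanity-check sketch (illustration only, not part of the driver): the
   logical chain built above is circular, so starting at &np->tx_ring[0] and
   following next_desc_logical TX_RING_SIZE times lands back on
   &np->tx_ring[0]; the hardware follows the same cycle through the
   bus-address next_desc field. */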
static int start_tx(struct sk_buff *skb, struct net_device *dev)
{
    struct netdev_private *np = dev->priv;
    unsigned long flags;

    spin_lock_irqsave(&np->lock, flags);

    np->cur_tx_copy->skbuff = skb;

#if defined(one_buffer)
    np->cur_tx_copy->buffer = pci_map_single(np->pci_dev, skb->data,
                                             skb->len, PCI_DMA_TODEVICE);
    np->cur_tx_copy->control = TXIC | TXLD | TXFD | CRCEnable | PADEnable;
    np->cur_tx_copy->control |= (skb->len << PKTSShift);    /* pkt size */
    np->cur_tx_copy->control |= (skb->len << TBSShift);     /* buffer size */

    if (np->pci_dev->device == 0x891)
        np->cur_tx_copy->control |= ETIControl | RetryTxLC;
    np->cur_tx_copy->status = TXOWN;
    np->cur_tx_copy = np->cur_tx_copy->next_desc_logical;
    --np->free_tx_count;
#elif defined(two_buffer)
    if (skb->len > BPT) {
        struct fealnx_desc *next;

        /* for the first descriptor */
        np->cur_tx_copy->buffer = pci_map_single(np->pci_dev, skb->data,
                                                 BPT, PCI_DMA_TODEVICE);
        np->cur_tx_copy->control = TXIC | TXFD | CRCEnable | PADEnable;
        np->cur_tx_copy->control |= (skb->len << PKTSShift);    /* pkt size */
        np->cur_tx_copy->control |= (BPT << TBSShift);          /* buffer size */

        /* for the last descriptor */
        next = np->cur_tx_copy->next_desc_logical;

        next->control = TXIC | TXLD | CRCEnable | PADEnable;
        next->control |= (skb->len << PKTSShift);               /* pkt size */
        next->control |= ((skb->len - BPT) << TBSShift);        /* buf size */

        if (np->pci_dev->device == 0x891)
            np->cur_tx_copy->control |= ETIControl | RetryTxLC;
        next->buffer = pci_map_single(np->pci_dev, skb->data + BPT,
                                      skb->len - BPT, PCI_DMA_TODEVICE);

        next->status = TXOWN;
        np->cur_tx_copy->status = TXOWN;

        np->cur_tx_copy = next->next_desc_logical;
        np->free_tx_count -= 2;
    } else {
        np->cur_tx_copy->buffer = pci_map_single(np->pci_dev, skb->data,
                                                 skb->len, PCI_DMA_TODEVICE);
        np->cur_tx_copy->control = TXIC | TXLD | TXFD | CRCEnable | PADEnable;
        np->cur_tx_copy->control |= (skb->len << PKTSShift);    /* pkt size */
        np->cur_tx_copy->control |= (skb->len << TBSShift);     /* buffer size */

        if (np->pci_dev->device == 0x891)
            np->cur_tx_copy->control |= ETIControl | RetryTxLC;
        np->cur_tx_copy->status = TXOWN;
        np->cur_tx_copy = np->cur_tx_copy->next_desc_logical;
        --np->free_tx_count;
    }
#endif

    if (np->free_tx_count < 2)
        netif_stop_queue(dev);
    ++np->really_tx_count;
    writel(0, dev->base_addr + TXPDR);
    dev->trans_start = jiffies;

    spin_unlock_irqrestore(&np->lock, flags);

    return 0;
}
void free_one_rx_descriptor(struct netdev_private *np)
{
    if (np->really_rx_count == RX_RING_SIZE)
        np->cur_rx->status = RXOWN;
    else {
        np->lack_rxbuf->skbuff = np->cur_rx->skbuff;
        np->lack_rxbuf->buffer = np->cur_rx->buffer;
        np->lack_rxbuf->status = RXOWN;
        ++np->really_rx_count;
        np->lack_rxbuf = np->lack_rxbuf->next_desc_logical;
    }
    np->cur_rx = np->cur_rx->next_desc_logical;
}

void reset_rx_descriptors(struct net_device *dev)
{
    struct netdev_private *np = dev->priv;

    stop_nic_rx(dev->base_addr, np->crvalue);

    while (!(np->cur_rx->status & RXOWN))
        free_one_rx_descriptor(np);

    allocate_rx_buffers(dev);

    writel(np->rx_ring_dma +
           (np->cur_rx - np->rx_ring) * sizeof(struct fealnx_desc),
           dev->base_addr + RXLBA);
    writel(np->crvalue, dev->base_addr + TCRRCR);
}
/* The interrupt handler does all of the Rx thread work and cleans up
   after the Tx thread. */
static irqreturn_t intr_handler(int irq, void *dev_instance, struct pt_regs *rgs)
{
    struct net_device *dev = (struct net_device *) dev_instance;
    struct netdev_private *np = dev->priv;
    long ioaddr, boguscnt = max_interrupt_work;
    unsigned int num_tx = 0;
    int handled = 0;

    spin_lock(&np->lock);

    writel(0, dev->base_addr + IMR);

    ioaddr = dev->base_addr;

    do {
        u32 intr_status = readl(ioaddr + ISR);

        /* Acknowledge all of the current interrupt sources ASAP. */
        writel(intr_status, ioaddr + ISR);

        if (debug)
            printk(KERN_DEBUG "%s: Interrupt, status %4.4x.\n", dev->name,
                   intr_status);

        if (!(intr_status & np->imrvalue))
            break;

        handled = 1;

        // if (intr_status & FBE)
        // {    /* fatal error */
        //     stop_nic_tx(ioaddr, 0);
        //     stop_nic_rx(ioaddr, 0);

        if (intr_status & TUNF)
            writel(0, ioaddr + TXPDR);

        if (intr_status & CNTOVF) {
            /* missed pkts */
            np->stats.rx_missed_errors += readl(ioaddr + TALLY) & 0x7fff;

            /* crc error */
            np->stats.rx_crc_errors +=
                (readl(ioaddr + TALLY) & 0x7fff0000) >> 16;
        }

        if (intr_status & (RI | RBU)) {
            if (intr_status & RI)
                netdev_rx(dev);
            else
                reset_rx_descriptors(dev);
        }

        while (np->really_tx_count) {
            long tx_status = np->cur_tx->status;
            long tx_control = np->cur_tx->control;

            if (!(tx_control & TXLD)) { /* this pkt is combined by two tx descriptors */
                struct fealnx_desc *next;

                next = np->cur_tx->next_desc_logical;
                tx_status = next->status;
                tx_control = next->control;
            }

            if (tx_status & TXOWN)
                break;

            if (!(np->crvalue & 0x02000000)) {
                if (tx_status & (CSL | LC | EC | UDF | HF)) {
                    np->stats.tx_errors++;
                    if (tx_status & EC)
                        np->stats.tx_aborted_errors++;
                    if (tx_status & CSL)
                        np->stats.tx_carrier_errors++;
                    if (tx_status & LC)
                        np->stats.tx_window_errors++;
                    if (tx_status & UDF)
                        np->stats.tx_fifo_errors++;
                    if ((tx_status & HF) && np->mii.full_duplex == 0)
                        np->stats.tx_heartbeat_errors++;
                } else {
                    np->stats.tx_bytes +=
                        ((tx_control & PKTSMask) >> PKTSShift);

                    np->stats.collisions +=
                        ((tx_status & NCRMask) >> NCRShift);
                    np->stats.tx_packets++;
                }
            } else {
                np->stats.tx_bytes +=
                    ((tx_control & PKTSMask) >> PKTSShift);
                np->stats.tx_packets++;
            }

            /* Free the original skb. */
            pci_unmap_single(np->pci_dev, np->cur_tx->buffer,
                             np->cur_tx->skbuff->len, PCI_DMA_TODEVICE);
            dev_kfree_skb_irq(np->cur_tx->skbuff);
            np->cur_tx->skbuff = NULL;
            --np->really_tx_count;
            if (np->cur_tx->control & TXLD) {
                np->cur_tx = np->cur_tx->next_desc_logical;
                ++np->free_tx_count;
            } else {
                np->cur_tx = np->cur_tx->next_desc_logical;
                np->cur_tx = np->cur_tx->next_desc_logical;
                np->free_tx_count += 2;
            }
            num_tx++;
        }       /* end of the tx-reclaim while loop */

        if (num_tx && np->free_tx_count >= 2)
            netif_wake_queue(dev);

        /* read transmit status for enhanced mode only */
        if (np->crvalue & 0x02000000) {
            long data;

            data = readl(ioaddr + TSR);
            np->stats.tx_errors += (data & 0xff000000) >> 24;
            np->stats.tx_aborted_errors += (data & 0xff000000) >> 24;
            np->stats.tx_window_errors += (data & 0x00ff0000) >> 16;
            np->stats.collisions += (data & 0x0000ffff);
        }

        if (--boguscnt < 0) {
            printk(KERN_WARNING "%s: Too much work at interrupt, "
                   "status=0x%4.4x.\n", dev->name, intr_status);
            break;
        }
    } while (1);

    /* read the tally counters */
    /* missed pkts */
    np->stats.rx_missed_errors += readl(ioaddr + TALLY) & 0x7fff;

    /* crc error */
    np->stats.rx_crc_errors += (readl(ioaddr + TALLY) & 0x7fff0000) >> 16;

    if (debug)
        printk(KERN_DEBUG "%s: exiting interrupt, status=%#4.4x.\n",
               dev->name, readl(ioaddr + ISR));

    writel(np->imrvalue, ioaddr + IMR);

    spin_unlock(&np->lock);

    return IRQ_RETVAL(handled);
}
/* This routine is logically part of the interrupt handler, but separated
   for clarity and better register allocation. */
static int netdev_rx(struct net_device *dev)
{
    struct netdev_private *np = dev->priv;

    /* If EOP is set on the next entry, it's a new packet. Send it up. */
    while (!(np->cur_rx->status & RXOWN)) {
        s32 rx_status = np->cur_rx->status;

        if (np->really_rx_count == 0)
            break;

        if (debug)
            printk(KERN_DEBUG "  netdev_rx() status was %8.8x.\n", rx_status);

        if ((!((rx_status & RXFSD) && (rx_status & RXLSD)))
            || (rx_status & ErrorSummary)) {
            if (rx_status & ErrorSummary) {     /* there was a fatal error */
                if (debug)
                    printk(KERN_DEBUG
                           "%s: Receive error, Rx status %8.8x.\n",
                           dev->name, rx_status);

                np->stats.rx_errors++;  /* end of a packet. */
                if (rx_status & (LONG | RUNT))
                    np->stats.rx_length_errors++;
                if (rx_status & RXER)
                    np->stats.rx_frame_errors++;
                if (rx_status & CRC)
                    np->stats.rx_crc_errors++;
            } else {
                int need_to_reset = 0;
                int desno = 0;

                if (rx_status & RXFSD) {    /* this pkt is too long, over one rx buffer */
                    struct fealnx_desc *cur;

                    /* check whether this packet was received completely */
                    cur = np->cur_rx;
                    while (desno <= np->really_rx_count) {
                        ++desno;
                        if ((!(cur->status & RXOWN))
                            && (cur->status & RXLSD))
                            break;
                        /* goto next rx descriptor */
                        cur = cur->next_desc_logical;
                    }
                    if (desno > np->really_rx_count)
                        need_to_reset = 1;
                } else      /* RXLSD did not appear: something is wrong */
                    need_to_reset = 1;

                if (need_to_reset == 0) {
                    int i;

                    np->stats.rx_length_errors++;

                    /* free all rx descriptors related to this long pkt */
                    for (i = 0; i < desno; ++i)
                        free_one_rx_descriptor(np);
                    continue;
                } else {    /* something went wrong: reset the chip */
                    reset_rx_descriptors(dev);
                    break;  /* exit the while loop */
                }
            }
        } else {            /* this received pkt is ok */
            struct sk_buff *skb;
            /* Omit the four octet CRC from the length. */
            short pkt_len = ((rx_status & FLNGMASK) >> FLNGShift) - 4;
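            /* e.g. a status word of 0x00400000 has FLNGMASK bits of 0x0040,
               i.e. a 64-byte frame on the wire, so pkt_len = 64 - 4 = 60
               bytes after dropping the CRC (the mask covers bits 27:16,
               hence a shift of 16). */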
#ifndef final_version
            if (debug)
                printk(KERN_DEBUG "  netdev_rx() normal Rx pkt length %d"
                       " status %x.\n", pkt_len, rx_status);
#endif

            /* Check if the packet is long enough to accept without copying
               to a minimally-sized skbuff. */
            if (pkt_len < rx_copybreak &&
                (skb = dev_alloc_skb(pkt_len + 2)) != NULL) {
                skb->dev = dev;
                skb_reserve(skb, 2);    /* 16 byte align the IP header */
                pci_dma_sync_single_for_cpu(np->pci_dev,
                                            np->cur_rx->buffer,
                                            np->rx_buf_sz,
                                            PCI_DMA_FROMDEVICE);
                /* Call copy + cksum if available. */

#if ! defined(__alpha__)
                eth_copy_and_sum(skb,
                                 np->cur_rx->skbuff->tail, pkt_len, 0);
                skb_put(skb, pkt_len);
#else
                memcpy(skb_put(skb, pkt_len),
                       np->cur_rx->skbuff->tail, pkt_len);
#endif
                pci_dma_sync_single_for_device(np->pci_dev,
                                               np->cur_rx->buffer,
                                               np->rx_buf_sz,
                                               PCI_DMA_FROMDEVICE);
            } else {
                pci_unmap_single(np->pci_dev,
                                 np->cur_rx->buffer,
                                 np->rx_buf_sz,
                                 PCI_DMA_FROMDEVICE);
                skb_put(skb = np->cur_rx->skbuff, pkt_len);
                np->cur_rx->skbuff = NULL;
                if (np->really_rx_count == RX_RING_SIZE)
                    np->lack_rxbuf = np->cur_rx;
                --np->really_rx_count;
            }
            skb->protocol = eth_type_trans(skb, dev);
            netif_rx(skb);
            dev->last_rx = jiffies;
            np->stats.rx_packets++;
            np->stats.rx_bytes += pkt_len;
        }

        if (np->cur_rx->skbuff == NULL) {
            struct sk_buff *skb;

            skb = dev_alloc_skb(np->rx_buf_sz);
            if (skb != NULL) {
                skb->dev = dev;     /* Mark as being used by this device. */
                np->cur_rx->buffer = pci_map_single(np->pci_dev,
                                                    skb->tail,
                                                    np->rx_buf_sz,
                                                    PCI_DMA_FROMDEVICE);
                np->cur_rx->skbuff = skb;
                ++np->really_rx_count;
            }
        }

        if (np->cur_rx->skbuff != NULL)
            free_one_rx_descriptor(np);
    }   /* end of while loop */

    /* allocate skb for rx buffers */
    allocate_rx_buffers(dev);

    return 0;
}
static struct net_device_stats *get_stats(struct net_device *dev)
{
    long ioaddr = dev->base_addr;
    struct netdev_private *np = dev->priv;

    /* The chip only needs to report frames it silently dropped. */
    if (netif_running(dev)) {
        np->stats.rx_missed_errors += readl(ioaddr + TALLY) & 0x7fff;
        np->stats.rx_crc_errors += (readl(ioaddr + TALLY) & 0x7fff0000) >> 16;
    }

    return &np->stats;
}
static void set_rx_mode(struct net_device *dev)
{
    struct netdev_private *np = dev->priv;
    long ioaddr = dev->base_addr;
    u32 mc_filter[2];       /* Multicast hash filter */
    u32 rx_mode;

    if (dev->flags & IFF_PROMISC) {     /* Set promiscuous. */
        /* Unconditionally log net taps. */
        printk(KERN_NOTICE "%s: Promiscuous mode enabled.\n", dev->name);
        memset(mc_filter, 0xff, sizeof(mc_filter));
        rx_mode = PROM | AB | AM;
    } else if ((dev->mc_count > multicast_filter_limit)
               || (dev->flags & IFF_ALLMULTI)) {
        /* Too many to match, or accept all multicasts. */
        memset(mc_filter, 0xff, sizeof(mc_filter));
        rx_mode = AB | AM;
    } else {
        struct dev_mc_list *mclist;
        int i;

        memset(mc_filter, 0, sizeof(mc_filter));
        for (i = 0, mclist = dev->mc_list; mclist && i < dev->mc_count;
             i++, mclist = mclist->next) {
            unsigned int bit;
            bit = (ether_crc(ETH_ALEN, mclist->dmi_addr) >> 26) ^ 0x3F;
            mc_filter[bit >> 5] |= (1 << (bit & 31));
        }
        rx_mode = AB | AM;
    }
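    /* Worked mapping (illustration): the top six CRC bits, inverted by the
       ^ 0x3F, select one of 64 filter bits; bit >> 5 picks MAR0 or MAR1 and
       the low five bits select the bit within that 32-bit word, e.g. bit 37
       lands in mc_filter[1] as (1 << 5). */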
    stop_nic_tx(ioaddr, np->crvalue);
    stop_nic_rx(ioaddr, np->crvalue & (~0x40000));

    writel(mc_filter[0], ioaddr + MAR0);
    writel(mc_filter[1], ioaddr + MAR1);
    np->crvalue &= ~RxModeMask;
    np->crvalue |= rx_mode;
    writel(np->crvalue, ioaddr + TCRRCR);
}
static void netdev_get_drvinfo(struct net_device *dev, struct ethtool_drvinfo *info)
{
    struct netdev_private *np = dev->priv;

    strcpy(info->driver, DRV_NAME);
    strcpy(info->version, DRV_VERSION);
    strcpy(info->bus_info, pci_name(np->pci_dev));
}

static int netdev_get_settings(struct net_device *dev, struct ethtool_cmd *cmd)
{
    struct netdev_private *np = dev->priv;
    int rc;

    spin_lock_irq(&np->lock);
    rc = mii_ethtool_gset(&np->mii, cmd);
    spin_unlock_irq(&np->lock);

    return rc;
}

static int netdev_set_settings(struct net_device *dev, struct ethtool_cmd *cmd)
{
    struct netdev_private *np = dev->priv;
    int rc;

    spin_lock_irq(&np->lock);
    rc = mii_ethtool_sset(&np->mii, cmd);
    spin_unlock_irq(&np->lock);

    return rc;
}

static int netdev_nway_reset(struct net_device *dev)
{
    struct netdev_private *np = dev->priv;
    return mii_nway_restart(&np->mii);
}

static u32 netdev_get_link(struct net_device *dev)
{
    struct netdev_private *np = dev->priv;
    return mii_link_ok(&np->mii);
}

static u32 netdev_get_msglevel(struct net_device *dev)
{
    return debug;
}

static void netdev_set_msglevel(struct net_device *dev, u32 value)
{
    debug = value;
}
static struct ethtool_ops netdev_ethtool_ops = {
    .get_drvinfo    = netdev_get_drvinfo,
    .get_settings   = netdev_get_settings,
    .set_settings   = netdev_set_settings,
    .nway_reset     = netdev_nway_reset,
    .get_link       = netdev_get_link,
    .get_msglevel   = netdev_get_msglevel,
    .set_msglevel   = netdev_set_msglevel,
    .get_sg         = ethtool_op_get_sg,
    .get_tx_csum    = ethtool_op_get_tx_csum,
};
static int mii_ioctl(struct net_device *dev, struct ifreq *rq, int cmd)
{
    struct netdev_private *np = dev->priv;
    struct mii_ioctl_data *data = (struct mii_ioctl_data *) &rq->ifr_data;
    int rc;

    if (!netif_running(dev))
        return -EINVAL;

    spin_lock_irq(&np->lock);
    rc = generic_mii_ioctl(&np->mii, data, cmd, NULL);
    spin_unlock_irq(&np->lock);

    return rc;
}
static int netdev_close(struct net_device *dev)
{
    long ioaddr = dev->base_addr;
    struct netdev_private *np = dev->priv;
    int i;

    netif_stop_queue(dev);

    /* Disable interrupts by clearing the interrupt mask. */
    writel(0x0000, ioaddr + IMR);

    /* Stop the chip's Tx and Rx processes. */
    stop_nic_tx(ioaddr, 0);
    stop_nic_rx(ioaddr, 0);

    del_timer_sync(&np->timer);

    free_irq(dev->irq, dev);

    /* Free all the skbuffs in the Rx queue. */
    for (i = 0; i < RX_RING_SIZE; i++) {
        struct sk_buff *skb = np->rx_ring[i].skbuff;

        np->rx_ring[i].status = 0;
        if (skb) {
            pci_unmap_single(np->pci_dev, np->rx_ring[i].buffer,
                             np->rx_buf_sz, PCI_DMA_FROMDEVICE);
            dev_kfree_skb(skb);
            np->rx_ring[i].skbuff = NULL;
        }
    }

    for (i = 0; i < TX_RING_SIZE; i++) {
        struct sk_buff *skb = np->tx_ring[i].skbuff;

        if (skb) {
            pci_unmap_single(np->pci_dev, np->tx_ring[i].buffer,
                             skb->len, PCI_DMA_TODEVICE);
            dev_kfree_skb(skb);
            np->tx_ring[i].skbuff = NULL;
        }
    }

    return 0;
}
static struct pci_device_id fealnx_pci_tbl[] = {
    {0x1516, 0x0800, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0},
    {0x1516, 0x0803, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 1},
    {0x1516, 0x0891, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 2},
    {}      /* terminate list */
};
MODULE_DEVICE_TABLE(pci, fealnx_pci_tbl);
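/* The final field (driver_data) indexes skel_netdrv_tbl[] above; device
   0x0803, for example, probes as entry 1, the chip with the on-chip
   (HAS_CHIP_XCVR) transceiver. */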
static struct pci_driver fealnx_driver = {
    .name       = DRV_NAME,
    .id_table   = fealnx_pci_tbl,
    .probe      = fealnx_init_one,
    .remove     = __devexit_p(fealnx_remove_one),
};

static int __init fealnx_init(void)
{
    /* when a module, this is printed whether or not devices are found in probe */
#ifdef MODULE
    printk(version);
#endif

    return pci_module_init(&fealnx_driver);
}

static void __exit fealnx_exit(void)
{
    pci_unregister_driver(&fealnx_driver);
}

module_init(fealnx_init);
module_exit(fealnx_exit);