Copyright (C) 2001 Manfred Spraul
* ethtool support (jgarzik)
* Replace some MII-related magic numbers with constants (jgarzik)
-
+
TODO:
* enable pci_power_off
* Wake-On-LAN
*/
-
+
#define DRV_NAME "winbond-840"
-#define DRV_VERSION "1.01-d"
-#define DRV_RELDATE "Nov-17-2001"
+#define DRV_VERSION "1.01-e"
+#define DRV_RELDATE "Sep-11-2006"
/* Automatically extracted configuration info:
c-help-symbol: CONFIG_WINBOND_840
c-help: This driver is for the Winbond W89c840 chip. It also works with
c-help: the TX9882 chip on the Compex RL100-ATX board.
-c-help: More specific information and updates are available from
+c-help: More specific information and updates are available from
c-help: http://www.scyld.com/network/drivers.html
*/
Making the Tx ring too large decreases the effectiveness of channel
bonding and packet priority.
There are no ill effects from too-large receive rings. */
-#define TX_RING_SIZE 16
#define TX_QUEUE_LEN 10 /* Limit ring entries actually used. */
#define TX_QUEUE_LEN_RESTART 5
-#define RX_RING_SIZE 32
#define TX_BUFLIMIT (1024-128)
#define PKT_BUF_SZ 1536 /* Size of each temporary Rx buffer.*/
-#ifndef __KERNEL__
-#define __KERNEL__
-#endif
-#if !defined(__OPTIMIZE__)
-#warning You must compile this file with the correct options!
-#warning See the last lines of the source file.
-#error You must compile this driver with "-O".
-#endif
-
/* Include files, designed to support most kernel versions 2.0.0 and later. */
#include <linux/module.h>
#include <linux/kernel.h>
#include <linux/slab.h>
#include <linux/interrupt.h>
#include <linux/pci.h>
+#include <linux/dma-mapping.h>
#include <linux/netdevice.h>
#include <linux/etherdevice.h>
#include <linux/skbuff.h>
#include <linux/mii.h>
#include <linux/rtnetlink.h>
#include <linux/crc32.h>
+#include <linux/bitops.h>
#include <asm/uaccess.h>
#include <asm/processor.h> /* Processor type for cache alignment. */
-#include <asm/bitops.h>
#include <asm/io.h>
#include <asm/irq.h>
+#include "tulip.h"
+
/* These identify the driver base version and may not be removed. */
-static char version[] __devinitdata =
+static char version[] =
KERN_INFO DRV_NAME ".c:v" DRV_VERSION " (2.4 port) " DRV_RELDATE " Donald Becker <becker@scyld.com>\n"
KERN_INFO " http://www.scyld.com/network/drivers.html\n";
MODULE_AUTHOR("Donald Becker <becker@scyld.com>");
MODULE_DESCRIPTION("Winbond W89c840 Ethernet driver");
MODULE_LICENSE("GPL");
-
-MODULE_PARM(max_interrupt_work, "i");
-MODULE_PARM(debug, "i");
-MODULE_PARM(rx_copybreak, "i");
-MODULE_PARM(multicast_filter_limit, "i");
-MODULE_PARM(options, "1-" __MODULE_STRING(MAX_UNITS) "i");
-MODULE_PARM(full_duplex, "1-" __MODULE_STRING(MAX_UNITS) "i");
+MODULE_VERSION(DRV_VERSION);
+
+module_param(max_interrupt_work, int, 0);
+module_param(debug, int, 0);
+module_param(rx_copybreak, int, 0);
+module_param(multicast_filter_limit, int, 0);
+module_param_array(options, int, NULL, 0);
+module_param_array(full_duplex, int, NULL, 0);
MODULE_PARM_DESC(max_interrupt_work, "winbond-840 maximum events handled per interrupt");
MODULE_PARM_DESC(debug, "winbond-840 debug level (0-6)");
MODULE_PARM_DESC(rx_copybreak, "winbond-840 copy breakpoint for copy-only-tiny-frames");
*/
-\f
+
/*
PCI probe table.
*/
-enum pci_id_flags_bits {
- /* Set PCI command register bits before calling probe1(). */
- PCI_USES_IO=1, PCI_USES_MEM=2, PCI_USES_MASTER=4,
- /* Read and map the single following PCI BAR. */
- PCI_ADDR0=0<<4, PCI_ADDR1=1<<4, PCI_ADDR2=2<<4, PCI_ADDR3=3<<4,
- PCI_ADDR_64BITS=0x100, PCI_NO_ACPI_WAKE=0x200, PCI_NO_MIN_LATENCY=0x400,
-};
enum chip_capability_flags {
- CanHaveMII=1, HasBrokenTx=2, AlwaysFDX=4, FDXOnNoMII=8,};
-#ifdef USE_IO_OPS
-#define W840_FLAGS (PCI_USES_IO | PCI_ADDR0 | PCI_USES_MASTER)
-#else
-#define W840_FLAGS (PCI_USES_MEM | PCI_ADDR1 | PCI_USES_MASTER)
-#endif
+ CanHaveMII=1, HasBrokenTx=2, AlwaysFDX=4, FDXOnNoMII=8,
+};
-static struct pci_device_id w840_pci_tbl[] = {
+static const struct pci_device_id w840_pci_tbl[] = {
{ 0x1050, 0x0840, PCI_ANY_ID, 0x8153, 0, 0, 0 },
{ 0x1050, 0x0840, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 1 },
{ 0x11f6, 0x2011, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 2 },
- { 0, }
+ { }
};
MODULE_DEVICE_TABLE(pci, w840_pci_tbl);
+enum {
+ netdev_res_size = 128, /* size of PCI BAR resource */
+};
+
struct pci_id_info {
const char *name;
- struct match_info {
- int pci, pci_mask, subsystem, subsystem_mask;
- int revision, revision_mask; /* Only 8 bits. */
- } id;
- enum pci_id_flags_bits pci_flags;
- int io_size; /* Needed for I/O region check or ioremap(). */
- int drv_flags; /* Driver use, intended as capability flags. */
+ int drv_flags; /* Driver use, intended as capability flags. */
};
-static struct pci_id_info pci_id_tbl[] = {
- {"Winbond W89c840", /* Sometime a Level-One switch card. */
- { 0x08401050, 0xffffffff, 0x81530000, 0xffff0000 },
- W840_FLAGS, 128, CanHaveMII | HasBrokenTx | FDXOnNoMII},
- {"Winbond W89c840", { 0x08401050, 0xffffffff, },
- W840_FLAGS, 128, CanHaveMII | HasBrokenTx},
- {"Compex RL100-ATX", { 0x201111F6, 0xffffffff,},
- W840_FLAGS, 128, CanHaveMII | HasBrokenTx},
- {0,}, /* 0 terminated list. */
+
+static const struct pci_id_info pci_id_tbl[] __devinitdata = {
+ { /* Sometime a Level-One switch card. */
+ "Winbond W89c840", CanHaveMII | HasBrokenTx | FDXOnNoMII},
+ { "Winbond W89c840", CanHaveMII | HasBrokenTx},
+ { "Compex RL100-ATX", CanHaveMII | HasBrokenTx},
+ { } /* terminate list. */
};
/* This driver was written to use PCI memory space, however some x86 systems
- work only with I/O space accesses. Pass -DUSE_IO_OPS to use PCI I/O space
- accesses instead of memory space. */
-
-#ifdef USE_IO_OPS
-#undef readb
-#undef readw
-#undef readl
-#undef writeb
-#undef writew
-#undef writel
-#define readb inb
-#define readw inw
-#define readl inl
-#define writeb outb
-#define writew outw
-#define writel outl
-#endif
+ work only with I/O space accesses. See CONFIG_TULIP_MMIO in .config
+*/
/* Offsets to the Command and Status Registers, "CSRs".
While similar to the Tulip, these registers are longword aligned.
CurTxDescAddr=0x4C, CurTxBufAddr=0x50,
};
-/* Bits in the interrupt status/enable registers. */
-/* The bits in the Intr Status/Enable registers, mostly interrupt sources. */
-enum intr_status_bits {
- NormalIntr=0x10000, AbnormalIntr=0x8000,
- IntrPCIErr=0x2000, TimerInt=0x800,
- IntrRxDied=0x100, RxNoBuf=0x80, IntrRxDone=0x40,
- TxFIFOUnderflow=0x20, RxErrIntr=0x10,
- TxIdle=0x04, IntrTxStopped=0x02, IntrTxDone=0x01,
-};
-
/* Bits in the NetworkConfig register. */
enum rx_mode_bits {
- AcceptErr=0x80, AcceptRunt=0x40,
- AcceptBroadcast=0x20, AcceptMulticast=0x10,
- AcceptAllPhys=0x08, AcceptMyPhys=0x02,
+ AcceptErr=0x80,
+ RxAcceptBroadcast=0x20, AcceptMulticast=0x10,
+ RxAcceptAllPhys=0x08, AcceptMyPhys=0x02,
};
enum mii_reg_bits {
u32 buffer1, buffer2;
};
-/* Bits in network_desc.status */
-enum desc_status_bits {
- DescOwn=0x80000000, DescEndRing=0x02000000, DescUseLink=0x01000000,
- DescWholePkt=0x60000000, DescStartPkt=0x20000000, DescEndPkt=0x40000000,
- DescIntr=0x80000000,
-};
-
#define MII_CNT 1 /* winbond only supports one MII */
struct netdev_private {
struct w840_rx_desc *rx_ring;
unsigned char phys[MII_CNT]; /* MII device addresses, but only the first is used */
u32 mii;
struct mii_if_info mii_if;
+ void __iomem *base_addr;
};
-static int eeprom_read(long ioaddr, int location);
+static int eeprom_read(void __iomem *ioaddr, int location);
static int mdio_read(struct net_device *dev, int phy_id, int location);
static void mdio_write(struct net_device *dev, int phy_id, int location, int value);
static int netdev_open(struct net_device *dev);
static int alloc_ringdesc(struct net_device *dev);
static void free_ringdesc(struct netdev_private *np);
static int start_tx(struct sk_buff *skb, struct net_device *dev);
-static irqreturn_t intr_handler(int irq, void *dev_instance, struct pt_regs *regs);
+static irqreturn_t intr_handler(int irq, void *dev_instance);
static void netdev_error(struct net_device *dev, int intr_status);
static int netdev_rx(struct net_device *dev);
static u32 __set_rx_mode(struct net_device *dev);
static void set_rx_mode(struct net_device *dev);
static struct net_device_stats *get_stats(struct net_device *dev);
static int netdev_ioctl(struct net_device *dev, struct ifreq *rq, int cmd);
-static struct ethtool_ops netdev_ethtool_ops;
+static const struct ethtool_ops netdev_ethtool_ops;
static int netdev_close(struct net_device *dev);
-\f
+
static int __devinit w840_probe1 (struct pci_dev *pdev,
const struct pci_device_id *ent)
int chip_idx = ent->driver_data;
int irq;
int i, option = find_cnt < MAX_UNITS ? options[find_cnt] : 0;
- long ioaddr;
+ void __iomem *ioaddr;
i = pci_enable_device(pdev);
if (i) return i;
irq = pdev->irq;
- if (pci_set_dma_mask(pdev,0xFFFFffff)) {
+ if (pci_set_dma_mask(pdev, DMA_32BIT_MASK)) {
printk(KERN_WARNING "Winbond-840: Device %s disabled due to DMA limitations.\n",
pci_name(pdev));
return -EIO;
if (pci_request_regions(pdev, DRV_NAME))
goto err_out_netdev;
-#ifdef USE_IO_OPS
- ioaddr = pci_resource_start(pdev, 0);
-#else
- ioaddr = pci_resource_start(pdev, 1);
- ioaddr = (long) ioremap (ioaddr, pci_id_tbl[chip_idx].io_size);
+ ioaddr = pci_iomap(pdev, TULIP_BAR, netdev_res_size);
if (!ioaddr)
goto err_out_free_res;
-#endif
for (i = 0; i < 3; i++)
((u16 *)dev->dev_addr)[i] = le16_to_cpu(eeprom_read(ioaddr, i));
/* Reset the chip to erase previous misconfiguration.
No hold time required! */
- writel(0x00000001, ioaddr + PCIBusCfg);
+ iowrite32(0x00000001, ioaddr + PCIBusCfg);
- dev->base_addr = ioaddr;
+ dev->base_addr = (unsigned long)ioaddr;
dev->irq = irq;
- np = dev->priv;
+ np = netdev_priv(dev);
np->pci_dev = pdev;
np->chip_id = chip_idx;
np->drv_flags = pci_id_tbl[chip_idx].drv_flags;
np->mii_if.dev = dev;
np->mii_if.mdio_read = mdio_read;
np->mii_if.mdio_write = mdio_write;
-
+ np->base_addr = ioaddr;
+
pci_set_drvdata(pdev, dev);
if (dev->mem_start)
if (i)
goto err_out_cleardev;
- printk(KERN_INFO "%s: %s at 0x%lx, ",
+ printk(KERN_INFO "%s: %s at %p, ",
dev->name, pci_id_tbl[chip_idx].name, ioaddr);
for (i = 0; i < 5; i++)
printk("%2.2x:", dev->dev_addr[i]);
err_out_cleardev:
pci_set_drvdata(pdev, NULL);
-#ifndef USE_IO_OPS
- iounmap((void *)ioaddr);
+ pci_iounmap(pdev, ioaddr);
err_out_free_res:
-#endif
pci_release_regions(pdev);
err_out_netdev:
free_netdev (dev);
return -ENODEV;
}
-\f
+
/* Read the EEPROM and MII Management Data I/O (MDIO) interfaces. These are
often serial bit streams generated by the host processor.
The example below is for the common 93c46 EEPROM, 64 16 bit words. */
The old method of using an ISA access as a delay, __SLOW_DOWN_IO__, is
deprecated.
*/
-#define eeprom_delay(ee_addr) readl(ee_addr)
+#define eeprom_delay(ee_addr) ioread32(ee_addr)
enum EEPROM_Ctrl_Bits {
EE_ShiftClk=0x02, EE_Write0=0x801, EE_Write1=0x805,
EE_WriteCmd=(5 << 6), EE_ReadCmd=(6 << 6), EE_EraseCmd=(7 << 6),
};
-static int eeprom_read(long addr, int location)
+static int eeprom_read(void __iomem *addr, int location)
{
int i;
int retval = 0;
- long ee_addr = addr + EECtrl;
+ void __iomem *ee_addr = addr + EECtrl;
int read_cmd = location | EE_ReadCmd;
- writel(EE_ChipSelect, ee_addr);
+ iowrite32(EE_ChipSelect, ee_addr);
/* Shift the read command bits out. */
for (i = 10; i >= 0; i--) {
short dataval = (read_cmd & (1 << i)) ? EE_Write1 : EE_Write0;
- writel(dataval, ee_addr);
+ iowrite32(dataval, ee_addr);
eeprom_delay(ee_addr);
- writel(dataval | EE_ShiftClk, ee_addr);
+ iowrite32(dataval | EE_ShiftClk, ee_addr);
eeprom_delay(ee_addr);
}
- writel(EE_ChipSelect, ee_addr);
+ iowrite32(EE_ChipSelect, ee_addr);
eeprom_delay(ee_addr);
for (i = 16; i > 0; i--) {
- writel(EE_ChipSelect | EE_ShiftClk, ee_addr);
+ iowrite32(EE_ChipSelect | EE_ShiftClk, ee_addr);
eeprom_delay(ee_addr);
- retval = (retval << 1) | ((readl(ee_addr) & EE_DataIn) ? 1 : 0);
- writel(EE_ChipSelect, ee_addr);
+ retval = (retval << 1) | ((ioread32(ee_addr) & EE_DataIn) ? 1 : 0);
+ iowrite32(EE_ChipSelect, ee_addr);
eeprom_delay(ee_addr);
}
/* Terminate the EEPROM access. */
- writel(0, ee_addr);
+ iowrite32(0, ee_addr);
return retval;
}
The maximum data clock rate is 2.5 MHz. The minimum timing is usually
met by back-to-back 33 MHz PCI cycles. */
-#define mdio_delay(mdio_addr) readl(mdio_addr)
+#define mdio_delay(mdio_addr) ioread32(mdio_addr)
/* Set iff a MII transceiver on any interface requires mdio preamble.
This only set with older transceivers, so the extra
/* Generate the preamble required for initial synchronization and
a few older transceivers. */
-static void mdio_sync(long mdio_addr)
+static void mdio_sync(void __iomem *mdio_addr)
{
int bits = 32;
/* Establish sync by sending at least 32 logic ones. */
while (--bits >= 0) {
- writel(MDIO_WRITE1, mdio_addr);
+ iowrite32(MDIO_WRITE1, mdio_addr);
mdio_delay(mdio_addr);
- writel(MDIO_WRITE1 | MDIO_ShiftClk, mdio_addr);
+ iowrite32(MDIO_WRITE1 | MDIO_ShiftClk, mdio_addr);
mdio_delay(mdio_addr);
}
}
static int mdio_read(struct net_device *dev, int phy_id, int location)
{
- long mdio_addr = dev->base_addr + MIICtrl;
+ struct netdev_private *np = netdev_priv(dev);
+ void __iomem *mdio_addr = np->base_addr + MIICtrl;
int mii_cmd = (0xf6 << 10) | (phy_id << 5) | location;
int i, retval = 0;
for (i = 15; i >= 0; i--) {
int dataval = (mii_cmd & (1 << i)) ? MDIO_WRITE1 : MDIO_WRITE0;
- writel(dataval, mdio_addr);
+ iowrite32(dataval, mdio_addr);
mdio_delay(mdio_addr);
- writel(dataval | MDIO_ShiftClk, mdio_addr);
+ iowrite32(dataval | MDIO_ShiftClk, mdio_addr);
mdio_delay(mdio_addr);
}
/* Read the two transition, 16 data, and wire-idle bits. */
for (i = 20; i > 0; i--) {
- writel(MDIO_EnbIn, mdio_addr);
+ iowrite32(MDIO_EnbIn, mdio_addr);
mdio_delay(mdio_addr);
- retval = (retval << 1) | ((readl(mdio_addr) & MDIO_DataIn) ? 1 : 0);
- writel(MDIO_EnbIn | MDIO_ShiftClk, mdio_addr);
+ retval = (retval << 1) | ((ioread32(mdio_addr) & MDIO_DataIn) ? 1 : 0);
+ iowrite32(MDIO_EnbIn | MDIO_ShiftClk, mdio_addr);
mdio_delay(mdio_addr);
}
return (retval>>1) & 0xffff;
static void mdio_write(struct net_device *dev, int phy_id, int location, int value)
{
- struct netdev_private *np = dev->priv;
- long mdio_addr = dev->base_addr + MIICtrl;
+ struct netdev_private *np = netdev_priv(dev);
+ void __iomem *mdio_addr = np->base_addr + MIICtrl;
int mii_cmd = (0x5002 << 16) | (phy_id << 23) | (location<<18) | value;
int i;
for (i = 31; i >= 0; i--) {
int dataval = (mii_cmd & (1 << i)) ? MDIO_WRITE1 : MDIO_WRITE0;
- writel(dataval, mdio_addr);
+ iowrite32(dataval, mdio_addr);
mdio_delay(mdio_addr);
- writel(dataval | MDIO_ShiftClk, mdio_addr);
+ iowrite32(dataval | MDIO_ShiftClk, mdio_addr);
mdio_delay(mdio_addr);
}
/* Clear out extra bits. */
for (i = 2; i > 0; i--) {
- writel(MDIO_EnbIn, mdio_addr);
+ iowrite32(MDIO_EnbIn, mdio_addr);
mdio_delay(mdio_addr);
- writel(MDIO_EnbIn | MDIO_ShiftClk, mdio_addr);
+ iowrite32(MDIO_EnbIn | MDIO_ShiftClk, mdio_addr);
mdio_delay(mdio_addr);
}
return;
}
-\f
+
static int netdev_open(struct net_device *dev)
{
- struct netdev_private *np = dev->priv;
- long ioaddr = dev->base_addr;
+ struct netdev_private *np = netdev_priv(dev);
+ void __iomem *ioaddr = np->base_addr;
int i;
- writel(0x00000001, ioaddr + PCIBusCfg); /* Reset */
+ iowrite32(0x00000001, ioaddr + PCIBusCfg); /* Reset */
netif_device_detach(dev);
- i = request_irq(dev->irq, &intr_handler, SA_SHIRQ, dev->name, dev);
+ i = request_irq(dev->irq, &intr_handler, IRQF_SHARED, dev->name, dev);
if (i)
goto out_err;
static int update_link(struct net_device *dev)
{
- struct netdev_private *np = dev->priv;
+ struct netdev_private *np = netdev_priv(dev);
int duplex, fasteth, result, mii_reg;
/* BSMR */
dev->name, np->phys[0]);
netif_carrier_on(dev);
}
-
+
if ((np->mii & ~0xf) == MII_DAVICOM_DM9101) {
/* If the link partner doesn't support autonegotiation
* the MII detects it's abilities with the "parallel detection".
result |= 0x20000000;
if (result != np->csr6 && debug)
printk(KERN_INFO "%s: Setting %dMBit-%s-duplex based on MII#%d\n",
- dev->name, fasteth ? 100 : 10,
+ dev->name, fasteth ? 100 : 10,
duplex ? "full" : "half", np->phys[0]);
return result;
}
#define RXTX_TIMEOUT 2000
static inline void update_csr6(struct net_device *dev, int new)
{
- struct netdev_private *np = dev->priv;
- long ioaddr = dev->base_addr;
+ struct netdev_private *np = netdev_priv(dev);
+ void __iomem *ioaddr = np->base_addr;
int limit = RXTX_TIMEOUT;
if (!netif_device_present(dev))
if (new==np->csr6)
return;
/* stop both Tx and Rx processes */
- writel(np->csr6 & ~0x2002, ioaddr + NetworkConfig);
+ iowrite32(np->csr6 & ~0x2002, ioaddr + NetworkConfig);
/* wait until they have really stopped */
for (;;) {
- int csr5 = readl(ioaddr + IntrStatus);
+ int csr5 = ioread32(ioaddr + IntrStatus);
int t;
t = (csr5 >> 17) & 0x07;
}
np->csr6 = new;
/* and restart them with the new configuration */
- writel(np->csr6, ioaddr + NetworkConfig);
+ iowrite32(np->csr6, ioaddr + NetworkConfig);
if (new & 0x200)
np->mii_if.full_duplex = 1;
}
static void netdev_timer(unsigned long data)
{
struct net_device *dev = (struct net_device *)data;
- struct netdev_private *np = dev->priv;
- long ioaddr = dev->base_addr;
+ struct netdev_private *np = netdev_priv(dev);
+ void __iomem *ioaddr = np->base_addr;
if (debug > 2)
printk(KERN_DEBUG "%s: Media selection timer tick, status %8.8x "
"config %8.8x.\n",
- dev->name, (int)readl(ioaddr + IntrStatus),
- (int)readl(ioaddr + NetworkConfig));
+ dev->name, ioread32(ioaddr + IntrStatus),
+ ioread32(ioaddr + NetworkConfig));
spin_lock_irq(&np->lock);
update_csr6(dev, update_link(dev));
spin_unlock_irq(&np->lock);
static void init_rxtx_rings(struct net_device *dev)
{
- struct netdev_private *np = dev->priv;
+ struct netdev_private *np = netdev_priv(dev);
int i;
np->rx_head_desc = &np->rx_ring[0];
for (i = 0; i < RX_RING_SIZE; i++) {
np->rx_ring[i].length = np->rx_buf_sz;
np->rx_ring[i].status = 0;
- np->rx_skbuff[i] = 0;
+ np->rx_skbuff[i] = NULL;
}
/* Mark the last entry as wrapping the ring. */
np->rx_ring[i-1].length |= DescEndRing;
if (skb == NULL)
break;
skb->dev = dev; /* Mark as being used by this device. */
- np->rx_addr[i] = pci_map_single(np->pci_dev,skb->tail,
- skb->len,PCI_DMA_FROMDEVICE);
+ np->rx_addr[i] = pci_map_single(np->pci_dev,skb->data,
+ np->rx_buf_sz,PCI_DMA_FROMDEVICE);
np->rx_ring[i].buffer1 = np->rx_addr[i];
- np->rx_ring[i].status = DescOwn;
+ np->rx_ring[i].status = DescOwned;
}
np->cur_rx = 0;
/* Initialize the Tx descriptors */
for (i = 0; i < TX_RING_SIZE; i++) {
- np->tx_skbuff[i] = 0;
+ np->tx_skbuff[i] = NULL;
np->tx_ring[i].status = 0;
}
np->tx_full = 0;
np->tx_q_bytes = np->dirty_tx = np->cur_tx = 0;
- writel(np->ring_dma_addr, dev->base_addr + RxRingPtr);
- writel(np->ring_dma_addr+sizeof(struct w840_rx_desc)*RX_RING_SIZE,
- dev->base_addr + TxRingPtr);
+ iowrite32(np->ring_dma_addr, np->base_addr + RxRingPtr);
+ iowrite32(np->ring_dma_addr+sizeof(struct w840_rx_desc)*RX_RING_SIZE,
+ np->base_addr + TxRingPtr);
}
PCI_DMA_FROMDEVICE);
dev_kfree_skb(np->rx_skbuff[i]);
}
- np->rx_skbuff[i] = 0;
+ np->rx_skbuff[i] = NULL;
}
for (i = 0; i < TX_RING_SIZE; i++) {
if (np->tx_skbuff[i]) {
PCI_DMA_TODEVICE);
dev_kfree_skb(np->tx_skbuff[i]);
}
- np->tx_skbuff[i] = 0;
+ np->tx_skbuff[i] = NULL;
}
}
static void init_registers(struct net_device *dev)
{
- struct netdev_private *np = dev->priv;
- long ioaddr = dev->base_addr;
+ struct netdev_private *np = netdev_priv(dev);
+ void __iomem *ioaddr = np->base_addr;
int i;
for (i = 0; i < 6; i++)
- writeb(dev->dev_addr[i], ioaddr + StationAddr + i);
+ iowrite8(dev->dev_addr[i], ioaddr + StationAddr + i);
/* Initialize other registers. */
#ifdef __BIG_ENDIAN
}
#elif defined(__powerpc__) || defined(__i386__) || defined(__alpha__) || defined(__ia64__) || defined(__x86_64__)
i |= 0xE000;
-#elif defined(__sparc__)
+#elif defined(__sparc__) || defined (CONFIG_PARISC)
i |= 0x4800;
#else
#warning Processor architecture undefined
i |= 0x4800;
#endif
- writel(i, ioaddr + PCIBusCfg);
+ iowrite32(i, ioaddr + PCIBusCfg);
np->csr6 = 0;
- /* 128 byte Tx threshold;
+ /* 128 byte Tx threshold;
Transmit on; Receive on; */
update_csr6(dev, 0x00022002 | update_link(dev) | __set_rx_mode(dev));
/* Clear and Enable interrupts by setting the interrupt mask. */
- writel(0x1A0F5, ioaddr + IntrStatus);
- writel(0x1A0F5, ioaddr + IntrEnable);
+ iowrite32(0x1A0F5, ioaddr + IntrStatus);
+ iowrite32(0x1A0F5, ioaddr + IntrEnable);
- writel(0, ioaddr + RxStartDemand);
+ iowrite32(0, ioaddr + RxStartDemand);
}
static void tx_timeout(struct net_device *dev)
{
- struct netdev_private *np = dev->priv;
- long ioaddr = dev->base_addr;
+ struct netdev_private *np = netdev_priv(dev);
+ void __iomem *ioaddr = np->base_addr;
printk(KERN_WARNING "%s: Transmit timed out, status %8.8x,"
- " resetting...\n", dev->name, (int)readl(ioaddr + IntrStatus));
+ " resetting...\n", dev->name, ioread32(ioaddr + IntrStatus));
{
int i;
}
printk(KERN_DEBUG "Tx cur %d Tx dirty %d Tx Full %d, q bytes %d.\n",
np->cur_tx, np->dirty_tx, np->tx_full, np->tx_q_bytes);
- printk(KERN_DEBUG "Tx Descriptor addr %xh.\n",readl(ioaddr+0x4C));
+ printk(KERN_DEBUG "Tx Descriptor addr %xh.\n",ioread32(ioaddr+0x4C));
disable_irq(dev->irq);
spin_lock_irq(&np->lock);
* everything.
*/
- writel(1, dev->base_addr+PCIBusCfg);
+ iowrite32(1, np->base_addr+PCIBusCfg);
udelay(1);
free_rxtx_rings(np);
/* Initialize the Rx and Tx rings, along with various 'dev' bits. */
static int alloc_ringdesc(struct net_device *dev)
{
- struct netdev_private *np = dev->priv;
+ struct netdev_private *np = netdev_priv(dev);
np->rx_buf_sz = (dev->mtu <= 1500 ? PKT_BUF_SZ : dev->mtu + 32);
static int start_tx(struct sk_buff *skb, struct net_device *dev)
{
- struct netdev_private *np = dev->priv;
+ struct netdev_private *np = netdev_priv(dev);
unsigned entry;
/* Caution: the write order is important here, set the field
/* Now acquire the irq spinlock.
 * The difficult race is the ordering between
- * increasing np->cur_tx and setting DescOwn:
+ * increasing np->cur_tx and setting DescOwned:
* - if np->cur_tx is increased first the interrupt
* handler could consider the packet as transmitted
- * since DescOwn is cleared.
- * - If DescOwn is set first the NIC could report the
+ * since DescOwned is cleared.
+ * - If DescOwned is set first the NIC could report the
* packet as sent, but the interrupt handler would ignore it
* since the np->cur_tx was not yet increased.
*/
np->cur_tx++;
wmb(); /* flush length, buffer1, buffer2 */
- np->tx_ring[entry].status = DescOwn;
+ np->tx_ring[entry].status = DescOwned;
wmb(); /* flush status and kick the hardware */
- writel(0, dev->base_addr + TxStartDemand);
+ iowrite32(0, np->base_addr + TxStartDemand);
np->tx_q_bytes += skb->len;
/* Work around horrible bug in the chip by marking the queue as full
when we do not have FIFO room for a maximum sized packet. */
static void netdev_tx_done(struct net_device *dev)
{
- struct netdev_private *np = dev->priv;
+ struct netdev_private *np = netdev_priv(dev);
for (; np->cur_tx - np->dirty_tx > 0; np->dirty_tx++) {
int entry = np->dirty_tx % TX_RING_SIZE;
int tx_status = np->tx_ring[entry].status;
PCI_DMA_TODEVICE);
np->tx_q_bytes -= np->tx_skbuff[entry]->len;
dev_kfree_skb_irq(np->tx_skbuff[entry]);
- np->tx_skbuff[entry] = 0;
+ np->tx_skbuff[entry] = NULL;
}
if (np->tx_full &&
np->cur_tx - np->dirty_tx < TX_QUEUE_LEN_RESTART &&
/* The interrupt handler does all of the Rx thread work and cleans up
after the Tx thread. */
-static irqreturn_t intr_handler(int irq, void *dev_instance, struct pt_regs *rgs)
+static irqreturn_t intr_handler(int irq, void *dev_instance)
{
struct net_device *dev = (struct net_device *)dev_instance;
- struct netdev_private *np = dev->priv;
- long ioaddr = dev->base_addr;
+ struct netdev_private *np = netdev_priv(dev);
+ void __iomem *ioaddr = np->base_addr;
int work_limit = max_interrupt_work;
int handled = 0;
if (!netif_device_present(dev))
return IRQ_NONE;
do {
- u32 intr_status = readl(ioaddr + IntrStatus);
+ u32 intr_status = ioread32(ioaddr + IntrStatus);
/* Acknowledge all of the current interrupt sources ASAP. */
- writel(intr_status & 0x001ffff, ioaddr + IntrStatus);
+ iowrite32(intr_status & 0x001ffff, ioaddr + IntrStatus);
if (debug > 4)
printk(KERN_DEBUG "%s: Interrupt, status %4.4x.\n",
handled = 1;
- if (intr_status & (IntrRxDone | RxNoBuf))
+ if (intr_status & (RxIntr | RxNoBuf))
netdev_rx(dev);
if (intr_status & RxNoBuf)
- writel(0, ioaddr + RxStartDemand);
+ iowrite32(0, ioaddr + RxStartDemand);
- if (intr_status & (TxIdle | IntrTxDone) &&
+ if (intr_status & (TxNoBuf | TxIntr) &&
np->cur_tx != np->dirty_tx) {
spin_lock(&np->lock);
netdev_tx_done(dev);
}
/* Abnormal error summary/uncommon events handlers. */
- if (intr_status & (AbnormalIntr | TxFIFOUnderflow | IntrPCIErr |
- TimerInt | IntrTxStopped))
+ if (intr_status & (AbnormalIntr | TxFIFOUnderflow | SytemError |
+ TimerInt | TxDied))
netdev_error(dev, intr_status);
if (--work_limit < 0) {
10*82usec ticks. */
spin_lock(&np->lock);
if (netif_device_present(dev)) {
- writel(AbnormalIntr | TimerInt, ioaddr + IntrEnable);
- writel(10, ioaddr + GPTimer);
+ iowrite32(AbnormalIntr | TimerInt, ioaddr + IntrEnable);
+ iowrite32(10, ioaddr + GPTimer);
}
spin_unlock(&np->lock);
break;
if (debug > 3)
printk(KERN_DEBUG "%s: exiting interrupt, status=%#4.4x.\n",
- dev->name, (int)readl(ioaddr + IntrStatus));
+ dev->name, ioread32(ioaddr + IntrStatus));
return IRQ_RETVAL(handled);
}
for clarity and better register allocation. */
static int netdev_rx(struct net_device *dev)
{
- struct netdev_private *np = dev->priv;
+ struct netdev_private *np = netdev_priv(dev);
int entry = np->cur_rx % RX_RING_SIZE;
int work_limit = np->dirty_rx + RX_RING_SIZE - np->cur_rx;
pci_dma_sync_single_for_cpu(np->pci_dev,np->rx_addr[entry],
np->rx_skbuff[entry]->len,
PCI_DMA_FROMDEVICE);
- /* Call copy + cksum if available. */
-#if HAS_IP_COPYSUM
- eth_copy_and_sum(skb, np->rx_skbuff[entry]->tail, pkt_len, 0);
+ eth_copy_and_sum(skb, np->rx_skbuff[entry]->data, pkt_len, 0);
skb_put(skb, pkt_len);
-#else
- memcpy(skb_put(skb, pkt_len), np->rx_skbuff[entry]->tail,
- pkt_len);
-#endif
pci_dma_sync_single_for_device(np->pci_dev,np->rx_addr[entry],
np->rx_skbuff[entry]->len,
PCI_DMA_FROMDEVICE);
break; /* Better luck next round. */
skb->dev = dev; /* Mark as being used by this device. */
np->rx_addr[entry] = pci_map_single(np->pci_dev,
- skb->tail,
- skb->len, PCI_DMA_FROMDEVICE);
+ skb->data,
+ np->rx_buf_sz, PCI_DMA_FROMDEVICE);
np->rx_ring[entry].buffer1 = np->rx_addr[entry];
}
wmb();
- np->rx_ring[entry].status = DescOwn;
+ np->rx_ring[entry].status = DescOwned;
}
return 0;
static void netdev_error(struct net_device *dev, int intr_status)
{
- long ioaddr = dev->base_addr;
- struct netdev_private *np = dev->priv;
+ struct netdev_private *np = netdev_priv(dev);
+ void __iomem *ioaddr = np->base_addr;
if (debug > 2)
printk(KERN_DEBUG "%s: Abnormal event, %8.8x.\n",
dev->name, new);
update_csr6(dev, new);
}
- if (intr_status & IntrRxDied) { /* Missed a Rx frame. */
+ if (intr_status & RxDied) { /* Missed a Rx frame. */
np->stats.rx_errors++;
}
if (intr_status & TimerInt) {
/* Re-enable other interrupts. */
if (netif_device_present(dev))
- writel(0x1A0F5, ioaddr + IntrEnable);
+ iowrite32(0x1A0F5, ioaddr + IntrEnable);
}
- np->stats.rx_missed_errors += readl(ioaddr + RxMissed) & 0xffff;
- writel(0, ioaddr + RxStartDemand);
+ np->stats.rx_missed_errors += ioread32(ioaddr + RxMissed) & 0xffff;
+ iowrite32(0, ioaddr + RxStartDemand);
spin_unlock(&np->lock);
}
static struct net_device_stats *get_stats(struct net_device *dev)
{
- long ioaddr = dev->base_addr;
- struct netdev_private *np = dev->priv;
+ struct netdev_private *np = netdev_priv(dev);
+ void __iomem *ioaddr = np->base_addr;
/* The chip only need report frame silently dropped. */
spin_lock_irq(&np->lock);
if (netif_running(dev) && netif_device_present(dev))
- np->stats.rx_missed_errors += readl(ioaddr + RxMissed) & 0xffff;
+ np->stats.rx_missed_errors += ioread32(ioaddr + RxMissed) & 0xffff;
spin_unlock_irq(&np->lock);
return &np->stats;
static u32 __set_rx_mode(struct net_device *dev)
{
- long ioaddr = dev->base_addr;
+ struct netdev_private *np = netdev_priv(dev);
+ void __iomem *ioaddr = np->base_addr;
u32 mc_filter[2]; /* Multicast hash filter */
u32 rx_mode;
if (dev->flags & IFF_PROMISC) { /* Set promiscuous. */
- /* Unconditionally log net taps. */
- printk(KERN_NOTICE "%s: Promiscuous mode enabled.\n", dev->name);
memset(mc_filter, 0xff, sizeof(mc_filter));
- rx_mode = AcceptBroadcast | AcceptMulticast | AcceptAllPhys
+ rx_mode = RxAcceptBroadcast | AcceptMulticast | RxAcceptAllPhys
| AcceptMyPhys;
} else if ((dev->mc_count > multicast_filter_limit)
|| (dev->flags & IFF_ALLMULTI)) {
/* Too many to match, or accept all multicasts. */
memset(mc_filter, 0xff, sizeof(mc_filter));
- rx_mode = AcceptBroadcast | AcceptMulticast | AcceptMyPhys;
+ rx_mode = RxAcceptBroadcast | AcceptMulticast | AcceptMyPhys;
} else {
struct dev_mc_list *mclist;
int i;
i++, mclist = mclist->next) {
int filterbit = (ether_crc(ETH_ALEN, mclist->dmi_addr) >> 26) ^ 0x3F;
filterbit &= 0x3f;
- mc_filter[filterbit >> 5] |= cpu_to_le32(1 << (filterbit & 31));
+ mc_filter[filterbit >> 5] |= 1 << (filterbit & 31);
}
- rx_mode = AcceptBroadcast | AcceptMulticast | AcceptMyPhys;
+ rx_mode = RxAcceptBroadcast | AcceptMulticast | AcceptMyPhys;
}
- writel(mc_filter[0], ioaddr + MulticastFilter0);
- writel(mc_filter[1], ioaddr + MulticastFilter1);
+ iowrite32(mc_filter[0], ioaddr + MulticastFilter0);
+ iowrite32(mc_filter[1], ioaddr + MulticastFilter1);
return rx_mode;
}
static void set_rx_mode(struct net_device *dev)
{
- struct netdev_private *np = dev->priv;
+ struct netdev_private *np = netdev_priv(dev);
u32 rx_mode = __set_rx_mode(dev);
spin_lock_irq(&np->lock);
update_csr6(dev, (np->csr6 & ~0x00F8) | rx_mode);
static void netdev_get_drvinfo (struct net_device *dev, struct ethtool_drvinfo *info)
{
- struct netdev_private *np = dev->priv;
+ struct netdev_private *np = netdev_priv(dev);
strcpy (info->driver, DRV_NAME);
strcpy (info->version, DRV_VERSION);
static int netdev_get_settings(struct net_device *dev, struct ethtool_cmd *cmd)
{
- struct netdev_private *np = dev->priv;
+ struct netdev_private *np = netdev_priv(dev);
int rc;
spin_lock_irq(&np->lock);
static int netdev_set_settings(struct net_device *dev, struct ethtool_cmd *cmd)
{
- struct netdev_private *np = dev->priv;
+ struct netdev_private *np = netdev_priv(dev);
int rc;
spin_lock_irq(&np->lock);
static int netdev_nway_reset(struct net_device *dev)
{
- struct netdev_private *np = dev->priv;
+ struct netdev_private *np = netdev_priv(dev);
return mii_nway_restart(&np->mii_if);
}
static u32 netdev_get_link(struct net_device *dev)
{
- struct netdev_private *np = dev->priv;
+ struct netdev_private *np = netdev_priv(dev);
return mii_link_ok(&np->mii_if);
}
debug = value;
}
-static struct ethtool_ops netdev_ethtool_ops = {
+static const struct ethtool_ops netdev_ethtool_ops = {
.get_drvinfo = netdev_get_drvinfo,
.get_settings = netdev_get_settings,
.set_settings = netdev_set_settings,
static int netdev_ioctl(struct net_device *dev, struct ifreq *rq, int cmd)
{
- struct mii_ioctl_data *data = (struct mii_ioctl_data *)&rq->ifr_data;
- struct netdev_private *np = dev->priv;
+ struct mii_ioctl_data *data = if_mii(rq);
+ struct netdev_private *np = netdev_priv(dev);
switch(cmd) {
case SIOCGMIIPHY: /* Get address of MII PHY in use. */
- data->phy_id = ((struct netdev_private *)dev->priv)->phys[0] & 0x1f;
+ data->phy_id = ((struct netdev_private *)netdev_priv(dev))->phys[0] & 0x1f;
/* Fall Through */
case SIOCGMIIREG: /* Read MII PHY register. */
static int netdev_close(struct net_device *dev)
{
- long ioaddr = dev->base_addr;
- struct netdev_private *np = dev->priv;
+ struct netdev_private *np = netdev_priv(dev);
+ void __iomem *ioaddr = np->base_addr;
netif_stop_queue(dev);
if (debug > 1) {
printk(KERN_DEBUG "%s: Shutting down ethercard, status was %8.8x "
- "Config %8.8x.\n", dev->name, (int)readl(ioaddr + IntrStatus),
- (int)readl(ioaddr + NetworkConfig));
+ "Config %8.8x.\n", dev->name, ioread32(ioaddr + IntrStatus),
+ ioread32(ioaddr + NetworkConfig));
printk(KERN_DEBUG "%s: Queue pointers were Tx %d / %d, Rx %d / %d.\n",
dev->name, np->cur_tx, np->dirty_tx, np->cur_rx, np->dirty_rx);
}
spin_lock_irq(&np->lock);
netif_device_detach(dev);
update_csr6(dev, 0);
- writel(0x0000, ioaddr + IntrEnable);
+ iowrite32(0x0000, ioaddr + IntrEnable);
spin_unlock_irq(&np->lock);
free_irq(dev->irq, dev);
wmb();
netif_device_attach(dev);
- if (readl(ioaddr + NetworkConfig) != 0xffffffff)
- np->stats.rx_missed_errors += readl(ioaddr + RxMissed) & 0xffff;
+ if (ioread32(ioaddr + NetworkConfig) != 0xffffffff)
+ np->stats.rx_missed_errors += ioread32(ioaddr + RxMissed) & 0xffff;
#ifdef __i386__
if (debug > 2) {
static void __devexit w840_remove1 (struct pci_dev *pdev)
{
struct net_device *dev = pci_get_drvdata(pdev);
-
+
if (dev) {
+ struct netdev_private *np = netdev_priv(dev);
unregister_netdev(dev);
pci_release_regions(pdev);
-#ifndef USE_IO_OPS
- iounmap((char *)(dev->base_addr));
-#endif
+ pci_iounmap(pdev, np->base_addr);
free_netdev(dev);
}
* - get_stats:
* spin_lock_irq(np->lock), doesn't touch hw if not present
* - hard_start_xmit:
- * netif_stop_queue + spin_unlock_wait(&dev->xmit_lock);
+ * synchronize_irq + netif_tx_disable;
* - tx_timeout:
- * netif_device_detach + spin_unlock_wait(&dev->xmit_lock);
+ * netif_device_detach + netif_tx_disable;
* - set_multicast_list
- * netif_device_detach + spin_unlock_wait(&dev->xmit_lock);
+ * netif_device_detach + netif_tx_disable;
* - interrupt handler
* doesn't touch hw if not present, synchronize_irq waits for
* running instances of the interrupt handler.
* Detach must occur under spin_unlock_irq(), interrupts from a detached
* device would cause an irq storm.
*/
-static int w840_suspend (struct pci_dev *pdev, u32 state)
+static int w840_suspend (struct pci_dev *pdev, pm_message_t state)
{
struct net_device *dev = pci_get_drvdata (pdev);
- struct netdev_private *np = dev->priv;
- long ioaddr = dev->base_addr;
+ struct netdev_private *np = netdev_priv(dev);
+ void __iomem *ioaddr = np->base_addr;
rtnl_lock();
if (netif_running (dev)) {
spin_lock_irq(&np->lock);
netif_device_detach(dev);
update_csr6(dev, 0);
- writel(0, ioaddr + IntrEnable);
- netif_stop_queue(dev);
+ iowrite32(0, ioaddr + IntrEnable);
spin_unlock_irq(&np->lock);
- spin_unlock_wait(&dev->xmit_lock);
synchronize_irq(dev->irq);
-
- np->stats.rx_missed_errors += readl(ioaddr + RxMissed) & 0xffff;
+ netif_tx_disable(dev);
+
+ np->stats.rx_missed_errors += ioread32(ioaddr + RxMissed) & 0xffff;
/* no more hardware accesses behind this line. */
- if (np->csr6) BUG();
- if (readl(ioaddr + IntrEnable)) BUG();
+ BUG_ON(np->csr6);
+ if (ioread32(ioaddr + IntrEnable)) BUG();
/* pci_power_off(pdev, -1); */
static int w840_resume (struct pci_dev *pdev)
{
struct net_device *dev = pci_get_drvdata (pdev);
- struct netdev_private *np = dev->priv;
+ struct netdev_private *np = netdev_priv(dev);
+ int retval = 0;
rtnl_lock();
if (netif_device_present(dev))
goto out; /* device not suspended */
if (netif_running(dev)) {
- pci_enable_device(pdev);
- /* pci_power_on(pdev); */
-
+ if ((retval = pci_enable_device(pdev))) {
+ printk (KERN_ERR
+ "%s: pci_enable_device failed in resume\n",
+ dev->name);
+ goto out;
+ }
spin_lock_irq(&np->lock);
- writel(1, dev->base_addr+PCIBusCfg);
- readl(dev->base_addr+PCIBusCfg);
+ iowrite32(1, np->base_addr+PCIBusCfg);
+ ioread32(np->base_addr+PCIBusCfg);
udelay(1);
netif_device_attach(dev);
init_rxtx_rings(dev);
}
out:
rtnl_unlock();
- return 0;
+ return retval;
}
#endif
static int __init w840_init(void)
{
-/* when a module, this is printed whether or not devices are found in probe */
-#ifdef MODULE
printk(version);
-#endif
- return pci_module_init(&w840_driver);
+ return pci_register_driver(&w840_driver);
}
static void __exit w840_exit(void)