#include <linux/mii.h>
#include <linux/rtnetlink.h>
#include <linux/crc32.h>
+#include <linux/bitops.h>
#include <asm/uaccess.h>
#include <asm/processor.h> /* Processor type for cache alignment. */
-#include <asm/bitops.h>
#include <asm/io.h>
#include <asm/irq.h>
MODULE_AUTHOR("Donald Becker <becker@scyld.com>");
MODULE_DESCRIPTION("Winbond W89c840 Ethernet driver");
MODULE_LICENSE("GPL");
-
-MODULE_PARM(max_interrupt_work, "i");
-MODULE_PARM(debug, "i");
-MODULE_PARM(rx_copybreak, "i");
-MODULE_PARM(multicast_filter_limit, "i");
-MODULE_PARM(options, "1-" __MODULE_STRING(MAX_UNITS) "i");
-MODULE_PARM(full_duplex, "1-" __MODULE_STRING(MAX_UNITS) "i");
+MODULE_VERSION(DRV_VERSION);
+
+module_param(max_interrupt_work, int, 0);
+module_param(debug, int, 0);
+module_param(rx_copybreak, int, 0);
+module_param(multicast_filter_limit, int, 0);
+module_param_array(options, int, NULL, 0);
+module_param_array(full_duplex, int, NULL, 0);
MODULE_PARM_DESC(max_interrupt_work, "winbond-840 maximum events handled per interrupt");
MODULE_PARM_DESC(debug, "winbond-840 debug level (0-6)");
MODULE_PARM_DESC(rx_copybreak, "winbond-840 copy breakpoint for copy-only-tiny-frames");
work only with I/O space accesses. Pass -DUSE_IO_OPS to use PCI I/O space
accesses instead of memory space. */
-#ifdef USE_IO_OPS
-#undef readb
-#undef readw
-#undef readl
-#undef writeb
-#undef writew
-#undef writel
-#define readb inb
-#define readw inw
-#define readl inl
-#define writeb outb
-#define writew outw
-#define writel outl
-#endif
-
/* Offsets to the Command and Status Registers, "CSRs".
While similar to the Tulip, these registers are longword aligned.
Note: It's not useful to define symbolic names for every register bit in
unsigned char phys[MII_CNT]; /* MII device addresses, but only the first is used */
u32 mii;
struct mii_if_info mii_if;
+ void __iomem *base_addr;
};
-static int eeprom_read(long ioaddr, int location);
+static int eeprom_read(void __iomem *ioaddr, int location);
static int mdio_read(struct net_device *dev, int phy_id, int location);
static void mdio_write(struct net_device *dev, int phy_id, int location, int value);
static int netdev_open(struct net_device *dev);
int chip_idx = ent->driver_data;
int irq;
int i, option = find_cnt < MAX_UNITS ? options[find_cnt] : 0;
- long ioaddr;
+ void __iomem *ioaddr;
+ int bar = 1;
i = pci_enable_device(pdev);
if (i) return i;
if (pci_request_regions(pdev, DRV_NAME))
goto err_out_netdev;
-
#ifdef USE_IO_OPS
- ioaddr = pci_resource_start(pdev, 0);
-#else
- ioaddr = pci_resource_start(pdev, 1);
- ioaddr = (long) ioremap (ioaddr, pci_id_tbl[chip_idx].io_size);
+ bar = 0;
+#endif
+ ioaddr = pci_iomap(pdev, bar, pci_id_tbl[chip_idx].io_size);
if (!ioaddr)
goto err_out_free_res;
-#endif
for (i = 0; i < 3; i++)
((u16 *)dev->dev_addr)[i] = le16_to_cpu(eeprom_read(ioaddr, i));
/* Reset the chip to erase previous misconfiguration.
No hold time required! */
- writel(0x00000001, ioaddr + PCIBusCfg);
+ iowrite32(0x00000001, ioaddr + PCIBusCfg);
- dev->base_addr = ioaddr;
+ dev->base_addr = (unsigned long)ioaddr;
dev->irq = irq;
- np = dev->priv;
+ np = netdev_priv(dev);
np->pci_dev = pdev;
np->chip_id = chip_idx;
np->drv_flags = pci_id_tbl[chip_idx].drv_flags;
np->mii_if.dev = dev;
np->mii_if.mdio_read = mdio_read;
np->mii_if.mdio_write = mdio_write;
+ np->base_addr = ioaddr;
pci_set_drvdata(pdev, dev);
if (i)
goto err_out_cleardev;
- printk(KERN_INFO "%s: %s at 0x%lx, ",
+ printk(KERN_INFO "%s: %s at %p, ",
dev->name, pci_id_tbl[chip_idx].name, ioaddr);
for (i = 0; i < 5; i++)
printk("%2.2x:", dev->dev_addr[i]);
err_out_cleardev:
pci_set_drvdata(pdev, NULL);
-#ifndef USE_IO_OPS
- iounmap((void *)ioaddr);
+ pci_iounmap(pdev, ioaddr);
err_out_free_res:
-#endif
pci_release_regions(pdev);
err_out_netdev:
free_netdev (dev);
	The old method of using an ISA access as a delay, __SLOW_DOWN_IO__, is
	deprecated.
*/
-#define eeprom_delay(ee_addr) readl(ee_addr)
+#define eeprom_delay(ee_addr) ioread32(ee_addr)
enum EEPROM_Ctrl_Bits {
EE_ShiftClk=0x02, EE_Write0=0x801, EE_Write1=0x805,
EE_WriteCmd=(5 << 6), EE_ReadCmd=(6 << 6), EE_EraseCmd=(7 << 6),
};
-static int eeprom_read(long addr, int location)
+static int eeprom_read(void __iomem *addr, int location)
{
int i;
int retval = 0;
- long ee_addr = addr + EECtrl;
+ void __iomem *ee_addr = addr + EECtrl;
int read_cmd = location | EE_ReadCmd;
- writel(EE_ChipSelect, ee_addr);
+ iowrite32(EE_ChipSelect, ee_addr);
/* Shift the read command bits out. */
for (i = 10; i >= 0; i--) {
short dataval = (read_cmd & (1 << i)) ? EE_Write1 : EE_Write0;
- writel(dataval, ee_addr);
+ iowrite32(dataval, ee_addr);
eeprom_delay(ee_addr);
- writel(dataval | EE_ShiftClk, ee_addr);
+ iowrite32(dataval | EE_ShiftClk, ee_addr);
eeprom_delay(ee_addr);
}
- writel(EE_ChipSelect, ee_addr);
+ iowrite32(EE_ChipSelect, ee_addr);
eeprom_delay(ee_addr);
for (i = 16; i > 0; i--) {
- writel(EE_ChipSelect | EE_ShiftClk, ee_addr);
+ iowrite32(EE_ChipSelect | EE_ShiftClk, ee_addr);
eeprom_delay(ee_addr);
- retval = (retval << 1) | ((readl(ee_addr) & EE_DataIn) ? 1 : 0);
- writel(EE_ChipSelect, ee_addr);
+ retval = (retval << 1) | ((ioread32(ee_addr) & EE_DataIn) ? 1 : 0);
+ iowrite32(EE_ChipSelect, ee_addr);
eeprom_delay(ee_addr);
}
/* Terminate the EEPROM access. */
- writel(0, ee_addr);
+ iowrite32(0, ee_addr);
return retval;
}
   The maximum data clock rate is 2.5 MHz.  The minimum timing is usually
   met by back-to-back 33 MHz PCI cycles. */
-#define mdio_delay(mdio_addr) readl(mdio_addr)
+#define mdio_delay(mdio_addr) ioread32(mdio_addr)
/* Set iff a MII transceiver on any interface requires mdio preamble.
This only set with older transceivers, so the extra
/* Generate the preamble required for initial synchronization and
a few older transceivers. */
-static void mdio_sync(long mdio_addr)
+static void mdio_sync(void __iomem *mdio_addr)
{
int bits = 32;
/* Establish sync by sending at least 32 logic ones. */
while (--bits >= 0) {
- writel(MDIO_WRITE1, mdio_addr);
+ iowrite32(MDIO_WRITE1, mdio_addr);
mdio_delay(mdio_addr);
- writel(MDIO_WRITE1 | MDIO_ShiftClk, mdio_addr);
+ iowrite32(MDIO_WRITE1 | MDIO_ShiftClk, mdio_addr);
mdio_delay(mdio_addr);
}
}
static int mdio_read(struct net_device *dev, int phy_id, int location)
{
- long mdio_addr = dev->base_addr + MIICtrl;
+ struct netdev_private *np = netdev_priv(dev);
+ void __iomem *mdio_addr = np->base_addr + MIICtrl;
int mii_cmd = (0xf6 << 10) | (phy_id << 5) | location;
int i, retval = 0;
for (i = 15; i >= 0; i--) {
int dataval = (mii_cmd & (1 << i)) ? MDIO_WRITE1 : MDIO_WRITE0;
- writel(dataval, mdio_addr);
+ iowrite32(dataval, mdio_addr);
mdio_delay(mdio_addr);
- writel(dataval | MDIO_ShiftClk, mdio_addr);
+ iowrite32(dataval | MDIO_ShiftClk, mdio_addr);
mdio_delay(mdio_addr);
}
/* Read the two transition, 16 data, and wire-idle bits. */
for (i = 20; i > 0; i--) {
- writel(MDIO_EnbIn, mdio_addr);
+ iowrite32(MDIO_EnbIn, mdio_addr);
mdio_delay(mdio_addr);
- retval = (retval << 1) | ((readl(mdio_addr) & MDIO_DataIn) ? 1 : 0);
- writel(MDIO_EnbIn | MDIO_ShiftClk, mdio_addr);
+ retval = (retval << 1) | ((ioread32(mdio_addr) & MDIO_DataIn) ? 1 : 0);
+ iowrite32(MDIO_EnbIn | MDIO_ShiftClk, mdio_addr);
mdio_delay(mdio_addr);
}
return (retval>>1) & 0xffff;
static void mdio_write(struct net_device *dev, int phy_id, int location, int value)
{
- struct netdev_private *np = dev->priv;
- long mdio_addr = dev->base_addr + MIICtrl;
+ struct netdev_private *np = netdev_priv(dev);
+ void __iomem *mdio_addr = np->base_addr + MIICtrl;
int mii_cmd = (0x5002 << 16) | (phy_id << 23) | (location<<18) | value;
int i;
for (i = 31; i >= 0; i--) {
int dataval = (mii_cmd & (1 << i)) ? MDIO_WRITE1 : MDIO_WRITE0;
- writel(dataval, mdio_addr);
+ iowrite32(dataval, mdio_addr);
mdio_delay(mdio_addr);
- writel(dataval | MDIO_ShiftClk, mdio_addr);
+ iowrite32(dataval | MDIO_ShiftClk, mdio_addr);
mdio_delay(mdio_addr);
}
/* Clear out extra bits. */
for (i = 2; i > 0; i--) {
- writel(MDIO_EnbIn, mdio_addr);
+ iowrite32(MDIO_EnbIn, mdio_addr);
mdio_delay(mdio_addr);
- writel(MDIO_EnbIn | MDIO_ShiftClk, mdio_addr);
+ iowrite32(MDIO_EnbIn | MDIO_ShiftClk, mdio_addr);
mdio_delay(mdio_addr);
}
return;
\f
static int netdev_open(struct net_device *dev)
{
- struct netdev_private *np = dev->priv;
- long ioaddr = dev->base_addr;
+ struct netdev_private *np = netdev_priv(dev);
+ void __iomem *ioaddr = np->base_addr;
int i;
- writel(0x00000001, ioaddr + PCIBusCfg); /* Reset */
+ iowrite32(0x00000001, ioaddr + PCIBusCfg); /* Reset */
netif_device_detach(dev);
i = request_irq(dev->irq, &intr_handler, SA_SHIRQ, dev->name, dev);
static int update_link(struct net_device *dev)
{
- struct netdev_private *np = dev->priv;
+ struct netdev_private *np = netdev_priv(dev);
int duplex, fasteth, result, mii_reg;
/* BSMR */
#define RXTX_TIMEOUT 2000
static inline void update_csr6(struct net_device *dev, int new)
{
- struct netdev_private *np = dev->priv;
- long ioaddr = dev->base_addr;
+ struct netdev_private *np = netdev_priv(dev);
+ void __iomem *ioaddr = np->base_addr;
int limit = RXTX_TIMEOUT;
if (!netif_device_present(dev))
if (new==np->csr6)
return;
/* stop both Tx and Rx processes */
- writel(np->csr6 & ~0x2002, ioaddr + NetworkConfig);
+ iowrite32(np->csr6 & ~0x2002, ioaddr + NetworkConfig);
/* wait until they have really stopped */
for (;;) {
- int csr5 = readl(ioaddr + IntrStatus);
+ int csr5 = ioread32(ioaddr + IntrStatus);
int t;
t = (csr5 >> 17) & 0x07;
}
np->csr6 = new;
/* and restart them with the new configuration */
- writel(np->csr6, ioaddr + NetworkConfig);
+ iowrite32(np->csr6, ioaddr + NetworkConfig);
if (new & 0x200)
np->mii_if.full_duplex = 1;
}
static void netdev_timer(unsigned long data)
{
struct net_device *dev = (struct net_device *)data;
- struct netdev_private *np = dev->priv;
- long ioaddr = dev->base_addr;
+ struct netdev_private *np = netdev_priv(dev);
+ void __iomem *ioaddr = np->base_addr;
if (debug > 2)
printk(KERN_DEBUG "%s: Media selection timer tick, status %8.8x "
"config %8.8x.\n",
- dev->name, (int)readl(ioaddr + IntrStatus),
- (int)readl(ioaddr + NetworkConfig));
+ dev->name, ioread32(ioaddr + IntrStatus),
+ ioread32(ioaddr + NetworkConfig));
spin_lock_irq(&np->lock);
update_csr6(dev, update_link(dev));
spin_unlock_irq(&np->lock);
static void init_rxtx_rings(struct net_device *dev)
{
- struct netdev_private *np = dev->priv;
+ struct netdev_private *np = netdev_priv(dev);
int i;
np->rx_head_desc = &np->rx_ring[0];
np->tx_full = 0;
np->tx_q_bytes = np->dirty_tx = np->cur_tx = 0;
- writel(np->ring_dma_addr, dev->base_addr + RxRingPtr);
- writel(np->ring_dma_addr+sizeof(struct w840_rx_desc)*RX_RING_SIZE,
- dev->base_addr + TxRingPtr);
+ iowrite32(np->ring_dma_addr, np->base_addr + RxRingPtr);
+ iowrite32(np->ring_dma_addr+sizeof(struct w840_rx_desc)*RX_RING_SIZE,
+ np->base_addr + TxRingPtr);
}
static void init_registers(struct net_device *dev)
{
- struct netdev_private *np = dev->priv;
- long ioaddr = dev->base_addr;
+ struct netdev_private *np = netdev_priv(dev);
+ void __iomem *ioaddr = np->base_addr;
int i;
for (i = 0; i < 6; i++)
- writeb(dev->dev_addr[i], ioaddr + StationAddr + i);
+ iowrite8(dev->dev_addr[i], ioaddr + StationAddr + i);
/* Initialize other registers. */
#ifdef __BIG_ENDIAN
#warning Processor architecture undefined
i |= 0x4800;
#endif
- writel(i, ioaddr + PCIBusCfg);
+ iowrite32(i, ioaddr + PCIBusCfg);
np->csr6 = 0;
/* 128 byte Tx threshold;
update_csr6(dev, 0x00022002 | update_link(dev) | __set_rx_mode(dev));
/* Clear and Enable interrupts by setting the interrupt mask. */
- writel(0x1A0F5, ioaddr + IntrStatus);
- writel(0x1A0F5, ioaddr + IntrEnable);
+ iowrite32(0x1A0F5, ioaddr + IntrStatus);
+ iowrite32(0x1A0F5, ioaddr + IntrEnable);
- writel(0, ioaddr + RxStartDemand);
+ iowrite32(0, ioaddr + RxStartDemand);
}
static void tx_timeout(struct net_device *dev)
{
- struct netdev_private *np = dev->priv;
- long ioaddr = dev->base_addr;
+ struct netdev_private *np = netdev_priv(dev);
+ void __iomem *ioaddr = np->base_addr;
printk(KERN_WARNING "%s: Transmit timed out, status %8.8x,"
- " resetting...\n", dev->name, (int)readl(ioaddr + IntrStatus));
+ " resetting...\n", dev->name, ioread32(ioaddr + IntrStatus));
{
int i;
}
printk(KERN_DEBUG "Tx cur %d Tx dirty %d Tx Full %d, q bytes %d.\n",
np->cur_tx, np->dirty_tx, np->tx_full, np->tx_q_bytes);
- printk(KERN_DEBUG "Tx Descriptor addr %xh.\n",readl(ioaddr+0x4C));
+ printk(KERN_DEBUG "Tx Descriptor addr %xh.\n",ioread32(ioaddr+0x4C));
disable_irq(dev->irq);
spin_lock_irq(&np->lock);
* everything.
*/
- writel(1, dev->base_addr+PCIBusCfg);
+ iowrite32(1, np->base_addr+PCIBusCfg);
udelay(1);
free_rxtx_rings(np);
/* Initialize the Rx and Tx rings, along with various 'dev' bits. */
static int alloc_ringdesc(struct net_device *dev)
{
- struct netdev_private *np = dev->priv;
+ struct netdev_private *np = netdev_priv(dev);
np->rx_buf_sz = (dev->mtu <= 1500 ? PKT_BUF_SZ : dev->mtu + 32);
static int start_tx(struct sk_buff *skb, struct net_device *dev)
{
- struct netdev_private *np = dev->priv;
+ struct netdev_private *np = netdev_priv(dev);
unsigned entry;
/* Caution: the write order is important here, set the field
wmb(); /* flush length, buffer1, buffer2 */
np->tx_ring[entry].status = DescOwn;
wmb(); /* flush status and kick the hardware */
- writel(0, dev->base_addr + TxStartDemand);
+ iowrite32(0, np->base_addr + TxStartDemand);
np->tx_q_bytes += skb->len;
/* Work around horrible bug in the chip by marking the queue as full
when we do not have FIFO room for a maximum sized packet. */
static void netdev_tx_done(struct net_device *dev)
{
- struct netdev_private *np = dev->priv;
+ struct netdev_private *np = netdev_priv(dev);
for (; np->cur_tx - np->dirty_tx > 0; np->dirty_tx++) {
int entry = np->dirty_tx % TX_RING_SIZE;
int tx_status = np->tx_ring[entry].status;
static irqreturn_t intr_handler(int irq, void *dev_instance, struct pt_regs *rgs)
{
struct net_device *dev = (struct net_device *)dev_instance;
- struct netdev_private *np = dev->priv;
- long ioaddr = dev->base_addr;
+ struct netdev_private *np = netdev_priv(dev);
+ void __iomem *ioaddr = np->base_addr;
int work_limit = max_interrupt_work;
int handled = 0;
if (!netif_device_present(dev))
return IRQ_NONE;
do {
- u32 intr_status = readl(ioaddr + IntrStatus);
+ u32 intr_status = ioread32(ioaddr + IntrStatus);
/* Acknowledge all of the current interrupt sources ASAP. */
- writel(intr_status & 0x001ffff, ioaddr + IntrStatus);
+ iowrite32(intr_status & 0x001ffff, ioaddr + IntrStatus);
if (debug > 4)
printk(KERN_DEBUG "%s: Interrupt, status %4.4x.\n",
if (intr_status & (IntrRxDone | RxNoBuf))
netdev_rx(dev);
if (intr_status & RxNoBuf)
- writel(0, ioaddr + RxStartDemand);
+ iowrite32(0, ioaddr + RxStartDemand);
if (intr_status & (TxIdle | IntrTxDone) &&
np->cur_tx != np->dirty_tx) {
10*82usec ticks. */
spin_lock(&np->lock);
if (netif_device_present(dev)) {
- writel(AbnormalIntr | TimerInt, ioaddr + IntrEnable);
- writel(10, ioaddr + GPTimer);
+ iowrite32(AbnormalIntr | TimerInt, ioaddr + IntrEnable);
+ iowrite32(10, ioaddr + GPTimer);
}
spin_unlock(&np->lock);
break;
if (debug > 3)
printk(KERN_DEBUG "%s: exiting interrupt, status=%#4.4x.\n",
- dev->name, (int)readl(ioaddr + IntrStatus));
+ dev->name, ioread32(ioaddr + IntrStatus));
return IRQ_RETVAL(handled);
}
for clarity and better register allocation. */
static int netdev_rx(struct net_device *dev)
{
- struct netdev_private *np = dev->priv;
+ struct netdev_private *np = netdev_priv(dev);
int entry = np->cur_rx % RX_RING_SIZE;
int work_limit = np->dirty_rx + RX_RING_SIZE - np->cur_rx;
static void netdev_error(struct net_device *dev, int intr_status)
{
- long ioaddr = dev->base_addr;
- struct netdev_private *np = dev->priv;
+ struct netdev_private *np = netdev_priv(dev);
+ void __iomem *ioaddr = np->base_addr;
if (debug > 2)
printk(KERN_DEBUG "%s: Abnormal event, %8.8x.\n",
if (intr_status & TimerInt) {
/* Re-enable other interrupts. */
if (netif_device_present(dev))
- writel(0x1A0F5, ioaddr + IntrEnable);
+ iowrite32(0x1A0F5, ioaddr + IntrEnable);
}
- np->stats.rx_missed_errors += readl(ioaddr + RxMissed) & 0xffff;
- writel(0, ioaddr + RxStartDemand);
+ np->stats.rx_missed_errors += ioread32(ioaddr + RxMissed) & 0xffff;
+ iowrite32(0, ioaddr + RxStartDemand);
spin_unlock(&np->lock);
}
static struct net_device_stats *get_stats(struct net_device *dev)
{
- long ioaddr = dev->base_addr;
- struct netdev_private *np = dev->priv;
+ struct netdev_private *np = netdev_priv(dev);
+ void __iomem *ioaddr = np->base_addr;
/* The chip only need report frame silently dropped. */
spin_lock_irq(&np->lock);
if (netif_running(dev) && netif_device_present(dev))
- np->stats.rx_missed_errors += readl(ioaddr + RxMissed) & 0xffff;
+ np->stats.rx_missed_errors += ioread32(ioaddr + RxMissed) & 0xffff;
spin_unlock_irq(&np->lock);
return &np->stats;
static u32 __set_rx_mode(struct net_device *dev)
{
- long ioaddr = dev->base_addr;
+ struct netdev_private *np = netdev_priv(dev);
+ void __iomem *ioaddr = np->base_addr;
u32 mc_filter[2]; /* Multicast hash filter */
u32 rx_mode;
i++, mclist = mclist->next) {
int filterbit = (ether_crc(ETH_ALEN, mclist->dmi_addr) >> 26) ^ 0x3F;
filterbit &= 0x3f;
- mc_filter[filterbit >> 5] |= cpu_to_le32(1 << (filterbit & 31));
+ mc_filter[filterbit >> 5] |= 1 << (filterbit & 31);
}
rx_mode = AcceptBroadcast | AcceptMulticast | AcceptMyPhys;
}
- writel(mc_filter[0], ioaddr + MulticastFilter0);
- writel(mc_filter[1], ioaddr + MulticastFilter1);
+ iowrite32(mc_filter[0], ioaddr + MulticastFilter0);
+ iowrite32(mc_filter[1], ioaddr + MulticastFilter1);
return rx_mode;
}
static void set_rx_mode(struct net_device *dev)
{
- struct netdev_private *np = dev->priv;
+ struct netdev_private *np = netdev_priv(dev);
u32 rx_mode = __set_rx_mode(dev);
spin_lock_irq(&np->lock);
update_csr6(dev, (np->csr6 & ~0x00F8) | rx_mode);
static void netdev_get_drvinfo (struct net_device *dev, struct ethtool_drvinfo *info)
{
- struct netdev_private *np = dev->priv;
+ struct netdev_private *np = netdev_priv(dev);
strcpy (info->driver, DRV_NAME);
strcpy (info->version, DRV_VERSION);
static int netdev_get_settings(struct net_device *dev, struct ethtool_cmd *cmd)
{
- struct netdev_private *np = dev->priv;
+ struct netdev_private *np = netdev_priv(dev);
int rc;
spin_lock_irq(&np->lock);
static int netdev_set_settings(struct net_device *dev, struct ethtool_cmd *cmd)
{
- struct netdev_private *np = dev->priv;
+ struct netdev_private *np = netdev_priv(dev);
int rc;
spin_lock_irq(&np->lock);
static int netdev_nway_reset(struct net_device *dev)
{
- struct netdev_private *np = dev->priv;
+ struct netdev_private *np = netdev_priv(dev);
return mii_nway_restart(&np->mii_if);
}
static u32 netdev_get_link(struct net_device *dev)
{
- struct netdev_private *np = dev->priv;
+ struct netdev_private *np = netdev_priv(dev);
return mii_link_ok(&np->mii_if);
}
switch(cmd) {
case SIOCGMIIPHY: /* Get address of MII PHY in use. */
- data->phy_id = ((struct netdev_private *)dev->priv)->phys[0] & 0x1f;
+ data->phy_id = ((struct netdev_private *)netdev_priv(dev))->phys[0] & 0x1f;
/* Fall Through */
case SIOCGMIIREG: /* Read MII PHY register. */
static int netdev_close(struct net_device *dev)
{
- long ioaddr = dev->base_addr;
- struct netdev_private *np = dev->priv;
+ struct netdev_private *np = netdev_priv(dev);
+ void __iomem *ioaddr = np->base_addr;
netif_stop_queue(dev);
if (debug > 1) {
printk(KERN_DEBUG "%s: Shutting down ethercard, status was %8.8x "
- "Config %8.8x.\n", dev->name, (int)readl(ioaddr + IntrStatus),
- (int)readl(ioaddr + NetworkConfig));
+ "Config %8.8x.\n", dev->name, ioread32(ioaddr + IntrStatus),
+ ioread32(ioaddr + NetworkConfig));
printk(KERN_DEBUG "%s: Queue pointers were Tx %d / %d, Rx %d / %d.\n",
dev->name, np->cur_tx, np->dirty_tx, np->cur_rx, np->dirty_rx);
}
spin_lock_irq(&np->lock);
netif_device_detach(dev);
update_csr6(dev, 0);
- writel(0x0000, ioaddr + IntrEnable);
+ iowrite32(0x0000, ioaddr + IntrEnable);
spin_unlock_irq(&np->lock);
free_irq(dev->irq, dev);
wmb();
netif_device_attach(dev);
- if (readl(ioaddr + NetworkConfig) != 0xffffffff)
- np->stats.rx_missed_errors += readl(ioaddr + RxMissed) & 0xffff;
+ if (ioread32(ioaddr + NetworkConfig) != 0xffffffff)
+ np->stats.rx_missed_errors += ioread32(ioaddr + RxMissed) & 0xffff;
#ifdef __i386__
if (debug > 2) {
struct net_device *dev = pci_get_drvdata(pdev);
if (dev) {
+ struct netdev_private *np = netdev_priv(dev);
unregister_netdev(dev);
pci_release_regions(pdev);
-#ifndef USE_IO_OPS
- iounmap((char *)(dev->base_addr));
-#endif
+ pci_iounmap(pdev, np->base_addr);
free_netdev(dev);
}
static int w840_suspend (struct pci_dev *pdev, u32 state)
{
struct net_device *dev = pci_get_drvdata (pdev);
- struct netdev_private *np = dev->priv;
- long ioaddr = dev->base_addr;
+ struct netdev_private *np = netdev_priv(dev);
+ void __iomem *ioaddr = np->base_addr;
rtnl_lock();
if (netif_running (dev)) {
spin_lock_irq(&np->lock);
netif_device_detach(dev);
update_csr6(dev, 0);
- writel(0, ioaddr + IntrEnable);
+ iowrite32(0, ioaddr + IntrEnable);
netif_stop_queue(dev);
spin_unlock_irq(&np->lock);
spin_unlock_wait(&dev->xmit_lock);
synchronize_irq(dev->irq);
- np->stats.rx_missed_errors += readl(ioaddr + RxMissed) & 0xffff;
+ np->stats.rx_missed_errors += ioread32(ioaddr + RxMissed) & 0xffff;
/* no more hardware accesses behind this line. */
if (np->csr6) BUG();
- if (readl(ioaddr + IntrEnable)) BUG();
+ if (ioread32(ioaddr + IntrEnable)) BUG();
/* pci_power_off(pdev, -1); */
static int w840_resume (struct pci_dev *pdev)
{
struct net_device *dev = pci_get_drvdata (pdev);
- struct netdev_private *np = dev->priv;
+ struct netdev_private *np = netdev_priv(dev);
rtnl_lock();
if (netif_device_present(dev))
/* pci_power_on(pdev); */
spin_lock_irq(&np->lock);
- writel(1, dev->base_addr+PCIBusCfg);
- readl(dev->base_addr+PCIBusCfg);
+ iowrite32(1, np->base_addr+PCIBusCfg);
+ ioread32(np->base_addr+PCIBusCfg);
udelay(1);
netif_device_attach(dev);
init_rxtx_rings(dev);