#define PKT_BUF_SZ 1536 /* Size of each temporary Rx buffer.*/
#include <linux/module.h>
+#include <linux/moduleparam.h>
#include <linux/kernel.h>
#include <linux/string.h>
#include <linux/timer.h>
#include <linux/mii.h>
#include <linux/ethtool.h>
#include <linux/crc32.h>
+#include <linux/bitops.h>
#include <asm/processor.h> /* Processor type for cache alignment. */
-#include <asm/bitops.h>
#include <asm/io.h>
#include <asm/irq.h>
#include <asm/uaccess.h>
#ifdef CONFIG_VIA_RHINE_MMIO
#define USE_MMIO
#else
-#undef readb
-#undef readw
-#undef readl
-#undef writeb
-#undef writew
-#undef writel
-#define readb inb
-#define readw inw
-#define readl inl
-#define writeb outb
-#define writew outw
-#define writel outl
#endif
MODULE_AUTHOR("Donald Becker <becker@scyld.com>");
MODULE_DESCRIPTION("VIA Rhine PCI Fast Ethernet driver");
MODULE_LICENSE("GPL");
-MODULE_PARM(max_interrupt_work, "i");
-MODULE_PARM(debug, "i");
-MODULE_PARM(rx_copybreak, "i");
+module_param(max_interrupt_work, int, 0);
+module_param(debug, int, 0);
+module_param(rx_copybreak, int, 0);
MODULE_PARM_DESC(max_interrupt_work, "VIA Rhine maximum events handled per interrupt");
MODULE_PARM_DESC(debug, "VIA Rhine debug level (0-7)");
MODULE_PARM_DESC(rx_copybreak, "VIA Rhine copy breakpoint for copy-only-tiny-frames");
*/
/* Beware of PCI posted writes */
-#define IOSYNC do { readb(dev->base_addr + StationAddr); } while (0)
+#define IOSYNC do { ioread8(ioaddr + StationAddr); } while (0)
static struct pci_device_id rhine_pci_tbl[] =
{
#ifdef USE_MMIO
/* Registers we check that mmio and reg are the same. */
-int mmio_verify_registers[] = {
+static const int mmio_verify_registers[] = {
RxConfig, TxConfig, IntrEnable, ConfigA, ConfigB, ConfigC, ConfigD,
0
};
u8 tx_thresh, rx_thresh;
struct mii_if_info mii_if;
+ void __iomem *base;
};
static int mdio_read(struct net_device *dev, int phy_id, int location);
static inline u32 get_intr_status(struct net_device *dev)
{
- long ioaddr = dev->base_addr;
struct rhine_private *rp = netdev_priv(dev);
+ void __iomem *ioaddr = rp->base;
u32 intr_status;
- intr_status = readw(ioaddr + IntrStatus);
+ intr_status = ioread16(ioaddr + IntrStatus);
/* On Rhine-II, Bit 3 indicates Tx descriptor write-back race. */
if (rp->quirks & rqStatusWBRace)
- intr_status |= readb(ioaddr + IntrStatus2) << 16;
+ intr_status |= ioread8(ioaddr + IntrStatus2) << 16;
return intr_status;
}
*/
static void rhine_power_init(struct net_device *dev)
{
- long ioaddr = dev->base_addr;
struct rhine_private *rp = netdev_priv(dev);
+ void __iomem *ioaddr = rp->base;
u16 wolstat;
if (rp->quirks & rqWOL) {
/* Make sure chip is in power state D0 */
- writeb(readb(ioaddr + StickyHW) & 0xFC, ioaddr + StickyHW);
+ iowrite8(ioread8(ioaddr + StickyHW) & 0xFC, ioaddr + StickyHW);
/* Disable "force PME-enable" */
- writeb(0x80, ioaddr + WOLcgClr);
+ iowrite8(0x80, ioaddr + WOLcgClr);
/* Clear power-event config bits (WOL) */
- writeb(0xFF, ioaddr + WOLcrClr);
+ iowrite8(0xFF, ioaddr + WOLcrClr);
/* More recent cards can manage two additional patterns */
if (rp->quirks & rq6patterns)
- writeb(0x03, ioaddr + WOLcrClr1);
+ iowrite8(0x03, ioaddr + WOLcrClr1);
/* Save power-event status bits */
- wolstat = readb(ioaddr + PwrcsrSet);
+ wolstat = ioread8(ioaddr + PwrcsrSet);
if (rp->quirks & rq6patterns)
- wolstat |= (readb(ioaddr + PwrcsrSet1) & 0x03) << 8;
+ wolstat |= (ioread8(ioaddr + PwrcsrSet1) & 0x03) << 8;
/* Clear power-event status bits */
- writeb(0xFF, ioaddr + PwrcsrClr);
+ iowrite8(0xFF, ioaddr + PwrcsrClr);
if (rp->quirks & rq6patterns)
- writeb(0x03, ioaddr + PwrcsrClr1);
+ iowrite8(0x03, ioaddr + PwrcsrClr1);
if (wolstat) {
char *reason;
static void rhine_chip_reset(struct net_device *dev)
{
- long ioaddr = dev->base_addr;
struct rhine_private *rp = netdev_priv(dev);
+ void __iomem *ioaddr = rp->base;
- writeb(Cmd1Reset, ioaddr + ChipCmd1);
+ iowrite8(Cmd1Reset, ioaddr + ChipCmd1);
IOSYNC;
- if (readb(ioaddr + ChipCmd1) & Cmd1Reset) {
+ if (ioread8(ioaddr + ChipCmd1) & Cmd1Reset) {
printk(KERN_INFO "%s: Reset not complete yet. "
"Trying harder.\n", DRV_NAME);
/* Force reset */
if (rp->quirks & rqForceReset)
- writeb(0x40, ioaddr + MiscCmd);
+ iowrite8(0x40, ioaddr + MiscCmd);
/* Reset can take somewhat longer (rare) */
- RHINE_WAIT_FOR(!(readb(ioaddr + ChipCmd1) & Cmd1Reset));
+ RHINE_WAIT_FOR(!(ioread8(ioaddr + ChipCmd1) & Cmd1Reset));
}
if (debug > 1)
printk(KERN_INFO "%s: Reset %s.\n", dev->name,
- (readb(ioaddr + ChipCmd1) & Cmd1Reset) ?
+ (ioread8(ioaddr + ChipCmd1) & Cmd1Reset) ?
"failed" : "succeeded");
}
#ifdef USE_MMIO
-static void __devinit enable_mmio(long pioaddr, u32 quirks)
+static void enable_mmio(long pioaddr, u32 quirks)
{
int n;
if (quirks & rqRhineI) {
*/
static void __devinit rhine_reload_eeprom(long pioaddr, struct net_device *dev)
{
- long ioaddr = dev->base_addr;
struct rhine_private *rp = netdev_priv(dev);
+ void __iomem *ioaddr = rp->base;
outb(0x20, pioaddr + MACRegEEcsr);
RHINE_WAIT_FOR(!(inb(pioaddr + MACRegEEcsr) & 0x20));
/* Turn off EEPROM-controlled wake-up (magic packet) */
if (rp->quirks & rqWOL)
- writeb(readb(ioaddr + ConfigA) & 0xFE, ioaddr + ConfigA);
+ iowrite8(ioread8(ioaddr + ConfigA) & 0xFC, ioaddr + ConfigA);
}
u32 quirks;
long pioaddr;
long memaddr;
- long ioaddr;
+ void __iomem *ioaddr;
int io_size, phy_id;
const char *name;
+#ifdef USE_MMIO
+ int bar = 1;
+#else
+ int bar = 0;
+#endif
/* when built into the kernel, we only print version if device is found */
#ifndef MODULE
if (rc)
goto err_out_free_netdev;
-#ifdef USE_MMIO
- enable_mmio(pioaddr, quirks);
-
- ioaddr = (long) ioremap(memaddr, io_size);
+ ioaddr = pci_iomap(pdev, bar, io_size);
if (!ioaddr) {
rc = -EIO;
printk(KERN_ERR "ioremap failed for device %s, region 0x%X "
goto err_out_free_res;
}
+#ifdef USE_MMIO
+ enable_mmio(pioaddr, quirks);
+
/* Check that selected MMIO registers match the PIO ones */
i = 0;
while (mmio_verify_registers[i]) {
goto err_out_unmap;
}
}
-#else
- ioaddr = pioaddr;
#endif /* USE_MMIO */
- dev->base_addr = ioaddr;
+ dev->base_addr = (unsigned long)ioaddr;
+ rp->base = ioaddr;
/* Get chip registers into a sane state */
rhine_power_init(dev);
rhine_hw_init(dev, pioaddr);
for (i = 0; i < 6; i++)
- dev->dev_addr[i] = readb(ioaddr + StationAddr + i);
+ dev->dev_addr[i] = ioread8(ioaddr + StationAddr + i);
if (!is_valid_ether_addr(dev->dev_addr)) {
rc = -EIO;
/* For Rhine-I/II, phy_id is loaded from EEPROM */
if (!phy_id)
- phy_id = readb(ioaddr + 0x6C);
+ phy_id = ioread8(ioaddr + 0x6C);
dev->irq = pdev->irq;
#ifdef USE_MMIO
memaddr
#else
- ioaddr
+ (long)ioaddr
#endif
);
return 0;
err_out_unmap:
-#ifdef USE_MMIO
- iounmap((void *)ioaddr);
+ pci_iounmap(pdev, ioaddr);
err_out_free_res:
-#endif
pci_release_regions(pdev);
err_out_free_netdev:
free_netdev(dev);
	return rc;
}
-void free_ring(struct net_device* dev)
+static void free_ring(struct net_device* dev)
{
struct rhine_private *rp = netdev_priv(dev);
static void rhine_check_media(struct net_device *dev, unsigned int init_media)
{
struct rhine_private *rp = netdev_priv(dev);
- long ioaddr = dev->base_addr;
+ void __iomem *ioaddr = rp->base;
mii_check_media(&rp->mii_if, debug, init_media);
if (rp->mii_if.full_duplex)
- writeb(readb(ioaddr + ChipCmd1) | Cmd1FDuplex,
+ iowrite8(ioread8(ioaddr + ChipCmd1) | Cmd1FDuplex,
ioaddr + ChipCmd1);
else
- writeb(readb(ioaddr + ChipCmd1) & ~Cmd1FDuplex,
+ iowrite8(ioread8(ioaddr + ChipCmd1) & ~Cmd1FDuplex,
ioaddr + ChipCmd1);
}
static void init_registers(struct net_device *dev)
{
struct rhine_private *rp = netdev_priv(dev);
- long ioaddr = dev->base_addr;
+ void __iomem *ioaddr = rp->base;
int i;
for (i = 0; i < 6; i++)
- writeb(dev->dev_addr[i], ioaddr + StationAddr + i);
+ iowrite8(dev->dev_addr[i], ioaddr + StationAddr + i);
/* Initialize other registers. */
- writew(0x0006, ioaddr + PCIBusConfig); /* Tune configuration??? */
+ iowrite16(0x0006, ioaddr + PCIBusConfig); /* Tune configuration??? */
/* Configure initial FIFO thresholds. */
- writeb(0x20, ioaddr + TxConfig);
+ iowrite8(0x20, ioaddr + TxConfig);
rp->tx_thresh = 0x20;
rp->rx_thresh = 0x60; /* Written in rhine_set_rx_mode(). */
- writel(rp->rx_ring_dma, ioaddr + RxRingPtr);
- writel(rp->tx_ring_dma, ioaddr + TxRingPtr);
+ iowrite32(rp->rx_ring_dma, ioaddr + RxRingPtr);
+ iowrite32(rp->tx_ring_dma, ioaddr + TxRingPtr);
rhine_set_rx_mode(dev);
/* Enable interrupts by setting the interrupt mask. */
- writew(IntrRxDone | IntrRxErr | IntrRxEmpty| IntrRxOverflow |
+ iowrite16(IntrRxDone | IntrRxErr | IntrRxEmpty| IntrRxOverflow |
IntrRxDropped | IntrRxNoBuf | IntrTxAborted |
IntrTxDone | IntrTxError | IntrTxUnderrun |
IntrPCIErr | IntrStatsMax | IntrLinkChange,
ioaddr + IntrEnable);
- writew(CmdStart | CmdTxOn | CmdRxOn | (Cmd1NoTxPoll << 8),
+ iowrite16(CmdStart | CmdTxOn | CmdRxOn | (Cmd1NoTxPoll << 8),
ioaddr + ChipCmd);
rhine_check_media(dev, 1);
}
/* Enable MII link status auto-polling (required for IntrLinkChange) */
-static void rhine_enable_linkmon(long ioaddr)
+static void rhine_enable_linkmon(void __iomem *ioaddr)
{
- writeb(0, ioaddr + MIICmd);
- writeb(MII_BMSR, ioaddr + MIIRegAddr);
- writeb(0x80, ioaddr + MIICmd);
+ iowrite8(0, ioaddr + MIICmd);
+ iowrite8(MII_BMSR, ioaddr + MIIRegAddr);
+ iowrite8(0x80, ioaddr + MIICmd);
- RHINE_WAIT_FOR((readb(ioaddr + MIIRegAddr) & 0x20));
+ RHINE_WAIT_FOR((ioread8(ioaddr + MIIRegAddr) & 0x20));
- writeb(MII_BMSR | 0x40, ioaddr + MIIRegAddr);
+ iowrite8(MII_BMSR | 0x40, ioaddr + MIIRegAddr);
}
/* Disable MII link status auto-polling (required for MDIO access) */
-static void rhine_disable_linkmon(long ioaddr, u32 quirks)
+static void rhine_disable_linkmon(void __iomem *ioaddr, u32 quirks)
{
- writeb(0, ioaddr + MIICmd);
+ iowrite8(0, ioaddr + MIICmd);
if (quirks & rqRhineI) {
- writeb(0x01, ioaddr + MIIRegAddr); // MII_BMSR
+ iowrite8(0x01, ioaddr + MIIRegAddr); // MII_BMSR
/* Can be called from ISR. Evil. */
mdelay(1);
/* 0x80 must be set immediately before turning it off */
- writeb(0x80, ioaddr + MIICmd);
+ iowrite8(0x80, ioaddr + MIICmd);
- RHINE_WAIT_FOR(readb(ioaddr + MIIRegAddr) & 0x20);
+ RHINE_WAIT_FOR(ioread8(ioaddr + MIIRegAddr) & 0x20);
/* Heh. Now clear 0x80 again. */
- writeb(0, ioaddr + MIICmd);
+ iowrite8(0, ioaddr + MIICmd);
}
else
- RHINE_WAIT_FOR(readb(ioaddr + MIIRegAddr) & 0x80);
+ RHINE_WAIT_FOR(ioread8(ioaddr + MIIRegAddr) & 0x80);
}
/* Read and write over the MII Management Data I/O (MDIO) interface. */
static int mdio_read(struct net_device *dev, int phy_id, int regnum)
{
- long ioaddr = dev->base_addr;
struct rhine_private *rp = netdev_priv(dev);
+ void __iomem *ioaddr = rp->base;
int result;
rhine_disable_linkmon(ioaddr, rp->quirks);
/* rhine_disable_linkmon already cleared MIICmd */
- writeb(phy_id, ioaddr + MIIPhyAddr);
- writeb(regnum, ioaddr + MIIRegAddr);
- writeb(0x40, ioaddr + MIICmd); /* Trigger read */
- RHINE_WAIT_FOR(!(readb(ioaddr + MIICmd) & 0x40));
- result = readw(ioaddr + MIIData);
+ iowrite8(phy_id, ioaddr + MIIPhyAddr);
+ iowrite8(regnum, ioaddr + MIIRegAddr);
+ iowrite8(0x40, ioaddr + MIICmd); /* Trigger read */
+ RHINE_WAIT_FOR(!(ioread8(ioaddr + MIICmd) & 0x40));
+ result = ioread16(ioaddr + MIIData);
rhine_enable_linkmon(ioaddr);
return result;
static void mdio_write(struct net_device *dev, int phy_id, int regnum, int value)
{
struct rhine_private *rp = netdev_priv(dev);
- long ioaddr = dev->base_addr;
+ void __iomem *ioaddr = rp->base;
rhine_disable_linkmon(ioaddr, rp->quirks);
/* rhine_disable_linkmon already cleared MIICmd */
- writeb(phy_id, ioaddr + MIIPhyAddr);
- writeb(regnum, ioaddr + MIIRegAddr);
- writew(value, ioaddr + MIIData);
- writeb(0x20, ioaddr + MIICmd); /* Trigger write */
- RHINE_WAIT_FOR(!(readb(ioaddr + MIICmd) & 0x20));
+ iowrite8(phy_id, ioaddr + MIIPhyAddr);
+ iowrite8(regnum, ioaddr + MIIRegAddr);
+ iowrite16(value, ioaddr + MIIData);
+ iowrite8(0x20, ioaddr + MIICmd); /* Trigger write */
+ RHINE_WAIT_FOR(!(ioread8(ioaddr + MIICmd) & 0x20));
rhine_enable_linkmon(ioaddr);
}
static int rhine_open(struct net_device *dev)
{
struct rhine_private *rp = netdev_priv(dev);
- long ioaddr = dev->base_addr;
+ void __iomem *ioaddr = rp->base;
int rc;
rc = request_irq(rp->pdev->irq, &rhine_interrupt, SA_SHIRQ, dev->name,
dev->name, rp->pdev->irq);
rc = alloc_ring(dev);
- if (rc)
+ if (rc) {
+ free_irq(rp->pdev->irq, dev);
return rc;
+ }
alloc_rbufs(dev);
alloc_tbufs(dev);
rhine_chip_reset(dev);
if (debug > 2)
printk(KERN_DEBUG "%s: Done rhine_open(), status %4.4x "
"MII status: %4.4x.\n",
- dev->name, readw(ioaddr + ChipCmd),
+ dev->name, ioread16(ioaddr + ChipCmd),
mdio_read(dev, rp->mii_if.phy_id, MII_BMSR));
netif_start_queue(dev);
static void rhine_tx_timeout(struct net_device *dev)
{
struct rhine_private *rp = netdev_priv(dev);
- long ioaddr = dev->base_addr;
+ void __iomem *ioaddr = rp->base;
printk(KERN_WARNING "%s: Transmit timed out, status %4.4x, PHY status "
"%4.4x, resetting...\n",
- dev->name, readw(ioaddr + IntrStatus),
+ dev->name, ioread16(ioaddr + IntrStatus),
mdio_read(dev, rp->mii_if.phy_id, MII_BMSR));
/* protect against concurrent rx interrupts */
static int rhine_start_tx(struct sk_buff *skb, struct net_device *dev)
{
struct rhine_private *rp = netdev_priv(dev);
- long ioaddr = dev->base_addr;
+ void __iomem *ioaddr = rp->base;
unsigned entry;
/* Caution: the write order is important here, set the field
rp->tx_skbuff[entry] = skb;
if ((rp->quirks & rqRhineI) &&
- (((long)skb->data & 3) || skb_shinfo(skb)->nr_frags != 0 || skb->ip_summed == CHECKSUM_HW)) {
+ (((unsigned long)skb->data & 3) || skb_shinfo(skb)->nr_frags != 0 || skb->ip_summed == CHECKSUM_HW)) {
/* Must use alignment buffer. */
if (skb->len > PKT_BUF_SZ) {
/* packet too long, drop it */
/* Non-x86 Todo: explicitly flush cache lines here. */
/* Wake the potentially-idle transmit channel */
- writeb(readb(ioaddr + ChipCmd1) | Cmd1TxDemand,
+ iowrite8(ioread8(ioaddr + ChipCmd1) | Cmd1TxDemand,
ioaddr + ChipCmd1);
IOSYNC;
static irqreturn_t rhine_interrupt(int irq, void *dev_instance, struct pt_regs *rgs)
{
struct net_device *dev = dev_instance;
- long ioaddr;
+ struct rhine_private *rp = netdev_priv(dev);
+ void __iomem *ioaddr = rp->base;
u32 intr_status;
int boguscnt = max_interrupt_work;
int handled = 0;
- ioaddr = dev->base_addr;
-
while ((intr_status = get_intr_status(dev))) {
handled = 1;
/* Acknowledge all of the current interrupt sources ASAP. */
if (intr_status & IntrTxDescRace)
- writeb(0x08, ioaddr + IntrStatus2);
- writew(intr_status & 0xffff, ioaddr + IntrStatus);
+ iowrite8(0x08, ioaddr + IntrStatus2);
+ iowrite16(intr_status & 0xffff, ioaddr + IntrStatus);
IOSYNC;
if (debug > 4)
if (intr_status & (IntrTxErrSummary | IntrTxDone)) {
if (intr_status & IntrTxErrSummary) {
/* Avoid scavenging before Tx engine turned off */
- RHINE_WAIT_FOR(!(readb(ioaddr+ChipCmd) & CmdTxOn));
+ RHINE_WAIT_FOR(!(ioread8(ioaddr+ChipCmd) & CmdTxOn));
if (debug > 2 &&
- readb(ioaddr+ChipCmd) & CmdTxOn)
+ ioread8(ioaddr+ChipCmd) & CmdTxOn)
printk(KERN_WARNING "%s: "
"rhine_interrupt() Tx engine"
"still on.\n", dev->name);
if (debug > 3)
printk(KERN_DEBUG "%s: exiting interrupt, status=%8.8x.\n",
- dev->name, readw(ioaddr + IntrStatus));
+ dev->name, ioread16(ioaddr + IntrStatus));
return IRQ_RETVAL(handled);
}
* these, for others the counters are set to 1 when written to and
* instead cleared when read. So we clear them both ways ...
*/
-static inline void clear_tally_counters(const long ioaddr)
+static inline void clear_tally_counters(void __iomem *ioaddr)
{
- writel(0, ioaddr + RxMissed);
- readw(ioaddr + RxCRCErrs);
- readw(ioaddr + RxMissed);
+ iowrite32(0, ioaddr + RxMissed);
+ ioread16(ioaddr + RxCRCErrs);
+ ioread16(ioaddr + RxMissed);
}
static void rhine_restart_tx(struct net_device *dev) {
struct rhine_private *rp = netdev_priv(dev);
- long ioaddr = dev->base_addr;
+ void __iomem *ioaddr = rp->base;
int entry = rp->dirty_tx % TX_RING_SIZE;
u32 intr_status;
if ((intr_status & IntrTxErrSummary) == 0) {
/* We know better than the chip where it should continue. */
- writel(rp->tx_ring_dma + entry * sizeof(struct tx_desc),
+ iowrite32(rp->tx_ring_dma + entry * sizeof(struct tx_desc),
ioaddr + TxRingPtr);
- writeb(readb(ioaddr + ChipCmd) | CmdTxOn,
+ iowrite8(ioread8(ioaddr + ChipCmd) | CmdTxOn,
ioaddr + ChipCmd);
- writeb(readb(ioaddr + ChipCmd1) | Cmd1TxDemand,
+ iowrite8(ioread8(ioaddr + ChipCmd1) | Cmd1TxDemand,
ioaddr + ChipCmd1);
IOSYNC;
}
static void rhine_error(struct net_device *dev, int intr_status)
{
struct rhine_private *rp = netdev_priv(dev);
- long ioaddr = dev->base_addr;
+ void __iomem *ioaddr = rp->base;
spin_lock(&rp->lock);
if (intr_status & IntrLinkChange)
rhine_check_media(dev, 0);
if (intr_status & IntrStatsMax) {
- rp->stats.rx_crc_errors += readw(ioaddr + RxCRCErrs);
- rp->stats.rx_missed_errors += readw(ioaddr + RxMissed);
+ rp->stats.rx_crc_errors += ioread16(ioaddr + RxCRCErrs);
+ rp->stats.rx_missed_errors += ioread16(ioaddr + RxMissed);
clear_tally_counters(ioaddr);
}
if (intr_status & IntrTxAborted) {
}
if (intr_status & IntrTxUnderrun) {
if (rp->tx_thresh < 0xE0)
- writeb(rp->tx_thresh += 0x20, ioaddr + TxConfig);
+ iowrite8(rp->tx_thresh += 0x20, ioaddr + TxConfig);
if (debug > 1)
printk(KERN_INFO "%s: Transmitter underrun, Tx "
"threshold now %2.2x.\n",
(intr_status & (IntrTxAborted |
IntrTxUnderrun | IntrTxDescRace)) == 0) {
if (rp->tx_thresh < 0xE0) {
- writeb(rp->tx_thresh += 0x20, ioaddr + TxConfig);
+ iowrite8(rp->tx_thresh += 0x20, ioaddr + TxConfig);
}
if (debug > 1)
printk(KERN_INFO "%s: Unspecified error. Tx "
static struct net_device_stats *rhine_get_stats(struct net_device *dev)
{
struct rhine_private *rp = netdev_priv(dev);
- long ioaddr = dev->base_addr;
+ void __iomem *ioaddr = rp->base;
unsigned long flags;
spin_lock_irqsave(&rp->lock, flags);
- rp->stats.rx_crc_errors += readw(ioaddr + RxCRCErrs);
- rp->stats.rx_missed_errors += readw(ioaddr + RxMissed);
+ rp->stats.rx_crc_errors += ioread16(ioaddr + RxCRCErrs);
+ rp->stats.rx_missed_errors += ioread16(ioaddr + RxMissed);
clear_tally_counters(ioaddr);
spin_unlock_irqrestore(&rp->lock, flags);
static void rhine_set_rx_mode(struct net_device *dev)
{
struct rhine_private *rp = netdev_priv(dev);
- long ioaddr = dev->base_addr;
+ void __iomem *ioaddr = rp->base;
u32 mc_filter[2]; /* Multicast hash filter */
u8 rx_mode; /* Note: 0x02=accept runt, 0x01=accept errs */
printk(KERN_NOTICE "%s: Promiscuous mode enabled.\n",
dev->name);
rx_mode = 0x1C;
- writel(0xffffffff, ioaddr + MulticastFilter0);
- writel(0xffffffff, ioaddr + MulticastFilter1);
+ iowrite32(0xffffffff, ioaddr + MulticastFilter0);
+ iowrite32(0xffffffff, ioaddr + MulticastFilter1);
} else if ((dev->mc_count > multicast_filter_limit)
|| (dev->flags & IFF_ALLMULTI)) {
/* Too many to match, or accept all multicasts. */
- writel(0xffffffff, ioaddr + MulticastFilter0);
- writel(0xffffffff, ioaddr + MulticastFilter1);
+ iowrite32(0xffffffff, ioaddr + MulticastFilter0);
+ iowrite32(0xffffffff, ioaddr + MulticastFilter1);
rx_mode = 0x0C;
} else {
struct dev_mc_list *mclist;
mc_filter[bit_nr >> 5] |= 1 << (bit_nr & 31);
}
- writel(mc_filter[0], ioaddr + MulticastFilter0);
- writel(mc_filter[1], ioaddr + MulticastFilter1);
+ iowrite32(mc_filter[0], ioaddr + MulticastFilter0);
+ iowrite32(mc_filter[1], ioaddr + MulticastFilter1);
rx_mode = 0x0C;
}
- writeb(rp->rx_thresh | rx_mode, ioaddr + RxConfig);
+ iowrite8(rp->rx_thresh | rx_mode, ioaddr + RxConfig);
}
static void netdev_get_drvinfo(struct net_device *dev, struct ethtool_drvinfo *info)
static int rhine_close(struct net_device *dev)
{
- long ioaddr = dev->base_addr;
struct rhine_private *rp = netdev_priv(dev);
+ void __iomem *ioaddr = rp->base;
spin_lock_irq(&rp->lock);
if (debug > 1)
printk(KERN_DEBUG "%s: Shutting down ethercard, "
"status was %4.4x.\n",
- dev->name, readw(ioaddr + ChipCmd));
+ dev->name, ioread16(ioaddr + ChipCmd));
/* Switch to loopback mode to avoid hardware races. */
- writeb(rp->tx_thresh | 0x02, ioaddr + TxConfig);
+ iowrite8(rp->tx_thresh | 0x02, ioaddr + TxConfig);
/* Disable interrupts by clearing the interrupt mask. */
- writew(0x0000, ioaddr + IntrEnable);
+ iowrite16(0x0000, ioaddr + IntrEnable);
/* Stop the chip's Tx and Rx processes. */
- writew(CmdStop, ioaddr + ChipCmd);
+ iowrite16(CmdStop, ioaddr + ChipCmd);
spin_unlock_irq(&rp->lock);
static void __devexit rhine_remove_one(struct pci_dev *pdev)
{
struct net_device *dev = pci_get_drvdata(pdev);
+ struct rhine_private *rp = netdev_priv(dev);
unregister_netdev(dev);
+ pci_iounmap(pdev, rp->base);
pci_release_regions(pdev);
-#ifdef USE_MMIO
- iounmap((char *)(dev->base_addr));
-#endif
-
free_netdev(dev);
pci_disable_device(pdev);
pci_set_drvdata(pdev, NULL);
struct pci_dev *pdev = to_pci_dev(gendev);
struct net_device *dev = pci_get_drvdata(pdev);
struct rhine_private *rp = netdev_priv(dev);
+ void __iomem *ioaddr = rp->base;
- long ioaddr = dev->base_addr;
+ if (!(rp->quirks & rqWOL))
+ return; /* Nothing to do for non-WOL adapters */
rhine_power_init(dev);
/* Make sure we use pattern 0, 1 and not 4, 5 */
if (rp->quirks & rq6patterns)
- writeb(0x04, ioaddr + 0xA7);
-
- if (rp->wolopts & WAKE_MAGIC)
- writeb(WOLmagic, ioaddr + WOLcrSet);
+ iowrite8(0x04, ioaddr + 0xA7);
+
+ if (rp->wolopts & WAKE_MAGIC) {
+ iowrite8(WOLmagic, ioaddr + WOLcrSet);
+ /*
+ * Turn EEPROM-controlled wake-up back on -- some hardware may
+ * not cooperate otherwise.
+ */
+ iowrite8(ioread8(ioaddr + ConfigA) | 0x03, ioaddr + ConfigA);
+ }
if (rp->wolopts & (WAKE_BCAST|WAKE_MCAST))
- writeb(WOLbmcast, ioaddr + WOLcgSet);
+ iowrite8(WOLbmcast, ioaddr + WOLcgSet);
if (rp->wolopts & WAKE_PHY)
- writeb(WOLlnkon | WOLlnkoff, ioaddr + WOLcrSet);
+ iowrite8(WOLlnkon | WOLlnkoff, ioaddr + WOLcrSet);
if (rp->wolopts & WAKE_UCAST)
- writeb(WOLucast, ioaddr + WOLcrSet);
+ iowrite8(WOLucast, ioaddr + WOLcrSet);
- /* Enable legacy WOL (for old motherboards) */
- writeb(0x01, ioaddr + PwcfgSet);
- writeb(readb(ioaddr + StickyHW) | 0x04, ioaddr + StickyHW);
+ if (rp->wolopts) {
+ /* Enable legacy WOL (for old motherboards) */
+ iowrite8(0x01, ioaddr + PwcfgSet);
+ iowrite8(ioread8(ioaddr + StickyHW) | 0x04, ioaddr + StickyHW);
+ }
/* Hit power state D3 (sleep) */
- writeb(readb(ioaddr + StickyHW) | 0x03, ioaddr + StickyHW);
+ iowrite8(ioread8(ioaddr + StickyHW) | 0x03, ioaddr + StickyHW);
/* TODO: Check use of pci_enable_wake() */
}
#ifdef CONFIG_PM
-static int rhine_suspend(struct pci_dev *pdev, u32 state)
+static int rhine_suspend(struct pci_dev *pdev, pm_message_t state)
{
struct net_device *dev = pci_get_drvdata(pdev);
struct rhine_private *rp = netdev_priv(dev);
return 0;
netif_device_detach(dev);
- pci_save_state(pdev, pdev->saved_config_space);
+ pci_save_state(pdev);
spin_lock_irqsave(&rp->lock, flags);
rhine_shutdown(&pdev->dev);
spin_unlock_irqrestore(&rp->lock, flags);
+ free_irq(dev->irq, dev);
return 0;
}
if (!netif_running(dev))
return 0;
- ret = pci_set_power_state(pdev, 0);
+ if (request_irq(dev->irq, rhine_interrupt, SA_SHIRQ, dev->name, dev))
+ printk(KERN_ERR "via-rhine %s: request_irq failed\n", dev->name);
+
+ ret = pci_set_power_state(pdev, PCI_D0);
if (debug > 1)
printk(KERN_INFO "%s: Entering power state D0 %s (%d).\n",
dev->name, ret ? "failed" : "succeeded", ret);
- pci_restore_state(pdev, pdev->saved_config_space);
+ pci_restore_state(pdev);
spin_lock_irqsave(&rp->lock, flags);
#ifdef USE_MMIO