/* Size of a pre-allocated Rx buffer: <Ethernet MTU> + slack. */
#define PKT_BUF_SZ 1536
-#if !defined(__OPTIMIZE__) || !defined(__KERNEL__)
-#warning You must compile this file with the correct options!
-#warning See the last lines of the source file.
-#error You must compile this driver with "-O".
-#endif
-
#include <linux/config.h>
-#include <linux/version.h>
#include <linux/module.h>
#include <linux/kernel.h>
#include <linux/init.h>
#include <linux/mii.h>
#include <linux/delay.h>
+#include <linux/bitops.h>
-#include <asm/bitops.h>
#include <asm/io.h>
#include <asm/uaccess.h>
#include <asm/irq.h>
#include <linux/skbuff.h>
#include <linux/ethtool.h>
-/* enable PIO instead of MMIO, if CONFIG_EEPRO100_PIO is selected */
-#ifdef CONFIG_EEPRO100_PIO
-#define USE_IO 1
-#endif
-
+static int use_io;
static int debug = -1;
#define DEBUG_DEFAULT (NETIF_MSG_DRV | \
NETIF_MSG_HW | \
MODULE_AUTHOR("Maintainer: Andrey V. Savochkin <saw@saw.sw.com.sg>");
MODULE_DESCRIPTION("Intel i82557/i82558/i82559 PCI EtherExpressPro driver");
MODULE_LICENSE("GPL");
-MODULE_PARM(debug, "i");
-MODULE_PARM(options, "1-" __MODULE_STRING(8) "i");
-MODULE_PARM(full_duplex, "1-" __MODULE_STRING(8) "i");
-MODULE_PARM(congenb, "i");
-MODULE_PARM(txfifo, "i");
-MODULE_PARM(rxfifo, "i");
-MODULE_PARM(txdmacount, "i");
-MODULE_PARM(rxdmacount, "i");
-MODULE_PARM(rx_copybreak, "i");
-MODULE_PARM(max_interrupt_work, "i");
-MODULE_PARM(multicast_filter_limit, "i");
+module_param(use_io, int, 0);
+module_param(debug, int, 0);
+module_param_array(options, int, NULL, 0);
+module_param_array(full_duplex, int, NULL, 0);
+module_param(congenb, int, 0);
+module_param(txfifo, int, 0);
+module_param(rxfifo, int, 0);
+module_param(txdmacount, int, 0);
+module_param(rxdmacount, int, 0);
+module_param(rx_copybreak, int, 0);
+module_param(max_interrupt_work, int, 0);
+module_param(multicast_filter_limit, int, 0);
MODULE_PARM_DESC(debug, "debug level (0-6)");
MODULE_PARM_DESC(options, "Bits 0-3: transceiver type, bit 4: full duplex, bit 5: 100Mbps");
MODULE_PARM_DESC(full_duplex, "full duplex setting(s) (1)");
MODULE_PARM_DESC(congenb, "Enable congestion control (1)");
MODULE_PARM_DESC(txfifo, "Tx FIFO threshold in 4 byte units, (0-15)");
MODULE_PARM_DESC(rxfifo, "Rx FIFO threshold in 4 byte units, (0-15)");
-MODULE_PARM_DESC(txdmaccount, "Tx DMA burst length; 128 - disable (0-128)");
-MODULE_PARM_DESC(rxdmaccount, "Rx DMA burst length; 128 - disable (0-128)");
+MODULE_PARM_DESC(txdmacount, "Tx DMA burst length; 128 - disable (0-128)");
+MODULE_PARM_DESC(rxdmacount, "Rx DMA burst length; 128 - disable (0-128)");
MODULE_PARM_DESC(rx_copybreak, "copy breakpoint for copy-only-tiny-frames");
MODULE_PARM_DESC(max_interrupt_work, "maximum events handled per interrupt");
MODULE_PARM_DESC(multicast_filter_limit, "maximum number of filtered multicast addresses");
#define RUN_AT(x) (jiffies + (x))
-/* ACPI power states don't universally work (yet) */
-#ifndef CONFIG_PM
-#undef pci_set_power_state
-#define pci_set_power_state null_set_power_state
-static inline int null_set_power_state(struct pci_dev *dev, int state)
-{
- return 0;
-}
-#endif /* CONFIG_PM */
-
#define netdevice_start(dev)
#define netdevice_stop(dev)
#define netif_set_tx_timeout(dev, tf, tm) \
*/
-static int speedo_found1(struct pci_dev *pdev, long ioaddr, int fnd_cnt, int acpi_idle_state);
+static int speedo_found1(struct pci_dev *pdev, void __iomem *ioaddr, int fnd_cnt, int acpi_idle_state);
enum pci_flags_bit {
PCI_USES_IO=1, PCI_USES_MEM=2, PCI_USES_MASTER=4,
PCI_ADDR0=0x10<<0, PCI_ADDR1=0x10<<1, PCI_ADDR2=0x10<<2, PCI_ADDR3=0x10<<3,
};
-static inline unsigned int io_inw(unsigned long port)
-{
- return inw(port);
-}
-static inline void io_outw(unsigned int val, unsigned long port)
-{
- outw(val, port);
-}
-
-#ifndef USE_IO
-/* Currently alpha headers define in/out macros.
- Undefine them. 2000/03/30 SAW */
-#undef inb
-#undef inw
-#undef inl
-#undef outb
-#undef outw
-#undef outl
-#define inb readb
-#define inw readw
-#define inl readl
-#define outb writeb
-#define outw writew
-#define outl writel
-#endif
-
/* Offsets to the various registers.
All accesses need not be longword aligned. */
enum speedo_offsets {
Unfortunately, all the positions have been shifted since then.
A new re-alignment is required. 2000/03/06 SAW */
struct speedo_private {
+ void __iomem *regs;
struct TxFD *tx_ring; /* Commands (usually CmdTxPacket). */
struct RxFD *rx_ringp[RX_RING_SIZE]; /* Rx descriptor, used as ring. */
/* The addresses of a Tx/Rx-in-place packets/buffers. */
unsigned short partner; /* Link partner caps. */
struct mii_if_info mii_if; /* MII API hooks, info */
u32 msg_enable; /* debug message level */
-#ifdef CONFIG_PM
- u32 pm_state[16];
-#endif
};
/* The parameters for a CmdConfigure operation.
static int eepro100_init_one(struct pci_dev *pdev,
const struct pci_device_id *ent);
-static int do_eeprom_cmd(long ioaddr, int cmd, int cmd_len);
+static int do_eeprom_cmd(void __iomem *ioaddr, int cmd, int cmd_len);
static int mdio_read(struct net_device *dev, int phy_id, int location);
static void mdio_write(struct net_device *dev, int phy_id, int location, int value);
static int speedo_open(struct net_device *dev);
static int speedo_ioctl(struct net_device *dev, struct ifreq *rq, int cmd);
static void set_rx_mode(struct net_device *dev);
static void speedo_show_state(struct net_device *dev);
+static struct ethtool_ops ethtool_ops;
\f
/* How to wait for the command unit to accept a command.
Typically this takes 0 ticks. */
-static inline unsigned char wait_for_cmd_done(struct net_device *dev)
+static inline unsigned char wait_for_cmd_done(struct net_device *dev,
+ struct speedo_private *sp)
{
int wait = 1000;
- long cmd_ioaddr = dev->base_addr + SCBCmd;
+ void __iomem *cmd_ioaddr = sp->regs + SCBCmd;
unsigned char r;
do {
udelay(1);
- r = inb(cmd_ioaddr);
+ r = ioread8(cmd_ioaddr);
} while(r && --wait >= 0);
if (wait < 0)
static int __devinit eepro100_init_one (struct pci_dev *pdev,
const struct pci_device_id *ent)
{
- unsigned long ioaddr;
- int irq;
+ void __iomem *ioaddr;
+ int irq, pci_bar;
int acpi_idle_state = 0, pm;
static int cards_found /* = 0 */;
+ unsigned long pci_base;
#ifndef MODULE
/* when built-in, we only print version if device is found */
}
irq = pdev->irq;
-#ifdef USE_IO
- ioaddr = pci_resource_start(pdev, 1);
+ pci_bar = use_io ? 1 : 0;
+ pci_base = pci_resource_start(pdev, pci_bar);
if (DEBUG & NETIF_MSG_PROBE)
- printk("Found Intel i82557 PCI Speedo at I/O %#lx, IRQ %d.\n",
- ioaddr, irq);
-#else
- ioaddr = (unsigned long)ioremap(pci_resource_start(pdev, 0),
- pci_resource_len(pdev, 0));
+ printk("Found Intel i82557 PCI Speedo at %#lx, IRQ %d.\n",
+ pci_base, irq);
+
+ ioaddr = pci_iomap(pdev, pci_bar, 0);
if (!ioaddr) {
- printk (KERN_ERR "eepro100: cannot remap MMIO region %lx @ %lx\n",
- pci_resource_len(pdev, 0), pci_resource_start(pdev, 0));
+ printk (KERN_ERR "eepro100: cannot remap IO\n");
goto err_out_free_mmio_region;
}
- if (DEBUG & NETIF_MSG_PROBE)
- printk("Found Intel i82557 PCI Speedo, MMIO at %#lx, IRQ %d.\n",
- pci_resource_start(pdev, 0), irq);
-#endif
-
if (speedo_found1(pdev, ioaddr, cards_found, acpi_idle_state) == 0)
cards_found++;
return 0;
err_out_iounmap: ;
-#ifndef USE_IO
- iounmap ((void *)ioaddr);
-#endif
+ pci_iounmap(pdev, ioaddr);
err_out_free_mmio_region:
release_mem_region(pci_resource_start(pdev, 0), pci_resource_len(pdev, 0));
err_out_free_pio_region:
#endif
static int __devinit speedo_found1(struct pci_dev *pdev,
- long ioaddr, int card_idx, int acpi_idle_state)
+ void __iomem *ioaddr, int card_idx, int acpi_idle_state)
{
struct net_device *dev;
struct speedo_private *sp;
The size test is for 6 bit vs. 8 bit address serial EEPROMs.
*/
{
- unsigned long iobase;
+ void __iomem *iobase;
int read_cmd, ee_size;
u16 sum;
int j;
/* Use IO only to avoid postponed writes and satisfy EEPROM timing
requirements. */
- iobase = pci_resource_start(pdev, 1);
+ iobase = pci_iomap(pdev, 1, pci_resource_len(pdev, 1));
+ if (!iobase)
+ goto err_free_unlock;
if ((do_eeprom_cmd(iobase, EE_READ_CMD << 24, 27) & 0xffe0000)
== 0xffe0000) {
ee_size = 0x100;
/* Don't unregister_netdev(dev); as the EEPro may actually be
usable, especially if the MAC address is set later.
On the other hand, it may be unusable if MDI data is corrupted. */
+
+ pci_iounmap(pdev, iobase);
}
/* Reset the chip: stop Tx and Rx processes and clear counters.
This takes less than 10usec and will easily finish before the next
action. */
- outl(PortReset, ioaddr + SCBPort);
- inl(ioaddr + SCBPort);
+ iowrite32(PortReset, ioaddr + SCBPort);
+ ioread32(ioaddr + SCBPort);
udelay(10);
if (eeprom[3] & 0x0100)
for (i = 0; i < 5; i++)
printk("%2.2X:", dev->dev_addr[i]);
printk("%2.2X, ", dev->dev_addr[i]);
-#ifdef USE_IO
- printk("I/O at %#3lx, ", ioaddr);
-#endif
printk("IRQ %d.\n", pdev->irq);
- /* we must initialize base_addr early, for mdio_{read,write} */
- dev->base_addr = ioaddr;
+ sp = netdev_priv(dev);
+
+ /* we must initialize this early, for mdio_{read,write} */
+ sp->regs = ioaddr;
#if 1 || defined(kernel_bloat)
/* OK, this is pure kernel bloat. I don't like it when other drivers
self_test_results = (s32*) ((((long) tx_ring_space) + 15) & ~0xf);
self_test_results[0] = 0;
self_test_results[1] = -1;
- outl(tx_ring_dma | PortSelfTest, ioaddr + SCBPort);
+ iowrite32(tx_ring_dma | PortSelfTest, ioaddr + SCBPort);
do {
udelay(10);
} while (self_test_results[1] == -1 && --boguscnt >= 0);
}
#endif /* kernel_bloat */
- outl(PortReset, ioaddr + SCBPort);
- inl(ioaddr + SCBPort);
+ iowrite32(PortReset, ioaddr + SCBPort);
+ ioread32(ioaddr + SCBPort);
udelay(10);
/* Return the chip to its original power state. */
dev->irq = pdev->irq;
- sp = netdev_priv(dev);
sp->pdev = pdev;
sp->msg_enable = DEBUG;
sp->acpi_pwr = acpi_idle_state;
dev->get_stats = &speedo_get_stats;
dev->set_multicast_list = &set_rx_mode;
dev->do_ioctl = &speedo_ioctl;
+ SET_ETHTOOL_OPS(dev, ðtool_ops);
#ifdef CONFIG_NET_POLL_CONTROLLER
dev->poll_controller = &poll_speedo;
#endif
return -1;
}
-static void do_slow_command(struct net_device *dev, int cmd)
+static void do_slow_command(struct net_device *dev, struct speedo_private *sp, int cmd)
{
- long cmd_ioaddr = dev->base_addr + SCBCmd;
+ void __iomem *cmd_ioaddr = sp->regs + SCBCmd;
int wait = 0;
do
- if (inb(cmd_ioaddr) == 0) break;
+ if (ioread8(cmd_ioaddr) == 0) break;
while(++wait <= 200);
if (wait > 100)
printk(KERN_ERR "Command %4.4x never accepted (%d polls)!\n",
- inb(cmd_ioaddr), wait);
+ ioread8(cmd_ioaddr), wait);
- outb(cmd, cmd_ioaddr);
+ iowrite8(cmd, cmd_ioaddr);
for (wait = 0; wait <= 100; wait++)
- if (inb(cmd_ioaddr) == 0) return;
+ if (ioread8(cmd_ioaddr) == 0) return;
for (; wait <= 20000; wait++)
- if (inb(cmd_ioaddr) == 0) return;
+ if (ioread8(cmd_ioaddr) == 0) return;
else udelay(1);
printk(KERN_ERR "Command %4.4x was not accepted after %d polls!"
" Current status %8.8x.\n",
- cmd, wait, inl(dev->base_addr + SCBStatus));
+ cmd, wait, ioread32(sp->regs + SCBStatus));
}
/* Serial EEPROM section.
interval for serial EEPROM. However, it looks like that there is an
additional requirement dictating larger udelay's in the code below.
2000/05/24 SAW */
-static int __devinit do_eeprom_cmd(long ioaddr, int cmd, int cmd_len)
+static int __devinit do_eeprom_cmd(void __iomem *ioaddr, int cmd, int cmd_len)
{
unsigned retval = 0;
- long ee_addr = ioaddr + SCBeeprom;
+ void __iomem *ee_addr = ioaddr + SCBeeprom;
- io_outw(EE_ENB, ee_addr); udelay(2);
- io_outw(EE_ENB | EE_SHIFT_CLK, ee_addr); udelay(2);
+ iowrite16(EE_ENB, ee_addr); udelay(2);
+ iowrite16(EE_ENB | EE_SHIFT_CLK, ee_addr); udelay(2);
/* Shift the command bits out. */
do {
short dataval = (cmd & (1 << cmd_len)) ? EE_WRITE_1 : EE_WRITE_0;
- io_outw(dataval, ee_addr); udelay(2);
- io_outw(dataval | EE_SHIFT_CLK, ee_addr); udelay(2);
- retval = (retval << 1) | ((io_inw(ee_addr) & EE_DATA_READ) ? 1 : 0);
+ iowrite16(dataval, ee_addr); udelay(2);
+ iowrite16(dataval | EE_SHIFT_CLK, ee_addr); udelay(2);
+ retval = (retval << 1) | ((ioread16(ee_addr) & EE_DATA_READ) ? 1 : 0);
} while (--cmd_len >= 0);
- io_outw(EE_ENB, ee_addr); udelay(2);
+ iowrite16(EE_ENB, ee_addr); udelay(2);
/* Terminate the EEPROM access. */
- io_outw(EE_ENB & ~EE_CS, ee_addr);
+ iowrite16(EE_ENB & ~EE_CS, ee_addr);
return retval;
}
static int mdio_read(struct net_device *dev, int phy_id, int location)
{
- long ioaddr = dev->base_addr;
+ struct speedo_private *sp = netdev_priv(dev);
+ void __iomem *ioaddr = sp->regs;
int val, boguscnt = 64*10; /* <64 usec. to complete, typ 27 ticks */
- outl(0x08000000 | (location<<16) | (phy_id<<21), ioaddr + SCBCtrlMDI);
+ iowrite32(0x08000000 | (location<<16) | (phy_id<<21), ioaddr + SCBCtrlMDI);
do {
- val = inl(ioaddr + SCBCtrlMDI);
+ val = ioread32(ioaddr + SCBCtrlMDI);
if (--boguscnt < 0) {
printk(KERN_ERR " mdio_read() timed out with val = %8.8x.\n", val);
break;
static void mdio_write(struct net_device *dev, int phy_id, int location, int value)
{
- long ioaddr = dev->base_addr;
+ struct speedo_private *sp = netdev_priv(dev);
+ void __iomem *ioaddr = sp->regs;
int val, boguscnt = 64*10; /* <64 usec. to complete, typ 27 ticks */
- outl(0x04000000 | (location<<16) | (phy_id<<21) | value,
+ iowrite32(0x04000000 | (location<<16) | (phy_id<<21) | value,
ioaddr + SCBCtrlMDI);
do {
- val = inl(ioaddr + SCBCtrlMDI);
+ val = ioread32(ioaddr + SCBCtrlMDI);
if (--boguscnt < 0) {
printk(KERN_ERR" mdio_write() timed out with val = %8.8x.\n", val);
break;
speedo_open(struct net_device *dev)
{
struct speedo_private *sp = netdev_priv(dev);
- long ioaddr = dev->base_addr;
+ void __iomem *ioaddr = sp->regs;
int retval;
if (netif_msg_ifup(sp))
printk(KERN_DEBUG "%s: speedo_open() irq %d.\n", dev->name, dev->irq);
- pci_set_power_state(sp->pdev, 0);
+ pci_set_power_state(sp->pdev, PCI_D0);
/* Set up the Tx queue early.. */
sp->cur_tx = 0;
sp->dirty_tx = 0;
- sp->last_cmd = 0;
+ sp->last_cmd = NULL;
sp->tx_full = 0;
sp->in_interrupt = 0;
speedo_init_rx_ring(dev);
/* Fire up the hardware. */
- outw(SCBMaskAll, ioaddr + SCBCmd);
+ iowrite16(SCBMaskAll, ioaddr + SCBCmd);
speedo_resume(dev);
netdevice_start(dev);
if (netif_msg_ifup(sp)) {
printk(KERN_DEBUG "%s: Done speedo_open(), status %8.8x.\n",
- dev->name, inw(ioaddr + SCBStatus));
+ dev->name, ioread16(ioaddr + SCBStatus));
}
/* Set the timer. The timer serves a dual purpose:
static void speedo_resume(struct net_device *dev)
{
struct speedo_private *sp = netdev_priv(dev);
- long ioaddr = dev->base_addr;
+ void __iomem *ioaddr = sp->regs;
/* Start with a Tx threshold of 256 (0x..20.... 8 byte units). */
sp->tx_threshold = 0x01208000;
/* Set the segment registers to '0'. */
- if (wait_for_cmd_done(dev) != 0) {
- outl(PortPartialReset, ioaddr + SCBPort);
+ if (wait_for_cmd_done(dev, sp) != 0) {
+ iowrite32(PortPartialReset, ioaddr + SCBPort);
udelay(10);
}
- outl(0, ioaddr + SCBPointer);
- inl(ioaddr + SCBPointer); /* Flush to PCI. */
+ iowrite32(0, ioaddr + SCBPointer);
+ ioread32(ioaddr + SCBPointer); /* Flush to PCI. */
udelay(10); /* Bogus, but it avoids the bug. */
/* Note: these next two operations can take a while. */
- do_slow_command(dev, RxAddrLoad);
- do_slow_command(dev, CUCmdBase);
+ do_slow_command(dev, sp, RxAddrLoad);
+ do_slow_command(dev, sp, CUCmdBase);
/* Load the statistics block and rx ring addresses. */
- outl(sp->lstats_dma, ioaddr + SCBPointer);
- inl(ioaddr + SCBPointer); /* Flush to PCI */
+ iowrite32(sp->lstats_dma, ioaddr + SCBPointer);
+ ioread32(ioaddr + SCBPointer); /* Flush to PCI */
- outb(CUStatsAddr, ioaddr + SCBCmd);
+ iowrite8(CUStatsAddr, ioaddr + SCBCmd);
sp->lstats->done_marker = 0;
- wait_for_cmd_done(dev);
+ wait_for_cmd_done(dev, sp);
if (sp->rx_ringp[sp->cur_rx % RX_RING_SIZE] == NULL) {
if (netif_msg_rx_err(sp))
printk(KERN_DEBUG "%s: NULL cur_rx in speedo_resume().\n",
dev->name);
} else {
- outl(sp->rx_ring_dma[sp->cur_rx % RX_RING_SIZE],
+ iowrite32(sp->rx_ring_dma[sp->cur_rx % RX_RING_SIZE],
ioaddr + SCBPointer);
- inl(ioaddr + SCBPointer); /* Flush to PCI */
+ ioread32(ioaddr + SCBPointer); /* Flush to PCI */
}
/* Note: RxStart should complete instantly. */
- do_slow_command(dev, RxStart);
- do_slow_command(dev, CUDumpStats);
+ do_slow_command(dev, sp, RxStart);
+ do_slow_command(dev, sp, CUDumpStats);
/* Fill the first command with our physical address. */
{
}
/* Start the chip's Tx process and unmask interrupts. */
- outl(TX_RING_ELEM_DMA(sp, sp->dirty_tx % TX_RING_SIZE),
+ iowrite32(TX_RING_ELEM_DMA(sp, sp->dirty_tx % TX_RING_SIZE),
ioaddr + SCBPointer);
/* We are not ACK-ing FCP and ER in the interrupt handler yet so they should
remain masked --Dragan */
- outw(CUStart | SCBMaskEarlyRx | SCBMaskFlowCtl, ioaddr + SCBCmd);
+ iowrite16(CUStart | SCBMaskEarlyRx | SCBMaskFlowCtl, ioaddr + SCBCmd);
}
/*
{
struct speedo_private *sp = netdev_priv(dev);
struct RxFD *rfd;
- long ioaddr;
+ void __iomem *ioaddr;
- ioaddr = dev->base_addr;
- if (wait_for_cmd_done(dev) != 0) {
+ ioaddr = sp->regs;
+ if (wait_for_cmd_done(dev, sp) != 0) {
printk("%s: previous command stalled\n", dev->name);
return;
}
/*
* Put the hardware into a known state.
*/
- outb(RxAbort, ioaddr + SCBCmd);
+ iowrite8(RxAbort, ioaddr + SCBCmd);
rfd = sp->rx_ringp[sp->cur_rx % RX_RING_SIZE];
rfd->rx_buf_addr = 0xffffffff;
- if (wait_for_cmd_done(dev) != 0) {
+ if (wait_for_cmd_done(dev, sp) != 0) {
printk("%s: RxAbort command stalled\n", dev->name);
return;
}
- outl(sp->rx_ring_dma[sp->cur_rx % RX_RING_SIZE],
+ iowrite32(sp->rx_ring_dma[sp->cur_rx % RX_RING_SIZE],
ioaddr + SCBPointer);
- outb(RxStart, ioaddr + SCBCmd);
+ iowrite8(RxStart, ioaddr + SCBCmd);
}
{
struct net_device *dev = (struct net_device *)data;
struct speedo_private *sp = netdev_priv(dev);
- long ioaddr = dev->base_addr;
+ void __iomem *ioaddr = sp->regs;
int phy_num = sp->phy[0] & 0x1f;
/* We have MII and lost link beat. */
mii_check_link(&sp->mii_if);
if (netif_msg_timer(sp)) {
printk(KERN_DEBUG "%s: Media control tick, status %4.4x.\n",
- dev->name, inw(ioaddr + SCBStatus));
+ dev->name, ioread16(ioaddr + SCBStatus));
}
if (sp->rx_mode < 0 ||
(sp->rx_bug && jiffies - sp->last_rx_time > 2*HZ)) {
#if 0
{
- long ioaddr = dev->base_addr;
+ void __iomem *ioaddr = sp->regs;
int phy_num = sp->phy[0] & 0x1f;
for (i = 0; i < 16; i++) {
/* FIXME: what does it mean? --SAW */
for (i = 0; i < RX_RING_SIZE; i++) {
struct sk_buff *skb;
skb = dev_alloc_skb(PKT_BUF_SZ + sizeof(struct RxFD));
- /* XXX: do we really want to call this before the NULL check? --hch */
- rx_align(skb); /* Align IP on 16 byte boundary */
+ if (skb)
+ rx_align(skb); /* Align IP on 16 byte boundary */
sp->rx_skbuff[i] = skb;
if (skb == NULL)
break; /* OK. Just initially short of Rx bufs. */
skb->dev = dev; /* Mark as being used by this device. */
- rxf = (struct RxFD *)skb->tail;
+ rxf = (struct RxFD *)skb->data;
sp->rx_ringp[i] = rxf;
sp->rx_ring_dma[i] =
pci_map_single(sp->pdev, rxf,
le32_to_cpu(sp->tx_ring[entry].tx_buf_addr0),
sp->tx_skbuff[entry]->len, PCI_DMA_TODEVICE);
dev_kfree_skb_irq(sp->tx_skbuff[entry]);
- sp->tx_skbuff[entry] = 0;
+ sp->tx_skbuff[entry] = NULL;
}
sp->dirty_tx++;
}
static void speedo_tx_timeout(struct net_device *dev)
{
struct speedo_private *sp = netdev_priv(dev);
- long ioaddr = dev->base_addr;
- int status = inw(ioaddr + SCBStatus);
+ void __iomem *ioaddr = sp->regs;
+ int status = ioread16(ioaddr + SCBStatus);
unsigned long flags;
if (netif_msg_tx_err(sp)) {
printk(KERN_WARNING "%s: Transmit timed out: status %4.4x "
" %4.4x at %d/%d command %8.8x.\n",
- dev->name, status, inw(ioaddr + SCBCmd),
+ dev->name, status, ioread16(ioaddr + SCBCmd),
sp->dirty_tx, sp->cur_tx,
sp->tx_ring[sp->dirty_tx % TX_RING_SIZE].status);
/* Only the command unit has stopped. */
printk(KERN_WARNING "%s: Trying to restart the transmitter...\n",
dev->name);
- outl(TX_RING_ELEM_DMA(sp, dirty_tx % TX_RING_SIZE]),
+ iowrite32(TX_RING_ELEM_DMA(sp, dirty_tx % TX_RING_SIZE]),
ioaddr + SCBPointer);
- outw(CUStart, ioaddr + SCBCmd);
+ iowrite16(CUStart, ioaddr + SCBCmd);
reset_mii(dev);
} else {
#else
#endif
del_timer_sync(&sp->timer);
/* Reset the Tx and Rx units. */
- outl(PortReset, ioaddr + SCBPort);
+ iowrite32(PortReset, ioaddr + SCBPort);
/* We may get spurious interrupts here. But I don't think that they
may do much harm. 1999/12/09 SAW */
udelay(10);
/* Disable interrupts. */
- outw(SCBMaskAll, ioaddr + SCBCmd);
+ iowrite16(SCBMaskAll, ioaddr + SCBCmd);
synchronize_irq(dev->irq);
speedo_tx_buffer_gc(dev);
/* Free as much as possible.
speedo_start_xmit(struct sk_buff *skb, struct net_device *dev)
{
struct speedo_private *sp = netdev_priv(dev);
- long ioaddr = dev->base_addr;
+ void __iomem *ioaddr = sp->regs;
int entry;
/* Prevent interrupts from changing the Tx ring from underneath us. */
/* workaround for hardware bug on 10 mbit half duplex */
if ((sp->partner == 0) && (sp->chip_id == 1)) {
- wait_for_cmd_done(dev);
- outb(0 , ioaddr + SCBCmd);
+ wait_for_cmd_done(dev, sp);
+ iowrite8(0 , ioaddr + SCBCmd);
udelay(1);
}
/* Trigger the command unit resume. */
- wait_for_cmd_done(dev);
+ wait_for_cmd_done(dev, sp);
clear_suspend(sp->last_cmd);
/* We want the time window between clearing suspend flag on the previous
command and resuming CU to be as small as possible.
Interrupts in between are very undesired. --SAW */
- outb(CUResume, ioaddr + SCBCmd);
+ iowrite8(CUResume, ioaddr + SCBCmd);
sp->last_cmd = (struct descriptor *)&sp->tx_ring[entry];
/* Leave room for set_rx_mode(). If there is no more space than reserved
le32_to_cpu(sp->tx_ring[entry].tx_buf_addr0),
sp->tx_skbuff[entry]->len, PCI_DMA_TODEVICE);
dev_kfree_skb_irq(sp->tx_skbuff[entry]);
- sp->tx_skbuff[entry] = 0;
+ sp->tx_skbuff[entry] = NULL;
}
dirty_tx++;
}
{
struct net_device *dev = (struct net_device *)dev_instance;
struct speedo_private *sp;
- long ioaddr, boguscnt = max_interrupt_work;
+ void __iomem *ioaddr;
+ long boguscnt = max_interrupt_work;
unsigned short status;
unsigned int handled = 0;
- ioaddr = dev->base_addr;
sp = netdev_priv(dev);
+ ioaddr = sp->regs;
#ifndef final_version
/* A lock to prevent simultaneous entry on SMP machines. */
#endif
do {
- status = inw(ioaddr + SCBStatus);
+ status = ioread16(ioaddr + SCBStatus);
/* Acknowledge all of the current interrupt sources ASAP. */
/* Will change from 0xfc00 to 0xff00 when we start handling
FCP and ER interrupts --Dragan */
- outw(status & 0xfc00, ioaddr + SCBStatus);
+ iowrite16(status & 0xfc00, ioaddr + SCBStatus);
if (netif_msg_intr(sp))
printk(KERN_DEBUG "%s: interrupt status=%#4.4x.\n",
/* Clear all interrupt sources. */
/* Will change from 0xfc00 to 0xff00 when we start handling
FCP and ER interrupts --Dragan */
- outw(0xfc00, ioaddr + SCBStatus);
+ iowrite16(0xfc00, ioaddr + SCBStatus);
break;
}
} while (1);
if (netif_msg_intr(sp))
printk(KERN_DEBUG "%s: exiting interrupt, status=%#4.4x.\n",
- dev->name, inw(ioaddr + SCBStatus));
+ dev->name, ioread16(ioaddr + SCBStatus));
clear_bit(0, (void*)&sp->in_interrupt);
return IRQ_RETVAL(handled);
struct sk_buff *skb;
/* Get a fresh skbuff to replace the consumed one. */
skb = dev_alloc_skb(PKT_BUF_SZ + sizeof(struct RxFD));
- /* XXX: do we really want to call this before the NULL check? --hch */
- rx_align(skb); /* Align IP on 16 byte boundary */
+ if (skb)
+ rx_align(skb); /* Align IP on 16 byte boundary */
sp->rx_skbuff[entry] = skb;
if (skb == NULL) {
sp->rx_ringp[entry] = NULL;
return NULL;
}
- rxf = sp->rx_ringp[entry] = (struct RxFD *)skb->tail;
+ rxf = sp->rx_ringp[entry] = (struct RxFD *)skb->data;
sp->rx_ring_dma[entry] =
pci_map_single(sp->pdev, rxf,
PKT_BUF_SZ + sizeof(struct RxFD), PCI_DMA_FROMDEVICE);
#if 1 || USE_IP_CSUM
/* Packet is in one chunk -- we can copy + cksum. */
- eth_copy_and_sum(skb, sp->rx_skbuff[entry]->tail, pkt_len, 0);
+ eth_copy_and_sum(skb, sp->rx_skbuff[entry]->data, pkt_len, 0);
skb_put(skb, pkt_len);
#else
- memcpy(skb_put(skb, pkt_len), sp->rx_skbuff[entry]->tail,
+ memcpy(skb_put(skb, pkt_len), sp->rx_skbuff[entry]->data,
pkt_len);
#endif
pci_dma_sync_single_for_device(sp->pdev, sp->rx_ring_dma[entry],
static int
speedo_close(struct net_device *dev)
{
- long ioaddr = dev->base_addr;
struct speedo_private *sp = netdev_priv(dev);
+ void __iomem *ioaddr = sp->regs;
int i;
netdevice_stop(dev);
if (netif_msg_ifdown(sp))
printk(KERN_DEBUG "%s: Shutting down ethercard, status was %4.4x.\n",
- dev->name, inw(ioaddr + SCBStatus));
+ dev->name, ioread16(ioaddr + SCBStatus));
/* Shut off the media monitoring timer. */
del_timer_sync(&sp->timer);
- outw(SCBMaskAll, ioaddr + SCBCmd);
+ iowrite16(SCBMaskAll, ioaddr + SCBCmd);
/* Shutting down the chip nicely fails to disable flow control. So.. */
- outl(PortPartialReset, ioaddr + SCBPort);
- inl(ioaddr + SCBPort); /* flush posted write */
+ iowrite32(PortPartialReset, ioaddr + SCBPort);
+ ioread32(ioaddr + SCBPort); /* flush posted write */
/*
* The chip requires a 10 microsecond quiet period. Wait here!
*/
/* Free all the skbuffs in the Rx and Tx queues. */
for (i = 0; i < RX_RING_SIZE; i++) {
struct sk_buff *skb = sp->rx_skbuff[i];
- sp->rx_skbuff[i] = 0;
+ sp->rx_skbuff[i] = NULL;
/* Clear the Rx descriptors. */
if (skb) {
pci_unmap_single(sp->pdev,
for (i = 0; i < TX_RING_SIZE; i++) {
struct sk_buff *skb = sp->tx_skbuff[i];
- sp->tx_skbuff[i] = 0;
+ sp->tx_skbuff[i] = NULL;
/* Clear the Tx descriptors. */
if (skb) {
pci_unmap_single(sp->pdev,
if (netif_msg_ifdown(sp))
printk(KERN_DEBUG "%s: %d multicast blocks dropped.\n", dev->name, i);
- pci_set_power_state(sp->pdev, 2);
+ pci_set_power_state(sp->pdev, PCI_D2);
return 0;
}
speedo_get_stats(struct net_device *dev)
{
struct speedo_private *sp = netdev_priv(dev);
- long ioaddr = dev->base_addr;
+ void __iomem *ioaddr = sp->regs;
/* Update only if the previous dump finished. */
if (sp->lstats->done_marker == le32_to_cpu(0xA007)) {
/* Take a spinlock to make wait_for_cmd_done and sending the
command atomic. --SAW */
spin_lock_irqsave(&sp->lock, flags);
- wait_for_cmd_done(dev);
- outb(CUDumpStats, ioaddr + SCBCmd);
+ wait_for_cmd_done(dev, sp);
+ iowrite8(CUDumpStats, ioaddr + SCBCmd);
spin_unlock_irqrestore(&sp->lock, flags);
}
}
return &sp->stats;
}
-static int netdev_ethtool_ioctl(struct net_device *dev, void *useraddr)
+static void speedo_get_drvinfo(struct net_device *dev, struct ethtool_drvinfo *info)
{
- u32 ethcmd;
struct speedo_private *sp = netdev_priv(dev);
-
- if (copy_from_user(ðcmd, useraddr, sizeof(ethcmd)))
- return -EFAULT;
-
- switch (ethcmd) {
- /* get driver-specific version/etc. info */
- case ETHTOOL_GDRVINFO: {
- struct ethtool_drvinfo info = {ETHTOOL_GDRVINFO};
- strncpy(info.driver, "eepro100", sizeof(info.driver)-1);
- strncpy(info.version, version, sizeof(info.version)-1);
- if (sp && sp->pdev)
- strcpy(info.bus_info, pci_name(sp->pdev));
- if (copy_to_user(useraddr, &info, sizeof(info)))
- return -EFAULT;
- return 0;
- }
-
- /* get settings */
- case ETHTOOL_GSET: {
- struct ethtool_cmd ecmd = { ETHTOOL_GSET };
- spin_lock_irq(&sp->lock);
- mii_ethtool_gset(&sp->mii_if, &ecmd);
- spin_unlock_irq(&sp->lock);
- if (copy_to_user(useraddr, &ecmd, sizeof(ecmd)))
- return -EFAULT;
- return 0;
- }
- /* set settings */
- case ETHTOOL_SSET: {
- int r;
- struct ethtool_cmd ecmd;
- if (copy_from_user(&ecmd, useraddr, sizeof(ecmd)))
- return -EFAULT;
- spin_lock_irq(&sp->lock);
- r = mii_ethtool_sset(&sp->mii_if, &ecmd);
- spin_unlock_irq(&sp->lock);
- return r;
- }
- /* restart autonegotiation */
- case ETHTOOL_NWAY_RST: {
- return mii_nway_restart(&sp->mii_if);
- }
- /* get link status */
- case ETHTOOL_GLINK: {
- struct ethtool_value edata = {ETHTOOL_GLINK};
- edata.data = mii_link_ok(&sp->mii_if);
- if (copy_to_user(useraddr, &edata, sizeof(edata)))
- return -EFAULT;
- return 0;
- }
- /* get message-level */
- case ETHTOOL_GMSGLVL: {
- struct ethtool_value edata = {ETHTOOL_GMSGLVL};
- edata.data = sp->msg_enable;
- if (copy_to_user(useraddr, &edata, sizeof(edata)))
- return -EFAULT;
- return 0;
- }
- /* set message-level */
- case ETHTOOL_SMSGLVL: {
- struct ethtool_value edata;
- if (copy_from_user(&edata, useraddr, sizeof(edata)))
- return -EFAULT;
- sp->msg_enable = edata.data;
- return 0;
- }
+ strncpy(info->driver, "eepro100", sizeof(info->driver)-1);
+ strncpy(info->version, version, sizeof(info->version)-1);
+ if (sp->pdev)
+ strcpy(info->bus_info, pci_name(sp->pdev));
+}
- }
-
- return -EOPNOTSUPP;
+static int speedo_get_settings(struct net_device *dev, struct ethtool_cmd *ecmd)
+{
+ struct speedo_private *sp = netdev_priv(dev);
+ spin_lock_irq(&sp->lock);
+ mii_ethtool_gset(&sp->mii_if, ecmd);
+ spin_unlock_irq(&sp->lock);
+ return 0;
+}
+
+static int speedo_set_settings(struct net_device *dev, struct ethtool_cmd *ecmd)
+{
+ struct speedo_private *sp = netdev_priv(dev);
+ int res;
+ spin_lock_irq(&sp->lock);
+ res = mii_ethtool_sset(&sp->mii_if, ecmd);
+ spin_unlock_irq(&sp->lock);
+ return res;
+}
+
+static int speedo_nway_reset(struct net_device *dev)
+{
+ struct speedo_private *sp = netdev_priv(dev);
+ return mii_nway_restart(&sp->mii_if);
+}
+
+static u32 speedo_get_link(struct net_device *dev)
+{
+ struct speedo_private *sp = netdev_priv(dev);
+ return mii_link_ok(&sp->mii_if);
}
+static u32 speedo_get_msglevel(struct net_device *dev)
+{
+ struct speedo_private *sp = netdev_priv(dev);
+ return sp->msg_enable;
+}
+
+static void speedo_set_msglevel(struct net_device *dev, u32 v)
+{
+ struct speedo_private *sp = netdev_priv(dev);
+ sp->msg_enable = v;
+}
+
+static struct ethtool_ops ethtool_ops = {
+ .get_drvinfo = speedo_get_drvinfo,
+ .get_settings = speedo_get_settings,
+ .set_settings = speedo_set_settings,
+ .nway_reset = speedo_nway_reset,
+ .get_link = speedo_get_link,
+ .get_msglevel = speedo_get_msglevel,
+ .set_msglevel = speedo_set_msglevel,
+};
+
static int speedo_ioctl(struct net_device *dev, struct ifreq *rq, int cmd)
{
struct speedo_private *sp = netdev_priv(dev);
- struct mii_ioctl_data *data = (struct mii_ioctl_data *)&rq->ifr_data;
+ struct mii_ioctl_data *data = if_mii(rq);
int phy = sp->phy[0] & 0x1f;
int saved_acpi;
int t;
access from the timeout handler.
They are currently serialized only with MDIO access from the
timer routine. 2000/05/09 SAW */
- saved_acpi = pci_set_power_state(sp->pdev, 0);
+ saved_acpi = pci_set_power_state(sp->pdev, PCI_D0);
t = del_timer_sync(&sp->timer);
data->val_out = mdio_read(dev, data->phy_id & 0x1f, data->reg_num & 0x1f);
if (t)
case SIOCSMIIREG: /* Write MII PHY register. */
if (!capable(CAP_NET_ADMIN))
return -EPERM;
- saved_acpi = pci_set_power_state(sp->pdev, 0);
+ saved_acpi = pci_set_power_state(sp->pdev, PCI_D0);
t = del_timer_sync(&sp->timer);
mdio_write(dev, data->phy_id, data->reg_num, data->val_in);
if (t)
add_timer(&sp->timer); /* may be set to the past --SAW */
pci_set_power_state(sp->pdev, saved_acpi);
return 0;
- case SIOCETHTOOL:
- return netdev_ethtool_ioctl(dev, (void *) rq->ifr_data);
default:
return -EOPNOTSUPP;
}
static void set_rx_mode(struct net_device *dev)
{
struct speedo_private *sp = netdev_priv(dev);
- long ioaddr = dev->base_addr;
+ void __iomem *ioaddr = sp->regs;
struct descriptor *last_cmd;
char new_rx_mode;
unsigned long flags;
last_cmd = sp->last_cmd;
sp->last_cmd = (struct descriptor *)&sp->tx_ring[entry];
- sp->tx_skbuff[entry] = 0; /* Redundant. */
+ sp->tx_skbuff[entry] = NULL; /* Redundant. */
sp->tx_ring[entry].status = cpu_to_le32(CmdSuspend | CmdConfigure);
sp->tx_ring[entry].link =
cpu_to_le32(TX_RING_ELEM_DMA(sp, (entry + 1) % TX_RING_SIZE));
config_cmd_data[8] = 0;
}
/* Trigger the command unit resume. */
- wait_for_cmd_done(dev);
+ wait_for_cmd_done(dev, sp);
clear_suspend(last_cmd);
- outb(CUResume, ioaddr + SCBCmd);
+ iowrite8(CUResume, ioaddr + SCBCmd);
if ((int)(sp->cur_tx - sp->dirty_tx) >= TX_QUEUE_LIMIT) {
netif_stop_queue(dev);
sp->tx_full = 1;
last_cmd = sp->last_cmd;
sp->last_cmd = (struct descriptor *)&sp->tx_ring[entry];
- sp->tx_skbuff[entry] = 0;
+ sp->tx_skbuff[entry] = NULL;
sp->tx_ring[entry].status = cpu_to_le32(CmdSuspend | CmdMulticastList);
sp->tx_ring[entry].link =
cpu_to_le32(TX_RING_ELEM_DMA(sp, (entry + 1) % TX_RING_SIZE));
*setup_params++ = *eaddrs++;
}
- wait_for_cmd_done(dev);
+ wait_for_cmd_done(dev, sp);
clear_suspend(last_cmd);
/* Immediately trigger the command unit resume. */
- outb(CUResume, ioaddr + SCBCmd);
+ iowrite8(CUResume, ioaddr + SCBCmd);
if ((int)(sp->cur_tx - sp->dirty_tx) >= TX_QUEUE_LIMIT) {
netif_stop_queue(dev);
sp->last_cmd = mc_setup_frm;
/* Change the command to a NoOp, pointing to the CmdMulti command. */
- sp->tx_skbuff[entry] = 0;
+ sp->tx_skbuff[entry] = NULL;
sp->tx_ring[entry].status = cpu_to_le32(CmdNOp);
sp->tx_ring[entry].link = cpu_to_le32(mc_blk->frame_dma);
pci_dma_sync_single_for_device(sp->pdev, mc_blk->frame_dma,
mc_blk->len, PCI_DMA_TODEVICE);
- wait_for_cmd_done(dev);
+ wait_for_cmd_done(dev, sp);
clear_suspend(last_cmd);
/* Immediately trigger the command unit resume. */
- outb(CUResume, ioaddr + SCBCmd);
+ iowrite8(CUResume, ioaddr + SCBCmd);
if ((int)(sp->cur_tx - sp->dirty_tx) >= TX_QUEUE_LIMIT) {
netif_stop_queue(dev);
}
\f
#ifdef CONFIG_PM
-static int eepro100_suspend(struct pci_dev *pdev, u32 state)
+static int eepro100_suspend(struct pci_dev *pdev, pm_message_t state)
{
struct net_device *dev = pci_get_drvdata (pdev);
struct speedo_private *sp = netdev_priv(dev);
- long ioaddr = dev->base_addr;
+ void __iomem *ioaddr = sp->regs;
- pci_save_state(pdev, sp->pm_state);
+ pci_save_state(pdev);
if (!netif_running(dev))
return 0;
del_timer_sync(&sp->timer);
netif_device_detach(dev);
- outl(PortPartialReset, ioaddr + SCBPort);
+ iowrite32(PortPartialReset, ioaddr + SCBPort);
/* XXX call pci_set_power_state ()? */
+ pci_disable_device(pdev);
+ pci_set_power_state (pdev, PCI_D3hot);
return 0;
}
{
struct net_device *dev = pci_get_drvdata (pdev);
struct speedo_private *sp = netdev_priv(dev);
- long ioaddr = dev->base_addr;
+ void __iomem *ioaddr = sp->regs;
- pci_restore_state(pdev, sp->pm_state);
+ pci_set_power_state(pdev, PCI_D0);
+ pci_restore_state(pdev);
+ pci_enable_device(pdev);
+ pci_set_master(pdev);
if (!netif_running(dev))
return 0;
reinitialization;
- serialization with other driver calls.
2000/03/08 SAW */
- outw(SCBMaskAll, ioaddr + SCBCmd);
+ iowrite16(SCBMaskAll, ioaddr + SCBCmd);
speedo_resume(dev);
netif_device_attach(dev);
sp->rx_mode = -1;
release_region(pci_resource_start(pdev, 1), pci_resource_len(pdev, 1));
release_mem_region(pci_resource_start(pdev, 0), pci_resource_len(pdev, 0));
-#ifndef USE_IO
- iounmap((char *)dev->base_addr);
-#endif
-
+ pci_iounmap(pdev, sp->regs);
pci_free_consistent(pdev, TX_RING_SIZE * sizeof(struct TxFD)
+ sizeof(struct speedo_stats),
sp->tx_ring, sp->tx_ring_dma);
}
\f
static struct pci_device_id eepro100_pci_tbl[] = {
- { PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_82557,
- PCI_ANY_ID, PCI_ANY_ID, },
- { PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_82559ER,
- PCI_ANY_ID, PCI_ANY_ID, },
- { PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_82801BA_7,
- PCI_ANY_ID, PCI_ANY_ID, },
+ { PCI_VENDOR_ID_INTEL, 0x1229, PCI_ANY_ID, PCI_ANY_ID, },
+ { PCI_VENDOR_ID_INTEL, 0x1209, PCI_ANY_ID, PCI_ANY_ID, },
{ PCI_VENDOR_ID_INTEL, 0x1029, PCI_ANY_ID, PCI_ANY_ID, },
{ PCI_VENDOR_ID_INTEL, 0x1030, PCI_ANY_ID, PCI_ANY_ID, },
{ PCI_VENDOR_ID_INTEL, 0x1031, PCI_ANY_ID, PCI_ANY_ID, },
{ PCI_VENDOR_ID_INTEL, 0x1050, PCI_ANY_ID, PCI_ANY_ID, },
{ PCI_VENDOR_ID_INTEL, 0x1059, PCI_ANY_ID, PCI_ANY_ID, },
{ PCI_VENDOR_ID_INTEL, 0x1227, PCI_ANY_ID, PCI_ANY_ID, },
- { PCI_VENDOR_ID_INTEL, 0x1228, PCI_ANY_ID, PCI_ANY_ID, },
{ PCI_VENDOR_ID_INTEL, 0x2449, PCI_ANY_ID, PCI_ANY_ID, },
{ PCI_VENDOR_ID_INTEL, 0x2459, PCI_ANY_ID, PCI_ANY_ID, },
{ PCI_VENDOR_ID_INTEL, 0x245D, PCI_ANY_ID, PCI_ANY_ID, },
#endif /* CONFIG_PM */
};
-#if LINUX_VERSION_CODE < KERNEL_VERSION(2,3,48)
-static int pci_module_init(struct pci_driver *pdev)
-{
- int rc;
-
- rc = pci_register_driver(pdev);
- if (rc <= 0) {
- printk(KERN_INFO "%s: No cards found, driver not installed.\n",
- pdev->name);
- pci_unregister_driver(pdev);
- return -ENODEV;
- }
- return 0;
-}
-#endif
-
static int __init eepro100_init_module(void)
{
#ifdef MODULE