* smc_phy_configure
* - clean up (and fix stack overrun) in PHY
* MII read/write functions
- * 09/15/04 Hayato Fujiwara - Add m32r support.
- * - Modify for SMP kernel; Change spin-locked
- * regions.
+ * 22/09/04 Nicolas Pitre big update (see commit log for details)
*/
static const char version[] =
- "smc91x.c: v1.0, mar 07 2003 by Nicolas Pitre <nico@cam.org>\n";
+ "smc91x.c: v1.1, sep 22 2004 by Nicolas Pitre <nico@cam.org>\n";
/* Debugging level */
#ifndef SMC_DEBUG
#include <linux/sched.h>
#include <linux/slab.h>
#include <linux/delay.h>
-#include <linux/timer.h>
+#include <linux/interrupt.h>
#include <linux/errno.h>
#include <linux/ioport.h>
#include <linux/crc32.h>
-#include <linux/device.h>
+#include <linux/platform_device.h>
#include <linux/spinlock.h>
#include <linux/ethtool.h>
#include <linux/mii.h>
+#include <linux/workqueue.h>
#include <linux/netdevice.h>
#include <linux/etherdevice.h>
#include <linux/skbuff.h>
#include <asm/io.h>
-#include <asm/irq.h>
#include "smc91x.h"
/*
* Transmit timeout, default 5 seconds.
*/
-static int watchdog = 5000;
+static int watchdog = 1000;
module_param(watchdog, int, 0400);
MODULE_PARM_DESC(watchdog, "transmit timeout in milliseconds");
*/
#define MEMORY_WAIT_TIME 16
+/*
+ * The maximum number of processing loops allowed for each call to the
+ * IRQ handler.
+ */
+#define MAX_IRQ_LOOPS 8
+
/*
* This selects whether TX packets are sent one by one to the SMC91x internal
* memory and throttled until transmission completes. This may prevent
* packet, I will store the skbuff here, until I get the
* desired memory. Then, I'll send it out and free it.
*/
- struct sk_buff *saved_skb;
+ struct sk_buff *pending_tx_skb;
+ struct tasklet_struct tx_task;
/*
* these are things that the kernel wants me to keep, so users
u32 msg_enable;
u32 phy_type;
struct mii_if_info mii;
+
+ /* work queue */
+ struct work_struct phy_configure;
+ int work_pending;
+
spinlock_t lock;
#ifdef SMC_USE_PXA_DMA
/* DMA needs the physical address of the chip */
u_long physaddr;
#endif
+ void __iomem *base;
+ void __iomem *datacs;
};
#if SMC_DEBUG > 0
#define DBG(n, args...) \
do { \
if (SMC_DEBUG >= (n)) \
- printk(KERN_DEBUG args); \
+ printk(args); \
} while (0)
#define PRINTK(args...) printk(args)
/* this enables an interrupt in the interrupt mask register */
#define SMC_ENABLE_INT(x) do { \
unsigned char mask; \
+ spin_lock_irq(&lp->lock); \
mask = SMC_GET_INT_MASK(); \
mask |= (x); \
SMC_SET_INT_MASK(mask); \
+ spin_unlock_irq(&lp->lock); \
} while (0)
/* this disables an interrupt from the interrupt mask register */
#define SMC_DISABLE_INT(x) do { \
unsigned char mask; \
+ spin_lock_irq(&lp->lock); \
mask = SMC_GET_INT_MASK(); \
mask &= ~(x); \
SMC_SET_INT_MASK(mask); \
+ spin_unlock_irq(&lp->lock); \
} while (0)
/*
*/
static void smc_reset(struct net_device *dev)
{
- unsigned long ioaddr = dev->base_addr;
+ struct smc_local *lp = netdev_priv(dev);
+ void __iomem *ioaddr = lp->base;
unsigned int ctl, cfg;
+ struct sk_buff *pending_skb;
DBG(2, "%s: %s\n", dev->name, __FUNCTION__);
+ /* Disable all interrupts, block TX tasklet */
+ spin_lock(&lp->lock);
+ SMC_SELECT_BANK(2);
+ SMC_SET_INT_MASK(0);
+ pending_skb = lp->pending_tx_skb;
+ lp->pending_tx_skb = NULL;
+ spin_unlock(&lp->lock);
+
+ /* free any pending tx skb */
+ if (pending_skb) {
+ dev_kfree_skb(pending_skb);
+ lp->stats.tx_errors++;
+ lp->stats.tx_aborted_errors++;
+ }
+
/*
* This resets the registers mostly to defaults, but doesn't
* affect EEPROM. That seems unnecessary
* transmitted packets, to make the best use out of our limited
* memory
*/
-#if ! THROTTLE_TX_PKTS
- ctl |= CTL_AUTO_RELEASE;
-#else
- ctl &= ~CTL_AUTO_RELEASE;
-#endif
+ if(!THROTTLE_TX_PKTS)
+ ctl |= CTL_AUTO_RELEASE;
+ else
+ ctl &= ~CTL_AUTO_RELEASE;
SMC_SET_CTL(ctl);
- /* Disable all interrupts */
- SMC_SELECT_BANK(2);
- SMC_SET_INT_MASK(0);
-
/* Reset the MMU */
+ SMC_SELECT_BANK(2);
SMC_SET_MMU_CMD(MC_RESET);
SMC_WAIT_MMU_BUSY();
}
*/
static void smc_enable(struct net_device *dev)
{
- unsigned long ioaddr = dev->base_addr;
struct smc_local *lp = netdev_priv(dev);
+ void __iomem *ioaddr = lp->base;
int mask;
DBG(2, "%s: %s\n", dev->name, __FUNCTION__);
SMC_SET_TCR(lp->tcr_cur_mode);
SMC_SET_RCR(lp->rcr_cur_mode);
+ SMC_SELECT_BANK(1);
+ SMC_SET_MAC_ADDR(dev->dev_addr);
+
/* now, enable interrupts */
mask = IM_EPH_INT|IM_RX_OVRN_INT|IM_RCV_INT;
if (lp->version >= (CHIP_91100 << 4))
mask |= IM_MDINT;
SMC_SELECT_BANK(2);
SMC_SET_INT_MASK(mask);
+
+	/*
+	 * From this point the register bank must _NOT_ be switched away
+	 * to anything other than bank 2 without proper locking against
+	 * races with any tasklet or interrupt handlers until smc_shutdown()
+	 * or smc_reset() is called.
+	 */
}
/*
* this puts the device in an inactive state
*/
-static void smc_shutdown(unsigned long ioaddr)
+static void smc_shutdown(struct net_device *dev)
{
+ struct smc_local *lp = netdev_priv(dev);
+ void __iomem *ioaddr = lp->base;
+ struct sk_buff *pending_skb;
+
DBG(2, "%s: %s\n", CARDNAME, __FUNCTION__);
/* no more interrupts for me */
+ spin_lock(&lp->lock);
SMC_SELECT_BANK(2);
SMC_SET_INT_MASK(0);
+ pending_skb = lp->pending_tx_skb;
+ lp->pending_tx_skb = NULL;
+ spin_unlock(&lp->lock);
+ if (pending_skb)
+ dev_kfree_skb(pending_skb);
/* and tell the card to stay away from that nasty outside world */
SMC_SELECT_BANK(0);
static inline void smc_rcv(struct net_device *dev)
{
struct smc_local *lp = netdev_priv(dev);
- unsigned long ioaddr = dev->base_addr;
+ void __iomem *ioaddr = lp->base;
unsigned int packet_number, status, packet_len;
DBG(3, "%s: %s\n", dev->name, __FUNCTION__);
dev->name, packet_number, status,
packet_len, packet_len);
- if (unlikely(status & RS_ERRORS)) {
+ back:
+ if (unlikely(packet_len < 6 || status & RS_ERRORS)) {
+ if (status & RS_TOOLONG && packet_len <= (1514 + 4 + 6)) {
+ /* accept VLAN packets */
+ status &= ~RS_TOOLONG;
+ goto back;
+ }
+ if (packet_len < 6) {
+ /* bloody hardware */
+ printk(KERN_ERR "%s: fubar (rxlen %u status %x\n",
+ dev->name, packet_len, status);
+ status |= RS_TOOSHORT;
+ }
+ SMC_WAIT_MMU_BUSY();
+ SMC_SET_MMU_CMD(MC_RELEASE);
lp->stats.rx_errors++;
if (status & RS_ALGNERR)
lp->stats.rx_frame_errors++;
lp->stats.multicast++;
/*
- * Actual payload is packet_len - 4 (or 3 if odd byte).
+ * Actual payload is packet_len - 6 (or 5 if odd byte).
* We want skb_reserve(2) and the final ctrl word
* (2 bytes, possibly containing the payload odd byte).
- * Ence packet_len - 4 + 2 + 2.
+ * Furthermore, we add 2 bytes to allow rounding up to
+ * multiple of 4 bytes on 32 bit buses.
+ * Hence packet_len - 6 + 2 + 2 + 2.
*/
skb = dev_alloc_skb(packet_len);
if (unlikely(skb == NULL)) {
printk(KERN_NOTICE "%s: Low memory, packet dropped.\n",
dev->name);
+ SMC_WAIT_MMU_BUSY();
+ SMC_SET_MMU_CMD(MC_RELEASE);
lp->stats.rx_dropped++;
- goto done;
+ return;
}
/* Align IP header to 32 bits */
status |= RS_ODDFRAME;
/*
- * If odd length: packet_len - 3,
- * otherwise packet_len - 4.
+ * If odd length: packet_len - 5,
+ * otherwise packet_len - 6.
+ * With the trailing ctrl byte it's packet_len - 4.
*/
- data_len = packet_len - ((status & RS_ODDFRAME) ? 3 : 4);
+ data_len = packet_len - ((status & RS_ODDFRAME) ? 5 : 6);
data = skb_put(skb, data_len);
- SMC_PULL_DATA(data, packet_len - 2);
+ SMC_PULL_DATA(data, packet_len - 4);
+
+ SMC_WAIT_MMU_BUSY();
+ SMC_SET_MMU_CMD(MC_RELEASE);
- PRINT_PKT(data, packet_len - 2);
+ PRINT_PKT(data, packet_len - 4);
dev->last_rx = jiffies;
skb->dev = dev;
lp->stats.rx_packets++;
lp->stats.rx_bytes += data_len;
}
-
-done:
- SMC_WAIT_MMU_BUSY();
- SMC_SET_MMU_CMD(MC_RELEASE);
}
+#ifdef CONFIG_SMP
+/*
+ * On SMP we have the following problem:
+ *
+ * A = smc_hardware_send_pkt()
+ * B = smc_hard_start_xmit()
+ * C = smc_interrupt()
+ *
+ * A and B can never be executed simultaneously. However, at least on UP,
+ * it is possible (and even desirable) for C to interrupt execution of
+ * A or B in order to have better RX reliability and avoid overruns.
+ * C, just like A and B, must have exclusive access to the chip and
+ * each of them must lock against any other concurrent access.
+ * Unfortunately it is not possible to have C suspend execution of A or
+ * B taking place on another CPU. On UP this is not an issue since A and B
+ * are run from softirq context and C from hard IRQ context, and there is
+ * no other CPU where concurrent access can happen.
+ * If ever there is a way to force at least B and C to always be executed
+ * on the same CPU then we could use read/write locks to protect against
+ * any other concurrent access and C would always interrupt B. But life
+ * isn't that easy in a SMP world...
+ */
+#define smc_special_trylock(lock) \
+({ \
+	int __ret; \
+	local_irq_disable(); \
+	__ret = spin_trylock(lock); \
+	if (!__ret) \
+		local_irq_enable(); \
+	__ret; \
+})
+#define smc_special_lock(lock) spin_lock_irq(lock)
+#define smc_special_unlock(lock) spin_unlock_irq(lock)
+#else
+/* UP: the softirq/hardirq context split already serializes A, B and C
+ * (see comment above), so these degenerate to no-ops. */
+#define smc_special_trylock(lock) (1)
+#define smc_special_lock(lock) do { } while (0)
+#define smc_special_unlock(lock) do { } while (0)
+#endif
+
/*
* This is called to actually send a packet to the chip.
- * Returns non-zero when successful.
*/
-static void smc_hardware_send_packet(struct net_device *dev)
+static void smc_hardware_send_pkt(unsigned long data)
{
+ struct net_device *dev = (struct net_device *)data;
struct smc_local *lp = netdev_priv(dev);
- unsigned long ioaddr = dev->base_addr;
- struct sk_buff *skb = lp->saved_skb;
+ void __iomem *ioaddr = lp->base;
+ struct sk_buff *skb;
unsigned int packet_no, len;
unsigned char *buf;
DBG(3, "%s: %s\n", dev->name, __FUNCTION__);
+ if (!smc_special_trylock(&lp->lock)) {
+ netif_stop_queue(dev);
+ tasklet_schedule(&lp->tx_task);
+ return;
+ }
+
+ skb = lp->pending_tx_skb;
+ if (unlikely(!skb)) {
+ smc_special_unlock(&lp->lock);
+ return;
+ }
+ lp->pending_tx_skb = NULL;
+
packet_no = SMC_GET_AR();
if (unlikely(packet_no & AR_FAILED)) {
printk("%s: Memory allocation failed.\n", dev->name);
- lp->saved_skb = NULL;
lp->stats.tx_errors++;
lp->stats.tx_fifo_errors++;
- dev_kfree_skb_any(skb);
- return;
+ smc_special_unlock(&lp->lock);
+ goto done;
}
/* point to the beginning of the packet */
/* Send final ctl word with the last byte if there is one */
SMC_outw(((len & 1) ? (0x2000 | buf[len-1]) : 0), ioaddr, DATA_REG);
- /* and let the chipset deal with it */
+ /*
+ * If THROTTLE_TX_PKTS is set, we stop the queue here. This will
+ * have the effect of having at most one packet queued for TX
+ * in the chip's memory at all time.
+ *
+ * If THROTTLE_TX_PKTS is not set then the queue is stopped only
+ * when memory allocation (MC_ALLOC) does not succeed right away.
+ */
+ if (THROTTLE_TX_PKTS)
+ netif_stop_queue(dev);
+
+ /* queue the packet for TX */
SMC_SET_MMU_CMD(MC_ENQUEUE);
- SMC_ACK_INT(IM_TX_EMPTY_INT);
+ smc_special_unlock(&lp->lock);
dev->trans_start = jiffies;
- dev_kfree_skb_any(skb);
- lp->saved_skb = NULL;
lp->stats.tx_packets++;
lp->stats.tx_bytes += len;
+
+ SMC_ENABLE_INT(IM_TX_INT | IM_TX_EMPTY_INT);
+
+done: if (!THROTTLE_TX_PKTS)
+ netif_wake_queue(dev);
+
+ dev_kfree_skb(skb);
}
/*
static int smc_hard_start_xmit(struct sk_buff *skb, struct net_device *dev)
{
struct smc_local *lp = netdev_priv(dev);
- unsigned long ioaddr = dev->base_addr;
- unsigned int numPages, poll_count, status, saved_bank;
- unsigned long flags;
+ void __iomem *ioaddr = lp->base;
+ unsigned int numPages, poll_count, status;
DBG(3, "%s: %s\n", dev->name, __FUNCTION__);
- spin_lock_irqsave(&lp->lock, flags);
-
- BUG_ON(lp->saved_skb != NULL);
- lp->saved_skb = skb;
+ BUG_ON(lp->pending_tx_skb != NULL);
/*
* The MMU wants the number of pages to be the number of 256 bytes
numPages = ((skb->len & ~1) + (6 - 1)) >> 8;
if (unlikely(numPages > 7)) {
printk("%s: Far too big packet error.\n", dev->name);
- lp->saved_skb = NULL;
lp->stats.tx_errors++;
lp->stats.tx_dropped++;
dev_kfree_skb(skb);
- spin_unlock_irqrestore(&lp->lock, flags);
return 0;
}
+ smc_special_lock(&lp->lock);
+
/* now, try to allocate the memory */
- saved_bank = SMC_CURRENT_BANK();
- SMC_SELECT_BANK(2);
SMC_SET_MMU_CMD(MC_ALLOC | numPages);
/*
}
} while (--poll_count);
+ smc_special_unlock(&lp->lock);
+
+ lp->pending_tx_skb = skb;
if (!poll_count) {
/* oh well, wait until the chip finds memory later */
netif_stop_queue(dev);
/*
* Allocation succeeded: push packet to the chip's own memory
* immediately.
- *
- * If THROTTLE_TX_PKTS is selected that means we don't want
- * more than a single TX packet taking up space in the chip's
- * internal memory at all time, in which case we stop the
- * queue right here until we're notified of TX completion.
- *
- * Otherwise we're quite happy to feed more TX packets right
- * away for better TX throughput, in which case the queue is
- * left active.
*/
-#if THROTTLE_TX_PKTS
- netif_stop_queue(dev);
-#endif
- smc_hardware_send_packet(dev);
- SMC_ENABLE_INT(IM_TX_INT | IM_TX_EMPTY_INT);
+ smc_hardware_send_pkt((unsigned long)dev);
}
- SMC_SELECT_BANK(saved_bank);
- spin_unlock_irqrestore(&lp->lock, flags);
return 0;
}
*/
static void smc_tx(struct net_device *dev)
{
- unsigned long ioaddr = dev->base_addr;
struct smc_local *lp = netdev_priv(dev);
+ void __iomem *ioaddr = lp->base;
unsigned int saved_packet, packet_no, tx_status, pkt_len;
DBG(3, "%s: %s\n", dev->name, __FUNCTION__);
DBG(2, "%s: TX STATUS 0x%04x PNR 0x%02x\n",
dev->name, tx_status, packet_no);
- if (!(tx_status & TS_SUCCESS))
+ if (!(tx_status & ES_TX_SUC))
lp->stats.tx_errors++;
- if (tx_status & TS_LOSTCAR)
+
+ if (tx_status & ES_LOSTCARR)
lp->stats.tx_carrier_errors++;
- if (tx_status & TS_LATCOL) {
- PRINTK("%s: late collision occurred on last xmit\n", dev->name);
+ if (tx_status & (ES_LATCOL | ES_16COL)) {
+ PRINTK("%s: %s occurred on last xmit\n", dev->name,
+ (tx_status & ES_LATCOL) ?
+ "late collision" : "too many collisions");
lp->stats.tx_window_errors++;
if (!(lp->stats.tx_window_errors & 63) && net_ratelimit()) {
- printk(KERN_INFO "%s: unexpectedly large numbers of "
- "late collisions. Please check duplex "
+ printk(KERN_INFO "%s: unexpectedly large number of "
+ "bad collisions. Please check duplex "
"setting.\n", dev->name);
}
}
static void smc_mii_out(struct net_device *dev, unsigned int val, int bits)
{
- unsigned long ioaddr = dev->base_addr;
+ struct smc_local *lp = netdev_priv(dev);
+ void __iomem *ioaddr = lp->base;
unsigned int mii_reg, mask;
mii_reg = SMC_GET_MII() & ~(MII_MCLK | MII_MDOE | MII_MDO);
static unsigned int smc_mii_in(struct net_device *dev, int bits)
{
- unsigned long ioaddr = dev->base_addr;
+ struct smc_local *lp = netdev_priv(dev);
+ void __iomem *ioaddr = lp->base;
unsigned int mii_reg, mask, val;
mii_reg = SMC_GET_MII() & ~(MII_MCLK | MII_MDOE | MII_MDO);
*/
static int smc_phy_read(struct net_device *dev, int phyaddr, int phyreg)
{
- unsigned long ioaddr = dev->base_addr;
- unsigned int phydata, old_bank;
+ struct smc_local *lp = netdev_priv(dev);
+ void __iomem *ioaddr = lp->base;
+ unsigned int phydata;
- /* Save the current bank, and select bank 3 */
- old_bank = SMC_CURRENT_BANK();
SMC_SELECT_BANK(3);
/* Idle - 32 ones */
/* Return to idle state */
SMC_SET_MII(SMC_GET_MII() & ~(MII_MCLK|MII_MDOE|MII_MDO));
- /* And select original bank */
- SMC_SELECT_BANK(old_bank);
-
DBG(3, "%s: phyaddr=0x%x, phyreg=0x%x, phydata=0x%x\n",
__FUNCTION__, phyaddr, phyreg, phydata);
+ SMC_SELECT_BANK(2);
return phydata;
}
static void smc_phy_write(struct net_device *dev, int phyaddr, int phyreg,
int phydata)
{
- unsigned long ioaddr = dev->base_addr;
- unsigned int old_bank;
+ struct smc_local *lp = netdev_priv(dev);
+ void __iomem *ioaddr = lp->base;
- /* Save the current bank, and select bank 3 */
- old_bank = SMC_CURRENT_BANK();
SMC_SELECT_BANK(3);
/* Idle - 32 ones */
/* Return to idle state */
SMC_SET_MII(SMC_GET_MII() & ~(MII_MCLK|MII_MDOE|MII_MDO));
- /* And select original bank */
- SMC_SELECT_BANK(old_bank);
-
DBG(3, "%s: phyaddr=0x%x, phyreg=0x%x, phydata=0x%x\n",
__FUNCTION__, phyaddr, phyreg, phydata);
+
+ SMC_SELECT_BANK(2);
}
/*
* Finds and reports the PHY address
*/
-static void smc_detect_phy(struct net_device *dev)
+static void smc_phy_detect(struct net_device *dev)
{
struct smc_local *lp = netdev_priv(dev);
int phyaddr;
static int smc_phy_fixed(struct net_device *dev)
{
struct smc_local *lp = netdev_priv(dev);
- unsigned long ioaddr = dev->base_addr;
+ void __iomem *ioaddr = lp->base;
int phyaddr = lp->mii.phy_id;
int bmcr, cfg1;
smc_phy_write(dev, phyaddr, MII_BMCR, bmcr);
/* Re-Configure the Receive/Phy Control register */
+ SMC_SELECT_BANK(0);
SMC_SET_RPC(lp->rpc_cur_mode);
+ SMC_SELECT_BANK(2);
return 1;
}
/*
* smc_phy_powerdown - powerdown phy
* @dev: net device
- * @phy: phy address
*
* Power down the specified PHY
*/
-static void smc_phy_powerdown(struct net_device *dev, int phy)
+static void smc_phy_powerdown(struct net_device *dev)
{
struct smc_local *lp = netdev_priv(dev);
unsigned int bmcr;
+ int phy = lp->mii.phy_id;
+
+ if (lp->phy_type == 0)
+ return;
+
+ /* We need to ensure that no calls to smc_phy_configure are
+ pending.
+
+ flush_scheduled_work() cannot be called because we are
+ running with the netlink semaphore held (from
+ devinet_ioctl()) and the pending work queue contains
+ linkwatch_event() (scheduled by netif_carrier_off()
+ above). linkwatch_event() also wants the netlink semaphore.
+ */
+ while(lp->work_pending)
+ yield();
- spin_lock_irq(&lp->lock);
bmcr = smc_phy_read(dev, phy, MII_BMCR);
smc_phy_write(dev, phy, MII_BMCR, bmcr | BMCR_PDOWN);
- spin_unlock_irq(&lp->lock);
}
/*
static void smc_phy_check_media(struct net_device *dev, int init)
{
struct smc_local *lp = netdev_priv(dev);
- unsigned long ioaddr = dev->base_addr;
+ void __iomem *ioaddr = lp->base;
if (mii_check_media(&lp->mii, netif_msg_link(lp), init)) {
- unsigned int old_bank;
-
/* duplex state has changed */
if (lp->mii.full_duplex) {
lp->tcr_cur_mode |= TCR_SWFDUP;
lp->tcr_cur_mode &= ~TCR_SWFDUP;
}
- old_bank = SMC_CURRENT_BANK();
SMC_SELECT_BANK(0);
SMC_SET_TCR(lp->tcr_cur_mode);
- SMC_SELECT_BANK(old_bank);
}
}
* of autonegotiation.) If the RPC ANEG bit is cleared, the selection
* is controlled by the RPC SPEED and RPC DPLX bits.
*/
-static void smc_phy_configure(struct net_device *dev)
+static void smc_phy_configure(void *data)
{
+ struct net_device *dev = data;
struct smc_local *lp = netdev_priv(dev);
- unsigned long ioaddr = dev->base_addr;
+ void __iomem *ioaddr = lp->base;
int phyaddr = lp->mii.phy_id;
int my_phy_caps; /* My PHY capabilities */
int my_ad_caps; /* My Advertised capabilities */
smc_phy_check_media(dev, 1);
smc_phy_configure_exit:
+ SMC_SELECT_BANK(2);
spin_unlock_irq(&lp->lock);
+ lp->work_pending = 0;
}
/*
static void smc_10bt_check_media(struct net_device *dev, int init)
{
struct smc_local *lp = netdev_priv(dev);
- unsigned long ioaddr = dev->base_addr;
- unsigned int old_carrier, new_carrier, old_bank;
+ void __iomem *ioaddr = lp->base;
+ unsigned int old_carrier, new_carrier;
- old_bank = SMC_CURRENT_BANK();
- SMC_SELECT_BANK(0);
old_carrier = netif_carrier_ok(dev) ? 1 : 0;
- new_carrier = SMC_inw(ioaddr, EPH_STATUS_REG) & ES_LINK_OK ? 1 : 0;
+
+ SMC_SELECT_BANK(0);
+ new_carrier = (SMC_GET_EPH_STATUS() & ES_LINK_OK) ? 1 : 0;
+ SMC_SELECT_BANK(2);
if (init || (old_carrier != new_carrier)) {
if (!new_carrier) {
printk(KERN_INFO "%s: link %s\n", dev->name,
new_carrier ? "up" : "down");
}
- SMC_SELECT_BANK(old_bank);
}
static void smc_eph_interrupt(struct net_device *dev)
{
- unsigned long ioaddr = dev->base_addr;
- unsigned int old_bank, ctl;
+ struct smc_local *lp = netdev_priv(dev);
+ void __iomem *ioaddr = lp->base;
+ unsigned int ctl;
smc_10bt_check_media(dev, 0);
- old_bank = SMC_CURRENT_BANK();
SMC_SELECT_BANK(1);
-
ctl = SMC_GET_CTL();
SMC_SET_CTL(ctl & ~CTL_LE_ENABLE);
SMC_SET_CTL(ctl);
-
- SMC_SELECT_BANK(old_bank);
+ SMC_SELECT_BANK(2);
}
/*
static irqreturn_t smc_interrupt(int irq, void *dev_id, struct pt_regs *regs)
{
struct net_device *dev = dev_id;
- unsigned long ioaddr = dev->base_addr;
struct smc_local *lp = netdev_priv(dev);
+ void __iomem *ioaddr = lp->base;
int status, mask, timeout, card_stats;
- int saved_bank, saved_pointer;
+ int saved_pointer;
DBG(3, "%s: %s\n", dev->name, __FUNCTION__);
spin_lock(&lp->lock);
- saved_bank = SMC_CURRENT_BANK();
- SMC_SELECT_BANK(2);
+ /* A preamble may be used when there is a potential race
+ * between the interruptible transmit functions and this
+ * ISR. */
+ SMC_INTERRUPT_PREAMBLE;
+
saved_pointer = SMC_GET_PTR();
mask = SMC_GET_INT_MASK();
SMC_SET_INT_MASK(0);
/* set a timeout value, so I don't stay here forever */
- timeout = 8;
+ timeout = MAX_IRQ_LOOPS;
do {
status = SMC_GET_INT();
- DBG(2, "%s: IRQ 0x%02x MASK 0x%02x MEM 0x%04x FIFO 0x%04x\n",
+ DBG(2, "%s: INT 0x%02x MASK 0x%02x MEM 0x%04x FIFO 0x%04x\n",
dev->name, status, mask,
({ int meminfo; SMC_SELECT_BANK(0);
meminfo = SMC_GET_MIR();
if (!status)
break;
- if (status & IM_RCV_INT) {
- DBG(3, "%s: RX irq\n", dev->name);
- smc_rcv(dev);
- } else if (status & IM_TX_INT) {
+ if (status & IM_TX_INT) {
+ /* do this before RX as it will free memory quickly */
DBG(3, "%s: TX int\n", dev->name);
smc_tx(dev);
SMC_ACK_INT(IM_TX_INT);
-#if THROTTLE_TX_PKTS
- netif_wake_queue(dev);
-#endif
+ if (THROTTLE_TX_PKTS)
+ netif_wake_queue(dev);
+ } else if (status & IM_RCV_INT) {
+ DBG(3, "%s: RX irq\n", dev->name);
+ smc_rcv(dev);
} else if (status & IM_ALLOC_INT) {
DBG(3, "%s: Allocation irq\n", dev->name);
- smc_hardware_send_packet(dev);
- mask |= (IM_TX_INT | IM_TX_EMPTY_INT);
+ tasklet_hi_schedule(&lp->tx_task);
mask &= ~IM_ALLOC_INT;
-#if ! THROTTLE_TX_PKTS
- netif_wake_queue(dev);
-#endif
} else if (status & IM_TX_EMPTY_INT) {
DBG(3, "%s: TX empty\n", dev->name);
mask &= ~IM_TX_EMPTY_INT;
/* multiple collisions */
lp->stats.collisions += card_stats & 0xF;
} else if (status & IM_RX_OVRN_INT) {
- DBG(1, "%s: RX overrun\n", dev->name);
+ DBG(1, "%s: RX overrun (EPH_ST 0x%04x)\n", dev->name,
+ ({ int eph_st; SMC_SELECT_BANK(0);
+ eph_st = SMC_GET_EPH_STATUS();
+ SMC_SELECT_BANK(2); eph_st; }) );
SMC_ACK_INT(IM_RX_OVRN_INT);
lp->stats.rx_errors++;
lp->stats.rx_fifo_errors++;
SMC_ACK_INT(IM_ERCV_INT);
PRINTK("%s: UNSUPPORTED: ERCV INTERRUPT \n", dev->name);
}
-
} while (--timeout);
/* restore register states */
- SMC_SET_INT_MASK(mask);
SMC_SET_PTR(saved_pointer);
- SMC_SELECT_BANK(saved_bank);
+ SMC_SET_INT_MASK(mask);
+ spin_unlock(&lp->lock);
- DBG(3, "%s: Interrupt done (%d loops)\n", dev->name, 8-timeout);
+ if (timeout == MAX_IRQ_LOOPS)
+ PRINTK("%s: spurious interrupt (mask = 0x%02x)\n",
+ dev->name, mask);
+ DBG(3, "%s: Interrupt done (%d loops)\n",
+ dev->name, MAX_IRQ_LOOPS - timeout);
- spin_unlock(&lp->lock);
/*
* We return IRQ_HANDLED unconditionally here even if there was
* nothing to do. There is a possibility that a packet might
return IRQ_HANDLED;
}
+#ifdef CONFIG_NET_POLL_CONTROLLER
+/*
+ * Polling receive - used by netconsole and other diagnostic tools
+ * to allow network i/o with interrupts disabled.
+ *
+ * Temporarily masks the device IRQ line and invokes the interrupt
+ * handler synchronously so pending RX/TX events are still serviced.
+ */
+static void smc_poll_controller(struct net_device *dev)
+{
+	disable_irq(dev->irq);
+	smc_interrupt(dev->irq, dev, NULL);
+	enable_irq(dev->irq);
+}
+#endif
+
/* Our watchdog timed out. Called by the networking layer */
static void smc_timeout(struct net_device *dev)
{
struct smc_local *lp = netdev_priv(dev);
- unsigned long flags;
+ void __iomem *ioaddr = lp->base;
+ int status, mask, eph_st, meminfo, fifo;
- spin_lock_irqsave(&lp->lock, flags);
DBG(2, "%s: %s\n", dev->name, __FUNCTION__);
+ spin_lock_irq(&lp->lock);
+ status = SMC_GET_INT();
+ mask = SMC_GET_INT_MASK();
+ fifo = SMC_GET_FIFO();
+ SMC_SELECT_BANK(0);
+ eph_st = SMC_GET_EPH_STATUS();
+ meminfo = SMC_GET_MIR();
+ SMC_SELECT_BANK(2);
+ spin_unlock_irq(&lp->lock);
+ PRINTK( "%s: TX timeout (INT 0x%02x INTMASK 0x%02x "
+ "MEM 0x%04x FIFO 0x%04x EPH_ST 0x%04x)\n",
+ dev->name, status, mask, meminfo, fifo, eph_st );
+
smc_reset(dev);
smc_enable(dev);
-#if 0
/*
* Reconfiguring the PHY doesn't seem like a bad idea here, but
- * it introduced a problem. Now that this is a timeout routine,
- * we are getting called from within an interrupt context.
- * smc_phy_configure() calls msleep() which calls
- * schedule_timeout() which calls schedule(). When schedule()
- * is called from an interrupt context, it prints out
- * "Scheduling in interrupt" and then calls BUG(). This is
- * obviously not desirable. This was worked around by removing
- * the call to smc_phy_configure() here because it didn't seem
- * absolutely necessary. Ultimately, if msleep() is
- * supposed to be usable from an interrupt context (which it
- * looks like it thinks it should handle), it should be fixed.
+ * smc_phy_configure() calls msleep() which calls schedule_timeout()
+ * which calls schedule(). Hence we use a work queue.
*/
- if (lp->phy_type != 0)
- smc_phy_configure(dev);
-#endif
-
- /* clear anything saved */
- if (lp->saved_skb != NULL) {
- dev_kfree_skb (lp->saved_skb);
- lp->saved_skb = NULL;
- lp->stats.tx_errors++;
- lp->stats.tx_aborted_errors++;
+ if (lp->phy_type != 0) {
+ if (schedule_work(&lp->phy_configure)) {
+ lp->work_pending = 1;
+ }
}
+
/* We can accept TX packets again */
dev->trans_start = jiffies;
-
- spin_unlock_irqrestore(&lp->lock, flags);
-
netif_wake_queue(dev);
}
-/*
- * This sets the internal hardware table to filter out unwanted multicast
- * packets before they take up memory.
- *
- * The SMC chip uses a hash table where the high 6 bits of the CRC of
- * address are the offset into the table. If that bit is 1, then the
- * multicast packet is accepted. Otherwise, it's dropped silently.
- *
- * To use the 6 bits as an offset into the table, the high 3 bits are the
- * number of the 8 bit register, while the low 3 bits are the bit within
- * that register.
- *
- * This routine is based very heavily on the one provided by Peter Cammaert.
- */
-static void
-smc_setmulticast(unsigned long ioaddr, int count, struct dev_mc_list *addrs)
-{
- int i;
- unsigned char multicast_table[8];
- struct dev_mc_list *cur_addr;
-
- /* table for flipping the order of 3 bits */
- static unsigned char invert3[] = { 0, 4, 2, 6, 1, 5, 3, 7 };
-
- /* start with a table of all zeros: reject all */
- memset(multicast_table, 0, sizeof(multicast_table));
-
- cur_addr = addrs;
- for (i = 0; i < count; i++, cur_addr = cur_addr->next) {
- int position;
-
- /* do we have a pointer here? */
- if (!cur_addr)
- break;
- /* make sure this is a multicast address - shouldn't this
- be a given if we have it here ? */
- if (!(*cur_addr->dmi_addr & 1))
- continue;
-
- /* only use the low order bits */
- position = crc32_le(~0, cur_addr->dmi_addr, 6) & 0x3f;
-
- /* do some messy swapping to put the bit in the right spot */
- multicast_table[invert3[position&7]] |=
- (1<<invert3[(position>>3)&7]);
-
- }
- /* now, the table can be loaded into the chipset */
- SMC_SELECT_BANK(3);
- SMC_SET_MCAST(multicast_table);
-}
-
/*
* This routine will, depending on the values passed to it,
* either make it accept multicast packets, go into
static void smc_set_multicast_list(struct net_device *dev)
{
struct smc_local *lp = netdev_priv(dev);
- unsigned long ioaddr = dev->base_addr;
+ void __iomem *ioaddr = lp->base;
+ unsigned char multicast_table[8];
+ int update_multicast = 0;
DBG(2, "%s: %s\n", dev->name, __FUNCTION__);
- SMC_SELECT_BANK(0);
if (dev->flags & IFF_PROMISC) {
DBG(2, "%s: RCR_PRMS\n", dev->name);
lp->rcr_cur_mode |= RCR_PRMS;
- SMC_SET_RCR(lp->rcr_cur_mode);
}
/* BUG? I never disable promiscuous mode if multicasting was turned on.
* checked before the table is
*/
else if (dev->flags & IFF_ALLMULTI || dev->mc_count > 16) {
- lp->rcr_cur_mode |= RCR_ALMUL;
- SMC_SET_RCR(lp->rcr_cur_mode);
DBG(2, "%s: RCR_ALMUL\n", dev->name);
+ lp->rcr_cur_mode |= RCR_ALMUL;
}
/*
- * We just get all multicast packets even if we only want them
- * from one source. This will be changed at some future point.
+ * This sets the internal hardware table to filter out unwanted
+ * multicast packets before they take up memory.
+ *
+ * The SMC chip uses a hash table where the high 6 bits of the CRC of
+ * address are the offset into the table. If that bit is 1, then the
+ * multicast packet is accepted. Otherwise, it's dropped silently.
+ *
+ * To use the 6 bits as an offset into the table, the high 3 bits are
+ * the number of the 8 bit register, while the low 3 bits are the bit
+ * within that register.
*/
else if (dev->mc_count) {
- /* support hardware multicasting */
+ int i;
+ struct dev_mc_list *cur_addr;
+
+ /* table for flipping the order of 3 bits */
+ static const unsigned char invert3[] = {0, 4, 2, 6, 1, 5, 3, 7};
+
+ /* start with a table of all zeros: reject all */
+ memset(multicast_table, 0, sizeof(multicast_table));
+
+ cur_addr = dev->mc_list;
+ for (i = 0; i < dev->mc_count; i++, cur_addr = cur_addr->next) {
+ int position;
+
+ /* do we have a pointer here? */
+ if (!cur_addr)
+ break;
+ /* make sure this is a multicast address -
+ shouldn't this be a given if we have it here ? */
+ if (!(*cur_addr->dmi_addr & 1))
+ continue;
+
+ /* only use the low order bits */
+ position = crc32_le(~0, cur_addr->dmi_addr, 6) & 0x3f;
+
+ /* do some messy swapping to put the bit in the right spot */
+ multicast_table[invert3[position&7]] |=
+ (1<<invert3[(position>>3)&7]);
+ }
/* be sure I get rid of flags I might have set */
lp->rcr_cur_mode &= ~(RCR_PRMS | RCR_ALMUL);
- SMC_SET_RCR(lp->rcr_cur_mode);
- /*
- * NOTE: this has to set the bank, so make sure it is the
- * last thing called. The bank is set to zero at the top
- */
- smc_setmulticast(ioaddr, dev->mc_count, dev->mc_list);
+
+ /* now, the table can be loaded into the chipset */
+ update_multicast = 1;
} else {
DBG(2, "%s: ~(RCR_PRMS|RCR_ALMUL)\n", dev->name);
lp->rcr_cur_mode &= ~(RCR_PRMS | RCR_ALMUL);
- SMC_SET_RCR(lp->rcr_cur_mode);
/*
* since I'm disabling all multicast entirely, I need to
* clear the multicast list
*/
+ memset(multicast_table, 0, sizeof(multicast_table));
+ update_multicast = 1;
+ }
+
+ spin_lock_irq(&lp->lock);
+ SMC_SELECT_BANK(0);
+ SMC_SET_RCR(lp->rcr_cur_mode);
+ if (update_multicast) {
SMC_SELECT_BANK(3);
- SMC_CLEAR_MCAST();
+ SMC_SET_MCAST(multicast_table);
}
+ SMC_SELECT_BANK(2);
+ spin_unlock_irq(&lp->lock);
}
smc_open(struct net_device *dev)
{
struct smc_local *lp = netdev_priv(dev);
- unsigned long ioaddr = dev->base_addr;
DBG(2, "%s: %s\n", dev->name, __FUNCTION__);
* address using ifconfig eth0 hw ether xx:xx:xx:xx:xx:xx
*/
if (!is_valid_ether_addr(dev->dev_addr)) {
- DBG(2, "smc_open: no valid ethernet hw addr\n");
+ PRINTK("%s: no valid ethernet hw addr\n", __FUNCTION__);
return -EINVAL;
}
- /* clear out all the junk that was put here before... */
- lp->saved_skb = NULL;
-
/* Setup the default Register Modes */
lp->tcr_cur_mode = TCR_DEFAULT;
lp->rcr_cur_mode = RCR_DEFAULT;
smc_reset(dev);
smc_enable(dev);
- SMC_SELECT_BANK(1);
- SMC_SET_MAC_ADDR(dev->dev_addr);
-
- /* Configure the PHY */
+ /* Configure the PHY, initialize the link state */
if (lp->phy_type != 0)
smc_phy_configure(dev);
else {
spin_unlock_irq(&lp->lock);
}
- /*
- * make sure to initialize the link state with netif_carrier_off()
- * somewhere, too --jgarzik
- *
- * smc_phy_configure() and smc_10bt_check_media() does that. --rmk
- */
netif_start_queue(dev);
return 0;
}
netif_carrier_off(dev);
/* clear everything */
- smc_shutdown(dev->base_addr);
-
- if (lp->phy_type != 0)
- smc_phy_powerdown(dev, lp->mii.phy_id);
-
+ smc_shutdown(dev);
+ tasklet_kill(&lp->tx_task);
+ smc_phy_powerdown(dev);
return 0;
}
* I just deleted auto_irq.c, since it was never built...
* --jgarzik
*/
-static int __init smc_findirq(unsigned long ioaddr)
+static int __init smc_findirq(void __iomem *ioaddr)
{
int timeout = 20;
unsigned long cookie;
* o actually GRAB the irq.
* o GRAB the region
*/
-static int __init smc_probe(struct net_device *dev, unsigned long ioaddr)
+static int __init smc_probe(struct net_device *dev, void __iomem *ioaddr)
{
struct smc_local *lp = netdev_priv(dev);
static int version_printed = 0;
if ((val & 0xFF) == 0x33) {
printk(KERN_WARNING
"%s: Detected possible byte-swapped interface"
- " at IOADDR 0x%lx\n", CARDNAME, ioaddr);
+ " at IOADDR %p\n", CARDNAME, ioaddr);
}
retval = -ENODEV;
goto err_out;
SMC_SELECT_BANK(1);
val = SMC_GET_BASE();
val = ((val & 0x1F00) >> 3) << SMC_IO_SHIFT;
- if ((ioaddr & ((PAGE_SIZE-1)<<SMC_IO_SHIFT)) != val) {
- printk("%s: IOADDR %lx doesn't match configuration (%x).\n",
+ if (((unsigned int)ioaddr & (0x3e0 << SMC_IO_SHIFT)) != val) {
+ printk("%s: IOADDR %p doesn't match configuration (%x).\n",
CARDNAME, ioaddr, val);
}
version_string = chip_ids[ (revision_register >> 4) & 0xF];
if (!version_string || (revision_register & 0xff00) != 0x3300) {
/* I don't recognize this chip, so... */
- printk("%s: IO 0x%lx: Unrecognized revision register 0x%04x"
+ printk("%s: IO %p: Unrecognized revision register 0x%04x"
", Contact author.\n", CARDNAME,
ioaddr, revision_register);
printk("%s", version);
/* fill in some of the fields */
- dev->base_addr = ioaddr;
+ dev->base_addr = (unsigned long)ioaddr;
+ lp->base = ioaddr;
lp->version = revision_register & 0xff;
+ spin_lock_init(&lp->lock);
/* Get the MAC address */
SMC_SELECT_BANK(1);
dev->get_stats = smc_query_statistics;
dev->set_multicast_list = smc_set_multicast_list;
dev->ethtool_ops = &smc_ethtool_ops;
+#ifdef CONFIG_NET_POLL_CONTROLLER
+ dev->poll_controller = smc_poll_controller;
+#endif
- spin_lock_init(&lp->lock);
+ tasklet_init(&lp->tx_task, smc_hardware_send_pkt, (unsigned long)dev);
+ INIT_WORK(&lp->phy_configure, smc_phy_configure, dev);
lp->mii.phy_id_mask = 0x1f;
lp->mii.reg_num_mask = 0x1f;
lp->mii.force_media = 0;
* Locate the phy, if any.
*/
if (lp->version >= (CHIP_91100 << 4))
- smc_detect_phy(dev);
+ smc_phy_detect(dev);
+
+ /* then shut everything down to save power */
+ smc_shutdown(dev);
+ smc_phy_powerdown(dev);
/* Set default parameters */
lp->msg_enable = NETIF_MSG_LINK;
}
/* Grab the IRQ */
- retval = request_irq(dev->irq, &smc_interrupt, 0, dev->name, dev);
+ retval = request_irq(dev->irq, &smc_interrupt, SMC_IRQ_FLAGS, dev->name, dev);
if (retval)
goto err_out;
-#if !defined(__m32r__)
- set_irq_type(dev->irq, IRQT_RISING);
-#endif
#ifdef SMC_USE_PXA_DMA
{
int dma = pxa_request_dma(dev->name, DMA_PRIO_LOW,
retval = register_netdev(dev);
if (retval == 0) {
/* now, print out the card info, in a short format.. */
- printk("%s: %s (rev %d) at %#lx IRQ %d",
+ printk("%s: %s (rev %d) at %p IRQ %d",
dev->name, version_string, revision_register & 0x0f,
- dev->base_addr, dev->irq);
+ lp->base, dev->irq);
if (dev->dma != (unsigned char)-1)
printk(" DMA %d", dev->dma);
return retval;
}
-static int smc_enable_device(unsigned long attrib_phys)
+static int smc_enable_device(struct platform_device *pdev)
{
unsigned long flags;
unsigned char ecor, ecsr;
- void *addr;
+ void __iomem *addr;
+ struct resource * res;
+
+ res = platform_get_resource_byname(pdev, IORESOURCE_MEM, "smc91x-attrib");
+ if (!res)
+ return 0;
/*
* Map the attribute space. This is overkill, but clean.
*/
- addr = ioremap(attrib_phys, ATTRIB_SIZE);
+ addr = ioremap(res->start, ATTRIB_SIZE);
if (!addr)
return -ENOMEM;
* Set the appropriate byte/word mode.
*/
ecsr = readb(addr + (ECSR << SMC_IO_SHIFT)) & ~ECSR_IOIS8;
-#ifndef SMC_CAN_USE_16BIT
- ecsr |= ECSR_IOIS8;
-#endif
+ if (!SMC_CAN_USE_16BIT)
+ ecsr |= ECSR_IOIS8;
writeb(ecsr, addr + (ECSR << SMC_IO_SHIFT));
local_irq_restore(flags);
return 0;
}
+static int smc_request_attrib(struct platform_device *pdev)
+{
+ struct resource * res = platform_get_resource_byname(pdev, IORESOURCE_MEM, "smc91x-attrib");
+
+ if (!res)
+ return 0;
+
+ if (!request_mem_region(res->start, ATTRIB_SIZE, CARDNAME))
+ return -EBUSY;
+
+ return 0;
+}
+
+static void smc_release_attrib(struct platform_device *pdev)
+{
+ struct resource * res = platform_get_resource_byname(pdev, IORESOURCE_MEM, "smc91x-attrib");
+
+ if (res)
+ release_mem_region(res->start, ATTRIB_SIZE);
+}
+
+static inline void smc_request_datacs(struct platform_device *pdev, struct net_device *ndev)
+{
+ if (SMC_CAN_USE_DATACS) {
+ struct resource * res = platform_get_resource_byname(pdev, IORESOURCE_MEM, "smc91x-data32");
+ struct smc_local *lp = netdev_priv(ndev);
+
+ if (!res)
+ return;
+
+ if(!request_mem_region(res->start, SMC_DATA_EXTENT, CARDNAME)) {
+ printk(KERN_INFO "%s: failed to request datacs memory region.\n", CARDNAME);
+ return;
+ }
+
+ lp->datacs = ioremap(res->start, SMC_DATA_EXTENT);
+ }
+}
+
+static void smc_release_datacs(struct platform_device *pdev, struct net_device *ndev)
+{
+ if (SMC_CAN_USE_DATACS) {
+ struct smc_local *lp = netdev_priv(ndev);
+ struct resource * res = platform_get_resource_byname(pdev, IORESOURCE_MEM, "smc91x-data32");
+
+ if (lp->datacs)
+ iounmap(lp->datacs);
+
+ lp->datacs = NULL;
+
+ if (res)
+ release_mem_region(res->start, SMC_DATA_EXTENT);
+ }
+}
+
/*
* smc_init(void)
* Input parameters:
* 0 --> there is a device
* anything else, error
*/
-static int smc_drv_probe(struct device *dev)
+static int smc_drv_probe(struct platform_device *pdev)
{
- struct platform_device *pdev = to_platform_device(dev);
struct net_device *ndev;
- struct resource *res, *ext = NULL;
- unsigned int *addr;
+ struct resource *res;
+ unsigned int __iomem *addr;
int ret;
- res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
+ res = platform_get_resource_byname(pdev, IORESOURCE_MEM, "smc91x-regs");
+ if (!res)
+ res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
if (!res) {
ret = -ENODEV;
goto out;
}
- /*
- * Request the regions.
- */
- if (!request_mem_region(res->start, SMC_IO_EXTENT, "smc91x")) {
+
+ if (!request_mem_region(res->start, SMC_IO_EXTENT, CARDNAME)) {
ret = -EBUSY;
goto out;
}
if (!ndev) {
printk("%s: could not allocate device.\n", CARDNAME);
ret = -ENOMEM;
- goto release_1;
+ goto out_release_io;
}
SET_MODULE_OWNER(ndev);
- SET_NETDEV_DEV(ndev, dev);
+ SET_NETDEV_DEV(ndev, &pdev->dev);
ndev->dma = (unsigned char)-1;
ndev->irq = platform_get_irq(pdev, 0);
+ if (ndev->irq < 0) {
+ ret = -ENODEV;
+ goto out_free_netdev;
+ }
- ext = platform_get_resource(pdev, IORESOURCE_MEM, 1);
- if (ext) {
- if (!request_mem_region(ext->start, ATTRIB_SIZE, ndev->name)) {
- ret = -EBUSY;
- goto release_1;
- }
-
+ ret = smc_request_attrib(pdev);
+ if (ret)
+ goto out_free_netdev;
#if defined(CONFIG_SA1100_ASSABET)
- NCR_0 |= NCR_ENET_OSC_EN;
+ NCR_0 |= NCR_ENET_OSC_EN;
#endif
-
- ret = smc_enable_device(ext->start);
- if (ret)
- goto release_both;
- }
+ ret = smc_enable_device(pdev);
+ if (ret)
+ goto out_release_attrib;
addr = ioremap(res->start, SMC_IO_EXTENT);
if (!addr) {
ret = -ENOMEM;
- goto release_both;
+ goto out_release_attrib;
}
- dev_set_drvdata(dev, ndev);
- ret = smc_probe(ndev, (unsigned long)addr);
- if (ret != 0) {
- dev_set_drvdata(dev, NULL);
- iounmap(addr);
- release_both:
- if (ext)
- release_mem_region(ext->start, ATTRIB_SIZE);
- free_netdev(ndev);
- release_1:
- release_mem_region(res->start, SMC_IO_EXTENT);
- out:
- printk("%s: not found (%d).\n", CARDNAME, ret);
- }
+ platform_set_drvdata(pdev, ndev);
+ ret = smc_probe(ndev, addr);
+ if (ret != 0)
+ goto out_iounmap;
#ifdef SMC_USE_PXA_DMA
else {
struct smc_local *lp = netdev_priv(ndev);
}
#endif
+ smc_request_datacs(pdev, ndev);
+
+ return 0;
+
+ out_iounmap:
+ platform_set_drvdata(pdev, NULL);
+ iounmap(addr);
+ out_release_attrib:
+ smc_release_attrib(pdev);
+ out_free_netdev:
+ free_netdev(ndev);
+ out_release_io:
+ release_mem_region(res->start, SMC_IO_EXTENT);
+ out:
+ printk("%s: not found (%d).\n", CARDNAME, ret);
+
return ret;
}
-static int smc_drv_remove(struct device *dev)
+static int smc_drv_remove(struct platform_device *pdev)
{
- struct platform_device *pdev = to_platform_device(dev);
- struct net_device *ndev = dev_get_drvdata(dev);
+ struct net_device *ndev = platform_get_drvdata(pdev);
+ struct smc_local *lp = netdev_priv(ndev);
struct resource *res;
- dev_set_drvdata(dev, NULL);
+ platform_set_drvdata(pdev, NULL);
unregister_netdev(ndev);
if (ndev->dma != (unsigned char)-1)
pxa_free_dma(ndev->dma);
#endif
- iounmap((void *)ndev->base_addr);
- res = platform_get_resource(pdev, IORESOURCE_MEM, 1);
- if (res)
- release_mem_region(res->start, ATTRIB_SIZE);
- res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
+ iounmap(lp->base);
+
+ smc_release_datacs(pdev,ndev);
+ smc_release_attrib(pdev);
+
+ res = platform_get_resource_byname(pdev, IORESOURCE_MEM, "smc91x-regs");
+ if (!res)
+ res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
release_mem_region(res->start, SMC_IO_EXTENT);
free_netdev(ndev);
return 0;
}
-static int smc_drv_suspend(struct device *dev, u32 state, u32 level)
+static int smc_drv_suspend(struct platform_device *dev, pm_message_t state)
{
- struct net_device *ndev = dev_get_drvdata(dev);
+ struct net_device *ndev = platform_get_drvdata(dev);
- if (ndev && level == SUSPEND_DISABLE) {
+ if (ndev) {
if (netif_running(ndev)) {
netif_device_detach(ndev);
- smc_shutdown(ndev->base_addr);
+ smc_shutdown(ndev);
+ smc_phy_powerdown(ndev);
}
}
return 0;
}
-static int smc_drv_resume(struct device *dev, u32 level)
+static int smc_drv_resume(struct platform_device *dev)
{
- struct platform_device *pdev = to_platform_device(dev);
- struct net_device *ndev = dev_get_drvdata(dev);
+ struct net_device *ndev = platform_get_drvdata(dev);
- if (ndev && level == RESUME_ENABLE) {
+ if (ndev) {
struct smc_local *lp = netdev_priv(ndev);
- unsigned long ioaddr = ndev->base_addr;
-
- if (pdev->num_resources == 3)
- smc_enable_device(pdev->resource[2].start);
+ smc_enable_device(dev);
if (netif_running(ndev)) {
smc_reset(ndev);
smc_enable(ndev);
- SMC_SELECT_BANK(1);
- SMC_SET_MAC_ADDR(ndev->dev_addr);
if (lp->phy_type != 0)
smc_phy_configure(ndev);
netif_device_attach(ndev);
return 0;
}
-static struct device_driver smc_driver = {
- .name = CARDNAME,
- .bus = &platform_bus_type,
+static struct platform_driver smc_driver = {
.probe = smc_drv_probe,
.remove = smc_drv_remove,
.suspend = smc_drv_suspend,
.resume = smc_drv_resume,
+ .driver = {
+ .name = CARDNAME,
+ },
};
static int __init smc_init(void)
#endif
#endif
- return driver_register(&smc_driver);
+ return platform_driver_register(&smc_driver);
}
static void __exit smc_cleanup(void)
{
- driver_unregister(&smc_driver);
+ platform_driver_unregister(&smc_driver);
}
module_init(smc_init);