*
* Support for Apple GMAC and assorted PHYs by
 * Benjamin Herrenschmidt (benh@kernel.crashing.org)
+ *
+ * NAPI and NETPOLL support
+ * (C) 2004 by Eric Lemoine (eric.lemoine@gmail.com)
*
* TODO:
* - Get rid of all those nasty mdelay's and replace them
__phy_write(gp, gp->mii_phy_addr, reg, val);
}
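+/* Under NAPI, TX completions are reaped from gem_poll(), so the
+ * TXDONE interrupt is left masked even when interrupts are enabled.
+ */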
+static inline void gem_enable_ints(struct gem *gp)
+{
+ /* Enable all interrupts but TXDONE */
+ writel(GREG_STAT_TXDONE, gp->regs + GREG_IMASK);
+}
+
+static inline void gem_disable_ints(struct gem *gp)
+{
+ /* Disable all interrupts, including TXDONE */
+ writel(GREG_STAT_NAPI | GREG_STAT_TXDONE, gp->regs + GREG_IMASK);
+}
+
static void gem_handle_mif_event(struct gem *gp, u32 reg_val, u32 changed_bits)
{
if (netif_msg_intr(gp))
}
}
-static void gem_rx(struct gem *gp)
+static int gem_rx(struct gem *gp, int work_to_do)
{
- int entry, drops;
+ int entry, drops, work_done = 0;
u32 done;
- if (netif_msg_intr(gp))
+ if (netif_msg_rx_status(gp))
printk(KERN_DEBUG "%s: rx interrupt, done: %d, rx_new: %d\n",
gp->dev->name, readl(gp->regs + RXDMA_DONE), gp->rx_new);
if ((status & RXDCTRL_OWN) != 0)
break;
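+		/* Honor the NAPI budget and never scan more than one
+		 * full ring per poll.
+		 */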
+ if (work_done >= RX_RING_SIZE || work_done >= work_to_do)
+ break;
+
/* When writing back RX descriptor, GEM writes status
		 * then buffer address, possibly in separate transactions.
* If we don't wait for the chip to write both, we could
break;
}
+ /* We can now account for the work we're about to do */
+ work_done++;
+
skb = gp->rx_skbs[entry];
len = (status & RXDCTRL_BUFSZ) >> 16;
skb->csum = ntohs((status & RXDCTRL_TCPCSUM) ^ 0xffff);
skb->ip_summed = CHECKSUM_HW;
skb->protocol = eth_type_trans(skb, gp->dev);
- netif_rx(skb);
+
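+		/* NAPI context: hand the packet straight to the stack
+		 * instead of queueing it via netif_rx()
+		 */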
+ netif_receive_skb(skb);
gp->net_stats.rx_packets++;
gp->net_stats.rx_bytes += len;
if (drops)
printk(KERN_INFO "%s: Memory squeeze, deferring packet.\n",
gp->dev->name);
+
+ return work_done;
+}
+
+static int gem_poll(struct net_device *dev, int *budget)
+{
+ struct gem *gp = dev->priv;
+ unsigned long flags;
+
+ spin_lock_irqsave(&gp->lock, flags);
+
+ do {
+ int work_to_do, work_done;
+
+ /* Handle anomalies */
+ if (gp->status & GREG_STAT_ABNORMAL) {
+ if (gem_abnormal_irq(dev, gp, gp->status))
+ break;
+ }
+
+ /* Run TX completion thread */
+ spin_lock(&gp->tx_lock);
+ gem_tx(dev, gp, gp->status);
+ spin_unlock(&gp->tx_lock);
+
+ spin_unlock_irqrestore(&gp->lock, flags);
+
+		/* Run the RX thread. No lock is taken here; code that
+		 * wants to do dangerous things - like cleaning the rx
+		 * ring - must call netif_poll_disable() first, which
+		 * schedule_timeout()'s until any poll in progress has
+		 * finished.
+		 */
+ work_to_do = min(*budget, dev->quota);
+
+ work_done = gem_rx(gp, work_to_do);
+
+ *budget -= work_done;
+ dev->quota -= work_done;
+
+ if (work_done >= work_to_do)
+ return 1;
+
+ spin_lock_irqsave(&gp->lock, flags);
+
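+		/* More work may have arrived while we were out of the
+		 * lock; re-read the status and loop while any
+		 * NAPI-relevant bits are still set.
+		 */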
+ gp->status = readl(gp->regs + GREG_STAT);
+ } while (gp->status & GREG_STAT_NAPI);
+
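+	/* All caught up: leave polling mode and unmask the chip, both
+	 * under gp->lock so a new interrupt cannot race us.
+	 */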
+ __netif_rx_complete(dev);
+ gem_enable_ints(gp);
+
+ spin_unlock_irqrestore(&gp->lock, flags);
+ return 0;
}
static irqreturn_t gem_interrupt(int irq, void *dev_id, struct pt_regs *regs)
{
struct net_device *dev = dev_id;
struct gem *gp = dev->priv;
- u32 gem_status = readl(gp->regs + GREG_STAT);
+ unsigned long flags;
/* Swallow interrupts when shutting the chip down */
- if (gp->hw_running == 0)
- goto out;
+ if (!gp->hw_running)
+ return IRQ_HANDLED;
- spin_lock(&gp->lock);
+ spin_lock_irqsave(&gp->lock, flags);
+
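+	/* If a poll is already scheduled, chip interrupts are masked
+	 * and the pending status will be picked up by gem_poll().
+	 */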
+ if (netif_rx_schedule_prep(dev)) {
+ u32 gem_status = readl(gp->regs + GREG_STAT);
- if (gem_status & GREG_STAT_ABNORMAL) {
- if (gem_abnormal_irq(dev, gp, gem_status))
- goto out;
+		if (gem_status == 0) {
+			/* Undo netif_rx_schedule_prep() before bailing
+			 * out, or polling stays blocked forever.
+			 */
+			netif_poll_enable(dev);
+			spin_unlock_irqrestore(&gp->lock, flags);
+			return IRQ_NONE;
+		}
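+		/* Stash the status for gem_poll(), mask the chip and
+		 * hand the rest of the work to the softirq.
+		 */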
+ gp->status = gem_status;
+ gem_disable_ints(gp);
+ __netif_rx_schedule(dev);
}
- if (gem_status & (GREG_STAT_TXALL | GREG_STAT_TXINTME))
- gem_tx(dev, gp, gem_status);
- if (gem_status & GREG_STAT_RXDONE)
- gem_rx(gp);
-
-out:
- spin_unlock(&gp->lock);
+ spin_unlock_irqrestore(&gp->lock, flags);
+
+	/* If polling was disabled at the time we received that
+	 * interrupt, we may return IRQ_HANDLED when we should have
+	 * returned IRQ_NONE. No big deal...
+	 */
return IRQ_HANDLED;
}
+#ifdef CONFIG_NET_POLL_CONTROLLER
+static void gem_poll_controller(struct net_device *dev)
+{
+	/* gem_interrupt() is safe to be called reentrantly, so there
+	 * is no need for disable_irq() here.
+	 */
+ gem_interrupt(dev->irq, dev, NULL);
+}
+#endif
+
static void gem_tx_timeout(struct net_device *dev)
{
struct gem *gp = dev->priv;
readl(gp->regs + MAC_RXCFG));
spin_lock_irq(&gp->lock);
+ spin_lock(&gp->tx_lock);
gp->reset_task_pending = 2;
schedule_work(&gp->reset_task);
+ spin_unlock(&gp->tx_lock);
spin_unlock_irq(&gp->lock);
}
struct gem *gp = dev->priv;
int entry;
u64 ctrl;
+ unsigned long flags;
ctrl = 0;
if (skb->ip_summed == CHECKSUM_HW) {
(csum_stuff_off << 21));
}
- spin_lock_irq(&gp->lock);
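+	/* We advertise NETIF_F_LLTX, so the core does not take
+	 * dev->xmit_lock around this function; serialize on our own
+	 * tx_lock and report NETDEV_TX_LOCKED on contention so the
+	 * stack requeues the skb.
+	 */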
+ local_irq_save(flags);
+ if (!spin_trylock(&gp->tx_lock)) {
+ /* Tell upper layer to requeue */
+ local_irq_restore(flags);
+ return NETDEV_TX_LOCKED;
+ }
/* This is a hard error, log it. */
if (TX_BUFFS_AVAIL(gp) <= (skb_shinfo(skb)->nr_frags + 1)) {
netif_stop_queue(dev);
- spin_unlock_irq(&gp->lock);
+ spin_unlock_irqrestore(&gp->tx_lock, flags);
printk(KERN_ERR PFX "%s: BUG! Tx Ring full when queue awake!\n",
dev->name);
- return 1;
+ return NETDEV_TX_BUSY;
}
entry = gp->tx_new;
dev->name, entry, skb->len);
mb();
writel(gp->tx_new, gp->regs + TXDMA_KICK);
- spin_unlock_irq(&gp->lock);
+ spin_unlock_irqrestore(&gp->tx_lock, flags);
dev->trans_start = jiffies;
- return 0;
+ return NETDEV_TX_OK;
}
/* Jumbo-grams don't seem to work :-( */
}
spin_lock_irq(&gp->lock);
+ spin_lock(&gp->tx_lock);
dev->mtu = new_mtu;
gp->reset_task_pending = 1;
schedule_work(&gp->reset_task);
+ spin_unlock(&gp->tx_lock);
spin_unlock_irq(&gp->lock);
flush_scheduled_work();
#define STOP_TRIES 32
-/* Must be invoked under gp->lock. */
+/* Must be invoked under gp->lock and gp->tx_lock. */
static void gem_stop(struct gem *gp)
{
int limit;
printk(KERN_ERR "%s: SW reset is ghetto.\n", gp->dev->name);
}
-/* Must be invoked under gp->lock. */
+/* Must be invoked under gp->lock and gp->tx_lock. */
static void gem_start_dma(struct gem *gp)
{
unsigned long val;
}
-/* Must be invoked under gp->lock. */
+/* Must be invoked under gp->lock and gp->tx_lock. */
// XXX dbl check what that function should do when called on PCS PHY
static void gem_begin_auto_negotiation(struct gem *gp, struct ethtool_cmd *ep)
{
/* A link-up condition has occurred, initialize and enable the
* rest of the chip.
*
- * Must be invoked under gp->lock.
+ * Must be invoked under gp->lock and gp->tx_lock.
*/
static int gem_set_link_modes(struct gem *gp)
{
return 0;
}
-/* Must be invoked under gp->lock. */
+/* Must be invoked under gp->lock and gp->tx_lock. */
static int gem_mdio_link_not_up(struct gem *gp)
{
switch (gp->lstate) {
{
struct gem *gp = (struct gem *) data;
- /* The link went down, we reset the ring, but keep
- * DMA stopped. Todo: Use this function for reset
- * on error as well.
- */
-
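+	/* Keep the NAPI poll handler away while we stop the chip and
+	 * rebuild the rings.
+	 */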
+ netif_poll_disable(gp->dev);
spin_lock_irq(&gp->lock);
+ spin_lock(&gp->tx_lock);
if (gp->hw_running && gp->opened) {
- /* Make sure we don't get interrupts or tx packets */
netif_stop_queue(gp->dev);
- writel(0xffffffff, gp->regs + GREG_IMASK);
-
/* Reset the chip & rings */
gem_stop(gp);
gem_init_rings(gp);
}
gp->reset_task_pending = 0;
+ spin_unlock(&gp->tx_lock);
spin_unlock_irq(&gp->lock);
+ netif_poll_enable(gp->dev);
}
static void gem_link_timer(unsigned long data)
return;
spin_lock_irq(&gp->lock);
+ spin_lock(&gp->tx_lock);
	/* If the reset task is still pending, we just
	 * reschedule the link timer
restart:
mod_timer(&gp->link_timer, jiffies + ((12 * HZ) / 10));
out_unlock:
+ spin_unlock(&gp->tx_lock);
spin_unlock_irq(&gp->lock);
}
-/* Must be invoked under gp->lock. */
+/* Must be invoked under gp->lock and gp->tx_lock. */
static void gem_clean_rings(struct gem *gp)
{
struct gem_init_block *gb = gp->init_block;
}
}
-/* Must be invoked under gp->lock. */
+/* Must be invoked under gp->lock and gp->tx_lock. */
static void gem_init_rings(struct gem *gp)
{
struct gem_init_block *gb = gp->init_block;
wmb();
}
-/* Must be invoked under gp->lock. */
+/* Must be invoked under gp->lock and gp->tx_lock. */
static void gem_init_phy(struct gem *gp)
{
u32 mifcfg;
}
}
-/* Must be invoked under gp->lock. */
+/* Must be invoked under gp->lock and gp->tx_lock. */
static void gem_init_dma(struct gem *gp)
{
u64 desc_dma = (u64) gp->gblock_dvma;
gp->regs + RXDMA_BLANK);
}
-/* Must be invoked under gp->lock. */
+/* Must be invoked under gp->lock and gp->tx_lock. */
static u32
gem_setup_multicast(struct gem *gp)
{
return rxcfg;
}
-/* Must be invoked under gp->lock. */
+/* Must be invoked under gp->lock and gp->tx_lock. */
static void gem_init_mac(struct gem *gp)
{
unsigned char *e = &gp->dev->dev_addr[0];
writel(0xffffffff, gp->regs + MAC_MCMASK);
}
-/* Must be invoked under gp->lock. */
+/* Must be invoked under gp->lock and gp->tx_lock. */
static void gem_init_pause_thresholds(struct gem *gp)
{
u32 cfg;
return 0;
}
-/* Must be invoked under gp->lock. */
+/* Must be invoked under gp->lock and gp->tx_lock. */
static void gem_init_hw(struct gem *gp, int restart_link)
{
/* On Apple's gmac, I initialize the PHY only after
if (!gp->wake_on_lan) {
spin_lock_irqsave(&gp->lock, flags);
+ spin_lock(&gp->tx_lock);
gem_stop(gp);
writel(MAC_TXRST_CMD, gp->regs + MAC_TXRST);
writel(MAC_RXRST_CMD, gp->regs + MAC_RXRST);
+ spin_unlock(&gp->tx_lock);
spin_unlock_irqrestore(&gp->lock, flags);
}
unsigned long flags;
spin_lock_irqsave(&gp->lock, flags);
+ spin_lock(&gp->tx_lock);
gem_stop(gp);
- spin_unlock_irqrestore(&gp->lock, flags);
+ spin_unlock(&gp->tx_lock);
+ spin_unlock_irqrestore(&gp->lock, flags);
}
}
/* Reset the chip */
spin_lock_irq(&gp->lock);
+ spin_lock(&gp->tx_lock);
gem_stop(gp);
+ spin_unlock(&gp->tx_lock);
spin_unlock_irq(&gp->lock);
gp->hw_running = 1;
printk(KERN_ERR "%s: failed to request irq !\n", gp->dev->name);
spin_lock_irq(&gp->lock);
+ spin_lock(&gp->tx_lock);
#ifdef CONFIG_PPC_PMAC
if (!hw_was_up && gp->pdev->vendor == PCI_VENDOR_ID_APPLE)
gem_apple_powerdown(gp);
gp->pm_timer.expires = jiffies + 10*HZ;
add_timer(&gp->pm_timer);
up(&gp->pm_sem);
+ spin_unlock(&gp->tx_lock);
spin_unlock_irq(&gp->lock);
return -EAGAIN;
}
spin_lock_irq(&gp->lock);
+ spin_lock(&gp->tx_lock);
/* Allocate & setup ring buffers */
gem_init_rings(gp);
gp->opened = 1;
+ spin_unlock(&gp->tx_lock);
spin_unlock_irq(&gp->lock);
up(&gp->pm_sem);
/* Make sure we don't get distracted by suspend/resume */
down(&gp->pm_sem);
+ /* Note: we don't need to call netif_poll_disable() here because
+ * our caller (dev_close) already did it for us
+ */
+
/* Stop traffic, mark us closed */
spin_lock_irq(&gp->lock);
+ spin_lock(&gp->tx_lock);
gp->opened = 0;
- writel(0xffffffff, gp->regs + GREG_IMASK);
+
netif_stop_queue(dev);
/* Stop chip */
/* Bye, the pm timer will finish the job */
free_irq(gp->pdev->irq, (void *) dev);
+ spin_unlock(&gp->tx_lock);
spin_unlock_irq(&gp->lock);
/* Fire the PM timer that will shut us down in about 10 seconds */
struct net_device *dev = pci_get_drvdata(pdev);
struct gem *gp = dev->priv;
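+	/* Make sure gem_poll() is not running before powering down */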
+ netif_poll_disable(dev);
+
	/* We hold the PM semaphore during the entire driver
	 * sleep time
*/
/* If the driver is opened, we stop the DMA */
if (gp->opened) {
spin_lock_irq(&gp->lock);
+ spin_lock(&gp->tx_lock);
/* Stop traffic, mark us closed */
netif_device_detach(dev);
- writel(0xffffffff, gp->regs + GREG_IMASK);
-
/* Stop chip */
gem_stop(gp);
/* Get rid of ring buffers */
gem_clean_rings(gp);
+ spin_unlock(&gp->tx_lock);
spin_unlock_irq(&gp->lock);
if (gp->pdev->vendor == PCI_VENDOR_ID_APPLE)
}
#endif /* CONFIG_PPC_PMAC */
spin_lock_irq(&gp->lock);
+ spin_lock(&gp->tx_lock);
gem_stop(gp);
gp->hw_running = 1;
gem_init_rings(gp);
gem_init_hw(gp, 1);
+ spin_unlock(&gp->tx_lock);
spin_unlock_irq(&gp->lock);
netif_device_attach(dev);
}
up(&gp->pm_sem);
+ netif_poll_enable(dev);
+
return 0;
}
#endif /* CONFIG_PM */
struct net_device_stats *stats = &gp->net_stats;
spin_lock_irq(&gp->lock);
+ spin_lock(&gp->tx_lock);
if (gp->hw_running) {
stats->rx_crc_errors += readl(gp->regs + MAC_FCSERR);
writel(0, gp->regs + MAC_LCOLL);
}
+ spin_unlock(&gp->tx_lock);
spin_unlock_irq(&gp->lock);
return &gp->net_stats;
return;
spin_lock_irq(&gp->lock);
+ spin_lock(&gp->tx_lock);
netif_stop_queue(dev);
netif_wake_queue(dev);
+ spin_unlock(&gp->tx_lock);
spin_unlock_irq(&gp->lock);
}
/* Return current PHY settings */
spin_lock_irq(&gp->lock);
+ spin_lock(&gp->tx_lock);
cmd->autoneg = gp->want_autoneg;
cmd->speed = gp->phy_mii.speed;
cmd->duplex = gp->phy_mii.duplex;
*/
if (cmd->advertising == 0)
cmd->advertising = cmd->supported;
+ spin_unlock(&gp->tx_lock);
spin_unlock_irq(&gp->lock);
} else { // XXX PCS ?
cmd->supported =
/* Apply settings and restart link process. */
spin_lock_irq(&gp->lock);
+ spin_lock(&gp->tx_lock);
gem_begin_auto_negotiation(gp, cmd);
+ spin_unlock(&gp->tx_lock);
spin_unlock_irq(&gp->lock);
return 0;
/* Restart link process. */
spin_lock_irq(&gp->lock);
+ spin_lock(&gp->tx_lock);
gem_begin_auto_negotiation(gp, NULL);
+ spin_unlock(&gp->tx_lock);
spin_unlock_irq(&gp->lock);
return 0;
static void get_gem_mac_nonobp(struct pci_dev *pdev, unsigned char *dev_addr)
{
u32 rom_reg_orig;
- void *p;
+ void __iomem *p;
if (pdev->resource[PCI_ROM_RESOURCE].parent == NULL) {
if (pci_assign_resource(pdev, PCI_ROM_RESOURCE) < 0)
gp->msg_enable = DEFAULT_MSG;
spin_lock_init(&gp->lock);
+ spin_lock_init(&gp->tx_lock);
init_MUTEX(&gp->pm_sem);
init_timer(&gp->link_timer);
gp->timer_ticks = 0;
netif_carrier_off(dev);
- gp->regs = (unsigned long) ioremap(gemreg_base, gemreg_len);
+ gp->regs = ioremap(gemreg_base, gemreg_len);
if (gp->regs == 0UL) {
printk(KERN_ERR PFX "Cannot map device registers, "
"aborting.\n");
gem_apple_powerup(gp);
#endif
spin_lock_irq(&gp->lock);
+ spin_lock(&gp->tx_lock);
gem_stop(gp);
+ spin_unlock(&gp->tx_lock);
spin_unlock_irq(&gp->lock);
/* Fill up the mii_phy structure (even if we won't use it) */
dev->get_stats = gem_get_stats;
dev->set_multicast_list = gem_set_multicast;
dev->do_ioctl = gem_ioctl;
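+	/* Hook up NAPI; 64 is the customary per-poll weight for a
+	 * gigabit NIC
+	 */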
+ dev->poll = gem_poll;
+ dev->weight = 64;
dev->ethtool_ops = &gem_ethtool_ops;
dev->tx_timeout = gem_tx_timeout;
dev->watchdog_timeo = 5 * HZ;
dev->change_mtu = gem_change_mtu;
dev->irq = pdev->irq;
dev->dma = 0;
+#ifdef CONFIG_NET_POLL_CONTROLLER
+ dev->poll_controller = gem_poll_controller;
+#endif
if (register_netdev(dev)) {
printk(KERN_ERR PFX "Cannot register net device, "
/* Detect & init PHY, start autoneg */
spin_lock_irq(&gp->lock);
+ spin_lock(&gp->tx_lock);
gp->hw_running = 1;
gem_init_phy(gp);
gem_begin_auto_negotiation(gp, NULL);
+ spin_unlock(&gp->tx_lock);
spin_unlock_irq(&gp->lock);
if (gp->phy_type == phy_mii_mdio0 ||
pci_set_drvdata(pdev, dev);
/* GEM can do it all... */
- dev->features |= NETIF_F_SG | NETIF_F_HW_CSUM;
+ dev->features |= NETIF_F_SG | NETIF_F_HW_CSUM | NETIF_F_LLTX;
if (pci_using_dac)
dev->features |= NETIF_F_HIGHDMA;
gem_shutdown(gp);
up(&gp->pm_sem);
- iounmap((void *) gp->regs);
+ iounmap(gp->regs);
err_out_free_res:
pci_release_regions(pdev);
sizeof(struct gem_init_block),
gp->init_block,
gp->gblock_dvma);
- iounmap((void *) gp->regs);
+ iounmap(gp->regs);
pci_release_regions(pdev);
free_netdev(dev);