diff --git a/drivers/net/sungem.c b/drivers/net/sungem.c
index cb890cd3e..c78811b00 100644
--- a/drivers/net/sungem.c
+++ b/drivers/net/sungem.c
@@ -5,6 +5,9 @@
  *
  * Support for Apple GMAC and assorted PHYs by
  * Benjamin Herrenschmidt (benh@kernel.crashing.org)
+ *
+ * NAPI and NETPOLL support
+ * (C) 2004 by Eric Lemoine (eric.lemoine@gmail.com)
  *
  * TODO:
  *  - Get rid of all those nasty mdelay's and replace them
@@ -33,6 +36,7 @@
 #include <linux/crc32.h>
 #include <linux/random.h>
 #include <linux/workqueue.h>
+#include <linux/if_vlan.h>
 
 #include <asm/system.h>
 #include <asm/io.h>
@@ -180,6 +184,18 @@ static inline void phy_write(struct gem *gp, int reg, u16 val)
 	__phy_write(gp, gp->mii_phy_addr, reg, val);
 }
 
+static inline void gem_enable_ints(struct gem *gp)
+{
+	/* Enable all interrupts but TXDONE */
+	writel(GREG_STAT_TXDONE, gp->regs + GREG_IMASK);
+}
+
+static inline void gem_disable_ints(struct gem *gp)
+{
+	/* Disable all interrupts, including TXDONE */
+	writel(GREG_STAT_NAPI | GREG_STAT_TXDONE, gp->regs + GREG_IMASK);
+}
+
 static void gem_handle_mif_event(struct gem *gp, u32 reg_val, u32 changed_bits)
 {
 	if (netif_msg_intr(gp))
@@ -677,12 +693,12 @@ static __inline__ void gem_post_rxds(struct gem *gp, int limit)
 	}
 }
 
-static void gem_rx(struct gem *gp)
+static int gem_rx(struct gem *gp, int work_to_do)
 {
-	int entry, drops;
+	int entry, drops, work_done = 0;
 	u32 done;
 
-	if (netif_msg_intr(gp))
+	if (netif_msg_rx_status(gp))
 		printk(KERN_DEBUG "%s: rx interrupt, done: %d, rx_new: %d\n",
 			gp->dev->name, readl(gp->regs + RXDMA_DONE), gp->rx_new);
 
@@ -699,6 +715,9 @@ static void gem_rx(struct gem *gp)
 		if ((status & RXDCTRL_OWN) != 0)
 			break;
 
+		if (work_done >= RX_RING_SIZE || work_done >= work_to_do)
+			break;
+
 		/* When writing back RX descriptor, GEM writes status
 		 * then buffer address, possibly in separate transactions.
 		 * If we don't wait for the chip to write both, we could
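For readers coming from the pre-NAPI driver: the work_to_do/work_done accounting that gem_rx() gains above feeds the 2.6 dev->poll budget contract, which gem_poll() (introduced a few hunks below) implements. A minimal sketch of that contract, with hypothetical foo_* names standing in for a driver's own handlers:

	static int foo_poll(struct net_device *dev, int *budget)
	{
		/* Process at most quota packets; both counters are decremented. */
		int work_to_do = min(*budget, dev->quota);
		int work_done = foo_rx(dev->priv, work_to_do);	/* like gem_rx() */

		*budget -= work_done;
		dev->quota -= work_done;

		if (work_done >= work_to_do)
			return 1;	/* budget exhausted: stay on the poll list */

		netif_rx_complete(dev);		/* done: leave the poll list... */
		foo_enable_irqs(dev->priv);	/* ...and unmask the device IRQ */
		return 0;
	}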
@@ -712,6 +731,9 @@ static void gem_rx(struct gem *gp)
 			break;
 		}
 
+		/* We can now account for the work we're about to do */
+		work_done++;
+
 		skb = gp->rx_skbs[entry];
 
 		len = (status & RXDCTRL_BUFSZ) >> 16;
@@ -742,7 +764,7 @@ static void gem_rx(struct gem *gp)
 					       PCI_DMA_FROMDEVICE);
 			gp->rx_skbs[entry] = new_skb;
 			new_skb->dev = gp->dev;
-			skb_put(new_skb, (ETH_FRAME_LEN + RX_OFFSET));
+			skb_put(new_skb, (gp->rx_buf_sz + RX_OFFSET));
 			rxd->buffer = cpu_to_le64(pci_map_page(gp->pdev,
 							       virt_to_page(new_skb->data),
 							       offset_in_page(new_skb->data),
@@ -774,7 +796,8 @@ static void gem_rx(struct gem *gp)
 		skb->csum = ntohs((status & RXDCTRL_TCPCSUM) ^ 0xffff);
 		skb->ip_summed = CHECKSUM_HW;
 		skb->protocol = eth_type_trans(skb, gp->dev);
-		netif_rx(skb);
+
+		netif_receive_skb(skb);
 
 		gp->net_stats.rx_packets++;
 		gp->net_stats.rx_bytes += len;
@@ -791,35 +814,103 @@ static void gem_rx(struct gem *gp)
 	if (drops)
 		printk(KERN_INFO "%s: Memory squeeze, deferring packet.\n",
 		       gp->dev->name);
+
+	return work_done;
+}
+
+static int gem_poll(struct net_device *dev, int *budget)
+{
+	struct gem *gp = dev->priv;
+	unsigned long flags;
+
+	spin_lock_irqsave(&gp->lock, flags);
+
+	do {
+		int work_to_do, work_done;
+
+		/* Handle anomalies */
+		if (gp->status & GREG_STAT_ABNORMAL) {
+			if (gem_abnormal_irq(dev, gp, gp->status))
+				break;
+		}
+
+		/* Run TX completion thread */
+		spin_lock(&gp->tx_lock);
+		gem_tx(dev, gp, gp->status);
+		spin_unlock(&gp->tx_lock);
+
+		spin_unlock_irqrestore(&gp->lock, flags);
+
+		/* Run RX thread. We don't use any locking here; code
+		 * that wants to do bad things - like cleaning the
+		 * rx ring - must call netif_poll_disable(), which
+		 * schedule_timeout()'s if polling is already disabled.
+		 */
+		work_to_do = min(*budget, dev->quota);
+
+		work_done = gem_rx(gp, work_to_do);
+
+		*budget -= work_done;
+		dev->quota -= work_done;
+
+		if (work_done >= work_to_do)
+			return 1;
+
+		spin_lock_irqsave(&gp->lock, flags);
+
+		gp->status = readl(gp->regs + GREG_STAT);
+	} while (gp->status & GREG_STAT_NAPI);
+
+	__netif_rx_complete(dev);
+	gem_enable_ints(gp);
+
+	spin_unlock_irqrestore(&gp->lock, flags);
+	return 0;
+}
 
 static irqreturn_t gem_interrupt(int irq, void *dev_id, struct pt_regs *regs)
 {
 	struct net_device *dev = dev_id;
 	struct gem *gp = dev->priv;
-	u32 gem_status = readl(gp->regs + GREG_STAT);
+	unsigned long flags;
 
 	/* Swallow interrupts when shutting the chip down */
-	if (gp->hw_running == 0)
-		goto out;
+	if (!gp->hw_running)
+		return IRQ_HANDLED;
 
-	spin_lock(&gp->lock);
+	spin_lock_irqsave(&gp->lock, flags);
+
+	if (netif_rx_schedule_prep(dev)) {
+		u32 gem_status = readl(gp->regs + GREG_STAT);
 
-	if (gem_status & GREG_STAT_ABNORMAL) {
-		if (gem_abnormal_irq(dev, gp, gem_status))
-			goto out;
+		if (gem_status == 0) {
+			spin_unlock_irqrestore(&gp->lock, flags);
+			return IRQ_NONE;
+		}
+		gp->status = gem_status;
+		gem_disable_ints(gp);
+		__netif_rx_schedule(dev);
 	}
-	if (gem_status & (GREG_STAT_TXALL | GREG_STAT_TXINTME))
-		gem_tx(dev, gp, gem_status);
-	if (gem_status & GREG_STAT_RXDONE)
-		gem_rx(gp);
-
-out:
-	spin_unlock(&gp->lock);
+
+	spin_unlock_irqrestore(&gp->lock, flags);
+
+	/* If polling was disabled at the time we received that
+	 * interrupt, we may return IRQ_HANDLED here while we
+	 * should return IRQ_NONE. No big deal...
+	 */
 	return IRQ_HANDLED;
 }
 
+#ifdef CONFIG_NET_POLL_CONTROLLER
+static void gem_poll_controller(struct net_device *dev)
+{
+	/* gem_interrupt() is safe to be called reentrantly, so
+	 * there is no need to disable_irq here.
+	 */
+	gem_interrupt(dev->irq, dev, NULL);
+}
+#endif
+
 static void gem_tx_timeout(struct net_device *dev)
 {
 	struct gem *gp = dev->priv;
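A note on the CONFIG_NET_POLL_CONTROLLER hunk above: drivers whose interrupt handler is not reentrant have to bracket the manual call with disable_irq()/enable_irq(); sungem can skip that because gem_interrupt() only samples the status register and schedules NAPI under gp->lock. A sketch of the conservative variant, with hypothetical foo_* names:

	static void foo_poll_controller(struct net_device *dev)
	{
		/* Keep the real IRQ away while we invoke the handler by hand. */
		disable_irq(dev->irq);
		foo_interrupt(dev->irq, dev, NULL);	/* NULL pt_regs, as in 2.6 */
		enable_irq(dev->irq);
	}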
+ */ + gem_interrupt(dev->irq, dev, NULL); +} +#endif + static void gem_tx_timeout(struct net_device *dev) { struct gem *gp = dev->priv; @@ -841,10 +932,12 @@ static void gem_tx_timeout(struct net_device *dev) readl(gp->regs + MAC_RXCFG)); spin_lock_irq(&gp->lock); + spin_lock(&gp->tx_lock); gp->reset_task_pending = 2; schedule_work(&gp->reset_task); + spin_unlock(&gp->tx_lock); spin_unlock_irq(&gp->lock); } @@ -862,6 +955,7 @@ static int gem_start_xmit(struct sk_buff *skb, struct net_device *dev) struct gem *gp = dev->priv; int entry; u64 ctrl; + unsigned long flags; ctrl = 0; if (skb->ip_summed == CHECKSUM_HW) { @@ -875,15 +969,20 @@ static int gem_start_xmit(struct sk_buff *skb, struct net_device *dev) (csum_stuff_off << 21)); } - spin_lock_irq(&gp->lock); + local_irq_save(flags); + if (!spin_trylock(&gp->tx_lock)) { + /* Tell upper layer to requeue */ + local_irq_restore(flags); + return NETDEV_TX_LOCKED; + } /* This is a hard error, log it. */ if (TX_BUFFS_AVAIL(gp) <= (skb_shinfo(skb)->nr_frags + 1)) { netif_stop_queue(dev); - spin_unlock_irq(&gp->lock); + spin_unlock_irqrestore(&gp->tx_lock, flags); printk(KERN_ERR PFX "%s: BUG! Tx Ring full when queue awake!\n", dev->name); - return 1; + return NETDEV_TX_BUSY; } entry = gp->tx_new; @@ -967,11 +1066,11 @@ static int gem_start_xmit(struct sk_buff *skb, struct net_device *dev) dev->name, entry, skb->len); mb(); writel(gp->tx_new, gp->regs + TXDMA_KICK); - spin_unlock_irq(&gp->lock); + spin_unlock_irqrestore(&gp->tx_lock, flags); dev->trans_start = jiffies; - return 0; + return NETDEV_TX_OK; } /* Jumbo-grams don't seem to work :-( */ @@ -998,9 +1097,11 @@ static int gem_change_mtu(struct net_device *dev, int new_mtu) } spin_lock_irq(&gp->lock); + spin_lock(&gp->tx_lock); dev->mtu = new_mtu; gp->reset_task_pending = 1; schedule_work(&gp->reset_task); + spin_unlock(&gp->tx_lock); spin_unlock_irq(&gp->lock); flush_scheduled_work(); @@ -1010,7 +1111,7 @@ static int gem_change_mtu(struct net_device *dev, int new_mtu) #define STOP_TRIES 32 -/* Must be invoked under gp->lock. */ +/* Must be invoked under gp->lock and gp->tx_lock. */ static void gem_stop(struct gem *gp) { int limit; @@ -1036,7 +1137,7 @@ static void gem_stop(struct gem *gp) printk(KERN_ERR "%s: SW reset is ghetto.\n", gp->dev->name); } -/* Must be invoked under gp->lock. */ +/* Must be invoked under gp->lock and gp->tx_lock. */ static void gem_start_dma(struct gem *gp) { unsigned long val; @@ -1061,7 +1162,7 @@ static void gem_start_dma(struct gem *gp) } -/* Must be invoked under gp->lock. */ +/* Must be invoked under gp->lock and gp->tx_lock. */ // XXX dbl check what that function should do when called on PCS PHY static void gem_begin_auto_negotiation(struct gem *gp, struct ethtool_cmd *ep) { @@ -1148,7 +1249,7 @@ non_mii: /* A link-up condition has occurred, initialize and enable the * rest of the chip. * - * Must be invoked under gp->lock. + * Must be invoked under gp->lock and gp->tx_lock. */ static int gem_set_link_modes(struct gem *gp) { @@ -1255,7 +1356,7 @@ static int gem_set_link_modes(struct gem *gp) return 0; } -/* Must be invoked under gp->lock. */ +/* Must be invoked under gp->lock and gp->tx_lock. */ static int gem_mdio_link_not_up(struct gem *gp) { switch (gp->lstate) { @@ -1311,19 +1412,13 @@ static void gem_reset_task(void *data) { struct gem *gp = (struct gem *) data; - /* The link went down, we reset the ring, but keep - * DMA stopped. Todo: Use this function for reset - * on error as well. 
- */ - + netif_poll_disable(gp->dev); spin_lock_irq(&gp->lock); + spin_lock(&gp->tx_lock); if (gp->hw_running && gp->opened) { - /* Make sure we don't get interrupts or tx packets */ netif_stop_queue(gp->dev); - writel(0xffffffff, gp->regs + GREG_IMASK); - /* Reset the chip & rings */ gem_stop(gp); gem_init_rings(gp); @@ -1335,7 +1430,9 @@ static void gem_reset_task(void *data) } gp->reset_task_pending = 0; + spin_unlock(&gp->tx_lock); spin_unlock_irq(&gp->lock); + netif_poll_enable(gp->dev); } static void gem_link_timer(unsigned long data) @@ -1347,6 +1444,7 @@ static void gem_link_timer(unsigned long data) return; spin_lock_irq(&gp->lock); + spin_lock(&gp->tx_lock); /* If the link of task is still pending, we just * reschedule the link timer @@ -1416,10 +1514,11 @@ static void gem_link_timer(unsigned long data) restart: mod_timer(&gp->link_timer, jiffies + ((12 * HZ) / 10)); out_unlock: + spin_unlock(&gp->tx_lock); spin_unlock_irq(&gp->lock); } -/* Must be invoked under gp->lock. */ +/* Must be invoked under gp->lock and gp->tx_lock. */ static void gem_clean_rings(struct gem *gp) { struct gem_init_block *gb = gp->init_block; @@ -1470,7 +1569,7 @@ static void gem_clean_rings(struct gem *gp) } } -/* Must be invoked under gp->lock. */ +/* Must be invoked under gp->lock and gp->tx_lock. */ static void gem_init_rings(struct gem *gp) { struct gem_init_block *gb = gp->init_block; @@ -1482,6 +1581,9 @@ static void gem_init_rings(struct gem *gp) gem_clean_rings(gp); + gp->rx_buf_sz = max(dev->mtu + ETH_HLEN + VLAN_HLEN, + (unsigned)VLAN_ETH_FRAME_LEN); + for (i = 0; i < RX_RING_SIZE; i++) { struct sk_buff *skb; struct gem_rxd *rxd = &gb->rxd[i]; @@ -1495,7 +1597,7 @@ static void gem_init_rings(struct gem *gp) gp->rx_skbs[i] = skb; skb->dev = dev; - skb_put(skb, (ETH_FRAME_LEN + RX_OFFSET)); + skb_put(skb, (gp->rx_buf_sz + RX_OFFSET)); dma_addr = pci_map_page(gp->pdev, virt_to_page(skb->data), offset_in_page(skb->data), @@ -1517,7 +1619,7 @@ static void gem_init_rings(struct gem *gp) wmb(); } -/* Must be invoked under gp->lock. */ +/* Must be invoked under gp->lock and gp->tx_lock. */ static void gem_init_phy(struct gem *gp) { u32 mifcfg; @@ -1655,7 +1757,7 @@ static void gem_init_phy(struct gem *gp) } } -/* Must be invoked under gp->lock. */ +/* Must be invoked under gp->lock and gp->tx_lock. */ static void gem_init_dma(struct gem *gp) { u64 desc_dma = (u64) gp->gblock_dvma; @@ -1693,7 +1795,7 @@ static void gem_init_dma(struct gem *gp) gp->regs + RXDMA_BLANK); } -/* Must be invoked under gp->lock. */ +/* Must be invoked under gp->lock and gp->tx_lock. */ static u32 gem_setup_multicast(struct gem *gp) { @@ -1736,7 +1838,7 @@ gem_setup_multicast(struct gem *gp) return rxcfg; } -/* Must be invoked under gp->lock. */ +/* Must be invoked under gp->lock and gp->tx_lock. */ static void gem_init_mac(struct gem *gp) { unsigned char *e = &gp->dev->dev_addr[0]; @@ -1750,7 +1852,7 @@ static void gem_init_mac(struct gem *gp) writel(0x40, gp->regs + MAC_MINFSZ); /* Ethernet payload + header + FCS + optional VLAN tag. */ - writel(0x20000000 | (gp->dev->mtu + ETH_HLEN + 4 + 4), gp->regs + MAC_MAXFSZ); + writel(0x20000000 | (gp->rx_buf_sz + 4), gp->regs + MAC_MAXFSZ); writel(0x07, gp->regs + MAC_PASIZE); writel(0x04, gp->regs + MAC_JAMSIZE); @@ -1814,7 +1916,7 @@ static void gem_init_mac(struct gem *gp) writel(0xffffffff, gp->regs + MAC_MCMASK); } -/* Must be invoked under gp->lock. */ +/* Must be invoked under gp->lock and gp->tx_lock. 
@@ -1827,7 +1929,7 @@ static void gem_init_pause_thresholds(struct gem *gp)
 	if (gp->rx_fifo_sz <= (2 * 1024)) {
 		gp->rx_pause_off = gp->rx_pause_on = gp->rx_fifo_sz;
 	} else {
-		int max_frame = (gp->dev->mtu + ETH_HLEN + 4 + 4 + 64) & ~63;
+		int max_frame = (gp->rx_buf_sz + 4 + 64) & ~63;
 		int off = (gp->rx_fifo_sz - (max_frame * 2));
 		int on  = off - max_frame;
 
@@ -1950,7 +2052,7 @@ static int gem_check_invariants(struct gem *gp)
 	return 0;
 }
 
-/* Must be invoked under gp->lock. */
+/* Must be invoked under gp->lock and gp->tx_lock. */
 static void gem_init_hw(struct gem *gp, int restart_link)
 {
 	/* On Apple's gmac, I initialize the PHY only after
@@ -2020,8 +2122,7 @@ static void gem_stop_phy(struct gem *gp)
 	/* Let the chip settle down a bit, it seems that helps
 	 * for sleep mode on some models
 	 */
-	set_current_state(TASK_UNINTERRUPTIBLE);
-	schedule_timeout(HZ/100);
+	msleep(10);
 
 	/* Make sure we aren't polling PHY status change. We
 	 * don't currently use that feature though
@@ -2039,8 +2140,7 @@ static void gem_stop_phy(struct gem *gp)
 		 * don't wait a bit here, looks like the chip takes
 		 * some time to really shut down
 		 */
-		set_current_state(TASK_UNINTERRUPTIBLE);
-		schedule_timeout(HZ/100);
+		msleep(10);
 	}
 
 	writel(0, gp->regs + MAC_TXCFG);
@@ -2050,9 +2150,11 @@ static void gem_stop_phy(struct gem *gp)
 
 	if (!gp->wake_on_lan) {
 		spin_lock_irqsave(&gp->lock, flags);
+		spin_lock(&gp->tx_lock);
 		gem_stop(gp);
 		writel(MAC_TXRST_CMD, gp->regs + MAC_TXRST);
 		writel(MAC_RXRST_CMD, gp->regs + MAC_RXRST);
+		spin_unlock(&gp->tx_lock);
 		spin_unlock_irqrestore(&gp->lock, flags);
 	}
 
@@ -2100,8 +2202,10 @@ static void gem_shutdown(struct gem *gp)
 		unsigned long flags;
 
 		spin_lock_irqsave(&gp->lock, flags);
+		spin_lock(&gp->tx_lock);
 		gem_stop(gp);
-		spin_unlock_irqrestore(&gp->lock, flags);
+		spin_unlock(&gp->tx_lock);
+		spin_unlock_irqrestore(&gp->lock, flags);
 	}
 }
 
@@ -2161,7 +2265,9 @@ static int gem_open(struct net_device *dev)
 
 	/* Reset the chip */
 	spin_lock_irq(&gp->lock);
+	spin_lock(&gp->tx_lock);
 	gem_stop(gp);
+	spin_unlock(&gp->tx_lock);
 	spin_unlock_irq(&gp->lock);
 
 	gp->hw_running = 1;
@@ -2175,6 +2281,7 @@ static int gem_open(struct net_device *dev)
 			printk(KERN_ERR "%s: failed to request irq !\n", gp->dev->name);
 
 		spin_lock_irq(&gp->lock);
+		spin_lock(&gp->tx_lock);
 #ifdef CONFIG_PPC_PMAC
 		if (!hw_was_up && gp->pdev->vendor == PCI_VENDOR_ID_APPLE)
 			gem_apple_powerdown(gp);
@@ -2183,12 +2290,14 @@ static int gem_open(struct net_device *dev)
 		gp->pm_timer.expires = jiffies + 10*HZ;
 		add_timer(&gp->pm_timer);
 		up(&gp->pm_sem);
+		spin_unlock(&gp->tx_lock);
 		spin_unlock_irq(&gp->lock);
 
 		return -EAGAIN;
 	}
 
 	spin_lock_irq(&gp->lock);
+	spin_lock(&gp->tx_lock);
 
 	/* Allocate & setup ring buffers */
 	gem_init_rings(gp);
@@ -2198,6 +2307,7 @@ static int gem_open(struct net_device *dev)
 
 	gp->opened = 1;
 
+	spin_unlock(&gp->tx_lock);
 	spin_unlock_irq(&gp->lock);
 
 	up(&gp->pm_sem);
@@ -2212,11 +2322,16 @@ static int gem_close(struct net_device *dev)
 	/* Make sure we don't get distracted by suspend/resume */
 	down(&gp->pm_sem);
 
+	/* Note: we don't need to call netif_poll_disable() here because
+	 * our caller (dev_close) already did it for us
+	 */
+
 	/* Stop traffic, mark us closed */
 	spin_lock_irq(&gp->lock);
+	spin_lock(&gp->tx_lock);
 
 	gp->opened = 0;
-	writel(0xffffffff, gp->regs + GREG_IMASK);
+
 	netif_stop_queue(dev);
 
 	/* Stop chip */
@@ -2228,6 +2343,7 @@ static int gem_close(struct net_device *dev)
 
 	/* Bye, the pm timer will finish the job */
 	free_irq(gp->pdev->irq, (void *) dev);
+	spin_unlock(&gp->tx_lock);
 	spin_unlock_irq(&gp->lock);
 
 	/* Fire the PM timer that will shut us down in about 10 seconds */
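The set_current_state()/schedule_timeout() pairs removed above and the msleep(10) that replaces them both sleep for roughly 10 ms (HZ/100 jiffies is 1/100th of a second at any HZ); msleep() is simply the idiomatic spelling, and it guarantees at least the requested delay. Side by side:

	/* old: two steps, easy to get wrong (forgetting set_current_state
	 * makes schedule_timeout() return without sleeping) */
	set_current_state(TASK_UNINTERRUPTIBLE);
	schedule_timeout(HZ / 100);	/* 1/100 s worth of jiffies */

	/* new: one call, sleeps at least 10 ms */
	msleep(10);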
@@ -2245,6 +2361,8 @@ static int gem_suspend(struct pci_dev *pdev, u32 state)
 	struct net_device *dev = pci_get_drvdata(pdev);
 	struct gem *gp = dev->priv;
 
+	netif_poll_disable(dev);
+
 	/* We hold the PM semaphore during entire driver
 	 * sleep time
 	 */
@@ -2256,18 +2374,18 @@ static int gem_suspend(struct pci_dev *pdev, u32 state)
 	/* If the driver is opened, we stop the DMA */
 	if (gp->opened) {
 		spin_lock_irq(&gp->lock);
+		spin_lock(&gp->tx_lock);
 
 		/* Stop traffic, mark us closed */
 		netif_device_detach(dev);
 
-		writel(0xffffffff, gp->regs + GREG_IMASK);
-
 		/* Stop chip */
 		gem_stop(gp);
 
 		/* Get rid of ring buffers */
 		gem_clean_rings(gp);
 
+		spin_unlock(&gp->tx_lock);
 		spin_unlock_irq(&gp->lock);
 
 		if (gp->pdev->vendor == PCI_VENDOR_ID_APPLE)
@@ -2301,12 +2419,14 @@ static int gem_resume(struct pci_dev *pdev)
 	}
 #endif /* CONFIG_PPC_PMAC */
 	spin_lock_irq(&gp->lock);
+	spin_lock(&gp->tx_lock);
 
 	gem_stop(gp);
 	gp->hw_running = 1;
 	gem_init_rings(gp);
 	gem_init_hw(gp, 1);
 
+	spin_unlock(&gp->tx_lock);
 	spin_unlock_irq(&gp->lock);
 
 	netif_device_attach(dev);
@@ -2315,6 +2435,8 @@ static int gem_resume(struct pci_dev *pdev)
 	}
 	up(&gp->pm_sem);
 
+	netif_poll_enable(dev);
+
 	return 0;
 }
 #endif /* CONFIG_PM */
@@ -2325,6 +2447,7 @@ static struct net_device_stats *gem_get_stats(struct net_device *dev)
 	struct net_device_stats *stats = &gp->net_stats;
 
 	spin_lock_irq(&gp->lock);
+	spin_lock(&gp->tx_lock);
 
 	if (gp->hw_running) {
 		stats->rx_crc_errors += readl(gp->regs + MAC_FCSERR);
@@ -2344,6 +2467,7 @@ static struct net_device_stats *gem_get_stats(struct net_device *dev)
 		writel(0, gp->regs + MAC_LCOLL);
 	}
 
+	spin_unlock(&gp->tx_lock);
 	spin_unlock_irq(&gp->lock);
 
 	return &gp->net_stats;
@@ -2359,6 +2483,7 @@ static void gem_set_multicast(struct net_device *dev)
 		return;
 
 	spin_lock_irq(&gp->lock);
+	spin_lock(&gp->tx_lock);
 
 	netif_stop_queue(dev);
 
@@ -2383,6 +2508,7 @@ static void gem_set_multicast(struct net_device *dev)
 
 	netif_wake_queue(dev);
 
+	spin_unlock(&gp->tx_lock);
 	spin_unlock_irq(&gp->lock);
 }
 
@@ -2414,6 +2540,7 @@ static int gem_get_settings(struct net_device *dev, struct ethtool_cmd *cmd)
 
 		/* Return current PHY settings */
 		spin_lock_irq(&gp->lock);
+		spin_lock(&gp->tx_lock);
 		cmd->autoneg = gp->want_autoneg;
 		cmd->speed = gp->phy_mii.speed;
 		cmd->duplex = gp->phy_mii.duplex;
@@ -2425,6 +2552,7 @@ static int gem_get_settings(struct net_device *dev, struct ethtool_cmd *cmd)
 		 */
 		if (cmd->advertising == 0)
 			cmd->advertising = cmd->supported;
+		spin_unlock(&gp->tx_lock);
 		spin_unlock_irq(&gp->lock);
 	} else { // XXX PCS ?
 		cmd->supported =
@@ -2464,7 +2592,9 @@ static int gem_set_settings(struct net_device *dev, struct ethtool_cmd *cmd)
 
 	/* Apply settings and restart link process. */
 	spin_lock_irq(&gp->lock);
+	spin_lock(&gp->tx_lock);
 	gem_begin_auto_negotiation(gp, cmd);
+	spin_unlock(&gp->tx_lock);
 	spin_unlock_irq(&gp->lock);
 
 	return 0;
@@ -2479,7 +2609,9 @@ static int gem_nway_reset(struct net_device *dev)
 
 	/* Restart link process. */
 	spin_lock_irq(&gp->lock);
+	spin_lock(&gp->tx_lock);
 	gem_begin_auto_negotiation(gp, NULL);
+	spin_unlock(&gp->tx_lock);
 	spin_unlock_irq(&gp->lock);
 
 	return 0;
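All of the hunks above follow the one lock-ordering rule this patch introduces: gp->lock (IRQ-safe, guarding RX and link state) is always taken before gp->tx_lock (guarding the TX ring), and they are released in reverse order. Condensed as a sketch, not a drop-in function:

	/* Lock order: gp->lock, then gp->tx_lock. Taking gp->tx_lock alone
	 * (as gem_start_xmit() does) is fine; taking gp->lock while already
	 * holding gp->tx_lock would invert the order and risk deadlock. */
	spin_lock_irq(&gp->lock);
	spin_lock(&gp->tx_lock);
	/* ... touch link state and the TX ring ... */
	spin_unlock(&gp->tx_lock);
	spin_unlock_irq(&gp->lock);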
@@ -2579,7 +2711,7 @@ static void find_eth_addr_in_vpd(void *rom_base, int len, unsigned char *dev_addr)
 static void get_gem_mac_nonobp(struct pci_dev *pdev, unsigned char *dev_addr)
 {
 	u32 rom_reg_orig;
-	void *p;
+	void __iomem *p;
 
 	if (pdev->resource[PCI_ROM_RESOURCE].parent == NULL) {
 		if (pci_assign_resource(pdev, PCI_ROM_RESOURCE) < 0)
@@ -2717,7 +2849,7 @@ static int __devinit gem_init_one(struct pci_dev *pdev,
 
 	gp = dev->priv;
 
-	err = pci_request_regions(pdev, dev->name);
+	err = pci_request_regions(pdev, DRV_NAME);
 	if (err) {
 		printk(KERN_ERR PFX "Cannot obtain PCI resources, "
 		       "aborting.\n");
@@ -2731,6 +2863,7 @@ static int __devinit gem_init_one(struct pci_dev *pdev,
 	gp->msg_enable = DEFAULT_MSG;
 
 	spin_lock_init(&gp->lock);
+	spin_lock_init(&gp->tx_lock);
 	init_MUTEX(&gp->pm_sem);
 
 	init_timer(&gp->link_timer);
@@ -2748,7 +2881,7 @@ static int __devinit gem_init_one(struct pci_dev *pdev,
 	gp->timer_ticks = 0;
 	netif_carrier_off(dev);
 
-	gp->regs = (unsigned long) ioremap(gemreg_base, gemreg_len);
+	gp->regs = ioremap(gemreg_base, gemreg_len);
 	if (gp->regs == 0UL) {
 		printk(KERN_ERR PFX "Cannot map device registers, "
 		       "aborting.\n");
@@ -2766,7 +2899,9 @@ static int __devinit gem_init_one(struct pci_dev *pdev,
 		gem_apple_powerup(gp);
 #endif
 	spin_lock_irq(&gp->lock);
+	spin_lock(&gp->tx_lock);
 	gem_stop(gp);
+	spin_unlock(&gp->tx_lock);
 	spin_unlock_irq(&gp->lock);
 
 	/* Fill up the mii_phy structure (even if we won't use it) */
@@ -2804,12 +2939,17 @@ static int __devinit gem_init_one(struct pci_dev *pdev,
 	dev->get_stats = gem_get_stats;
 	dev->set_multicast_list = gem_set_multicast;
 	dev->do_ioctl = gem_ioctl;
+	dev->poll = gem_poll;
+	dev->weight = 64;
 	dev->ethtool_ops = &gem_ethtool_ops;
 	dev->tx_timeout = gem_tx_timeout;
 	dev->watchdog_timeo = 5 * HZ;
 	dev->change_mtu = gem_change_mtu;
 	dev->irq = pdev->irq;
 	dev->dma = 0;
+#ifdef CONFIG_NET_POLL_CONTROLLER
+	dev->poll_controller = gem_poll_controller;
+#endif
 
 	if (register_netdev(dev)) {
 		printk(KERN_ERR PFX "Cannot register net device, "
@@ -2827,9 +2967,11 @@ static int __devinit gem_init_one(struct pci_dev *pdev,
 
 	/* Detect & init PHY, start autoneg */
 	spin_lock_irq(&gp->lock);
+	spin_lock(&gp->tx_lock);
 	gp->hw_running = 1;
 	gem_init_phy(gp);
 	gem_begin_auto_negotiation(gp, NULL);
+	spin_unlock(&gp->tx_lock);
 	spin_unlock_irq(&gp->lock);
 
 	if (gp->phy_type == phy_mii_mdio0 ||
@@ -2840,7 +2982,7 @@ static int __devinit gem_init_one(struct pci_dev *pdev,
 	pci_set_drvdata(pdev, dev);
 
 	/* GEM can do it all... */
-	dev->features |= NETIF_F_SG | NETIF_F_HW_CSUM;
+	dev->features |= NETIF_F_SG | NETIF_F_HW_CSUM | NETIF_F_LLTX;
 	if (pci_using_dac)
 		dev->features |= NETIF_F_HIGHDMA;
 
@@ -2865,7 +3007,7 @@ err_out_iounmap:
 		gem_shutdown(gp);
 	up(&gp->pm_sem);
 
-	iounmap((void *) gp->regs);
+	iounmap(gp->regs);
 
 err_out_free_res:
 	pci_release_regions(pdev);
@@ -2899,7 +3041,7 @@ static void __devexit gem_remove_one(struct pci_dev *pdev)
 				    sizeof(struct gem_init_block),
 				    gp->init_block,
 				    gp->gblock_dvma);
-		iounmap((void *) gp->regs);
+		iounmap(gp->regs);
 		pci_release_regions(pdev);
 		free_netdev(dev);
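Taken together, the gem_init_one() hunks above are the entire NAPI/netpoll hookup a 2.6 driver of this era needs. Extracted for reference (the net_device fields are the real ones from the patch; the foo_* handlers are hypothetical stand-ins):

	dev->poll = foo_poll;		/* NAPI callback, runs with IRQs enabled */
	dev->weight = 64;		/* per-round RX quota fed into dev->quota */
	#ifdef CONFIG_NET_POLL_CONTROLLER
	dev->poll_controller = foo_poll_controller;
	#endif
	dev->features |= NETIF_F_LLTX;	/* core skips xmit_lock; driver locks TX itself */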