2 * drivers/net/gianfar.c
4 * Gianfar Ethernet Driver
5 * Driver for FEC on MPC8540 and TSEC on MPC8540/MPC8560
6 * Based on 8260_io/fcc_enet.c
9 * Maintainer: Kumar Gala (kumar.gala@freescale.com)
11 * Copyright 2004 Freescale Semiconductor, Inc
13 * This program is free software; you can redistribute it and/or modify it
14 * under the terms of the GNU General Public License as published by the
15 * Free Software Foundation; either version 2 of the License, or (at your
16 * option) any later version.
18 * Gianfar: AKA Lambda Draconis, "Dragon"
25 * This driver is designed for the Triple-speed Ethernet
26 * controllers on the Freescale 8540/8560 integrated processors,
27 * as well as the Fast Ethernet Controller on the 8540.
29 * The driver is initialized through OCP. Structures which
30 * define the configuration needed by the board are defined in a
31 * board structure in arch/ppc/platforms (though I do not
32 * discount the possibility that other architectures could one
33 * day be supported). One assumption the driver currently makes
34 * is that the PHY is configured in such a way as to advertise all
35 * capabilities. This is a sensible default, and on certain
36 * PHYs, changing this default encounters substantial errata
37 * issues. Future versions may remove this requirement, but for
38 * now, it is best for the firmware to ensure this is the case.
40 * The Gianfar Ethernet Controller uses a ring of buffer
41 * descriptors. The beginning is indicated by a register
42 * pointing to the physical address of the start of the ring.
43 * The end is determined by a "wrap" bit being set in the
44 * last descriptor of the ring.
46 * When a packet is received, the RXF bit in the
47 * IEVENT register is set, triggering an interrupt when the
48 * corresponding bit in the IMASK register is also set (if
49 * interrupt coalescing is active, then the interrupt may not
50 * happen immediately, but will wait until either a set number
51 * of frames or a set amount of time has passed). In NAPI, the
52 * interrupt handler will signal there is work to be done, and
53 * exit. Without NAPI, the packet(s) will be handled
54 * immediately. Both methods will start at the last known empty
55 * descriptor, and process every subsequent descriptor until there
56 * are none left with data (NAPI will stop after a set number of
57 * packets to give time to other tasks, but will eventually
58 * process all the packets). The data arrives inside a
59 * pre-allocated skb, and so after the skb is passed up to the
60 * stack, a new skb must be allocated, and the address field in
61 * the buffer descriptor must be updated to indicate this new skb.
64 * When the kernel requests that a packet be transmitted, the
65 * driver starts where it left off last time, and points the
66 * descriptor at the buffer which was passed in. The driver
67 * then informs the DMA engine that there are packets ready to
68 * be transmitted. Once the controller is finished transmitting
69 * the packet, an interrupt may be triggered (under the same
70 * conditions as for reception, but depending on the TXF bit).
71 * The driver then cleans up the buffer.
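 *
 * As a rough illustration (not part of the driver) of how such a ring
 * is walked, assuming the rxbd8 layout used below and a hypothetical
 * process_frame() placeholder for the real receive handling:
 *
 *	struct rxbd8 *bdp = priv->rx_bd_base;
 *	while (!(bdp->status & RXBD_EMPTY)) {
 *		process_frame(dev, bdp);	(hand the filled buffer up)
 *		bdp->status &= ~RXBD_STATS;	(clear status, keep WRAP)
 *		bdp->status |= RXBD_EMPTY;	(return it to the hardware)
 *		if (bdp->status & RXBD_WRAP)	(wrap at the end of the ring)
 *			bdp = priv->rx_bd_base;
 *		else
 *			bdp++;
 *	}
 *
 * The real receive path (gfar_clean_rx_ring below) additionally
 * replaces the skb and updates the statistics.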
74 #include <linux/config.h>
75 #include <linux/kernel.h>
76 #include <linux/sched.h>
77 #include <linux/string.h>
78 #include <linux/errno.h>
79 #include <linux/slab.h>
80 #include <linux/interrupt.h>
81 #include <linux/init.h>
82 #include <linux/delay.h>
83 #include <linux/netdevice.h>
84 #include <linux/etherdevice.h>
85 #include <linux/skbuff.h>
86 #include <linux/spinlock.h>
91 #include <asm/uaccess.h>
92 #include <linux/module.h>
93 #include <linux/version.h>
94 #include <linux/dma-mapping.h>
95 #include <linux/crc32.h>
98 #include "gianfar_phy.h"
99 #ifdef CONFIG_NET_FASTROUTE
100 #include <linux/if_arp.h>
104 #if LINUX_VERSION_CODE < KERNEL_VERSION(2,5,41)
105 #define irqreturn_t void
109 #define TX_TIMEOUT (1*HZ)
110 #define SKB_ALLOC_TIMEOUT 1000000
111 #undef BRIEF_GFAR_ERRORS
112 #define VERBOSE_GFAR_ERRORS
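/* RECEIVE() hands a completed skb to the network stack. Under NAPI the
 * driver is already running in softirq context, so it can call
 * netif_receive_skb() directly; otherwise netif_rx() queues the skb for
 * later processing. */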
114 #ifdef CONFIG_GFAR_NAPI
115 #define RECEIVE(x) netif_receive_skb(x)
117 #define RECEIVE(x) netif_rx(x)
120 #define DEVICE_NAME "%s: Gianfar Ethernet Controller Version 1.0, "
121 char gfar_driver_name[] = "Gianfar Ethernet";
122 char gfar_driver_version[] = "1.0";
124 int startup_gfar(struct net_device *dev);
125 static int gfar_enet_open(struct net_device *dev);
126 static int gfar_start_xmit(struct sk_buff *skb, struct net_device *dev);
127 static void gfar_timeout(struct net_device *dev);
128 static int gfar_close(struct net_device *dev);
129 struct sk_buff *gfar_new_skb(struct net_device *dev, struct rxbd8 *bdp);
130 static struct net_device_stats *gfar_get_stats(struct net_device *dev);
131 static int gfar_set_mac_address(struct net_device *dev);
132 static int gfar_change_mtu(struct net_device *dev, int new_mtu);
133 static irqreturn_t gfar_error(int irq, void *dev_id, struct pt_regs *regs);
134 static irqreturn_t gfar_transmit(int irq, void *dev_id, struct pt_regs *regs);
135 irqreturn_t gfar_receive(int irq, void *dev_id, struct pt_regs *regs);
136 static irqreturn_t gfar_interrupt(int irq, void *dev_id, struct pt_regs *regs);
137 static irqreturn_t phy_interrupt(int irq, void *dev_id, struct pt_regs *regs);
138 static void gfar_phy_change(void *data);
139 static void gfar_phy_timer(unsigned long data);
140 static void adjust_link(struct net_device *dev);
141 static void init_registers(struct net_device *dev);
142 static int init_phy(struct net_device *dev);
143 static int gfar_probe(struct ocp_device *ocpdev);
144 static void gfar_remove(struct ocp_device *ocpdev);
145 void free_skb_resources(struct gfar_private *priv);
146 static void gfar_set_multi(struct net_device *dev);
147 static void gfar_set_hash_for_addr(struct net_device *dev, u8 *addr);
148 #ifdef CONFIG_GFAR_NAPI
149 static int gfar_poll(struct net_device *dev, int *budget);
151 #ifdef CONFIG_NET_FASTROUTE
152 static int gfar_accept_fastpath(struct net_device *dev, struct dst_entry *dst);
154 static inline int try_fastroute(struct sk_buff *skb, struct net_device *dev, int length);
155 #ifdef CONFIG_GFAR_NAPI
156 static int gfar_clean_rx_ring(struct net_device *dev, int rx_work_limit);
158 static int gfar_clean_rx_ring(struct net_device *dev);
160 static int gfar_process_frame(struct net_device *dev, struct sk_buff *skb, int length);
162 extern struct ethtool_ops gfar_ethtool_ops;
163 extern void gfar_gstrings_normon(struct net_device *dev, u32 stringset,
165 extern void gfar_fill_stats_normon(struct net_device *dev,
166 struct ethtool_stats *dummy, u64 * buf);
167 extern int gfar_stats_count_normon(struct net_device *dev);
170 MODULE_AUTHOR("Freescale Semiconductor, Inc");
171 MODULE_DESCRIPTION("Gianfar Ethernet Driver");
172 MODULE_LICENSE("GPL");
174 /* Called by the ocp code to initialize device data structures
175 * required for bringing up the device
176 * returns 0 on success */
177 static int gfar_probe(struct ocp_device *ocpdev)
180 struct ocp_device *mdiodev;
181 struct net_device *dev = NULL;
182 struct gfar_private *priv = NULL;
183 struct ocp_gfar_data *einfo;
186 struct ethtool_ops *dev_ethtool_ops;
188 einfo = (struct ocp_gfar_data *) ocpdev->def->additions;
191 printk(KERN_ERR "gfar %d: Missing additional data!\n",
197 /* get a pointer to the register memory which can
198 * configure the PHYs. If it's different from this set,
199 * get the device which has those regs */
200 if ((einfo->phyregidx >= 0) && (einfo->phyregidx != ocpdev->def->index)) {
201 mdiodev = ocp_find_device(OCP_ANY_ID,
202 OCP_FUNC_GFAR, einfo->phyregidx);
204 /* If the device which holds the MDIO regs isn't
205 * up, wait for it to come up */
212 /* Create an ethernet device instance */
213 dev = alloc_etherdev(sizeof (*priv));
218 priv = netdev_priv(dev);
220 /* Set the info in the priv to the current info */
223 /* get a pointer to the register memory */
224 priv->regs = (struct gfar *)
225 ioremap(ocpdev->def->paddr, sizeof (struct gfar));
227 if (priv->regs == NULL) {
232 /* Set the PHY base address */
233 priv->phyregs = (struct gfar *)
234 ioremap(mdiodev->def->paddr, sizeof (struct gfar));
236 if (priv->phyregs == NULL) {
241 ocp_set_drvdata(ocpdev, dev);
243 /* Stop the DMA engine now, in case it was running before */
244 /* (The firmware could have used it, and left it running). */
245 /* To do this, we write Graceful Receive Stop and Graceful */
246 /* Transmit Stop, and then wait until the corresponding bits */
247 /* in IEVENT indicate the stops have completed. */
248 tempval = gfar_read(&priv->regs->dmactrl);
249 tempval &= ~(DMACTRL_GRS | DMACTRL_GTS);
250 gfar_write(&priv->regs->dmactrl, tempval);
252 tempval = gfar_read(&priv->regs->dmactrl);
253 tempval |= (DMACTRL_GRS | DMACTRL_GTS);
254 gfar_write(&priv->regs->dmactrl, tempval);
256 while (!(gfar_read(&priv->regs->ievent) & (IEVENT_GRSC | IEVENT_GTSC)))
259 /* Reset MAC layer */
260 gfar_write(&priv->regs->maccfg1, MACCFG1_SOFT_RESET);
262 tempval = (MACCFG1_TX_FLOW | MACCFG1_RX_FLOW);
263 gfar_write(&priv->regs->maccfg1, tempval);
265 /* Initialize MACCFG2. */
266 gfar_write(&priv->regs->maccfg2, MACCFG2_INIT_SETTINGS);
268 /* Initialize ECNTRL */
269 gfar_write(&priv->regs->ecntrl, ECNTRL_INIT_SETTINGS);
271 /* Copy the station address into the dev structure, */
272 /* and into the address registers MAC_STNADDR1,2. */
273 /* Backwards, because little endian MACs are dumb. */
274 /* Don't set the regs if the firmware already did */
275 memcpy(dev->dev_addr, einfo->mac_addr, MAC_ADDR_LEN);
277 /* Set the dev->base_addr to the gfar reg region */
278 dev->base_addr = (unsigned long) (priv->regs);
280 SET_MODULE_OWNER(dev);
282 /* Fill in the dev structure */
283 dev->open = gfar_enet_open;
284 dev->hard_start_xmit = gfar_start_xmit;
285 dev->tx_timeout = gfar_timeout;
286 dev->watchdog_timeo = TX_TIMEOUT;
287 #ifdef CONFIG_GFAR_NAPI
288 dev->poll = gfar_poll;
289 dev->weight = GFAR_DEV_WEIGHT;
291 dev->stop = gfar_close;
292 dev->get_stats = gfar_get_stats;
293 dev->change_mtu = gfar_change_mtu;
295 dev->set_multicast_list = gfar_set_multi;
296 dev->flags |= IFF_MULTICAST;
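/* Make a writable, per-device copy of the ethtool operations so that
 * callbacks for features this device lacks (RMON statistics, interrupt
 * coalescing) can be stubbed out below. */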
299 (struct ethtool_ops *)kmalloc(sizeof(struct ethtool_ops),
302 if(dev_ethtool_ops == NULL) {
307 memcpy(dev_ethtool_ops, &gfar_ethtool_ops, sizeof(gfar_ethtool_ops));
309 /* If there is no RMON support in this device, we don't
310 * want to expose non-existent statistics */
311 if((priv->einfo->flags & GFAR_HAS_RMON) == 0) {
312 dev_ethtool_ops->get_strings = gfar_gstrings_normon;
313 dev_ethtool_ops->get_stats_count = gfar_stats_count_normon;
314 dev_ethtool_ops->get_ethtool_stats = gfar_fill_stats_normon;
317 if((priv->einfo->flags & GFAR_HAS_COALESCE) == 0) {
318 dev_ethtool_ops->set_coalesce = NULL;
319 dev_ethtool_ops->get_coalesce = NULL;
322 dev->ethtool_ops = dev_ethtool_ops;
324 #ifdef CONFIG_NET_FASTROUTE
325 dev->accept_fastpath = gfar_accept_fastpath;
328 priv->rx_buffer_size = DEFAULT_RX_BUFFER_SIZE;
329 #ifdef CONFIG_GFAR_BUFSTASH
330 priv->rx_stash_size = STASH_LENGTH;
332 priv->tx_ring_size = DEFAULT_TX_RING_SIZE;
333 priv->rx_ring_size = DEFAULT_RX_RING_SIZE;
335 /* Initially, coalescing is disabled */
336 priv->txcoalescing = 0;
339 priv->rxcoalescing = 0;
343 err = register_netdev(dev);
346 printk(KERN_ERR "%s: Cannot register net device, aborting.\n",
351 /* Print out the device info */
352 printk(DEVICE_NAME, dev->name);
353 for (idx = 0; idx < 6; idx++)
354 printk("%2.2x%c", dev->dev_addr[idx], idx == 5 ? ' ' : ':');
357 /* Even more device info helps when determining which kernel */
358 /* provided which set of benchmarks. Since this is global for all */
359 /* devices, we only print it once */
360 #ifdef CONFIG_GFAR_NAPI
361 printk(KERN_INFO "%s: Running with NAPI enabled\n", dev->name);
363 printk(KERN_INFO "%s: Running with NAPI disabled\n", dev->name);
365 printk(KERN_INFO "%s: %d/%d RX/TX BD ring size\n",
366 dev->name, priv->rx_ring_size, priv->tx_ring_size);
372 kfree(dev_ethtool_ops);
374 iounmap((void *) priv->phyregs);
376 iounmap((void *) priv->regs);
382 static void gfar_remove(struct ocp_device *ocpdev)
384 struct net_device *dev = ocp_get_drvdata(ocpdev);
385 struct gfar_private *priv = netdev_priv(dev);
387 ocp_set_drvdata(ocpdev, NULL);
389 kfree(dev->ethtool_ops);
390 iounmap((void *) priv->regs);
391 iounmap((void *) priv->phyregs);
395 /* Configure the PHY for dev.
396 * Returns 0 on success, -1 on failure.
398 static int init_phy(struct net_device *dev)
400 struct gfar_private *priv = netdev_priv(dev);
401 struct phy_info *curphy;
408 /* get info for this PHY */
409 curphy = get_phy_info(dev);
411 if (curphy == NULL) {
412 printk(KERN_ERR "%s: No PHY found\n", dev->name);
416 priv->phyinfo = curphy;
418 /* Run the commands which configure the PHY */
419 phy_run_commands(dev, curphy->config);
424 static void init_registers(struct net_device *dev)
426 struct gfar_private *priv = netdev_priv(dev);
429 gfar_write(&priv->regs->ievent, IEVENT_INIT_CLEAR);
431 /* Initialize IMASK */
432 gfar_write(&priv->regs->imask, IMASK_INIT_CLEAR);
434 /* Init hash registers to zero */
435 gfar_write(&priv->regs->iaddr0, 0);
436 gfar_write(&priv->regs->iaddr1, 0);
437 gfar_write(&priv->regs->iaddr2, 0);
438 gfar_write(&priv->regs->iaddr3, 0);
439 gfar_write(&priv->regs->iaddr4, 0);
440 gfar_write(&priv->regs->iaddr5, 0);
441 gfar_write(&priv->regs->iaddr6, 0);
442 gfar_write(&priv->regs->iaddr7, 0);
444 gfar_write(&priv->regs->gaddr0, 0);
445 gfar_write(&priv->regs->gaddr1, 0);
446 gfar_write(&priv->regs->gaddr2, 0);
447 gfar_write(&priv->regs->gaddr3, 0);
448 gfar_write(&priv->regs->gaddr4, 0);
449 gfar_write(&priv->regs->gaddr5, 0);
450 gfar_write(&priv->regs->gaddr6, 0);
451 gfar_write(&priv->regs->gaddr7, 0);
454 gfar_write(&priv->regs->rctrl, 0x00000000);
456 /* Zero out the rmon mib registers if it has them */
457 if (priv->einfo->flags & GFAR_HAS_RMON) {
458 memset((void *) &(priv->regs->rmon), 0,
459 sizeof (struct rmon_mib));
461 /* Mask off the CAM interrupts */
462 gfar_write(&priv->regs->rmon.cam1, 0xffffffff);
463 gfar_write(&priv->regs->rmon.cam2, 0xffffffff);
466 /* Initialize the max receive buffer length */
467 gfar_write(&priv->regs->mrblr, priv->rx_buffer_size);
469 #ifdef CONFIG_GFAR_BUFSTASH
470 /* If we are stashing buffers, we need to set the
471 * extraction length to the size of the buffer */
472 gfar_write(&priv->regs->attreli, priv->rx_stash_size << 16);
475 /* Initialize the Minimum Frame Length Register */
476 gfar_write(&priv->regs->minflr, MINFLR_INIT_SETTINGS);
478 /* Setup Attributes so that snooping is on for rx */
479 gfar_write(&priv->regs->attr, ATTR_INIT_SETTINGS);
480 gfar_write(&priv->regs->attreli, ATTRELI_INIT_SETTINGS);
482 /* Assign the TBI an address which won't conflict with the PHYs */
483 gfar_write(&priv->regs->tbipa, TBIPA_VALUE);
486 void stop_gfar(struct net_device *dev)
488 struct gfar_private *priv = netdev_priv(dev);
489 struct gfar *regs = priv->regs;
494 spin_lock_irqsave(&priv->lock, flags);
496 /* Tell the kernel the link is down */
500 /* Mask all interrupts */
501 gfar_write(&regs->imask, IMASK_INIT_CLEAR);
503 /* Clear all interrupts */
504 gfar_write(&regs->ievent, IEVENT_INIT_CLEAR);
506 /* Stop the DMA, and wait for it to stop */
507 tempval = gfar_read(&priv->regs->dmactrl);
508 if ((tempval & (DMACTRL_GRS | DMACTRL_GTS))
509 != (DMACTRL_GRS | DMACTRL_GTS)) {
510 tempval |= (DMACTRL_GRS | DMACTRL_GTS);
511 gfar_write(&priv->regs->dmactrl, tempval);
513 while (!(gfar_read(&priv->regs->ievent) &
514 (IEVENT_GRSC | IEVENT_GTSC)))
518 /* Disable Rx and Tx */
519 tempval = gfar_read(&regs->maccfg1);
520 tempval &= ~(MACCFG1_RX_EN | MACCFG1_TX_EN);
521 gfar_write(&regs->maccfg1, tempval);
523 if (priv->einfo->flags & GFAR_HAS_PHY_INTR) {
524 phy_run_commands(dev, priv->phyinfo->shutdown);
527 spin_unlock_irqrestore(&priv->lock, flags);
530 if (priv->einfo->flags & GFAR_HAS_MULTI_INTR) {
531 free_irq(priv->einfo->interruptError, dev);
532 free_irq(priv->einfo->interruptTransmit, dev);
533 free_irq(priv->einfo->interruptReceive, dev);
535 free_irq(priv->einfo->interruptTransmit, dev);
538 if (priv->einfo->flags & GFAR_HAS_PHY_INTR) {
539 free_irq(priv->einfo->interruptPHY, dev);
541 del_timer_sync(&priv->phy_info_timer);
544 free_skb_resources(priv);
546 dma_unmap_single(NULL, gfar_read(&regs->tbase),
547 sizeof(struct txbd8)*priv->tx_ring_size,
549 dma_unmap_single(NULL, gfar_read(&regs->rbase),
550 sizeof(struct rxbd8)*priv->rx_ring_size,
553 /* Free the buffer descriptors */
554 kfree(priv->tx_bd_base);
557 /* If there are any tx skbs or rx skbs still around, free them.
558 * Then free tx_skbuff and rx_skbuff */
559 void free_skb_resources(struct gfar_private *priv)
565 /* Go through all the buffer descriptors and free their data buffers */
566 txbdp = priv->tx_bd_base;
568 for (i = 0; i < priv->tx_ring_size; i++) {
570 if (priv->tx_skbuff[i]) {
571 dma_unmap_single(NULL, txbdp->bufPtr,
574 dev_kfree_skb_any(priv->tx_skbuff[i]);
575 priv->tx_skbuff[i] = NULL;
579 kfree(priv->tx_skbuff);
581 rxbdp = priv->rx_bd_base;
583 /* rx_skbuff is not guaranteed to be allocated, so only
584 * free it and its contents if it is allocated */
585 if(priv->rx_skbuff != NULL) {
586 for (i = 0; i < priv->rx_ring_size; i++) {
587 if (priv->rx_skbuff[i]) {
588 dma_unmap_single(NULL, rxbdp->bufPtr,
593 dev_kfree_skb_any(priv->rx_skbuff[i]);
594 priv->rx_skbuff[i] = NULL;
604 kfree(priv->rx_skbuff);
608 /* Bring the controller up and running */
609 int startup_gfar(struct net_device *dev)
615 struct gfar_private *priv = netdev_priv(dev);
616 struct gfar *regs = priv->regs;
620 gfar_write(&regs->imask, IMASK_INIT_CLEAR);
622 /* Allocate memory for the buffer descriptors */
624 (unsigned int) kmalloc(sizeof (struct txbd8) * priv->tx_ring_size +
625 sizeof (struct rxbd8) * priv->rx_ring_size,
629 printk(KERN_ERR "%s: Could not allocate buffer descriptors!\n",
634 priv->tx_bd_base = (struct txbd8 *) addr;
636 /* enet DMA only understands physical addresses */
637 gfar_write(&regs->tbase,
638 dma_map_single(NULL, (void *)addr,
639 sizeof(struct txbd8) * priv->tx_ring_size,
642 /* Start the rx descriptor ring where the tx ring leaves off */
643 addr = addr + sizeof (struct txbd8) * priv->tx_ring_size;
644 priv->rx_bd_base = (struct rxbd8 *) addr;
645 gfar_write(&regs->rbase,
646 dma_map_single(NULL, (void *)addr,
647 sizeof(struct rxbd8) * priv->rx_ring_size,
650 /* Setup the skbuff rings */
652 (struct sk_buff **) kmalloc(sizeof (struct sk_buff *) *
653 priv->tx_ring_size, GFP_KERNEL);
655 if (priv->tx_skbuff == NULL) {
656 printk(KERN_ERR "%s: Could not allocate tx_skbuff\n",
662 for (i = 0; i < priv->tx_ring_size; i++)
663 priv->tx_skbuff[i] = NULL;
666 (struct sk_buff **) kmalloc(sizeof (struct sk_buff *) *
667 priv->rx_ring_size, GFP_KERNEL);
669 if (priv->rx_skbuff == NULL) {
670 printk(KERN_ERR "%s: Could not allocate rx_skbuff\n",
676 for (i = 0; i < priv->rx_ring_size; i++)
677 priv->rx_skbuff[i] = NULL;
679 /* Initialize some variables in our dev structure */
680 priv->dirty_tx = priv->cur_tx = priv->tx_bd_base;
681 priv->cur_rx = priv->rx_bd_base;
682 priv->skb_curtx = priv->skb_dirtytx = 0;
685 /* Initialize Transmit Descriptor Ring */
686 txbdp = priv->tx_bd_base;
687 for (i = 0; i < priv->tx_ring_size; i++) {
694 /* Set the last descriptor in the ring to indicate wrap */
696 txbdp->status |= TXBD_WRAP;
698 rxbdp = priv->rx_bd_base;
699 for (i = 0; i < priv->rx_ring_size; i++) {
700 struct sk_buff *skb = NULL;
704 skb = gfar_new_skb(dev, rxbdp);
706 priv->rx_skbuff[i] = skb;
711 /* Set the last descriptor in the ring to wrap */
713 rxbdp->status |= RXBD_WRAP;
715 /* If the device has multiple interrupts, register for
716 * them. Otherwise, only register for the one */
717 if (priv->einfo->flags & GFAR_HAS_MULTI_INTR) {
718 /* Install our interrupt handlers for Error,
719 * Transmit, and Receive */
720 if (request_irq(priv->einfo->interruptError, gfar_error,
721 SA_SHIRQ, "enet_error", dev) < 0) {
722 printk(KERN_ERR "%s: Can't get IRQ %d\n",
723 dev->name, priv->einfo->interruptError);
729 if (request_irq(priv->einfo->interruptTransmit, gfar_transmit,
730 SA_SHIRQ, "enet_tx", dev) < 0) {
731 printk(KERN_ERR "%s: Can't get IRQ %d\n",
732 dev->name, priv->einfo->interruptTransmit);
739 if (request_irq(priv->einfo->interruptReceive, gfar_receive,
740 SA_SHIRQ, "enet_rx", dev) < 0) {
741 printk(KERN_ERR "%s: Can't get IRQ %d (receive0)\n",
742 dev->name, priv->einfo->interruptReceive);
748 if (request_irq(priv->einfo->interruptTransmit, gfar_interrupt,
749 SA_SHIRQ, "gfar_interrupt", dev) < 0) {
750 printk(KERN_ERR "%s: Can't get IRQ %d\n",
751 dev->name, priv->einfo->interruptError);
758 /* Grab the PHY interrupt */
759 if (priv->einfo->flags & GFAR_HAS_PHY_INTR) {
760 if (request_irq(priv->einfo->interruptPHY, phy_interrupt,
761 SA_SHIRQ, "phy_interrupt", dev) < 0) {
762 printk(KERN_ERR "%s: Can't get IRQ %d (PHY)\n",
763 dev->name, priv->einfo->interruptPHY);
767 if (priv->einfo->flags & GFAR_HAS_MULTI_INTR)
773 init_timer(&priv->phy_info_timer);
774 priv->phy_info_timer.function = &gfar_phy_timer;
775 priv->phy_info_timer.data = (unsigned long) dev;
776 mod_timer(&priv->phy_info_timer, jiffies + 2 * HZ);
779 /* Set up the bottom half queue */
780 INIT_WORK(&priv->tq, (void (*)(void *))gfar_phy_change, dev);
782 /* Configure the PHY interrupt */
783 phy_run_commands(dev, priv->phyinfo->startup);
785 /* Tell the kernel the link is up, and determine the
786 * negotiated features (speed, duplex) */
790 printk(KERN_INFO "%s: No link detected\n", dev->name);
792 /* Configure the coalescing support */
793 if (priv->txcoalescing)
794 gfar_write(&regs->txic,
795 mk_ic_value(priv->txcount, priv->txtime));
797 gfar_write(&regs->txic, 0);
799 if (priv->rxcoalescing)
800 gfar_write(&regs->rxic,
801 mk_ic_value(priv->rxcount, priv->rxtime));
803 gfar_write(&regs->rxic, 0);
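/* rxcleanupq is used to signal the ring size changer (see the
 * corresponding wake-up comments in gfar_receive and gfar_poll) that
 * the receive path has gone quiet. */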
805 init_waitqueue_head(&priv->rxcleanupq);
807 /* Enable Rx and Tx in MACCFG1 */
808 tempval = gfar_read(&regs->maccfg1);
809 tempval |= (MACCFG1_RX_EN | MACCFG1_TX_EN);
810 gfar_write(&regs->maccfg1, tempval);
812 /* Initialize DMACTRL to have WWR and WOP */
813 tempval = gfar_read(&priv->regs->dmactrl);
814 tempval |= DMACTRL_INIT_SETTINGS;
815 gfar_write(&priv->regs->dmactrl, tempval);
817 /* Clear THLT, so that the DMA starts polling now */
818 gfar_write(&regs->tstat, TSTAT_CLEAR_THALT);
820 /* Make sure we aren't stopped */
821 tempval = gfar_read(&priv->regs->dmactrl);
822 tempval &= ~(DMACTRL_GRS | DMACTRL_GTS);
823 gfar_write(&priv->regs->dmactrl, tempval);
825 /* Unmask the interrupts we look for */
826 gfar_write(&regs->imask, IMASK_DEFAULT);
831 free_irq(priv->einfo->interruptReceive, dev);
833 free_irq(priv->einfo->interruptTransmit, dev);
835 free_irq(priv->einfo->interruptError, dev);
838 free_skb_resources(priv);
840 kfree(priv->tx_bd_base);
844 /* Called when something needs to use the ethernet device */
845 /* Returns 0 for success. */
846 static int gfar_enet_open(struct net_device *dev)
850 /* Initialize a bunch of registers */
853 gfar_set_mac_address(dev);
860 err = startup_gfar(dev);
862 netif_start_queue(dev);
867 /* This is called by the kernel when a frame is ready for transmission. */
868 /* It is pointed to by the dev->hard_start_xmit function pointer */
869 static int gfar_start_xmit(struct sk_buff *skb, struct net_device *dev)
871 struct gfar_private *priv = netdev_priv(dev);
874 /* Update transmit stats */
875 priv->stats.tx_bytes += skb->len;
878 spin_lock_irq(&priv->lock);
880 /* Point at the first free tx descriptor */
881 txbdp = priv->cur_tx;
883 /* Clear all but the WRAP status flags */
884 txbdp->status &= TXBD_WRAP;
886 /* Set buffer length and pointer */
887 txbdp->length = skb->len;
888 txbdp->bufPtr = dma_map_single(NULL, skb->data,
889 skb->len, DMA_TO_DEVICE);
891 /* Save the skb pointer so we can free it later */
892 priv->tx_skbuff[priv->skb_curtx] = skb;
894 /* Update the current skb pointer (wrapping if this was the last) */
896 (priv->skb_curtx + 1) & TX_RING_MOD_MASK(priv->tx_ring_size);
898 /* Flag the BD as interrupt-causing */
899 txbdp->status |= TXBD_INTERRUPT;
901 /* Flag the BD as ready to go, last in frame, and */
903 txbdp->status |= (TXBD_READY | TXBD_LAST | TXBD_CRC);
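/* Record when this transmit started so the watchdog (gfar_timeout,
 * armed via watchdog_timeo in gfar_probe) can detect a stalled queue. */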
905 dev->trans_start = jiffies;
907 /* If this was the last BD in the ring, the next one */
908 /* is at the beginning of the ring */
909 if (txbdp->status & TXBD_WRAP)
910 txbdp = priv->tx_bd_base;
914 /* If the next BD still needs to be cleaned up, then the bds
915 are full. We need to tell the kernel to stop sending us stuff. */
916 if (txbdp == priv->dirty_tx) {
917 netif_stop_queue(dev);
919 priv->stats.tx_fifo_errors++;
922 /* Update the current txbd to the next one */
923 priv->cur_tx = txbdp;
925 /* Tell the DMA to go go go */
926 gfar_write(&priv->regs->tstat, TSTAT_CLEAR_THALT);
929 spin_unlock_irq(&priv->lock);
934 /* Stops the kernel queue, and halts the controller */
935 static int gfar_close(struct net_device *dev)
939 netif_stop_queue(dev);
944 /* returns a net_device_stats structure pointer */
945 static struct net_device_stats * gfar_get_stats(struct net_device *dev)
947 struct gfar_private *priv = netdev_priv(dev);
949 return &(priv->stats);
952 /* Changes the mac address if the controller is not running. */
953 int gfar_set_mac_address(struct net_device *dev)
955 struct gfar_private *priv = netdev_priv(dev);
957 char tmpbuf[MAC_ADDR_LEN];
960 /* Now copy it into the mac registers backwards, because */
961 /* little endian is silly */
962 for (i = 0; i < MAC_ADDR_LEN; i++)
963 tmpbuf[MAC_ADDR_LEN - 1 - i] = dev->dev_addr[i];
965 gfar_write(&priv->regs->macstnaddr1, *((u32 *) (tmpbuf)));
967 tempval = *((u32 *) (tmpbuf + 4));
969 gfar_write(&priv->regs->macstnaddr2, tempval);
974 /**********************************************************************
975 * gfar_accept_fastpath
977 * Used to indicate to the kernel that a fast path entry can be
978 * added to device's routing table cache
980 * Input : pointer to ethernet interface network device structure and
981 * a pointer to the designated entry to be added to the cache.
982 * Output : zero upon success, negative upon failure
983 **********************************************************************/
984 #ifdef CONFIG_NET_FASTROUTE
985 static int gfar_accept_fastpath(struct net_device *dev, struct dst_entry *dst)
987 struct net_device *odev = dst->dev;
989 if ((dst->ops->protocol != __constant_htons(ETH_P_IP))
990 || (odev->type != ARPHRD_ETHER)
991 || (odev->accept_fastpath == NULL)) {
999 /* try_fastroute() -- Checks the fastroute cache to see if a given packet
1000 * can be routed immediately to another device. If it can, we send it.
1001 * If we used a fastroute, we return 1. Otherwise, we return 0.
1002 * Returns 0 if CONFIG_NET_FASTROUTE is not on
1004 static inline int try_fastroute(struct sk_buff *skb, struct net_device *dev, int length)
1006 #ifdef CONFIG_NET_FASTROUTE
1011 struct net_device *odev;
1012 struct gfar_private *priv = netdev_priv(dev);
1013 unsigned int CPU_ID = smp_processor_id();
1015 eth = (struct ethhdr *) (skb->data);
1017 /* Only route ethernet IP packets */
1018 if (eth->h_proto == __constant_htons(ETH_P_IP)) {
1019 iph = (struct iphdr *) (skb->data + ETH_HLEN);
1021 /* Generate the hash value */
1022 hash = ((*(u8 *) &iph->daddr) ^ (*(u8 *) & iph->saddr)) & NETDEV_FASTROUTE_HMASK;
1024 rt = (struct rtable *) (dev->fastpath[hash]);
1026 && ((*(u32 *) &iph->daddr) == (*(u32 *) &rt->key.dst))
1027 && ((*(u32 *) &iph->saddr) == (*(u32 *) &rt->key.src))
1028 && !(rt->u.dst.obsolete)) {
1029 odev = rt->u.dst.dev;
1030 netdev_rx_stat[CPU_ID].fastroute_hit++;
1032 /* Make sure the packet is:
1034 * 2) without any options (header length of 5)
1035 * 3) Not a multicast packet
1036 * 4) going to a valid destination
1037 * 5) Not out of time-to-live
1039 if (iph->version == 4
1041 && (!(eth->h_dest[0] & 0x01))
1042 && neigh_is_valid(rt->u.dst.neighbour)
1045 /* Fast Route Path: Taken if the outgoing device is ready to transmit the packet now */
1046 if ((!netif_queue_stopped(odev))
1047 && (!spin_is_locked(odev->xmit_lock))
1048 && (skb->len <= (odev->mtu + ETH_HLEN + 2 + 4))) {
1050 skb->pkt_type = PACKET_FASTROUTE;
1051 skb->protocol = __constant_htons(ETH_P_IP);
1052 ip_decrease_ttl(iph);
1053 memcpy(eth->h_source, odev->dev_addr, MAC_ADDR_LEN);
1054 memcpy(eth->h_dest, rt->u.dst.neighbour->ha, MAC_ADDR_LEN);
1057 /* Prep the skb for the packet */
1058 skb_put(skb, length);
1060 if (odev->hard_start_xmit(skb, odev) != 0) {
1061 panic("%s: FastRoute path corrupted", dev->name);
1063 netdev_rx_stat[CPU_ID].fastroute_success++;
1066 /* Semi Fast Route Path: Mark the packet as needing fast routing, but let the
1067 * stack handle getting it to the device */
1069 skb->pkt_type = PACKET_FASTROUTE;
1070 skb->nh.raw = skb->data + ETH_HLEN;
1071 skb->protocol = __constant_htons(ETH_P_IP);
1072 netdev_rx_stat[CPU_ID].fastroute_defer++;
1074 /* Prep the skb for the packet */
1075 skb_put(skb, length);
1077 if(RECEIVE(skb) == NET_RX_DROP) {
1078 priv->extra_stats.kernel_dropped++;
1086 #endif /* CONFIG_NET_FASTROUTE */
1090 static int gfar_change_mtu(struct net_device *dev, int new_mtu)
1092 int tempsize, tempval;
1093 struct gfar_private *priv = netdev_priv(dev);
1094 int oldsize = priv->rx_buffer_size;
1095 int frame_size = new_mtu + 18;
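/* frame_size: the new MTU plus the 14-byte Ethernet header and the
 * 4-byte FCS */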
1097 if ((frame_size < 64) || (frame_size > JUMBO_FRAME_SIZE)) {
1098 printk(KERN_ERR "%s: Invalid MTU setting\n", dev->name);
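/* Round the required size up to the next INCREMENTAL_BUFFER_SIZE
 * boundary; that is what the expression below computes. */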
1103 (frame_size & ~(INCREMENTAL_BUFFER_SIZE - 1)) +
1104 INCREMENTAL_BUFFER_SIZE;
1106 /* Only stop and start the controller if it isn't already
1108 if ((oldsize != tempsize) && (dev->flags & IFF_UP))
1111 priv->rx_buffer_size = tempsize;
1115 gfar_write(&priv->regs->mrblr, priv->rx_buffer_size);
1116 gfar_write(&priv->regs->maxfrm, priv->rx_buffer_size);
1118 /* If the mtu is larger than the max size for standard
1119 * ethernet frames (ie, a jumbo frame), then set maccfg2
1120 * to allow huge frames, and to check the length */
1121 tempval = gfar_read(&priv->regs->maccfg2);
1123 if (priv->rx_buffer_size > DEFAULT_RX_BUFFER_SIZE)
1124 tempval |= (MACCFG2_HUGEFRAME | MACCFG2_LENGTHCHECK);
1126 tempval &= ~(MACCFG2_HUGEFRAME | MACCFG2_LENGTHCHECK);
1128 gfar_write(&priv->regs->maccfg2, tempval);
1130 if ((oldsize != tempsize) && (dev->flags & IFF_UP))
1136 /* gfar_timeout gets called when a packet has not been
1137 * transmitted after a set amount of time.
1138 * For now, assume that clearing out all the structures, and
1139 * starting over will fix the problem. */
1140 static void gfar_timeout(struct net_device *dev)
1142 struct gfar_private *priv = netdev_priv(dev);
1144 priv->stats.tx_errors++;
1146 if (dev->flags & IFF_UP) {
1151 if (!netif_queue_stopped(dev))
1152 netif_schedule(dev);
1155 /* Interrupt Handler for Transmit complete */
1156 static irqreturn_t gfar_transmit(int irq, void *dev_id, struct pt_regs *regs)
1158 struct net_device *dev = (struct net_device *) dev_id;
1159 struct gfar_private *priv = netdev_priv(dev);
1163 gfar_write(&priv->regs->ievent, IEVENT_TX_MASK);
1166 spin_lock(&priv->lock);
1167 bdp = priv->dirty_tx;
1168 while ((bdp->status & TXBD_READY) == 0) {
1169 /* If dirty_tx and cur_tx are the same, then either the */
1170 /* ring is empty or full now (it could only be full in the beginning, */
1171 /* obviously). If it is empty, we are done. */
1172 if ((bdp == priv->cur_tx) && (netif_queue_stopped(dev) == 0))
1175 priv->stats.tx_packets++;
1177 /* Deferred means some collisions occurred during transmit, */
1178 /* but we eventually sent the packet. */
1179 if (bdp->status & TXBD_DEF)
1180 priv->stats.collisions++;
1182 /* Free the sk buffer associated with this TxBD */
1183 dev_kfree_skb_irq(priv->tx_skbuff[priv->skb_dirtytx]);
1184 priv->tx_skbuff[priv->skb_dirtytx] = NULL;
1186 (priv->skb_dirtytx +
1187 1) & TX_RING_MOD_MASK(priv->tx_ring_size);
1189 /* update bdp to point at next bd in the ring (wrapping if necessary) */
1190 if (bdp->status & TXBD_WRAP)
1191 bdp = priv->tx_bd_base;
1195 /* Move dirty_tx to be the next bd */
1196 priv->dirty_tx = bdp;
1198 /* We freed a buffer, so now we can restart transmission */
1199 if (netif_queue_stopped(dev))
1200 netif_wake_queue(dev);
1201 } /* while ((bdp->status & TXBD_READY) == 0) */
1203 /* If we are coalescing the interrupts, reset the timer */
1204 /* Otherwise, clear it */
1205 if (priv->txcoalescing)
1206 gfar_write(&priv->regs->txic,
1207 mk_ic_value(priv->txcount, priv->txtime));
1209 gfar_write(&priv->regs->txic, 0);
1211 spin_unlock(&priv->lock);
1216 struct sk_buff * gfar_new_skb(struct net_device *dev, struct rxbd8 *bdp)
1218 struct gfar_private *priv = netdev_priv(dev);
1219 struct sk_buff *skb = NULL;
1220 unsigned int timeout = SKB_ALLOC_TIMEOUT;
1222 /* We have to allocate the skb, so keep trying till we succeed */
1223 while ((!skb) && timeout--)
1224 skb = dev_alloc_skb(priv->rx_buffer_size + RXBUF_ALIGNMENT);
1229 /* We need the data buffer to be aligned properly. We will reserve
1230 * as many bytes as needed to align the data properly
1234 (((unsigned) skb->data) & (RXBUF_ALIGNMENT - 1)));
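/* Map the new buffer for DMA and give its bus address to the
 * descriptor so the controller can fill it. */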
1238 bdp->bufPtr = dma_map_single(NULL, skb->data,
1239 priv->rx_buffer_size + RXBUF_ALIGNMENT,
1244 /* Mark the buffer empty */
1245 bdp->status |= (RXBD_EMPTY | RXBD_INTERRUPT);
1250 static inline void count_errors(unsigned short status, struct gfar_private *priv)
1252 struct net_device_stats *stats = &priv->stats;
1253 struct gfar_extra_stats *estats = &priv->extra_stats;
1255 /* If the packet was truncated, none of the other errors
1257 if (status & RXBD_TRUNCATED) {
1258 stats->rx_length_errors++;
1264 /* Count the errors, if there were any */
1265 if (status & (RXBD_LARGE | RXBD_SHORT)) {
1266 stats->rx_length_errors++;
1268 if (status & RXBD_LARGE)
1273 if (status & RXBD_NONOCTET) {
1274 stats->rx_frame_errors++;
1275 estats->rx_nonoctet++;
1277 if (status & RXBD_CRCERR) {
1278 estats->rx_crcerr++;
1279 stats->rx_crc_errors++;
1281 if (status & RXBD_OVERRUN) {
1282 estats->rx_overrun++;
1283 stats->rx_crc_errors++;
1287 irqreturn_t gfar_receive(int irq, void *dev_id, struct pt_regs *regs)
1289 struct net_device *dev = (struct net_device *) dev_id;
1290 struct gfar_private *priv = netdev_priv(dev);
1292 #ifdef CONFIG_GFAR_NAPI
1296 /* Clear IEVENT, so rx interrupt isn't called again
1297 * because of this interrupt */
1298 gfar_write(&priv->regs->ievent, IEVENT_RX_MASK);
1301 #ifdef CONFIG_GFAR_NAPI
1302 if (netif_rx_schedule_prep(dev)) {
1303 tempval = gfar_read(&priv->regs->imask);
1304 tempval &= IMASK_RX_DISABLED;
1305 gfar_write(&priv->regs->imask, tempval);
1307 __netif_rx_schedule(dev);
1309 #ifdef VERBOSE_GFAR_ERRORS
1310 printk(KERN_DEBUG "%s: receive called twice (%x)[%x]\n",
1311 dev->name, gfar_read(&priv->regs->ievent),
1312 gfar_read(&priv->regs->imask));
1317 spin_lock(&priv->lock);
1318 gfar_clean_rx_ring(dev);
1320 /* If we are coalescing interrupts, update the timer */
1321 /* Otherwise, clear it */
1322 if (priv->rxcoalescing)
1323 gfar_write(&priv->regs->rxic,
1324 mk_ic_value(priv->rxcount, priv->rxtime));
1326 gfar_write(&priv->regs->rxic, 0);
1328 /* Just in case we need to wake the ring param changer */
1331 spin_unlock(&priv->lock);
1338 /* gfar_process_frame() -- handle one incoming packet if skb
1339 * isn't NULL. Try the fastroute before using the stack */
1340 static int gfar_process_frame(struct net_device *dev, struct sk_buff *skb,
1343 struct gfar_private *priv = netdev_priv(dev);
1346 #ifdef BRIEF_GFAR_ERRORS
1347 printk(KERN_WARNING "%s: Missing skb!\n",
1350 priv->stats.rx_dropped++;
1351 priv->extra_stats.rx_skbmissing++;
1353 if(try_fastroute(skb, dev, length) == 0) {
1354 /* Prep the skb for the packet */
1355 skb_put(skb, length);
1357 /* Tell the skb what kind of packet this is */
1358 skb->protocol = eth_type_trans(skb, dev);
1360 /* Send the packet up the stack */
1361 if (RECEIVE(skb) == NET_RX_DROP) {
1362 priv->extra_stats.kernel_dropped++;
1370 /* gfar_clean_rx_ring() -- Processes each frame in the rx ring
1371 * until all are gone (or, in the case of NAPI, the budget/quota
1372 * has been reached). Returns the number of frames handled
1374 #ifdef CONFIG_GFAR_NAPI
1375 static int gfar_clean_rx_ring(struct net_device *dev, int rx_work_limit)
1377 static int gfar_clean_rx_ring(struct net_device *dev)
1381 struct sk_buff *skb;
1384 struct gfar_private *priv = netdev_priv(dev);
1386 /* Get the first full descriptor */
1389 #ifdef CONFIG_GFAR_NAPI
1390 #define GFAR_RXDONE() ((bdp->status & RXBD_EMPTY) || (--rx_work_limit < 0))
1392 #define GFAR_RXDONE() (bdp->status & RXBD_EMPTY)
1394 while (!GFAR_RXDONE()) {
1395 skb = priv->rx_skbuff[priv->skb_currx];
1398 (RXBD_LARGE | RXBD_SHORT | RXBD_NONOCTET
1399 | RXBD_CRCERR | RXBD_OVERRUN | RXBD_TRUNCATED))) {
1400 /* Increment the number of packets */
1401 priv->stats.rx_packets++;
1404 /* Remove the FCS from the packet length */
1405 pkt_len = bdp->length - 4;
1407 gfar_process_frame(dev, skb, pkt_len);
1409 priv->stats.rx_bytes += pkt_len;
1412 count_errors(bdp->status, priv);
1415 dev_kfree_skb_any(skb);
1417 priv->rx_skbuff[priv->skb_currx] = NULL;
1420 dev->last_rx = jiffies;
1422 /* Clear the status flags for this buffer */
1423 bdp->status &= ~RXBD_STATS;
1425 /* Add another skb for the future */
1426 skb = gfar_new_skb(dev, bdp);
1427 priv->rx_skbuff[priv->skb_currx] = skb;
1429 /* Update to the next pointer */
1430 if (bdp->status & RXBD_WRAP)
1431 bdp = priv->rx_bd_base;
1435 /* update to point at the next skb */
1438 1) & RX_RING_MOD_MASK(priv->rx_ring_size);
1442 /* Update the current rxbd pointer to be the next one */
1445 /* If no packets have arrived since the
1446 * last one we processed, clear the IEVENT RX and
1447 * BSY bits so that another interrupt won't be
1448 * generated when we set IMASK */
1449 if (bdp->status & RXBD_EMPTY)
1450 gfar_write(&priv->regs->ievent, IEVENT_RX_MASK);
1455 #ifdef CONFIG_GFAR_NAPI
1456 static int gfar_poll(struct net_device *dev, int *budget)
1459 struct gfar_private *priv = netdev_priv(dev);
1460 int rx_work_limit = *budget;
1462 if (rx_work_limit > dev->quota)
1463 rx_work_limit = dev->quota;
1465 spin_lock(&priv->lock);
1466 howmany = gfar_clean_rx_ring(dev, rx_work_limit);
1468 dev->quota -= howmany;
1469 rx_work_limit -= howmany;
1472 if (rx_work_limit >= 0) {
1473 netif_rx_complete(dev);
1475 /* Clear the halt bit in RSTAT */
1476 gfar_write(&priv->regs->rstat, RSTAT_CLEAR_RHALT);
1478 gfar_write(&priv->regs->imask, IMASK_DEFAULT);
1480 /* If we are coalescing interrupts, update the timer */
1481 /* Otherwise, clear it */
1482 if (priv->rxcoalescing)
1483 gfar_write(&priv->regs->rxic,
1484 mk_ic_value(priv->rxcount, priv->rxtime));
1486 gfar_write(&priv->regs->rxic, 0);
1488 /* Signal to the ring size changer that it's safe to go */
1492 spin_unlock(&priv->lock);
1494 return (rx_work_limit < 0) ? 1 : 0;
1498 /* The interrupt handler for devices with one interrupt */
1499 static irqreturn_t gfar_interrupt(int irq, void *dev_id, struct pt_regs *regs)
1501 struct net_device *dev = dev_id;
1502 struct gfar_private *priv = netdev_priv(dev);
1504 /* Save ievent for future reference */
1505 u32 events = gfar_read(&priv->regs->ievent);
1508 gfar_write(&priv->regs->ievent, events);
1510 /* Check for reception */
1511 if ((events & IEVENT_RXF0) || (events & IEVENT_RXB0))
1512 gfar_receive(irq, dev_id, regs);
1514 /* Check for transmit completion */
1515 if ((events & IEVENT_TXF) || (events & IEVENT_TXB))
1516 gfar_transmit(irq, dev_id, regs);
1518 /* Update error statistics */
1519 if (events & IEVENT_TXE) {
1520 priv->stats.tx_errors++;
1522 if (events & IEVENT_LC)
1523 priv->stats.tx_window_errors++;
1524 if (events & IEVENT_CRL)
1525 priv->stats.tx_aborted_errors++;
1526 if (events & IEVENT_XFUN) {
1527 #ifdef VERBOSE_GFAR_ERRORS
1528 printk(KERN_WARNING "%s: tx underrun. dropped packet\n",
1531 priv->stats.tx_dropped++;
1532 priv->extra_stats.tx_underrun++;
1534 /* Reactivate the Tx Queues */
1535 gfar_write(&priv->regs->tstat, TSTAT_CLEAR_THALT);
1538 if (events & IEVENT_BSY) {
1539 priv->stats.rx_errors++;
1540 priv->extra_stats.rx_bsy++;
1542 gfar_receive(irq, dev_id, regs);
1544 #ifndef CONFIG_GFAR_NAPI
1545 /* Clear the halt bit in RSTAT */
1546 gfar_write(&priv->regs->rstat, RSTAT_CLEAR_RHALT);
1549 #ifdef VERBOSE_GFAR_ERRORS
1550 printk(KERN_DEBUG "%s: busy error (rhalt: %x)\n", dev->name,
1551 gfar_read(&priv->regs->rstat));
1554 if (events & IEVENT_BABR) {
1555 priv->stats.rx_errors++;
1556 priv->extra_stats.rx_babr++;
1558 #ifdef VERBOSE_GFAR_ERRORS
1559 printk(KERN_DEBUG "%s: babbling error\n", dev->name);
1562 if (events & IEVENT_EBERR) {
1563 priv->extra_stats.eberr++;
1564 #ifdef VERBOSE_GFAR_ERRORS
1565 printk(KERN_DEBUG "%s: EBERR\n", dev->name);
1568 if (events & IEVENT_RXC) {
1569 #ifdef VERBOSE_GFAR_ERRORS
1570 printk(KERN_DEBUG "%s: control frame\n", dev->name);
1574 if (events & IEVENT_BABT) {
1575 priv->extra_stats.tx_babt++;
1576 #ifdef VERBOSE_GFAR_ERRORS
1577 printk(KERN_DEBUG "%s: babt error\n", dev->name);
1584 static irqreturn_t phy_interrupt(int irq, void *dev_id, struct pt_regs *regs)
1586 struct net_device *dev = (struct net_device *) dev_id;
1587 struct gfar_private *priv = netdev_priv(dev);
1589 /* Run the commands which acknowledge the interrupt */
1590 phy_run_commands(dev, priv->phyinfo->ack_int);
1592 /* Schedule the bottom half */
1593 schedule_work(&priv->tq);
1598 /* Scheduled by the phy_interrupt/timer to handle PHY changes */
1599 static void gfar_phy_change(void *data)
1601 struct net_device *dev = (struct net_device *) data;
1602 struct gfar_private *priv = netdev_priv(dev);
1603 int timeout = HZ / 1000 + 1;
1605 /* Delay to give the PHY a chance to change the
1607 set_current_state(TASK_UNINTERRUPTIBLE);
1608 schedule_timeout(timeout);
1610 /* Run the commands which check the link state */
1611 phy_run_commands(dev, priv->phyinfo->handle_int);
1613 /* React to the change in state */
1617 /* Called every so often on systems that don't interrupt
1618 * the core for PHY changes */
1619 static void gfar_phy_timer(unsigned long data)
1621 struct net_device *dev = (struct net_device *) data;
1622 struct gfar_private *priv = netdev_priv(dev);
1624 schedule_work(&priv->tq);
1626 mod_timer(&priv->phy_info_timer, jiffies + 2 * HZ);
1629 /* Called every time the controller might need to be made
1630 * aware of new link state. The PHY code conveys this
1631 * information through variables in the priv structure, and this
1632 * function converts those variables into the appropriate
1633 * register values, and can bring down the device if needed.
1635 static void adjust_link(struct net_device *dev)
1637 struct gfar_private *priv = netdev_priv(dev);
1638 struct gfar *regs = priv->regs;
1642 /* Now we make sure that we can be in full duplex mode.
1643 * If not, we operate in half-duplex mode. */
1644 if (priv->duplexity != priv->olddplx) {
1645 if (!(priv->duplexity)) {
1646 tempval = gfar_read(&regs->maccfg2);
1647 tempval &= ~(MACCFG2_FULL_DUPLEX);
1648 gfar_write(&regs->maccfg2, tempval);
1650 printk(KERN_INFO "%s: Half Duplex\n",
1653 tempval = gfar_read(&regs->maccfg2);
1654 tempval |= MACCFG2_FULL_DUPLEX;
1655 gfar_write(&regs->maccfg2, tempval);
1657 printk(KERN_INFO "%s: Full Duplex\n",
1661 priv->olddplx = priv->duplexity;
1664 if (priv->speed != priv->oldspeed) {
1665 switch (priv->speed) {
1667 tempval = gfar_read(&regs->maccfg2);
1669 ((tempval & ~(MACCFG2_IF)) | MACCFG2_GMII);
1670 gfar_write(&regs->maccfg2, tempval);
1674 tempval = gfar_read(&regs->maccfg2);
1676 ((tempval & ~(MACCFG2_IF)) | MACCFG2_MII);
1677 gfar_write(&regs->maccfg2, tempval);
1681 "%s: Ack! Speed (%d) is not 10/100/1000!\n",
1682 dev->name, priv->speed);
1686 printk(KERN_INFO "%s: Speed %dBT\n", dev->name,
1689 priv->oldspeed = priv->speed;
1692 if (!priv->oldlink) {
1693 printk(KERN_INFO "%s: Link is up\n", dev->name);
1695 netif_carrier_on(dev);
1696 netif_schedule(dev);
1699 if (priv->oldlink) {
1700 printk(KERN_INFO "%s: Link is down\n", dev->name);
1704 netif_carrier_off(dev);
1708 #ifdef VERBOSE_GFAR_ERRORS
1709 printk(KERN_INFO "%s: Link now %s; %dBT %s-duplex\n",
1710 dev->name, priv->link ? "up" : "down", priv->speed, priv->duplexity ? "full" : "half");
1715 /* Update the hash table based on the current list of multicast
1716 * addresses we subscribe to. Also, change the promiscuity of
1717 * the device based on the flags (this function is called
1718 * whenever dev->flags is changed) */
1719 static void gfar_set_multi(struct net_device *dev)
1721 struct dev_mc_list *mc_ptr;
1722 struct gfar_private *priv = netdev_priv(dev);
1723 struct gfar *regs = priv->regs;
1726 if(dev->flags & IFF_PROMISC) {
1727 printk(KERN_INFO "%s: Entering promiscuous mode.\n",
1729 /* Set RCTRL to PROM */
1730 tempval = gfar_read(&regs->rctrl);
1731 tempval |= RCTRL_PROM;
1732 gfar_write(&regs->rctrl, tempval);
1734 /* Set RCTRL to not PROM */
1735 tempval = gfar_read(&regs->rctrl);
1736 tempval &= ~(RCTRL_PROM);
1737 gfar_write(&regs->rctrl, tempval);
1740 if(dev->flags & IFF_ALLMULTI) {
1741 /* Set the hash to rx all multicast frames */
1742 gfar_write(&regs->gaddr0, 0xffffffff);
1743 gfar_write(&regs->gaddr1, 0xffffffff);
1744 gfar_write(&regs->gaddr2, 0xffffffff);
1745 gfar_write(&regs->gaddr3, 0xffffffff);
1746 gfar_write(&regs->gaddr4, 0xffffffff);
1747 gfar_write(&regs->gaddr5, 0xffffffff);
1748 gfar_write(&regs->gaddr6, 0xffffffff);
1749 gfar_write(&regs->gaddr7, 0xffffffff);
1751 /* zero out the hash */
1752 gfar_write(&regs->gaddr0, 0x0);
1753 gfar_write(&regs->gaddr1, 0x0);
1754 gfar_write(&regs->gaddr2, 0x0);
1755 gfar_write(&regs->gaddr3, 0x0);
1756 gfar_write(&regs->gaddr4, 0x0);
1757 gfar_write(&regs->gaddr5, 0x0);
1758 gfar_write(&regs->gaddr6, 0x0);
1759 gfar_write(&regs->gaddr7, 0x0);
1761 if(dev->mc_count == 0)
1764 /* Parse the list, and set the appropriate bits */
1765 for(mc_ptr = dev->mc_list; mc_ptr; mc_ptr = mc_ptr->next) {
1766 gfar_set_hash_for_addr(dev, mc_ptr->dmi_addr);
1773 /* Set the appropriate hash bit for the given addr */
1774 /* The algorithm works like so:
1775 * 1) Take the Destination Address (ie the multicast address), and
1776 * do a CRC on it (little endian), and reverse the bits of the result.
1778 * 2) Use the 8 most significant bits as a hash into a 256-entry
1779 * table. The table is controlled through 8 32-bit registers:
1780 * gaddr0-7. gaddr0's MSB is entry 0, and gaddr7's LSB is
1781 * entry 255. This means that the 3 most significant bits of the
1782 * hash select which gaddr register to use, and the 5 other bits
1783 * indicate which bit (assuming an IBM numbering scheme, which
1784 * for PowerPC (tm) is usually the case) in the register holds
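 *
 * Worked example (illustrative arithmetic only): a CRC result whose top
 * byte is 0xA3 gives whichreg = (result >> 29) & 0x7 = 5 and
 * whichbit = (result >> 24) & 0x1f = 3, so bit 3 (IBM numbering, i.e.
 * the value 1 << 28) of gaddr5 is set for that address.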
1786 static void gfar_set_hash_for_addr(struct net_device *dev, u8 *addr)
1789 struct gfar_private *priv = netdev_priv(dev);
1790 struct gfar *regs = priv->regs;
1791 u32 *hash = &regs->gaddr0;
1792 u32 result = ether_crc(MAC_ADDR_LEN, addr);
1793 u8 whichreg = ((result >> 29) & 0x7);
1794 u8 whichbit = ((result >> 24) & 0x1f);
1795 u32 value = (1 << (31-whichbit));
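/* Read-modify-write the selected group-address register, setting the
 * chosen bit. */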
1797 tempval = gfar_read(&hash[whichreg]);
1799 gfar_write(&hash[whichreg], tempval);
1804 /* GFAR error interrupt handler */
1805 static irqreturn_t gfar_error(int irq, void *dev_id, struct pt_regs *regs)
1807 struct net_device *dev = dev_id;
1808 struct gfar_private *priv = netdev_priv(dev);
1810 /* Save ievent for future reference */
1811 u32 events = gfar_read(&priv->regs->ievent);
1814 gfar_write(&priv->regs->ievent, IEVENT_ERR_MASK);
1817 #if defined (BRIEF_GFAR_ERRORS) || defined (VERBOSE_GFAR_ERRORS)
1818 printk(KERN_DEBUG "%s: error interrupt (ievent=0x%08x imask=0x%08x)\n",
1819 dev->name, events, gfar_read(&priv->regs->imask));
1822 /* Update the error counters */
1823 if (events & IEVENT_TXE) {
1824 priv->stats.tx_errors++;
1826 if (events & IEVENT_LC)
1827 priv->stats.tx_window_errors++;
1828 if (events & IEVENT_CRL)
1829 priv->stats.tx_aborted_errors++;
1830 if (events & IEVENT_XFUN) {
1831 #ifdef VERBOSE_GFAR_ERRORS
1832 printk(KERN_DEBUG "%s: underrun. packet dropped.\n",
1835 priv->stats.tx_dropped++;
1836 priv->extra_stats.tx_underrun++;
1838 /* Reactivate the Tx Queues */
1839 gfar_write(&priv->regs->tstat, TSTAT_CLEAR_THALT);
1841 #ifdef VERBOSE_GFAR_ERRORS
1842 printk(KERN_DEBUG "%s: Transmit Error\n", dev->name);
1845 if (events & IEVENT_BSY) {
1846 priv->stats.rx_errors++;
1847 priv->extra_stats.rx_bsy++;
1849 gfar_receive(irq, dev_id, regs);
1851 #ifndef CONFIG_GFAR_NAPI
1852 /* Clear the halt bit in RSTAT */
1853 gfar_write(&priv->regs->rstat, RSTAT_CLEAR_RHALT);
1856 #ifdef VERBOSE_GFAR_ERRORS
1857 printk(KERN_DEBUG "%s: busy error (rhalt: %x)\n", dev->name,
1858 gfar_read(&priv->regs->rstat));
1861 if (events & IEVENT_BABR) {
1862 priv->stats.rx_errors++;
1863 priv->extra_stats.rx_babr++;
1865 #ifdef VERBOSE_GFAR_ERRORS
1866 printk(KERN_DEBUG "%s: babbling error\n", dev->name);
1869 if (events & IEVENT_EBERR) {
1870 priv->extra_stats.eberr++;
1871 #ifdef VERBOSE_GFAR_ERRORS
1872 printk(KERN_DEBUG "%s: EBERR\n", dev->name);
1875 if (events & IEVENT_RXC)
1876 #ifdef VERBOSE_GFAR_ERRORS
1877 printk(KERN_DEBUG "%s: control frame\n", dev->name);
1880 if (events & IEVENT_BABT) {
1881 priv->extra_stats.tx_babt++;
1882 #ifdef VERBOSE_GFAR_ERRORS
1883 printk(KERN_DEBUG "%s: babt error\n", dev->name);
1889 /* Structure for a device driver */
1890 static struct ocp_device_id gfar_ids[] = {
1891 {.vendor = OCP_ANY_ID,.function = OCP_FUNC_GFAR},
1892 {.vendor = OCP_VENDOR_INVALID}
1895 static struct ocp_driver gfar_driver = {
1897 .id_table = gfar_ids,
1899 .probe = gfar_probe,
1900 .remove = gfar_remove,
1903 static int __init gfar_init(void)
1907 rc = ocp_register_driver(&gfar_driver);
1908 #if LINUX_VERSION_CODE > KERNEL_VERSION(2,5,41)
1913 ocp_unregister_driver(&gfar_driver);
1920 static void __exit gfar_exit(void)
1922 ocp_unregister_driver(&gfar_driver);
1925 module_init(gfar_init);
1926 module_exit(gfar_exit);