2 /* Advanced Micro Devices Inc. AMD8111E Linux Network Driver
3 * Copyright (C) 2004 Advanced Micro Devices
6 * Copyright 2001,2002 Jeff Garzik <jgarzik@mandrakesoft.com> [ 8139cp.c,tg3.c ]
7 * Copyright (C) 2001, 2002 David S. Miller (davem@redhat.com)[ tg3.c]
8 * Copyright 1996-1999 Thomas Bogendoerfer [ pcnet32.c ]
9 * Derived from the lance driver written 1993,1994,1995 by Donald Becker.
10 * Copyright 1993 United States Government as represented by the
11 * Director, National Security Agency.[ pcnet32.c ]
12 * Carsten Langgaard, carstenl@mips.com [ pcnet32.c ]
13 * Copyright (C) 2000 MIPS Technologies, Inc. All rights reserved.
16 * This program is free software; you can redistribute it and/or modify
17 * it under the terms of the GNU General Public License as published by
18 * the Free Software Foundation; either version 2 of the License, or
19 * (at your option) any later version.
21 * This program is distributed in the hope that it will be useful,
22 * but WITHOUT ANY WARRANTY; without even the implied warranty of
23 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
24 * GNU General Public License for more details.
26 * You should have received a copy of the GNU General Public License
27 * along with this program; if not, write to the Free Software
28 * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307
37 AMD8111 based 10/100 Ethernet Controller Driver.
47 1. Dynamic interrupt coalescing.
48 2. Removed prev_stats.
50 4. Dynamic IPG support
52 1. Bug fix: Fixed failure to send jumbo packets larger than 4k.
53 2. Bug fix: Fixed VLAN support failure.
54 3. Bug fix: Fixed receive interrupt coalescing bug.
55 4. Dynamic IPG support is disabled by default.
57 1. Bug fix: Fixed failure to close the interface if SMP is enabled.
59 1. Added set_mac_address routine for bonding driver support.
60 2. Tested the driver for bonding support
3. Bug fix: Fixed mismatch between the actual receive buffer length and the length
63 4. Modified amd8111e_rx() routine to receive all the received packets
64 in the first interrupt.
65 5. Bug fix: Corrected rx_errors reported in get_stats() function.
72 #include <linux/config.h>
73 #include <linux/module.h>
74 #include <linux/kernel.h>
75 #include <linux/types.h>
76 #include <linux/compiler.h>
77 #include <linux/slab.h>
78 #include <linux/delay.h>
79 #include <linux/init.h>
80 #include <linux/ioport.h>
81 #include <linux/pci.h>
82 #include <linux/netdevice.h>
83 #include <linux/etherdevice.h>
84 #include <linux/skbuff.h>
85 #include <linux/ethtool.h>
86 #include <linux/mii.h>
87 #include <linux/if_vlan.h>
88 #include <linux/ctype.h>
89 #include <linux/crc32.h>
91 #include <asm/system.h>
93 #include <asm/byteorder.h>
94 #include <asm/uaccess.h>
96 #if defined(CONFIG_VLAN_8021Q) || defined(CONFIG_VLAN_8021Q_MODULE)
97 #define AMD8111E_VLAN_TAG_USED 1
99 #define AMD8111E_VLAN_TAG_USED 0
102 #include "amd8111e.h"
103 #define MODULE_NAME "amd8111e"
104 #define MODULE_VERS "3.0.5"
105 MODULE_AUTHOR("Advanced Micro Devices, Inc.");
106 MODULE_DESCRIPTION ("AMD8111 based 10/100 Ethernet Controller. Driver Version 3.0.3");
107 MODULE_LICENSE("GPL");
108 MODULE_PARM(speed_duplex, "1-" __MODULE_STRING (MAX_UNITS) "i");
109 MODULE_PARM_DESC(speed_duplex, "Set device speed and duplex modes, 0: Auto Negotitate, 1: 10Mbps Half Duplex, 2: 10Mbps Full Duplex, 3: 100Mbps Half Duplex, 4: 100Mbps Full Duplex");
110 MODULE_PARM(coalesce, "1-" __MODULE_STRING(MAX_UNITS) "i");
111 MODULE_PARM_DESC(coalesce, "Enable or Disable interrupt coalescing, 1: Enable, 0: Disable");
112 MODULE_PARM(dynamic_ipg, "1-" __MODULE_STRING(MAX_UNITS) "i");
113 MODULE_PARM_DESC(dynamic_ipg, "Enable or Disable dynamic IPG, 1: Enable, 0: Disable");
/* PCI IDs this driver claims; matched by the PCI core during probe. */
static struct pci_device_id amd8111e_pci_tbl[] = {
	{ PCI_VENDOR_ID_AMD, PCI_DEVICE_ID_AMD8111E_7462,
	  PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0UL },
	/* NOTE(review): the all-zero terminator entry, the closing brace and the
	 * MODULE_DEVICE_TABLE(pci, ...) line appear to have been lost in
	 * extraction -- confirm against the full source. */
123 This function will read the PHY registers.
125 static int amd8111e_read_phy(struct amd8111e_priv* lp, int phy_id, int reg, u32* val)
127 void __iomem *mmio = lp->mmio;
128 unsigned int reg_val;
129 unsigned int repeat= REPEAT_CNT;
131 reg_val = readl(mmio + PHY_ACCESS);
132 while (reg_val & PHY_CMD_ACTIVE)
133 reg_val = readl( mmio + PHY_ACCESS );
135 writel( PHY_RD_CMD | ((phy_id & 0x1f) << 21) |
136 ((reg & 0x1f) << 16), mmio +PHY_ACCESS);
138 reg_val = readl(mmio + PHY_ACCESS);
139 udelay(30); /* It takes 30 us to read/write data */
140 } while (--repeat && (reg_val & PHY_CMD_ACTIVE));
141 if(reg_val & PHY_RD_ERR)
144 *val = reg_val & 0xffff;
153 This function will write into PHY registers.
155 static int amd8111e_write_phy(struct amd8111e_priv* lp,int phy_id, int reg, u32 val)
157 unsigned int repeat = REPEAT_CNT
158 void __iomem *mmio = lp->mmio;
159 unsigned int reg_val;
161 reg_val = readl(mmio + PHY_ACCESS);
162 while (reg_val & PHY_CMD_ACTIVE)
163 reg_val = readl( mmio + PHY_ACCESS );
165 writel( PHY_WR_CMD | ((phy_id & 0x1f) << 21) |
166 ((reg & 0x1f) << 16)|val, mmio + PHY_ACCESS);
169 reg_val = readl(mmio + PHY_ACCESS);
170 udelay(30); /* It takes 30 us to read/write the data */
171 } while (--repeat && (reg_val & PHY_CMD_ACTIVE));
173 if(reg_val & PHY_RD_ERR)
/*
 * This is the mii register read function provided to the mii interface.
 * Returns the 16-bit PHY register value (0 when the underlying read fails,
 * since amd8111e_read_phy clears the result on error).
 */
static int amd8111e_mdio_read(struct net_device *dev, int phy_id, int reg_num)
{
	struct amd8111e_priv *lp = netdev_priv(dev);
	unsigned int reg_val;

	/* Fix: the address-of expression "&reg_val" had been corrupted by a
	 * character-encoding error (the "&reg" run was collapsed into a
	 * registered-trademark glyph); the helper needs the address of the
	 * result variable. */
	amd8111e_read_phy(lp, phy_id, reg_num, &reg_val);
	return reg_val;
}
/*
 * This is the mii register write function provided to the mii interface.
 * Thin wrapper over amd8111e_write_phy(); the mii library's write callback
 * returns void, so a write error is silently ignored here.
 */
static void amd8111e_mdio_write(struct net_device *dev, int phy_id, int reg_num, int val)
{
	struct amd8111e_priv *lp = netdev_priv(dev);

	amd8111e_write_phy(lp, phy_id, reg_num, val);
}
206 This function will set PHY speed. During initialization sets the original speed to 100 full.
208 static void amd8111e_set_ext_phy(struct net_device *dev)
210 struct amd8111e_priv *lp = netdev_priv(dev);
213 /* Determine mii register values to set the speed */
214 advert = amd8111e_mdio_read(dev, lp->ext_phy_addr, MII_ADVERTISE);
215 tmp = advert & ~(ADVERTISE_ALL | ADVERTISE_100BASE4);
216 switch (lp->ext_phy_option){
219 case SPEED_AUTONEG: /* advertise all values */
220 tmp |= ( ADVERTISE_10HALF|ADVERTISE_10FULL|
221 ADVERTISE_100HALF|ADVERTISE_100FULL) ;
224 tmp |= ADVERTISE_10HALF;
227 tmp |= ADVERTISE_10FULL;
230 tmp |= ADVERTISE_100HALF;
233 tmp |= ADVERTISE_100FULL;
238 amd8111e_mdio_write(dev, lp->ext_phy_addr, MII_ADVERTISE, tmp);
239 /* Restart auto negotiation */
240 bmcr = amd8111e_mdio_read(dev, lp->ext_phy_addr, MII_BMCR);
241 bmcr |= (BMCR_ANENABLE | BMCR_ANRESTART);
242 amd8111e_mdio_write(dev, lp->ext_phy_addr, MII_BMCR, bmcr);
247 This function will unmap skb->data space and will free
248 all transmit and receive skbuffs.
250 static int amd8111e_free_skbs(struct net_device *dev)
252 struct amd8111e_priv *lp = netdev_priv(dev);
253 struct sk_buff* rx_skbuff;
256 /* Freeing transmit skbs */
257 for(i = 0; i < NUM_TX_BUFFERS; i++){
258 if(lp->tx_skbuff[i]){
259 pci_unmap_single(lp->pci_dev,lp->tx_dma_addr[i], lp->tx_skbuff[i]->len,PCI_DMA_TODEVICE);
260 dev_kfree_skb (lp->tx_skbuff[i]);
261 lp->tx_skbuff[i] = NULL;
262 lp->tx_dma_addr[i] = 0;
265 /* Freeing previously allocated receive buffers */
266 for (i = 0; i < NUM_RX_BUFFERS; i++){
267 rx_skbuff = lp->rx_skbuff[i];
268 if(rx_skbuff != NULL){
269 pci_unmap_single(lp->pci_dev,lp->rx_dma_addr[i],
270 lp->rx_buff_len - 2,PCI_DMA_FROMDEVICE);
271 dev_kfree_skb(lp->rx_skbuff[i]);
272 lp->rx_skbuff[i] = NULL;
273 lp->rx_dma_addr[i] = 0;
281 This will set the receive buffer length corresponding to the mtu size of networkinterface.
283 static inline void amd8111e_set_rx_buff_len(struct net_device* dev)
285 struct amd8111e_priv* lp = netdev_priv(dev);
286 unsigned int mtu = dev->mtu;
288 if (mtu > ETH_DATA_LEN){
289 /* MTU + ethernet header + FCS
290 + optional VLAN tag + skb reserve space 2 */
292 lp->rx_buff_len = mtu + ETH_HLEN + 10;
293 lp->options |= OPTION_JUMBO_ENABLE;
295 lp->rx_buff_len = PKT_BUFF_SZ;
296 lp->options &= ~OPTION_JUMBO_ENABLE;
301 This function will free all the previously allocated buffers, determine new receive buffer length and will allocate new receive buffers. This function also allocates and initializes both the transmitter and receive hardware descriptors.
303 static int amd8111e_init_ring(struct net_device *dev)
305 struct amd8111e_priv *lp = netdev_priv(dev);
308 lp->rx_idx = lp->tx_idx = 0;
309 lp->tx_complete_idx = 0;
314 /* Free previously allocated transmit and receive skbs */
315 amd8111e_free_skbs(dev);
318 /* allocate the tx and rx descriptors */
319 if((lp->tx_ring = pci_alloc_consistent(lp->pci_dev,
320 sizeof(struct amd8111e_tx_dr)*NUM_TX_RING_DR,
321 &lp->tx_ring_dma_addr)) == NULL)
325 if((lp->rx_ring = pci_alloc_consistent(lp->pci_dev,
326 sizeof(struct amd8111e_rx_dr)*NUM_RX_RING_DR,
327 &lp->rx_ring_dma_addr)) == NULL)
329 goto err_free_tx_ring;
332 /* Set new receive buff size */
333 amd8111e_set_rx_buff_len(dev);
335 /* Allocating receive skbs */
336 for (i = 0; i < NUM_RX_BUFFERS; i++) {
338 if (!(lp->rx_skbuff[i] = dev_alloc_skb(lp->rx_buff_len))) {
339 /* Release previos allocated skbs */
340 for(--i; i >= 0 ;i--)
341 dev_kfree_skb(lp->rx_skbuff[i]);
342 goto err_free_rx_ring;
344 skb_reserve(lp->rx_skbuff[i],2);
346 /* Initilaizing receive descriptors */
347 for (i = 0; i < NUM_RX_BUFFERS; i++) {
348 lp->rx_dma_addr[i] = pci_map_single(lp->pci_dev,
349 lp->rx_skbuff[i]->data,lp->rx_buff_len-2, PCI_DMA_FROMDEVICE);
351 lp->rx_ring[i].buff_phy_addr = cpu_to_le32(lp->rx_dma_addr[i]);
352 lp->rx_ring[i].buff_count = cpu_to_le16(lp->rx_buff_len-2);
354 lp->rx_ring[i].rx_flags = cpu_to_le16(OWN_BIT);
357 /* Initializing transmit descriptors */
358 for (i = 0; i < NUM_TX_RING_DR; i++) {
359 lp->tx_ring[i].buff_phy_addr = 0;
360 lp->tx_ring[i].tx_flags = 0;
361 lp->tx_ring[i].buff_count = 0;
368 pci_free_consistent(lp->pci_dev,
369 sizeof(struct amd8111e_rx_dr)*NUM_RX_RING_DR,lp->rx_ring,
370 lp->rx_ring_dma_addr);
374 pci_free_consistent(lp->pci_dev,
375 sizeof(struct amd8111e_tx_dr)*NUM_TX_RING_DR,lp->tx_ring,
376 lp->tx_ring_dma_addr);
/* This function will set the interrupt coalescing according to the input arguments */
/* NOTE(review): the switch(cmod) statement, its case labels (presumably
 * RX_INTR_COAL / TX_INTR_COAL / DISABLE_COAL / ENABLE_COAL per enum
 * coal_mode), the break statements and the final return were lost in
 * extraction.  The four sections below correspond to those modes -- confirm
 * against the full source before compiling. */
static int amd8111e_set_coalesce(struct net_device * dev, enum coal_mode cmod)
	unsigned int timeout;
	unsigned int event_count;
	struct amd8111e_priv *lp = netdev_priv(dev);
	void __iomem *mmio = lp->mmio;
	struct amd8111e_coalesce_conf * coal_conf = &lp->coal_conf;
	/* RX coalescing: validate and program DLY_INT_A with the event count
	 * and the timeout (converted to delay-timer ticks). */
	timeout = coal_conf->rx_timeout;
	event_count = coal_conf->rx_event_count;
	if( timeout > MAX_TIMEOUT ||
	    event_count > MAX_EVENT_COUNT )
	timeout = timeout * DELAY_TIMER_CONV;
	writel(VAL0|STINTEN, mmio+INTEN0);
	writel((u32)DLY_INT_A_R0|( event_count<< 16 )|timeout,
	/* TX coalescing: same validation, programmed into DLY_INT_B. */
	timeout = coal_conf->tx_timeout;
	event_count = coal_conf->tx_event_count;
	if( timeout > MAX_TIMEOUT ||
	    event_count > MAX_EVENT_COUNT )
	timeout = timeout * DELAY_TIMER_CONV;
	writel(VAL0|STINTEN,mmio+INTEN0);
	writel((u32)DLY_INT_B_T0|( event_count<< 16 )|timeout,
	/* Disable coalescing: stop the soft timer and clear both delay
	 * interrupt registers. */
	writel(0,mmio+STVAL);
	writel(STINTEN, mmio+INTEN0);
	writel(0, mmio +DLY_INT_B);
	writel(0, mmio+DLY_INT_A);
	/* Enable coalescing: start the soft timer used to recompute the
	 * coalescing parameters periodically. */
	/* Start the timer */
	writel((u32)SOFT_TIMER_FREQ, mmio+STVAL); /* 0.5 sec */
	writel(VAL0|STINTEN, mmio+INTEN0);
/* This function initializes the device registers and starts the device. */
/* NOTE(review): several lines (opening brace, local declarations, early
 * return on init_ring failure, else-branches of the jumbo/VLAN blocks and
 * the final PCI-posting readl/return) were lost in extraction -- the
 * comments below mark where they belong; confirm against the full source. */
static int amd8111e_restart(struct net_device *dev)
	struct amd8111e_priv *lp = netdev_priv(dev);
	void __iomem *mmio = lp->mmio;
	/* Stop the chip before reprogramming it (RUN written without VAL0). */
	writel(RUN, mmio + CMD0);
	/* Rebuild descriptor rings; presumably returns nonzero on failure. */
	if(amd8111e_init_ring(dev))
	/* enable the port manager and set auto negotiation always */
	writel((u32) VAL1|EN_PMGR, mmio + CMD3 );
	writel((u32)XPHYANE|XPHYRST , mmio + CTRL2);
	amd8111e_set_ext_phy(dev);
	/* set control registers */
	reg_val = readl(mmio + CTRL1);
	reg_val &= ~XMTSP_MASK;
	writel( reg_val| XMTSP_128 | CACHE_ALIGN, mmio + CTRL1 );
	/* enable interrupt */
	writel( APINT5EN | APINT4EN | APINT3EN | APINT2EN | APINT1EN |
		APINT0EN | MIIPDTINTEN | MCCIINTEN | MCCINTEN | MREINTEN |
		SPNDINTEN | MPINTEN | SINTEN | STINTEN, mmio + INTEN0);
	writel(VAL3 | LCINTEN | VAL1 | TINTEN0 | VAL0 | RINTEN0, mmio + INTEN0);
	/* initialize tx and rx ring base addresses */
	writel((u32)lp->tx_ring_dma_addr,mmio + XMT_RING_BASE_ADDR0);
	writel((u32)lp->rx_ring_dma_addr,mmio+ RCV_RING_BASE_ADDR0);
	writew((u32)NUM_TX_RING_DR, mmio + XMT_RING_LEN0);
	writew((u16)NUM_RX_RING_DR, mmio + RCV_RING_LEN0);
	/* set default IPG to 96 */
	writew((u32)DEFAULT_IPG,mmio+IPG);
	writew((u32)(DEFAULT_IPG-IFS1_DELTA), mmio + IFS1);
	/* Jumbo-frame branch: enables JUMBO and drops REX_UFLO; the non-jumbo
	 * else-branch (keeping REX_UFLO and clearing JUMBO) follows. */
	if(lp->options & OPTION_JUMBO_ENABLE){
		writel((u32)VAL2|JUMBO, mmio + CMD3);
		writel( REX_UFLO, mmio + CMD2);
		/* Should not set REX_UFLO for jumbo frames */
		writel( VAL0 | APAD_XMT|REX_RTRY , mmio + CMD2);
		writel( VAL0 | APAD_XMT | REX_RTRY|REX_UFLO, mmio + CMD2);
		writel((u32)JUMBO, mmio + CMD3);
#if AMD8111E_VLAN_TAG_USED
	writel((u32) VAL2|VSIZE|VL_TAG_DEL, mmio + CMD3);
	writel( VAL0 | APAD_XMT | REX_RTRY, mmio + CMD2 );
	/* Setting the MAC address to the device */
	for(i = 0; i < ETH_ADDR_LEN; i++)
		writeb( dev->dev_addr[i], mmio + PADR + i );
	/* Enable interrupt coalesce */
	if(lp->options & OPTION_INTR_COAL_ENABLE){
		printk(KERN_INFO "%s: Interrupt Coalescing Enabled.\n",
		amd8111e_set_coalesce(dev,ENABLE_COAL);
	/* set RUN bit to start the chip */
	writel(VAL2 | RDMD0, mmio + CMD0);
	writel(VAL0 | INTREN | RUN, mmio + CMD0);
	/* To avoid PCI posting bug */
/* This function clears necessary the device registers. */
/* Brings the chip to a known default state: stops it, clears ring bases,
 * ring lengths, delay-interrupt, flow-control, pending INT0 bits, the MIB
 * counters and the logical address filter, then restores CTRL1 defaults.
 * NOTE(review): the opening brace, a few "stop the chip"-style comments,
 * the #endif of the VLAN block and the trailing PCI-posting readl were lost
 * in extraction. */
static void amd8111e_init_hw_default( struct amd8111e_priv* lp)
	unsigned int reg_val;
	unsigned int logic_filter[2] ={0,};
	void __iomem *mmio = lp->mmio;
	/* Stop the chip (RUN written without VAL0 clears the bit). */
	writel(RUN, mmio + CMD0);
	/* AUTOPOLL0 Register *//*TBD default value is 8100 in FPS */
	writew( 0x8100 | lp->ext_phy_addr, mmio + AUTOPOLL0);
	/* Clear RCV_RING_BASE_ADDR */
	writel(0, mmio + RCV_RING_BASE_ADDR0);
	/* Clear XMT_RING_BASE_ADDR */
	writel(0, mmio + XMT_RING_BASE_ADDR0);
	writel(0, mmio + XMT_RING_BASE_ADDR1);
	writel(0, mmio + XMT_RING_BASE_ADDR2);
	writel(0, mmio + XMT_RING_BASE_ADDR3);
	/* Clear CMD0 / CMD2 / CMD7 command registers. */
	writel(CMD0_CLEAR,mmio + CMD0);
	writel(CMD2_CLEAR, mmio +CMD2);
	writel(CMD7_CLEAR , mmio + CMD7);
	/* Clear DLY_INT_A and DLY_INT_B */
	writel(0x0, mmio + DLY_INT_A);
	writel(0x0, mmio + DLY_INT_B);
	/* Clear FLOW_CONTROL */
	writel(0x0, mmio + FLOW_CONTROL);
	/* Clear INT0 write 1 to clear register */
	reg_val = readl(mmio + INT0);
	writel(reg_val, mmio + INT0);
	/* Clear the soft-timer value register. */
	writel(0x0, mmio + STVAL);
	/* Mask all interrupt enables. */
	writel( INTEN0_CLEAR, mmio + INTEN0);
	/* Clear the logical address filter. */
	writel(0x0 , mmio + LADRF);
	/* Set SRAM_SIZE & SRAM_BOUNDARY registers */
	writel( 0x80010,mmio + SRAM_SIZE);
	/* Clear RCV_RING0_LEN */
	writel(0x0, mmio + RCV_RING_LEN0);
	/* Clear XMT_RING0/1/2/3_LEN */
	writel(0x0, mmio + XMT_RING_LEN0);
	writel(0x0, mmio + XMT_RING_LEN1);
	writel(0x0, mmio + XMT_RING_LEN2);
	writel(0x0, mmio + XMT_RING_LEN3);
	/* Clear XMT_RING_LIMIT */
	writel(0x0, mmio + XMT_RING_LIMIT);
	/* Reset all MIB hardware counters. */
	writew(MIB_CLEAR, mmio + MIB_ADDR);
	/* Write the (all-zero) 64-bit logical address filter. */
	amd8111e_writeq(*(u64*)logic_filter,mmio+LADRF);
	/* SRAM_SIZE register */
	reg_val = readl(mmio + SRAM_SIZE);
	if(lp->options & OPTION_JUMBO_ENABLE)
		writel( VAL2|JUMBO, mmio + CMD3);
#if AMD8111E_VLAN_TAG_USED
	writel(VAL2|VSIZE|VL_TAG_DEL, mmio + CMD3 );
	/* Set default value to CTRL1 Register */
	writel(CTRL1_DEFAULT, mmio + CTRL1);
	/* To avoid PCI posting bug */
611 This function disables the interrupt and clears all the pending
614 static void amd8111e_disable_interrupt(struct amd8111e_priv* lp)
618 /* Disable interrupt */
619 writel(INTREN, lp->mmio + CMD0);
622 intr0 = readl(lp->mmio + INT0);
623 writel(intr0, lp->mmio + INT0);
625 /* To avoid PCI posting bug */
626 readl(lp->mmio + INT0);
631 This function stops the chip.
633 static void amd8111e_stop_chip(struct amd8111e_priv* lp)
635 writel(RUN, lp->mmio + CMD0);
637 /* To avoid PCI posting bug */
638 readl(lp->mmio + CMD0);
642 This function frees the transmiter and receiver descriptor rings.
644 static void amd8111e_free_ring(struct amd8111e_priv* lp)
647 /* Free transmit and receive skbs */
648 amd8111e_free_skbs(lp->amd8111e_net_dev);
650 /* Free transmit and receive descriptor rings */
652 pci_free_consistent(lp->pci_dev,
653 sizeof(struct amd8111e_rx_dr)*NUM_RX_RING_DR,
654 lp->rx_ring, lp->rx_ring_dma_addr);
659 pci_free_consistent(lp->pci_dev,
660 sizeof(struct amd8111e_tx_dr)*NUM_TX_RING_DR,
661 lp->tx_ring, lp->tx_ring_dma_addr);
667 #if AMD8111E_VLAN_TAG_USED
669 This is the receive indication function for packets with vlan tag.
671 static int amd8111e_vlan_rx(struct amd8111e_priv *lp, struct sk_buff *skb, u16 vlan_tag)
673 #ifdef CONFIG_AMD8111E_NAPI
674 return vlan_hwaccel_receive_skb(skb, lp->vlgrp,vlan_tag);
676 return vlan_hwaccel_rx(skb, lp->vlgrp, vlan_tag);
677 #endif /* CONFIG_AMD8111E_NAPI */
682 This function will free all the transmit skbs that are actually transmitted by the device. It will check the ownership of the skb before freeing the skb.
684 static int amd8111e_tx(struct net_device *dev)
686 struct amd8111e_priv* lp = netdev_priv(dev);
687 int tx_index = lp->tx_complete_idx & TX_RING_DR_MOD_MASK;
689 /* Complete all the transmit packet */
690 while (lp->tx_complete_idx != lp->tx_idx){
691 tx_index = lp->tx_complete_idx & TX_RING_DR_MOD_MASK;
692 status = le16_to_cpu(lp->tx_ring[tx_index].tx_flags);
695 break; /* It still hasn't been Txed */
697 lp->tx_ring[tx_index].buff_phy_addr = 0;
699 /* We must free the original skb */
700 if (lp->tx_skbuff[tx_index]) {
701 pci_unmap_single(lp->pci_dev, lp->tx_dma_addr[tx_index],
702 lp->tx_skbuff[tx_index]->len,
704 dev_kfree_skb_irq (lp->tx_skbuff[tx_index]);
705 lp->tx_skbuff[tx_index] = NULL;
706 lp->tx_dma_addr[tx_index] = 0;
708 lp->tx_complete_idx++;
709 /*COAL update tx coalescing parameters */
710 lp->coal_conf.tx_packets++;
711 lp->coal_conf.tx_bytes += lp->tx_ring[tx_index].buff_count;
713 if (netif_queue_stopped(dev) &&
714 lp->tx_complete_idx > lp->tx_idx - NUM_TX_BUFFERS +2){
715 /* The ring is no longer full, clear tbusy. */
716 /* lp->tx_full = 0; */
717 netif_wake_queue (dev);
#ifdef CONFIG_AMD8111E_NAPI
/* This function handles the driver receive operation in polling mode */
/* NOTE(review): this NAPI poll loop lost substantial scaffolding in
 * extraction -- the opening brace, several local declarations (pkt_len,
 * num_rx_pkt, intr0, vtag), the do{ opener, the "goto" targets that skip
 * bad descriptors (apparently labelled err_next_pkt / rx_not_empty) and the
 * return statements.  Comments below mark the intent of each surviving
 * section; confirm against the full source. */
static int amd8111e_rx_poll(struct net_device *dev, int * budget)
	struct amd8111e_priv *lp = netdev_priv(dev);
	int rx_index = lp->rx_idx & RX_RING_DR_MOD_MASK;
	void __iomem *mmio = lp->mmio;
	struct sk_buff *skb,*new_skb;
	int min_pkt_len, status;
	/*int max_rx_pkt = NUM_RX_BUFFERS;*/
#if AMD8111E_VLAN_TAG_USED
	/* rx_pkt_limit caps how many packets this poll call may indicate. */
	int rx_pkt_limit = dev->quota;
	/* process receive packets until we use the quota*/
	/* If we own the next entry, it's a new packet. Send it up. */
	status = le16_to_cpu(lp->rx_ring[rx_index].rx_flags);
	/* Descriptor still owned by hardware: ring drained for now. */
	if (status & OWN_BIT)
	/*
	 * There is a tricky error noted by John Murphy,
	 * <murf@perftech.com> to Russ Nelson: Even with
	 * full-sized * buffers it's possible for a
	 * jabber packet to use two buffers, with only
	 * the last correctly noting the error.
	 */
	/* Drop descriptors with the error-summary bit set. */
	if(status & ERR_BIT) {
		lp->rx_ring[rx_index].rx_flags &= RESET_RX_FLAGS;
	/* check for STP and ENP */
	if(!((status & STP_BIT) && (status & ENP_BIT))){
		lp->rx_ring[rx_index].rx_flags &= RESET_RX_FLAGS;
	/* msg_count includes the 4-byte FCS, hence the -4. */
	pkt_len = le16_to_cpu(lp->rx_ring[rx_index].msg_count) - 4;
#if AMD8111E_VLAN_TAG_USED
	vtag = status & TT_MASK;
	/*MAC will strip vlan tag*/
	if(lp->vlgrp != NULL && vtag !=0)
		min_pkt_len =MIN_PKT_LEN - 4;
	min_pkt_len =MIN_PKT_LEN;
	/* Runt frame: recycle the descriptor without indicating it. */
	if (pkt_len < min_pkt_len) {
		lp->rx_ring[rx_index].rx_flags &= RESET_RX_FLAGS;
	/* Quota exhausted: stop and leave the rest for the next poll. */
	if(--rx_pkt_limit < 0)
	if(!(new_skb = dev_alloc_skb(lp->rx_buff_len))){
		/* if allocation fail,
		   ignore that pkt and go to next one */
		lp->rx_ring[rx_index].rx_flags &= RESET_RX_FLAGS;
	/* Swap the filled skb out for the fresh one and pass it up. */
	skb_reserve(new_skb, 2);
	skb = lp->rx_skbuff[rx_index];
	pci_unmap_single(lp->pci_dev,lp->rx_dma_addr[rx_index],
			 lp->rx_buff_len-2, PCI_DMA_FROMDEVICE);
	skb_put(skb, pkt_len);
	lp->rx_skbuff[rx_index] = new_skb;
	lp->rx_dma_addr[rx_index] = pci_map_single(lp->pci_dev,
	skb->protocol = eth_type_trans(skb, dev);
#if AMD8111E_VLAN_TAG_USED
	if(lp->vlgrp != NULL && (vtag == TT_VLAN_TAGGED)){
		amd8111e_vlan_rx(lp, skb,
			le16_to_cpu(lp->rx_ring[rx_index].tag_ctrl_info));
	netif_receive_skb(skb);
	/*COAL update rx coalescing parameters*/
	lp->coal_conf.rx_packets++;
	lp->coal_conf.rx_bytes += pkt_len;
	dev->last_rx = jiffies;
	/* Re-arm the descriptor and give it back to the hardware. */
	lp->rx_ring[rx_index].buff_phy_addr
		= cpu_to_le32(lp->rx_dma_addr[rx_index]);
	lp->rx_ring[rx_index].buff_count =
		cpu_to_le16(lp->rx_buff_len-2);
	lp->rx_ring[rx_index].rx_flags |= cpu_to_le16(OWN_BIT);
	rx_index = (++lp->rx_idx) & RX_RING_DR_MOD_MASK;
	/* Check the interrupt status register for more packets in the
	   mean time. Process them since we have not used up our quota.*/
	intr0 = readl(mmio + INT0);
	/*Ack receive packets */
	writel(intr0 & RINT0,mmio + INT0);
	} while(intr0 & RINT0);
	/* Receive descriptor is empty now */
	dev->quota -= num_rx_pkt;
	*budget -= num_rx_pkt;
	netif_rx_complete(dev);
	/* enable receive interrupt */
	writel(VAL0|RINTEN0, mmio + INTEN0);
	writel(VAL2 | RDMD0, mmio + CMD0);
	/* Do not call a netif_rx_complete */
	dev->quota -= num_rx_pkt;
	*budget -= num_rx_pkt;
/* This function will check the ownership of receive buffers and
 * descriptors.  It will indicate to kernel up to half the number of maximum
 * receive buffers in the descriptor ring, in a single receive interrupt.
 * It will also replenish the descriptors with new skbs. */
/* NOTE(review): the "#else" that pairs this non-NAPI receive path with
 * amd8111e_rx_poll above, the opening brace, some local declarations
 * (num_rx_pkt, pkt_len, vtag), the OWN_BIT early-break, the netif_rx()
 * indication call and the final return were lost in extraction -- confirm
 * against the full source. */
static int amd8111e_rx(struct net_device *dev)
	struct amd8111e_priv *lp = netdev_priv(dev);
	struct sk_buff *skb,*new_skb;
	int rx_index = lp->rx_idx & RX_RING_DR_MOD_MASK;
	int min_pkt_len, status;
	int max_rx_pkt = NUM_RX_BUFFERS;
#if AMD8111E_VLAN_TAG_USED
	/* If we own the next entry, it's a new packet. Send it up. */
	while(++num_rx_pkt <= max_rx_pkt){
		status = le16_to_cpu(lp->rx_ring[rx_index].rx_flags);
		/* check if err summary bit is set */
		if(status & ERR_BIT){
			/*
			 * There is a tricky error noted by John Murphy,
			 * <murf@perftech.com> to Russ Nelson: Even with full-sized
			 * buffers it's possible for a jabber packet to use two
			 * buffers, with only the last correctly noting the error. */
			lp->rx_ring[rx_index].rx_flags &= RESET_RX_FLAGS;
		/* check for STP and ENP */
		if(!((status & STP_BIT) && (status & ENP_BIT))){
			lp->rx_ring[rx_index].rx_flags &= RESET_RX_FLAGS;
		/* msg_count includes the 4-byte FCS, hence the -4. */
		pkt_len = le16_to_cpu(lp->rx_ring[rx_index].msg_count) - 4;
#if AMD8111E_VLAN_TAG_USED
		vtag = status & TT_MASK;
		/*MAC will strip vlan tag*/
		if(lp->vlgrp != NULL && vtag !=0)
			min_pkt_len =MIN_PKT_LEN - 4;
		min_pkt_len =MIN_PKT_LEN;
		/* Runt frame: recycle the descriptor without indicating it. */
		if (pkt_len < min_pkt_len) {
			lp->rx_ring[rx_index].rx_flags &= RESET_RX_FLAGS;
		if(!(new_skb = dev_alloc_skb(lp->rx_buff_len))){
			/* if allocation fail,
			   ignore that pkt and go to next one */
			lp->rx_ring[rx_index].rx_flags &= RESET_RX_FLAGS;
		/* Swap the filled skb out for a fresh one and pass it up. */
		skb_reserve(new_skb, 2);
		skb = lp->rx_skbuff[rx_index];
		pci_unmap_single(lp->pci_dev,lp->rx_dma_addr[rx_index],
				 lp->rx_buff_len-2, PCI_DMA_FROMDEVICE);
		skb_put(skb, pkt_len);
		lp->rx_skbuff[rx_index] = new_skb;
		lp->rx_dma_addr[rx_index] = pci_map_single(lp->pci_dev,
			new_skb->data, lp->rx_buff_len-2,PCI_DMA_FROMDEVICE);
		skb->protocol = eth_type_trans(skb, dev);
#if AMD8111E_VLAN_TAG_USED
		if(lp->vlgrp != NULL && (vtag == TT_VLAN_TAGGED)){
			amd8111e_vlan_rx(lp, skb,
				le16_to_cpu(lp->rx_ring[rx_index].tag_ctrl_info));
		/*COAL update rx coalescing parameters*/
		lp->coal_conf.rx_packets++;
		lp->coal_conf.rx_bytes += pkt_len;
		dev->last_rx = jiffies;
		/* Re-arm the descriptor and give it back to the hardware. */
		lp->rx_ring[rx_index].buff_phy_addr
			= cpu_to_le32(lp->rx_dma_addr[rx_index]);
		lp->rx_ring[rx_index].buff_count =
			cpu_to_le16(lp->rx_buff_len-2);
		lp->rx_ring[rx_index].rx_flags |= cpu_to_le16(OWN_BIT);
		rx_index = (++lp->rx_idx) & RX_RING_DR_MOD_MASK;
#endif /* CONFIG_AMD8111E_NAPI */
962 This function will indicate the link status to the kernel.
964 static int amd8111e_link_change(struct net_device* dev)
966 struct amd8111e_priv *lp = netdev_priv(dev);
969 /* read the link change */
970 status0 = readl(lp->mmio + STAT0);
972 if(status0 & LINK_STATS){
973 if(status0 & AUTONEG_COMPLETE)
974 lp->link_config.autoneg = AUTONEG_ENABLE;
976 lp->link_config.autoneg = AUTONEG_DISABLE;
978 if(status0 & FULL_DPLX)
979 lp->link_config.duplex = DUPLEX_FULL;
981 lp->link_config.duplex = DUPLEX_HALF;
982 speed = (status0 & SPEED_MASK) >> 7;
983 if(speed == PHY_SPEED_10)
984 lp->link_config.speed = SPEED_10;
985 else if(speed == PHY_SPEED_100)
986 lp->link_config.speed = SPEED_100;
988 printk(KERN_INFO "%s: Link is Up. Speed is %s Mbps %s Duplex\n", dev->name,
989 (lp->link_config.speed == SPEED_100) ? "100": "10",
990 (lp->link_config.duplex == DUPLEX_FULL)? "Full": "Half");
991 netif_carrier_on(dev);
994 lp->link_config.speed = SPEED_INVALID;
995 lp->link_config.duplex = DUPLEX_INVALID;
996 lp->link_config.autoneg = AUTONEG_INVALID;
997 printk(KERN_INFO "%s: Link is Down.\n",dev->name);
998 netif_carrier_off(dev);
1004 This function reads the mib counters.
1006 static int amd8111e_read_mib(void __iomem *mmio, u8 MIB_COUNTER)
1008 unsigned int status;
1010 unsigned int repeat = REPEAT_CNT;
1012 writew( MIB_RD_CMD | MIB_COUNTER, mmio + MIB_ADDR);
1014 status = readw(mmio + MIB_ADDR);
1015 udelay(2); /* controller takes MAX 2 us to get mib data */
1017 while (--repeat && (status & MIB_CMD_ACTIVE));
1019 data = readl(mmio + MIB_DATA);
/* This function reads the mib registers and returns the hardware
 * statistics.  It updates previous internal driver statistics with new
 * values. */
/* NOTE(review): the opening brace, the driver-reported error term that ends
 * the rx_errors sum (the line after rcv_miss_pkts ends in '+'), and the
 * final "return new_stats;" were lost in extraction -- confirm against the
 * full source. */
static struct net_device_stats *amd8111e_get_stats(struct net_device * dev)
	struct amd8111e_priv *lp = netdev_priv(dev);
	void __iomem *mmio = lp->mmio;
	unsigned long flags;
	/* struct net_device_stats *prev_stats = &lp->prev_stats; */
	struct net_device_stats* new_stats = &lp->stats;
	/* Serialise against the interrupt path while reading the MIB block. */
	spin_lock_irqsave (&lp->lock, flags);
	/* stats.rx_packets */
	new_stats->rx_packets = amd8111e_read_mib(mmio, rcv_broadcast_pkts)+
				amd8111e_read_mib(mmio, rcv_multicast_pkts)+
				amd8111e_read_mib(mmio, rcv_unicast_pkts);
	/* stats.tx_packets */
	new_stats->tx_packets = amd8111e_read_mib(mmio, xmt_packets);
	/* stats.rx_bytes */
	new_stats->rx_bytes = amd8111e_read_mib(mmio, rcv_octets);
	/* stats.tx_bytes */
	new_stats->tx_bytes = amd8111e_read_mib(mmio, xmt_octets);
	/* stats.rx_errors */
	/* hw errors + errors driver reported */
	new_stats->rx_errors = amd8111e_read_mib(mmio, rcv_undersize_pkts)+
			       amd8111e_read_mib(mmio, rcv_fragments)+
			       amd8111e_read_mib(mmio, rcv_jabbers)+
			       amd8111e_read_mib(mmio, rcv_alignment_errors)+
			       amd8111e_read_mib(mmio, rcv_fcs_errors)+
			       amd8111e_read_mib(mmio, rcv_miss_pkts)+
	/* stats.tx_errors */
	new_stats->tx_errors = amd8111e_read_mib(mmio, xmt_underrun_pkts);
	/* stats.rx_dropped*/
	new_stats->rx_dropped = amd8111e_read_mib(mmio, rcv_miss_pkts);
	/* stats.tx_dropped*/
	new_stats->tx_dropped = amd8111e_read_mib(mmio, xmt_underrun_pkts);
	/* stats.multicast*/
	new_stats->multicast = amd8111e_read_mib(mmio, rcv_multicast_pkts);
	/* stats.collisions*/
	new_stats->collisions = amd8111e_read_mib(mmio, xmt_collisions);
	/* stats.rx_length_errors*/
	new_stats->rx_length_errors =
		amd8111e_read_mib(mmio, rcv_undersize_pkts)+
		amd8111e_read_mib(mmio, rcv_oversize_pkts);
	/* stats.rx_over_errors*/
	new_stats->rx_over_errors = amd8111e_read_mib(mmio, rcv_miss_pkts);
	/* stats.rx_crc_errors*/
	new_stats->rx_crc_errors = amd8111e_read_mib(mmio, rcv_fcs_errors);
	/* stats.rx_frame_errors*/
	new_stats->rx_frame_errors =
		amd8111e_read_mib(mmio, rcv_alignment_errors);
	/* stats.rx_fifo_errors */
	new_stats->rx_fifo_errors = amd8111e_read_mib(mmio, rcv_miss_pkts);
	/* stats.rx_missed_errors */
	new_stats->rx_missed_errors = amd8111e_read_mib(mmio, rcv_miss_pkts);
	/* stats.tx_aborted_errors*/
	new_stats->tx_aborted_errors =
		amd8111e_read_mib(mmio, xmt_excessive_collision);
	/* stats.tx_carrier_errors*/
	new_stats->tx_carrier_errors =
		amd8111e_read_mib(mmio, xmt_loss_carrier);
	/* stats.tx_fifo_errors*/
	new_stats->tx_fifo_errors = amd8111e_read_mib(mmio, xmt_underrun_pkts);
	/* stats.tx_window_errors*/
	new_stats->tx_window_errors =
		amd8111e_read_mib(mmio, xmt_late_collision);
	/* Reset the mibs for collecting new statistics */
	/* writew(MIB_CLEAR, mmio + MIB_ADDR);*/
	spin_unlock_irqrestore (&lp->lock, flags);
/* This function recalculate the interupt coalescing mode on every interrupt
 * according to the datarate and the packet rate. */
/* Computes per-interval tx/rx packet and byte rates from the coalescing
 * counters, classifies the average packet size into NO/LOW/MEDIUM/HIGH
 * coalescing bands, and reprograms the hardware only when the band changes.
 * NOTE(review): the opening brace, local declarations (tx/rx rate and size
 * variables), several closing braces and the final return were lost in
 * extraction -- confirm against the full source. */
static int amd8111e_calc_coalesce(struct net_device *dev)
	struct amd8111e_priv *lp = netdev_priv(dev);
	struct amd8111e_coalesce_conf * coal_conf = &lp->coal_conf;
	/* Delta of each counter since the previous timer tick. */
	tx_pkt_rate = coal_conf->tx_packets - coal_conf->tx_prev_packets;
	coal_conf->tx_prev_packets = coal_conf->tx_packets;
	tx_data_rate = coal_conf->tx_bytes - coal_conf->tx_prev_bytes;
	coal_conf->tx_prev_bytes = coal_conf->tx_bytes;
	rx_pkt_rate = coal_conf->rx_packets - coal_conf->rx_prev_packets;
	coal_conf->rx_prev_packets = coal_conf->rx_packets;
	rx_data_rate = coal_conf->rx_bytes - coal_conf->rx_prev_bytes;
	coal_conf->rx_prev_bytes = coal_conf->rx_bytes;
	/* Low rx packet rate: coalescing not worthwhile (also avoids a
	 * divide-by-zero in the rx_pkt_size computation below). */
	if(rx_pkt_rate < 800){
		if(coal_conf->rx_coal_type != NO_COALESCE){
			coal_conf->rx_timeout = 0x0;
			coal_conf->rx_event_count = 0;
			amd8111e_set_coalesce(dev,RX_INTR_COAL);
			coal_conf->rx_coal_type = NO_COALESCE;
	/* Otherwise pick an rx band from the average packet size. */
	rx_pkt_size = rx_data_rate/rx_pkt_rate;
	if (rx_pkt_size < 128){
		if(coal_conf->rx_coal_type != NO_COALESCE){
			coal_conf->rx_timeout = 0;
			coal_conf->rx_event_count = 0;
			amd8111e_set_coalesce(dev,RX_INTR_COAL);
			coal_conf->rx_coal_type = NO_COALESCE;
	else if ( (rx_pkt_size >= 128) && (rx_pkt_size < 512) ){
		if(coal_conf->rx_coal_type != LOW_COALESCE){
			coal_conf->rx_timeout = 1;
			coal_conf->rx_event_count = 4;
			amd8111e_set_coalesce(dev,RX_INTR_COAL);
			coal_conf->rx_coal_type = LOW_COALESCE;
	else if ((rx_pkt_size >= 512) && (rx_pkt_size < 1024)){
		if(coal_conf->rx_coal_type != MEDIUM_COALESCE){
			coal_conf->rx_timeout = 1;
			coal_conf->rx_event_count = 4;
			amd8111e_set_coalesce(dev,RX_INTR_COAL);
			coal_conf->rx_coal_type = MEDIUM_COALESCE;
	else if(rx_pkt_size >= 1024){
		if(coal_conf->rx_coal_type != HIGH_COALESCE){
			coal_conf->rx_timeout = 2;
			coal_conf->rx_event_count = 3;
			amd8111e_set_coalesce(dev,RX_INTR_COAL);
			coal_conf->rx_coal_type = HIGH_COALESCE;
	/* NOW FOR TX INTR COALESC */
	if(tx_pkt_rate < 800){
		if(coal_conf->tx_coal_type != NO_COALESCE){
			coal_conf->tx_timeout = 0x0;
			coal_conf->tx_event_count = 0;
			amd8111e_set_coalesce(dev,TX_INTR_COAL);
			coal_conf->tx_coal_type = NO_COALESCE;
	/* Otherwise pick a tx band from the average packet size. */
	tx_pkt_size = tx_data_rate/tx_pkt_rate;
	if (tx_pkt_size < 128){
		if(coal_conf->tx_coal_type != NO_COALESCE){
			coal_conf->tx_timeout = 0;
			coal_conf->tx_event_count = 0;
			amd8111e_set_coalesce(dev,TX_INTR_COAL);
			coal_conf->tx_coal_type = NO_COALESCE;
	else if ( (tx_pkt_size >= 128) && (tx_pkt_size < 512) ){
		if(coal_conf->tx_coal_type != LOW_COALESCE){
			coal_conf->tx_timeout = 1;
			coal_conf->tx_event_count = 2;
			amd8111e_set_coalesce(dev,TX_INTR_COAL);
			coal_conf->tx_coal_type = LOW_COALESCE;
	else if ((tx_pkt_size >= 512) && (tx_pkt_size < 1024)){
		if(coal_conf->tx_coal_type != MEDIUM_COALESCE){
			coal_conf->tx_timeout = 2;
			coal_conf->tx_event_count = 5;
			amd8111e_set_coalesce(dev,TX_INTR_COAL);
			coal_conf->tx_coal_type = MEDIUM_COALESCE;
	else if(tx_pkt_size >= 1024){
		/* NOTE(review): this inner check repeats the outer condition
		 * verbatim and is redundant. */
		if (tx_pkt_size >= 1024){
			if(coal_conf->tx_coal_type != HIGH_COALESCE){
				coal_conf->tx_timeout = 4;
				coal_conf->tx_event_count = 8;
				amd8111e_set_coalesce(dev,TX_INTR_COAL);
				coal_conf->tx_coal_type = HIGH_COALESCE;
1255 This is device interrupt function. It handles transmit, receive,link change and hardware timer interrupts.
1257 static irqreturn_t amd8111e_interrupt(int irq, void *dev_id, struct pt_regs *regs)
1260 struct net_device * dev = (struct net_device *) dev_id;
1261 struct amd8111e_priv *lp = netdev_priv(dev);
1262 void __iomem *mmio = lp->mmio;
1264 unsigned int handled = 1;
1269 if (regs) spin_lock (&lp->lock);
1270 /* disabling interrupt */
1271 writel(INTREN, mmio + CMD0);
1273 /* Read interrupt status */
1274 intr0 = readl(mmio + INT0);
1276 /* Process all the INT event until INTR bit is clear. */
1278 if (!(intr0 & INTR)){
1280 goto err_no_interrupt;
1283 /* Current driver processes 4 interrupts : RINT,TINT,LCINT,STINT */
1284 writel(intr0, mmio + INT0);
1286 /* Check if Receive Interrupt has occurred. */
1287 #if CONFIG_AMD8111E_NAPI
1289 if(netif_rx_schedule_prep(dev)){
1290 /* Disable receive interupts */
1291 writel(RINTEN0, mmio + INTEN0);
1292 /* Schedule a polling routine */
1293 __netif_rx_schedule(dev);
1296 printk("************Driver bug! \
1297 interrupt while in poll\n");
1298 /* Fix by disabling interrupts */
1299 writel(RINT0, mmio + INT0);
1305 writel(VAL2 | RDMD0, mmio + CMD0);
1307 #endif /* CONFIG_AMD8111E_NAPI */
1308 /* Check if Transmit Interrupt has occurred. */
1312 /* Check if Link Change Interrupt has occurred. */
1314 amd8111e_link_change(dev);
1316 /* Check if Hardware Timer Interrupt has occurred. */
1318 amd8111e_calc_coalesce(dev);
1321 writel( VAL0 | INTREN,mmio + CMD0);
1323 if (regs) spin_unlock(&lp->lock);
1325 return IRQ_RETVAL(handled);
1328 #ifdef CONFIG_NET_POLL_CONTROLLER
1329 static void amd8111e_poll(struct net_device *dev)
1331 unsigned long flags;
1332 local_save_flags(flags);
1333 local_irq_disable();
1334 amd8111e_interrupt(0, dev, NULL);
1335 local_irq_restore(flags);
/* Close the network interface: stop the queue and the chip, free the
 * descriptor rings, and refresh the statistics so the most recent counts
 * remain available after the interface is down.
 * NOTE(review): the trailing "return 0;" is not visible in this listing.
 * NOTE(review): del_timer_sync() is called with lp->lock held — confirm
 * the timer handler (amd8111e_config_ipg) never takes lp->lock, or this
 * can deadlock.
 */
static int amd8111e_close(struct net_device * dev)
struct amd8111e_priv *lp = netdev_priv(dev);
netif_stop_queue(dev);
spin_lock_irq(&lp->lock);
amd8111e_disable_interrupt(lp);
amd8111e_stop_chip(lp);
amd8111e_free_ring(lp);
netif_carrier_off(lp->amd8111e_net_dev);
/* Delete ipg timer */
if(lp->options & OPTION_DYN_IPG_ENABLE)
	del_timer_sync(&lp->ipg_data.ipg_timer);
spin_unlock_irq(&lp->lock);
/* free_irq() can sleep waiting for a running handler, so it is done
 * outside the lock. */
free_irq(dev->irq, dev);
/* Update the statistics before closing */
amd8111e_get_stats(dev);
/* Open the interface: request the (shared) IRQ, initialize the hardware,
 * buffers and descriptors, start the chip, and kick the dynamic-IPG timer
 * if that option is enabled.
 * NOTE(review): the request_irq() argument list and several error-return
 * paths appear truncated in this listing.
 */
static int amd8111e_open(struct net_device * dev )
struct amd8111e_priv *lp = netdev_priv(dev);
/* SA_SHIRQ: the PCI interrupt line may be shared with other devices. */
if(dev->irq ==0 || request_irq(dev->irq, amd8111e_interrupt, SA_SHIRQ,
spin_lock_irq(&lp->lock);
amd8111e_init_hw_default(lp);
if(amd8111e_restart(dev)){
	spin_unlock_irq(&lp->lock);
/* Start ipg timer */
if(lp->options & OPTION_DYN_IPG_ENABLE){
	add_timer(&lp->ipg_data.ipg_timer);
	printk(KERN_INFO "%s: Dynamic IPG Enabled.\n",dev->name);
spin_unlock_irq(&lp->lock);
netif_start_queue(dev);
/* Check whether a transmit descriptor slot is free to queue another
 * packet: the slot at tx_idx (masked into the skb array) still holds an
 * skb when the ring is full. The caller (amd8111e_start_xmit) treats a
 * negative return as "stop the queue".
 * NOTE(review): the return statements are not visible in this listing.
 */
static int amd8111e_tx_queue_avail(struct amd8111e_priv* lp )
int tx_index = lp->tx_idx & TX_BUFF_MOD_MASK;
if(lp->tx_skbuff[tx_index] != 0)
1413 This function will queue the transmit packets to the descriptors and will trigger the send operation. It also initializes the transmit descriptors with buffer physical address, byte count, ownership to hardware etc.
1416 static int amd8111e_start_xmit(struct sk_buff *skb, struct net_device * dev)
1418 struct amd8111e_priv *lp = netdev_priv(dev);
1420 unsigned long flags;
1422 spin_lock_irqsave(&lp->lock, flags);
1424 tx_index = lp->tx_idx & TX_RING_DR_MOD_MASK;
1426 lp->tx_ring[tx_index].buff_count = cpu_to_le16(skb->len);
1428 lp->tx_skbuff[tx_index] = skb;
1429 lp->tx_ring[tx_index].tx_flags = 0;
1431 #if AMD8111E_VLAN_TAG_USED
1432 if((lp->vlgrp != NULL) && vlan_tx_tag_present(skb)){
1433 lp->tx_ring[tx_index].tag_ctrl_cmd |=
1434 cpu_to_le16(TCC_VLAN_INSERT);
1435 lp->tx_ring[tx_index].tag_ctrl_info =
1436 cpu_to_le16(vlan_tx_tag_get(skb));
1440 lp->tx_dma_addr[tx_index] =
1441 pci_map_single(lp->pci_dev, skb->data, skb->len, PCI_DMA_TODEVICE);
1442 lp->tx_ring[tx_index].buff_phy_addr =
1443 (u32) cpu_to_le32(lp->tx_dma_addr[tx_index]);
1445 /* Set FCS and LTINT bits */
1447 lp->tx_ring[tx_index].tx_flags |=
1448 cpu_to_le16(OWN_BIT | STP_BIT | ENP_BIT|ADD_FCS_BIT|LTINT_BIT);
1452 /* Trigger an immediate send poll. */
1453 writel( VAL1 | TDMD0, lp->mmio + CMD0);
1454 writel( VAL2 | RDMD0,lp->mmio + CMD0);
1456 dev->trans_start = jiffies;
1458 if(amd8111e_tx_queue_avail(lp) < 0){
1459 netif_stop_queue(dev);
1461 spin_unlock_irqrestore(&lp->lock, flags);
1465 This function returns all the memory mapped registers of the device.
1467 static void amd8111e_read_regs(struct amd8111e_priv *lp, u32 *buf)
1469 void __iomem *mmio = lp->mmio;
1470 /* Read only necessary registers */
1471 buf[0] = readl(mmio + XMT_RING_BASE_ADDR0);
1472 buf[1] = readl(mmio + XMT_RING_LEN0);
1473 buf[2] = readl(mmio + RCV_RING_BASE_ADDR0);
1474 buf[3] = readl(mmio + RCV_RING_LEN0);
1475 buf[4] = readl(mmio + CMD0);
1476 buf[5] = readl(mmio + CMD2);
1477 buf[6] = readl(mmio + CMD3);
1478 buf[7] = readl(mmio + CMD7);
1479 buf[8] = readl(mmio + INT0);
1480 buf[9] = readl(mmio + INTEN0);
1481 buf[10] = readl(mmio + LADRF);
1482 buf[11] = readl(mmio + LADRF+4);
1483 buf[12] = readl(mmio + STAT0);
/* Compute the hash used for the logical address filter. The AMD8111e
 * CRC generator differs from the kernel's ether_crc(), hence this local
 * implementation; callers use the top 6 bits of the result as the LADRF
 * bit number.
 * NOTE(review): the crc/i/byte declarations, the CRC update arithmetic
 * and the return statement appear truncated in this listing.
 */
int amd8111e_ether_crc(int len, char* mac_addr)
unsigned char octet;
/* Process each address byte, LSB first. */
for(byte=0; byte < len; byte++){
	octet = mac_addr[byte];
	for( i=0;i < 8; i++){
		/* If the next bit from the input stream is 1, subtract
		 * the divisor (CRC32 polynomial) from the dividend (crc). */
		if( (octet & 0x1) ^ (crc & 0x1) ){
/* Configure receive filtering: promiscuous mode, all-multicast, or a
 * logical address filter (LADRF) programmed from the device's multicast
 * list.
 * NOTE(review): the mc_filter/i/bit_num declarations and the early
 * returns between the branches appear truncated in this listing.
 */
static void amd8111e_set_multicast_list(struct net_device *dev)
struct dev_mc_list* mc_ptr;
struct amd8111e_priv *lp = netdev_priv(dev);
if(dev->flags & IFF_PROMISC){
	printk(KERN_INFO "%s: Setting promiscuous mode.\n",dev->name);
	/* VAL2 | PROM sets the promiscuous bit in CMD2. */
	writel( VAL2 | PROM, lp->mmio + CMD2);
/* Writing PROM without VAL2 clears the promiscuous bit. */
writel( PROM, lp->mmio + CMD2);
if(dev->flags & IFF_ALLMULTI || dev->mc_count > MAX_FILTER_SIZE){
	/* Accept all multicast packets: open the filter completely. */
	mc_filter[1] = mc_filter[0] = 0xffffffff;
	lp->mc_list = dev->mc_list;
	lp->options |= OPTION_MULTICAST_ENABLE;
	amd8111e_writeq(*(u64*)mc_filter,lp->mmio + LADRF);
if( dev->mc_count == 0 ){
	/* No multicast addresses: accept only own packets. */
	mc_filter[1] = mc_filter[0] = 0;
	lp->options &= ~OPTION_MULTICAST_ENABLE;
	amd8111e_writeq(*(u64*)mc_filter,lp->mmio + LADRF);
	/* disable promiscuous mode */
	writel(PROM, lp->mmio + CMD2);
/* Load all the multicast addresses into the logical address filter:
 * hash each address and set the corresponding LADRF bit. */
lp->options |= OPTION_MULTICAST_ENABLE;
lp->mc_list = dev->mc_list;
mc_filter[1] = mc_filter[0] = 0;
for (i = 0, mc_ptr = dev->mc_list; mc_ptr && i < dev->mc_count;
	i++, mc_ptr = mc_ptr->next) {
	/* Top 6 bits of the device-specific CRC select the filter bit. */
	bit_num = ( amd8111e_ether_crc(ETH_ALEN,mc_ptr->dmi_addr) >> 26 ) & 0x3f;
	mc_filter[bit_num >> 5] |= 1 << (bit_num & 31);
amd8111e_writeq(*(u64*)mc_filter,lp->mmio+ LADRF);
/* Dummy read to flush posted PCI writes. */
readl(lp->mmio + CMD2);
1563 static void amd8111e_get_drvinfo(struct net_device* dev, struct ethtool_drvinfo *info)
1565 struct amd8111e_priv *lp = netdev_priv(dev);
1566 struct pci_dev *pci_dev = lp->pci_dev;
1567 strcpy (info->driver, MODULE_NAME);
1568 strcpy (info->version, MODULE_VERS);
1569 sprintf(info->fw_version,"%u",chip_version);
1570 strcpy (info->bus_info, pci_name(pci_dev));
/* ethtool get_regs_len: size in bytes of the dump amd8111e_get_regs fills. */
static int amd8111e_get_regs_len(struct net_device *dev)
return AMD8111E_REG_DUMP_LEN;
/* ethtool get_regs: dump the memory-mapped registers into buf
 * (AMD8111E_REG_DUMP_LEN bytes, see amd8111e_read_regs). */
static void amd8111e_get_regs(struct net_device *dev, struct ethtool_regs *regs, void *buf)
struct amd8111e_priv *lp = netdev_priv(dev);
amd8111e_read_regs(lp, buf);
/* ethtool get_settings: read link settings via the generic MII helper,
 * under lp->lock.
 * NOTE(review): the trailing "return 0;" is not visible in this listing. */
static int amd8111e_get_settings(struct net_device *dev, struct ethtool_cmd *ecmd)
struct amd8111e_priv *lp = netdev_priv(dev);
spin_lock_irq(&lp->lock);
mii_ethtool_gset(&lp->mii_if, ecmd);
spin_unlock_irq(&lp->lock);
/* ethtool set_settings: apply link settings via the generic MII helper,
 * under lp->lock.
 * NOTE(review): the "int res;" declaration and "return res;" are not
 * visible in this listing. */
static int amd8111e_set_settings(struct net_device *dev, struct ethtool_cmd *ecmd)
struct amd8111e_priv *lp = netdev_priv(dev);
spin_lock_irq(&lp->lock);
res = mii_ethtool_sset(&lp->mii_if, ecmd);
spin_unlock_irq(&lp->lock);
/* ethtool nway_reset: restart autonegotiation on the external PHY. */
static int amd8111e_nway_reset(struct net_device *dev)
struct amd8111e_priv *lp = netdev_priv(dev);
return mii_nway_restart(&lp->mii_if);
/* ethtool get_link: report link state from the MII interface. */
static u32 amd8111e_get_link(struct net_device *dev)
struct amd8111e_priv *lp = netdev_priv(dev);
return mii_link_ok(&lp->mii_if);
/* ethtool get_wol: report Wake-on-LAN capabilities (magic packet, PHY
 * link change) and whether magic-packet wake is currently enabled.
 * NOTE(review): the WAKE_PHY reporting branch, if any, is not visible in
 * this listing. */
static void amd8111e_get_wol(struct net_device *dev, struct ethtool_wolinfo *wol_info)
struct amd8111e_priv *lp = netdev_priv(dev);
wol_info->supported = WAKE_MAGIC|WAKE_PHY;
if (lp->options & OPTION_WOL_ENABLE)
	wol_info->wolopts = WAKE_MAGIC;
/* ethtool set_wol: enable magic-packet or PHY-link-change wake-up, or
 * disable Wake-on-LAN entirely, under lp->lock.
 * NOTE(review): the "lp->options |=" left-hand sides of the two
 * assignments, the error return and the final "return 0;" appear
 * truncated in this listing. */
static int amd8111e_set_wol(struct net_device *dev, struct ethtool_wolinfo *wol_info)
struct amd8111e_priv *lp = netdev_priv(dev);
/* Reject wake flags the hardware cannot honour. */
if (wol_info->wolopts & ~(WAKE_MAGIC|WAKE_PHY))
spin_lock_irq(&lp->lock);
if (wol_info->wolopts & WAKE_MAGIC)
	(OPTION_WOL_ENABLE | OPTION_WAKE_MAGIC_ENABLE);
else if(wol_info->wolopts & WAKE_PHY)
	(OPTION_WOL_ENABLE | OPTION_WAKE_PHY_ENABLE);
lp->options &= ~OPTION_WOL_ENABLE;
spin_unlock_irq(&lp->lock);
/* ethtool operations supported by this driver. */
static struct ethtool_ops ops = {
	.get_drvinfo = amd8111e_get_drvinfo,
	.get_regs_len = amd8111e_get_regs_len,
	.get_regs = amd8111e_get_regs,
	.get_settings = amd8111e_get_settings,
	.set_settings = amd8111e_set_settings,
	.nway_reset = amd8111e_nway_reset,
	.get_link = amd8111e_get_link,
	.get_wol = amd8111e_get_wol,
	.set_wol = amd8111e_set_wol,
/* Device ioctl handler: MII PHY access (get PHY id, read and write PHY
 * registers) used by ethtool/mii-tool style applications.
 * NOTE(review): the switch statement, case labels, privilege-check
 * return and the final return appear truncated in this listing. */
static int amd8111e_ioctl(struct net_device * dev , struct ifreq *ifr, int cmd)
struct mii_ioctl_data *data = if_mii(ifr);
struct amd8111e_priv *lp = netdev_priv(dev);
/* PHY register access requires admin privileges. */
if (!capable(CAP_NET_ADMIN))
/* Report the PHY address in use. */
data->phy_id = lp->ext_phy_addr;
/* Read a PHY register under the lock. */
spin_lock_irq(&lp->lock);
err = amd8111e_read_phy(lp, data->phy_id,
	data->reg_num & PHY_REG_ADDR_MASK, &mii_regval);
spin_unlock_irq(&lp->lock);
data->val_out = mii_regval;
/* Write a PHY register under the lock. */
spin_lock_irq(&lp->lock);
err = amd8111e_write_phy(lp, data->phy_id,
	data->reg_num & PHY_REG_ADDR_MASK, data->val_in);
spin_unlock_irq(&lp->lock);
/* Set a new station MAC address (added for bonding support): copy it into
 * dev->dev_addr and program the chip's physical-address (PADR) registers
 * byte by byte under the lock.
 * NOTE(review): the "int i;" declaration and "return 0;" are not visible
 * in this listing; no is_valid_ether_addr() check is visible either. */
static int amd8111e_set_mac_address(struct net_device *dev, void *p)
struct amd8111e_priv *lp = netdev_priv(dev);
struct sockaddr *addr = p;
memcpy(dev->dev_addr, addr->sa_data, dev->addr_len);
spin_lock_irq(&lp->lock);
/* Setting the MAC address to the device */
for(i = 0; i < ETH_ADDR_LEN; i++)
	writeb( dev->dev_addr[i], lp->mmio + PADR + i );
spin_unlock_irq(&lp->lock);
/* Change the MTU. A running device is stopped and restarted so the
 * receive descriptors are re-initialized with buffers sized for the new
 * MTU; a stopped device just records the value for the next open.
 * NOTE(review): the "dev->mtu = new_mtu;" assignments, error returns and
 * the check on amd8111e_restart()'s result appear truncated in this
 * listing. */
int amd8111e_change_mtu(struct net_device *dev, int new_mtu)
struct amd8111e_priv *lp = netdev_priv(dev);
/* Reject out-of-range MTUs. */
if ((new_mtu < AMD8111E_MIN_MTU) || (new_mtu > AMD8111E_MAX_MTU))
if (!netif_running(dev)) {
	/* Device is down: the new MTU takes effect at the next open. */
spin_lock_irq(&lp->lock);
/* Presumably stops the chip (RUN without VAL0 clears the bit, matching
 * the INTREN-disable idiom used elsewhere) — confirm against datasheet. */
writel(RUN, lp->mmio + CMD0);
err = amd8111e_restart(dev);
spin_unlock_irq(&lp->lock);
netif_start_queue(dev);
#if AMD8111E_VLAN_TAG_USED
/* VLAN acceleration: record the VLAN group pointer under the lock.
 * NOTE(review): the "lp->vlgrp = grp;" assignment between lock and
 * unlock is not visible in this listing — confirm it is present. */
static void amd8111e_vlan_rx_register(struct net_device *dev, struct vlan_group *grp)
struct amd8111e_priv *lp = netdev_priv(dev);
spin_lock_irq(&lp->lock);
spin_unlock_irq(&lp->lock);
/* VLAN acceleration: remove a VLAN id from the group under the lock.
 * NOTE(review): the "if (lp->vlgrp)" guard before the dereference is not
 * visible in this listing — confirm it is present. */
static void amd8111e_vlan_rx_kill_vid(struct net_device *dev, unsigned short vid)
struct amd8111e_priv *lp = netdev_priv(dev);
spin_lock_irq(&lp->lock);
lp->vlgrp->vlan_devices[vid] = NULL;
spin_unlock_irq(&lp->lock);
/* Arm magic-packet Wake-on-LAN in the chip (CMD3/CMD7 writes).
 * NOTE(review): the "return 0;" is not visible in this listing. */
static int amd8111e_enable_magicpkt(struct amd8111e_priv* lp)
writel( VAL1|MPPLBA, lp->mmio + CMD3);
writel( VAL0|MPEN_SW, lp->mmio + CMD7);
/* Dummy read to flush posted PCI writes. */
readl(lp->mmio + CMD7);
/* Arm link-change Wake-on-LAN in the chip.
 * NOTE(review): the "return 0;" is not visible in this listing. */
static int amd8111e_enable_link_change(struct amd8111e_priv* lp)
/* Adapter is already stopped/suspended/interrupt-disabled. */
writel(VAL0|LCMODE_SW,lp->mmio + CMD7);
/* Dummy read to flush posted PCI writes. */
readl(lp->mmio + CMD7);
/* Watchdog: called when a transmission fails to complete within
 * dev->watchdog_timeo, on the assumption that interrupts were lost or
 * the interface locked up. Reinitializes the hardware and, on success,
 * wakes the transmit queue.
 * NOTE(review): the "int err;" declaration, the printk argument list and
 * the "if(!err)" check before netif_wake_queue appear truncated in this
 * listing. */
static void amd8111e_tx_timeout(struct net_device *dev)
struct amd8111e_priv* lp = netdev_priv(dev);
printk(KERN_ERR "%s: transmit timed out, resetting\n",
spin_lock_irq(&lp->lock);
err = amd8111e_restart(dev);
spin_unlock_irq(&lp->lock);
netif_wake_queue(dev);
/* PCI suspend: detach the interface, stop the chip, arm Wake-on-LAN if
 * configured, save PCI config space and enter D3hot.
 * NOTE(review): the early "return 0;" and final return are not visible
 * in this listing. NOTE(review): del_timer_sync() is called with
 * lp->lock held — confirm the timer handler never takes lp->lock, or
 * this can deadlock. */
static int amd8111e_suspend(struct pci_dev *pci_dev, u32 state)
struct net_device *dev = pci_get_drvdata(pci_dev);
struct amd8111e_priv *lp = netdev_priv(dev);
/* Nothing to quiesce if the interface is down. */
if (!netif_running(dev))
/* disable the interrupt */
spin_lock_irq(&lp->lock);
amd8111e_disable_interrupt(lp);
spin_unlock_irq(&lp->lock);
netif_device_detach(dev);
/* Stop the IPG timer and the chip. */
spin_lock_irq(&lp->lock);
if(lp->options & OPTION_DYN_IPG_ENABLE)
	del_timer_sync(&lp->ipg_data.ipg_timer);
amd8111e_stop_chip(lp);
spin_unlock_irq(&lp->lock);
if(lp->options & OPTION_WOL_ENABLE){
	/* Arm the configured wake-up sources. */
	if(lp->options & OPTION_WAKE_MAGIC_ENABLE)
		amd8111e_enable_magicpkt(lp);
	if(lp->options & OPTION_WAKE_PHY_ENABLE)
		amd8111e_enable_link_change(lp);
	/* Allow wake from D3hot (3) and D3cold (4). */
	pci_enable_wake(pci_dev, 3, 1);
	pci_enable_wake(pci_dev, 4, 1); /* D3 cold */
/* WOL disabled: make sure wake is off in both D3 states. */
pci_enable_wake(pci_dev, 3, 0);
pci_enable_wake(pci_dev, 4, 0); /* 4 == D3 cold */
pci_save_state(pci_dev);
/* Enter D3hot. */
pci_set_power_state(pci_dev, 3);
/* PCI resume: restore power state and config space, disarm wake-up,
 * reattach the interface and restart the chip (and the IPG timer if
 * enabled).
 * NOTE(review): the early and final "return 0;" are not visible in this
 * listing. */
static int amd8111e_resume(struct pci_dev *pci_dev)
struct net_device *dev = pci_get_drvdata(pci_dev);
struct amd8111e_priv *lp = netdev_priv(dev);
if (!netif_running(dev))
/* Back to D0 and restore the config space saved at suspend. */
pci_set_power_state(pci_dev, 0);
pci_restore_state(pci_dev);
/* Disarm wake for both D3 states. */
pci_enable_wake(pci_dev, 3, 0);
pci_enable_wake(pci_dev, 4, 0); /* D3 cold */
netif_device_attach(dev);
spin_lock_irq(&lp->lock);
amd8111e_restart(dev);
/* Restart ipg timer */
if(lp->options & OPTION_DYN_IPG_ENABLE)
	mod_timer(&lp->ipg_data.ipg_timer,
		jiffies + IPG_CONVERGE_JIFFIES);
spin_unlock_irq(&lp->lock);
/* PCI remove: unregister the interface, unmap the registers and release
 * the PCI resources.
 * NOTE(review): no free_netdev(dev) and no "if (dev)" guard are visible
 * in this listing — confirm the netdev itself is freed on removal. */
static void __devexit amd8111e_remove_one(struct pci_dev *pdev)
struct net_device *dev = pci_get_drvdata(pdev);
unregister_netdev(dev);
iounmap(((struct amd8111e_priv *)netdev_priv(dev))->mmio);
pci_release_regions(pdev);
pci_disable_device(pdev);
pci_set_drvdata(pdev, NULL);
/* Dynamic IPG (inter-packet gap) tuning timer callback. In half duplex
 * it sweeps the IPG upward in IPG_STEP increments (CSTATE), tracking the
 * value that produced the fewest new collisions, then programs that value
 * and holds it for IPG_STABLE_TIME ticks (SSTATE) before sweeping again.
 * Full duplex has no collisions, so the default IPG is used.
 * NOTE(review): early returns and the else-branch braces appear truncated
 * in this listing. */
static void amd8111e_config_ipg(struct net_device* dev)
struct amd8111e_priv *lp = netdev_priv(dev);
struct ipg_info* ipg_data = &lp->ipg_data;
void __iomem *mmio = lp->mmio;
unsigned int prev_col_cnt = ipg_data->col_cnt;
unsigned int total_col_cnt;
unsigned int tmp_ipg;
if(lp->link_config.duplex == DUPLEX_FULL){
	/* No collisions in full duplex: use the fixed default IPG. */
	ipg_data->ipg = DEFAULT_IPG;
if(ipg_data->ipg_state == SSTATE){
	if(ipg_data->timer_tick == IPG_STABLE_TIME){
		/* Stable period over: restart the convergence sweep. */
		ipg_data->timer_tick = 0;
		ipg_data->ipg = MIN_IPG - IPG_STEP;
		ipg_data->current_ipg = MIN_IPG;
		ipg_data->diff_col_cnt = 0xFFFFFFFF;
		ipg_data->ipg_state = CSTATE;
	ipg_data->timer_tick++;
if(ipg_data->ipg_state == CSTATE){
	/* Get the current collision count */
	total_col_cnt = ipg_data->col_cnt =
		amd8111e_read_mib(mmio, xmt_collisions);
	if ((total_col_cnt - prev_col_cnt) <
		(ipg_data->diff_col_cnt)){
		/* Fewer new collisions at this IPG: remember it as best. */
		ipg_data->diff_col_cnt =
			total_col_cnt - prev_col_cnt ;
		ipg_data->ipg = ipg_data->current_ipg;
	/* Step to the next candidate IPG; past MAX_IPG the sweep ends and
	 * the best value found is programmed (SSTATE). */
	ipg_data->current_ipg += IPG_STEP;
	if (ipg_data->current_ipg <= MAX_IPG)
		tmp_ipg = ipg_data->current_ipg;
	tmp_ipg = ipg_data->ipg;
	ipg_data->ipg_state = SSTATE;
	writew((u32)tmp_ipg, mmio + IPG);
	/* IFS1 tracks IPG with a fixed delta. */
	writew((u32)(tmp_ipg - IFS1_DELTA), mmio + IFS1);
/* Re-arm this timer for the next convergence interval. */
mod_timer(&lp->ipg_data.ipg_timer, jiffies + IPG_CONVERGE_JIFFIES);
/* Scan MII addresses 0x1e down to 0 for an external PHY, recording its
 * 32-bit ID (PHYSID1 << 16 | PHYSID2) and address; fall back to address 1
 * if none responds.
 * NOTE(review): the declarations of i/id1/id2, the continue statements
 * after the failed reads, and the early return appear truncated in this
 * listing. */
static void __devinit amd8111e_probe_ext_phy(struct net_device* dev)
struct amd8111e_priv *lp = netdev_priv(dev);
for (i = 0x1e; i >= 0; i--) {
	/* Skip addresses whose ID registers cannot be read. */
	if (amd8111e_read_phy(lp, i, MII_PHYSID1, &id1))
	if (amd8111e_read_phy(lp, i, MII_PHYSID2, &id2))
	lp->ext_phy_id = (id1 << 16) | id2;
	lp->ext_phy_addr = i;
/* No PHY found: assume the default address. */
lp->ext_phy_addr = 1;
/* PCI probe: enable the device, map its registers, allocate the
 * net_device, configure entry points, MAC address, module parameters,
 * MII defaults and the optional IPG timer, then register the interface.
 * NOTE(review): many declarations (err, i, pm_cap, card_idx usage),
 * returns, closing braces and error-path labels appear truncated in this
 * listing. */
static int __devinit amd8111e_probe_one(struct pci_dev *pdev,
		const struct pci_device_id *ent)
unsigned long reg_addr,reg_len;
struct amd8111e_priv* lp;
struct net_device* dev;
err = pci_enable_device(pdev);
printk(KERN_ERR "amd8111e: Cannot enable new PCI device,"
/* BAR 0 must be a memory-mapped region. */
if(!(pci_resource_flags(pdev, 0) & IORESOURCE_MEM)){
printk(KERN_ERR "amd8111e: Cannot find PCI base address"
goto err_disable_pdev;
err = pci_request_regions(pdev, MODULE_NAME);
printk(KERN_ERR "amd8111e: Cannot obtain PCI resources, "
goto err_disable_pdev;
pci_set_master(pdev);
/* Find power-management capability. */
if((pm_cap = pci_find_capability(pdev, PCI_CAP_ID_PM))==0){
printk(KERN_ERR "amd8111e: No Power Management capability, "
/* Initialize DMA: the chip addresses 32 bits. */
if(!pci_dma_supported(pdev, 0xffffffff)){
printk(KERN_ERR "amd8111e: DMA not supported,"
pdev->dma_mask = 0xffffffff;
reg_addr = pci_resource_start(pdev, 0);
reg_len = pci_resource_len(pdev, 0);
dev = alloc_etherdev(sizeof(struct amd8111e_priv));
printk(KERN_ERR "amd8111e: Etherdev alloc failed, exiting.\n");
SET_MODULE_OWNER(dev);
SET_NETDEV_DEV(dev, &pdev->dev);
#if AMD8111E_VLAN_TAG_USED
/* Advertise hardware VLAN tag insertion/extraction. */
dev->features |= NETIF_F_HW_VLAN_TX | NETIF_F_HW_VLAN_RX ;
dev->vlan_rx_register =amd8111e_vlan_rx_register;
dev->vlan_rx_kill_vid = amd8111e_vlan_rx_kill_vid;
lp = netdev_priv(dev);
lp->amd8111e_net_dev = dev;
lp->pm_cap = pm_cap;
spin_lock_init(&lp->lock);
lp->mmio = ioremap(reg_addr, reg_len);
if (lp->mmio == 0) {
printk(KERN_ERR "amd8111e: Cannot map device registers, "
/* Read the MAC address programmed into the PADR registers. */
for(i = 0; i < ETH_ADDR_LEN; i++)
dev->dev_addr[i] =readb(lp->mmio + PADR + i);
/* Apply the user-defined module parameters for this card. */
lp->ext_phy_option = speed_duplex[card_idx];
if(coalesce[card_idx])
lp->options |= OPTION_INTR_COAL_ENABLE;
if(dynamic_ipg[card_idx++])
lp->options |= OPTION_DYN_IPG_ENABLE;
/* Initialize driver entry points */
dev->open = amd8111e_open;
dev->hard_start_xmit = amd8111e_start_xmit;
dev->stop = amd8111e_close;
dev->get_stats = amd8111e_get_stats;
dev->set_multicast_list = amd8111e_set_multicast_list;
dev->set_mac_address = amd8111e_set_mac_address;
dev->do_ioctl = amd8111e_ioctl;
dev->change_mtu = amd8111e_change_mtu;
SET_ETHTOOL_OPS(dev, &ops);
dev->irq =pdev->irq;
dev->tx_timeout = amd8111e_tx_timeout;
dev->watchdog_timeo = AMD8111E_TX_TIMEOUT;
#ifdef CONFIG_AMD8111E_NAPI
dev->poll = amd8111e_rx_poll;
#ifdef CONFIG_NET_POLL_CONTROLLER
dev->poll_controller = amd8111e_poll;
#if AMD8111E_VLAN_TAG_USED
/* NOTE(review): duplicates the VLAN setup already done above —
 * harmless but redundant; confirm against the full file. */
dev->features |= NETIF_F_HW_VLAN_TX | NETIF_F_HW_VLAN_RX;
dev->vlan_rx_register =amd8111e_vlan_rx_register;
dev->vlan_rx_kill_vid = amd8111e_vlan_rx_kill_vid;
/* Probe the external PHY */
amd8111e_probe_ext_phy(dev);
/* Set MII default values. */
lp->mii_if.dev = dev;
lp->mii_if.mdio_read = amd8111e_mdio_read;
lp->mii_if.mdio_write = amd8111e_mdio_write;
lp->mii_if.phy_id = lp->ext_phy_addr;
/* Set receive buffer length and the jumbo option. */
amd8111e_set_rx_buff_len(dev);
err = register_netdev(dev);
printk(KERN_ERR "amd8111e: Cannot register net device, "
pci_set_drvdata(pdev, dev);
/* Initialize the software IPG timer (armed at open, not here). */
if(lp->options & OPTION_DYN_IPG_ENABLE){
init_timer(&lp->ipg_data.ipg_timer);
lp->ipg_data.ipg_timer.data = (unsigned long) dev;
lp->ipg_data.ipg_timer.function = (void *)&amd8111e_config_ipg;
lp->ipg_data.ipg_timer.expires = jiffies +
IPG_CONVERGE_JIFFIES;
lp->ipg_data.ipg = DEFAULT_IPG;
lp->ipg_data.ipg_state = CSTATE;
/* Display driver and device information. */
chip_version = (readl(lp->mmio + CHIPID) & 0xf0000000)>>28;
printk(KERN_INFO "%s: AMD-8111e Driver Version: %s\n", dev->name,MODULE_VERS);
printk(KERN_INFO "%s: [ Rev %x ] PCI 10/100BaseT Ethernet ", dev->name, chip_version);
for (i = 0; i < 6; i++)
printk("%2.2x%c",dev->dev_addr[i],i == 5 ? ' ' : ':');
printk(KERN_INFO "%s: Found MII PHY ID 0x%08x at address 0x%02x\n",
dev->name, lp->ext_phy_id, lp->ext_phy_addr);
printk(KERN_INFO "%s: Couldn't detect MII PHY, assuming address 0x01\n",
/* Error unwind path. */
pci_release_regions(pdev);
pci_disable_device(pdev);
pci_set_drvdata(pdev, NULL);
/* PCI driver glue: probe/remove and power-management entry points. */
static struct pci_driver amd8111e_driver = {
	.name = MODULE_NAME,
	.id_table = amd8111e_pci_tbl,
	.probe = amd8111e_probe_one,
	.remove = __devexit_p(amd8111e_remove_one),
	.suspend = amd8111e_suspend,
	.resume = amd8111e_resume
/* Module entry: register with the PCI core, which probes all matching
 * devices. */
static int __init amd8111e_init(void)
return pci_module_init(&amd8111e_driver);
/* Module exit: unregister the driver and detach from all devices. */
static void __exit amd8111e_cleanup(void)
pci_unregister_driver(&amd8111e_driver);
module_init(amd8111e_init);
module_exit(amd8111e_cleanup);