patch-2_6_7-vs1_9_1_12
[linux-2.6.git] / drivers / net / amd8111e.c
1
2 /* Advanced  Micro Devices Inc. AMD8111E Linux Network Driver 
3  * Copyright (C) 2004 Advanced Micro Devices 
4  *
5  * 
6  * Copyright 2001,2002 Jeff Garzik <jgarzik@mandrakesoft.com> [ 8139cp.c,tg3.c ]
7  * Copyright (C) 2001, 2002 David S. Miller (davem@redhat.com)[ tg3.c]
8  * Copyright 1996-1999 Thomas Bogendoerfer [ pcnet32.c ]
9  * Derived from the lance driver written 1993,1994,1995 by Donald Becker.
10  * Copyright 1993 United States Government as represented by the
11  *      Director, National Security Agency.[ pcnet32.c ]
12  * Carsten Langgaard, carstenl@mips.com [ pcnet32.c ]
13  * Copyright (C) 2000 MIPS Technologies, Inc.  All rights reserved.
14  *
15  * 
16  * This program is free software; you can redistribute it and/or modify
17  * it under the terms of the GNU General Public License as published by
18  * the Free Software Foundation; either version 2 of the License, or
19  * (at your option) any later version.
20  *
21  * This program is distributed in the hope that it will be useful,
22  * but WITHOUT ANY WARRANTY; without even the implied warranty of
23  * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
24  * GNU General Public License for more details.
25  *
26  * You should have received a copy of the GNU General Public License
27  * along with this program; if not, write to the Free Software
28  * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA  02111-1307 
29  * USA
30   
31 Module Name:
32
33         amd8111e.c
34
35 Abstract:
36         
37          AMD8111 based 10/100 Ethernet Controller Driver. 
38
39 Environment:
40
41         Kernel Mode
42
43 Revision History:
44         3.0.0
45            Initial Revision.
46         3.0.1
47          1. Dynamic interrupt coalescing.
48          2. Removed prev_stats.
49          3. MII support.
50          4. Dynamic IPG support
51         3.0.2  05/29/2003
52          1. Bug fix: Fixed failure to send jumbo packets larger than 4k.
53          2. Bug fix: Fixed VLAN support failure.
54          3. Bug fix: Fixed receive interrupt coalescing bug.
55          4. Dynamic IPG support is disabled by default.
56         3.0.3 06/05/2003
57          1. Bug fix: Fixed failure to close the interface if SMP is enabled.
58         3.0.4 12/09/2003
59          1. Added set_mac_address routine for bonding driver support.
60          2. Tested the driver for bonding support
61          3. Bug fix: Fixed mismatch in actual receive buffer length and length
62             indicated to the h/w.
63          4. Modified amd8111e_rx() routine to receive all the received packets 
64             in the first interrupt.
65          5. Bug fix: Corrected  rx_errors  reported in get_stats() function.
66         3.0.5 03/22/2004
67          1. Added NAPI support  
68
69 */
70
71
72 #include <linux/config.h>
73 #include <linux/module.h>
74 #include <linux/kernel.h>
75 #include <linux/types.h>
76 #include <linux/compiler.h>
77 #include <linux/slab.h>
78 #include <linux/delay.h>
79 #include <linux/init.h>
80 #include <linux/ioport.h>
81 #include <linux/pci.h>
82 #include <linux/netdevice.h>
83 #include <linux/etherdevice.h>
84 #include <linux/skbuff.h>
85 #include <linux/ethtool.h>
86 #include <linux/mii.h>
87 #include <linux/if_vlan.h>
88 #include <linux/ctype.h>        
89 #include <linux/crc32.h>
90
91 #include <asm/system.h>
92 #include <asm/io.h>
93 #include <asm/byteorder.h>
94 #include <asm/uaccess.h>
95
96 #if defined(CONFIG_VLAN_8021Q) || defined(CONFIG_VLAN_8021Q_MODULE)
97 #define AMD8111E_VLAN_TAG_USED 1
98 #else
99 #define AMD8111E_VLAN_TAG_USED 0
100 #endif
101
102 #include "amd8111e.h"
103 #define MODULE_NAME     "amd8111e"
104 #define MODULE_VERS     "3.0.5"
105 MODULE_AUTHOR("Advanced Micro Devices, Inc.");
106 MODULE_DESCRIPTION ("AMD8111 based 10/100 Ethernet Controller. Driver Version 3.0.3");
107 MODULE_LICENSE("GPL");
108 MODULE_PARM(speed_duplex, "1-" __MODULE_STRING (MAX_UNITS) "i");
109 MODULE_PARM_DESC(speed_duplex, "Set device speed and duplex modes, 0: Auto Negotitate, 1: 10Mbps Half Duplex, 2: 10Mbps Full Duplex, 3: 100Mbps Half Duplex, 4: 100Mbps Full Duplex");
110 MODULE_PARM(coalesce, "1-" __MODULE_STRING(MAX_UNITS) "i");
111 MODULE_PARM_DESC(coalesce, "Enable or Disable interrupt coalescing, 1: Enable, 0: Disable");
112 MODULE_PARM(dynamic_ipg, "1-" __MODULE_STRING(MAX_UNITS) "i");
113 MODULE_PARM_DESC(dynamic_ipg, "Enable or Disable dynamic IPG, 1: Enable, 0: Disable");
114
115 static struct pci_device_id amd8111e_pci_tbl[] = {
116                 
117         { PCI_VENDOR_ID_AMD, PCI_DEVICE_ID_AMD8111E_7462,
118          PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0UL },
119         { 0, }
120
121 };
122 /* 
123 This function will read the PHY registers.
124 */
125 static int amd8111e_read_phy(struct amd8111e_priv* lp, int phy_id, int reg, u32* val)
126 {
127         void * mmio = lp->mmio;
128         unsigned int reg_val;
129         unsigned int repeat= REPEAT_CNT;
130
131         reg_val = readl(mmio + PHY_ACCESS);
132         while (reg_val & PHY_CMD_ACTIVE)
133                 reg_val = readl( mmio + PHY_ACCESS );
134
135         writel( PHY_RD_CMD | ((phy_id & 0x1f) << 21) |
136                            ((reg & 0x1f) << 16),  mmio +PHY_ACCESS);
137         do{
138                 reg_val = readl(mmio + PHY_ACCESS);
139                 udelay(30);  /* It takes 30 us to read/write data */
140         } while (--repeat && (reg_val & PHY_CMD_ACTIVE));
141         if(reg_val & PHY_RD_ERR)
142                 goto err_phy_read;
143         
144         *val = reg_val & 0xffff;
145         return 0;
146 err_phy_read:   
147         *val = 0;
148         return -EINVAL;
149         
150 }
151
152 /* 
153 This function will write into PHY registers. 
154 */
155 static int amd8111e_write_phy(struct amd8111e_priv* lp,int phy_id, int reg, u32 val)
156 {
157         unsigned int repeat = REPEAT_CNT
158         void * mmio = lp->mmio;
159         unsigned int reg_val;
160
161         reg_val = readl(mmio + PHY_ACCESS);
162         while (reg_val & PHY_CMD_ACTIVE)
163                 reg_val = readl( mmio + PHY_ACCESS );
164
165         writel( PHY_WR_CMD | ((phy_id & 0x1f) << 21) |
166                            ((reg & 0x1f) << 16)|val, mmio + PHY_ACCESS);
167
168         do{
169                 reg_val = readl(mmio + PHY_ACCESS);
170                 udelay(30);  /* It takes 30 us to read/write the data */
171         } while (--repeat && (reg_val & PHY_CMD_ACTIVE));
172         
173         if(reg_val & PHY_RD_ERR)
174                 goto err_phy_write;
175         
176         return 0;
177
178 err_phy_write:  
179         return -EINVAL;
180         
181 }
182 /* 
183 This is the mii register read function provided to the mii interface.
184 */ 
/* mii-interface read hook: returns the PHY register value, or 0 on
 * failure (amd8111e_read_phy() stores 0 in its error path). */
static int amd8111e_mdio_read(struct net_device * dev, int phy_id, int reg_num)
{
        struct amd8111e_priv *lp = netdev_priv(dev);
        unsigned int value;

        amd8111e_read_phy(lp, phy_id, reg_num, &value);

        return value;
}
194
195 /* 
196 This is the mii register write function provided to the mii interface.
197 */ 
/* mii-interface write hook.  The mii layer gives this hook no way to
 * report failure, so the amd8111e_write_phy() result is ignored. */
static void amd8111e_mdio_write(struct net_device * dev, int phy_id, int reg_num, int val)
{
        struct amd8111e_priv *lp = netdev_priv(dev);

        amd8111e_write_phy(lp, phy_id, reg_num, val);
}
204
205 /*
206 This function will set PHY speed. During initialization sets the original speed to 100 full.
207 */
208 static void amd8111e_set_ext_phy(struct net_device *dev)
209 {
210         struct amd8111e_priv *lp = netdev_priv(dev);
211         u32 bmcr,advert,tmp;
212         
213         /* Determine mii register values to set the speed */
214         advert = amd8111e_mdio_read(dev, PHY_ID, MII_ADVERTISE);
215         tmp = advert & ~(ADVERTISE_ALL | ADVERTISE_100BASE4);
216         switch (lp->ext_phy_option){
217
218                 default:
219                 case SPEED_AUTONEG: /* advertise all values */
220                         tmp |= ( ADVERTISE_10HALF|ADVERTISE_10FULL|
221                                 ADVERTISE_100HALF|ADVERTISE_100FULL) ;
222                         break;
223                 case SPEED10_HALF:
224                         tmp |= ADVERTISE_10HALF;
225                         break;
226                 case SPEED10_FULL:
227                         tmp |= ADVERTISE_10FULL;
228                         break;
229                 case SPEED100_HALF: 
230                         tmp |= ADVERTISE_100HALF;
231                         break;
232                 case SPEED100_FULL:
233                         tmp |= ADVERTISE_100FULL;
234                         break;
235         }
236
237         if(advert != tmp)
238                 amd8111e_mdio_write(dev, PHY_ID, MII_ADVERTISE, tmp);
239         /* Restart auto negotiation */
240         bmcr = amd8111e_mdio_read(dev, PHY_ID, MII_BMCR);
241         bmcr |= (BMCR_ANENABLE | BMCR_ANRESTART);
242         amd8111e_mdio_write(dev, PHY_ID, MII_BMCR, bmcr);
243
244 }
245
246 /* 
247 This function will unmap skb->data space and will free 
248 all transmit and receive skbuffs.
249 */
250 static int amd8111e_free_skbs(struct net_device *dev)
251 {
252         struct amd8111e_priv *lp = netdev_priv(dev);
253         struct sk_buff* rx_skbuff;
254         int i;
255
256         /* Freeing transmit skbs */
257         for(i = 0; i < NUM_TX_BUFFERS; i++){
258                 if(lp->tx_skbuff[i]){
259                         pci_unmap_single(lp->pci_dev,lp->tx_dma_addr[i],                                        lp->tx_skbuff[i]->len,PCI_DMA_TODEVICE);
260                         dev_kfree_skb (lp->tx_skbuff[i]);
261                         lp->tx_skbuff[i] = NULL;
262                         lp->tx_dma_addr[i] = 0;
263                 }
264         }
265         /* Freeing previously allocated receive buffers */
266         for (i = 0; i < NUM_RX_BUFFERS; i++){
267                 rx_skbuff = lp->rx_skbuff[i];
268                 if(rx_skbuff != NULL){
269                         pci_unmap_single(lp->pci_dev,lp->rx_dma_addr[i],
270                                   lp->rx_buff_len - 2,PCI_DMA_FROMDEVICE);
271                         dev_kfree_skb(lp->rx_skbuff[i]);
272                         lp->rx_skbuff[i] = NULL;
273                         lp->rx_dma_addr[i] = 0;
274                 }
275         }
276         
277         return 0;
278 }
279
280 /*
281 This will set the receive buffer length corresponding to the mtu size of the network interface.
282 */
283 static inline void amd8111e_set_rx_buff_len(struct net_device* dev)
284 {
285         struct amd8111e_priv* lp = netdev_priv(dev);
286         unsigned int mtu = dev->mtu;
287         
288         if (mtu > ETH_DATA_LEN){
289                 /* MTU + ethernet header + FCS
290                 + optional VLAN tag + skb reserve space 2 */
291
292                 lp->rx_buff_len = mtu + ETH_HLEN + 10;
293                 lp->options |= OPTION_JUMBO_ENABLE;
294         } else{
295                 lp->rx_buff_len = PKT_BUFF_SZ;
296                 lp->options &= ~OPTION_JUMBO_ENABLE;
297         }
298 }
299
300 /* 
301 This function will free all the previously allocated buffers, determine new receive buffer length  and will allocate new receive buffers. This function also allocates and initializes both the transmitter and receive hardware descriptors.
302  */
303 static int amd8111e_init_ring(struct net_device *dev)
304 {
305         struct amd8111e_priv *lp = netdev_priv(dev);
306         int i;
307
308         lp->rx_idx = lp->tx_idx = 0;
309         lp->tx_complete_idx = 0;
310         lp->tx_ring_idx = 0;
311         
312
313         if(lp->opened)
314                 /* Free previously allocated transmit and receive skbs */
315                 amd8111e_free_skbs(dev);        
316
317         else{
318                  /* allocate the tx and rx descriptors */
319                 if((lp->tx_ring = pci_alloc_consistent(lp->pci_dev, 
320                         sizeof(struct amd8111e_tx_dr)*NUM_TX_RING_DR,
321                         &lp->tx_ring_dma_addr)) == NULL)
322                 
323                         goto err_no_mem;
324         
325                 if((lp->rx_ring = pci_alloc_consistent(lp->pci_dev, 
326                         sizeof(struct amd8111e_rx_dr)*NUM_RX_RING_DR,
327                         &lp->rx_ring_dma_addr)) == NULL)
328                 
329                         goto err_free_tx_ring;
330
331         }
332         /* Set new receive buff size */
333         amd8111e_set_rx_buff_len(dev);
334
335         /* Allocating receive  skbs */
336         for (i = 0; i < NUM_RX_BUFFERS; i++) {
337
338                 if (!(lp->rx_skbuff[i] = dev_alloc_skb(lp->rx_buff_len))) {
339                                 /* Release previos allocated skbs */
340                                 for(--i; i >= 0 ;i--)
341                                         dev_kfree_skb(lp->rx_skbuff[i]);
342                                 goto err_free_rx_ring;
343                 }
344                 skb_reserve(lp->rx_skbuff[i],2);
345         }
346         /* Initilaizing receive descriptors */
347         for (i = 0; i < NUM_RX_BUFFERS; i++) {
348                 lp->rx_dma_addr[i] = pci_map_single(lp->pci_dev, 
349                         lp->rx_skbuff[i]->data,lp->rx_buff_len-2, PCI_DMA_FROMDEVICE);
350
351                 lp->rx_ring[i].buff_phy_addr = cpu_to_le32(lp->rx_dma_addr[i]);
352                 lp->rx_ring[i].buff_count = cpu_to_le16(lp->rx_buff_len-2);
353                 lp->rx_ring[i].rx_flags = cpu_to_le16(OWN_BIT);
354         }
355
356         /* Initializing transmit descriptors */
357         for (i = 0; i < NUM_TX_RING_DR; i++) {
358                 lp->tx_ring[i].buff_phy_addr = 0;
359                 lp->tx_ring[i].tx_flags = 0;
360                 lp->tx_ring[i].buff_count = 0;
361         }
362
363         return 0;
364
365 err_free_rx_ring:
366         
367         pci_free_consistent(lp->pci_dev, 
368                 sizeof(struct amd8111e_rx_dr)*NUM_RX_RING_DR,lp->rx_ring,
369                 lp->rx_ring_dma_addr);
370
371 err_free_tx_ring:
372         
373         pci_free_consistent(lp->pci_dev,
374                  sizeof(struct amd8111e_tx_dr)*NUM_TX_RING_DR,lp->tx_ring, 
375                  lp->tx_ring_dma_addr);
376
377 err_no_mem:
378         return -ENOMEM;
379 }
380 /* This function will set the interrupt coalescing according to the input arguments */
381 static int amd8111e_set_coalesce(struct net_device * dev, enum coal_mode cmod)
382 {
383         unsigned int timeout;
384         unsigned int event_count;
385
386         struct amd8111e_priv *lp = netdev_priv(dev);
387         void* mmio = lp->mmio;
388         struct amd8111e_coalesce_conf * coal_conf = &lp->coal_conf;
389
390
391         switch(cmod)
392         {
393                 case RX_INTR_COAL :
394                         timeout = coal_conf->rx_timeout;
395                         event_count = coal_conf->rx_event_count;
396                         if( timeout > MAX_TIMEOUT || 
397                                         event_count > MAX_EVENT_COUNT ) 
398                         return -EINVAL;
399
400                         timeout = timeout * DELAY_TIMER_CONV; 
401                         writel(VAL0|STINTEN, mmio+INTEN0);
402                         writel((u32)DLY_INT_A_R0|( event_count<< 16 )|timeout,
403                                                         mmio+DLY_INT_A);
404                         break;
405
406                 case TX_INTR_COAL :
407                         timeout = coal_conf->tx_timeout;
408                         event_count = coal_conf->tx_event_count;
409                         if( timeout > MAX_TIMEOUT || 
410                                         event_count > MAX_EVENT_COUNT ) 
411                         return -EINVAL;
412
413                    
414                         timeout = timeout * DELAY_TIMER_CONV; 
415                         writel(VAL0|STINTEN,mmio+INTEN0);
416                         writel((u32)DLY_INT_B_T0|( event_count<< 16 )|timeout,
417                                                          mmio+DLY_INT_B);
418                         break;
419
420                 case DISABLE_COAL:
421                         writel(0,mmio+STVAL);
422                         writel(STINTEN, mmio+INTEN0);
423                         writel(0, mmio +DLY_INT_B);
424                         writel(0, mmio+DLY_INT_A);
425                         break;
426                  case ENABLE_COAL: 
427                        /* Start the timer */
428                         writel((u32)SOFT_TIMER_FREQ, mmio+STVAL); /*  0.5 sec */
429                         writel(VAL0|STINTEN, mmio+INTEN0);
430                         break;
431                 default:
432                         break;
433
434    }
435         return 0;
436
437 }
438
439 /* 
440 This function initializes the device registers  and starts the device.  
441 */
442 static int amd8111e_restart(struct net_device *dev)
443 {
444         struct amd8111e_priv *lp = netdev_priv(dev);
445         void * mmio = lp->mmio;
446         int i,reg_val;
447
448         /* stop the chip */
449          writel(RUN, mmio + CMD0);
450
451         if(amd8111e_init_ring(dev))
452                 return -ENOMEM;
453
454         /* enable the port manager and set auto negotiation always */
455         writel((u32) VAL1|EN_PMGR, mmio + CMD3 );
456         writel((u32)XPHYANE|XPHYRST , mmio + CTRL2); 
457         
458         amd8111e_set_ext_phy(dev);
459
460         /* set control registers */
461         reg_val = readl(mmio + CTRL1);
462         reg_val &= ~XMTSP_MASK;
463         writel( reg_val| XMTSP_128 | CACHE_ALIGN, mmio + CTRL1 );
464
465         /* enable interrupt */
466         writel( APINT5EN | APINT4EN | APINT3EN | APINT2EN | APINT1EN | 
467                 APINT0EN | MIIPDTINTEN | MCCIINTEN | MCCINTEN | MREINTEN |
468                 SPNDINTEN | MPINTEN | SINTEN | STINTEN, mmio + INTEN0);
469
470         writel(VAL3 | LCINTEN | VAL1 | TINTEN0 | VAL0 | RINTEN0, mmio + INTEN0);
471
472         /* initialize tx and rx ring base addresses */
473         writel((u32)lp->tx_ring_dma_addr,mmio + XMT_RING_BASE_ADDR0);
474         writel((u32)lp->rx_ring_dma_addr,mmio+ RCV_RING_BASE_ADDR0);
475
476         writew((u32)NUM_TX_RING_DR, mmio + XMT_RING_LEN0);
477         writew((u16)NUM_RX_RING_DR, mmio + RCV_RING_LEN0);
478         
479         /* set default IPG to 96 */
480         writew((u32)DEFAULT_IPG,mmio+IPG);
481         writew((u32)(DEFAULT_IPG-IFS1_DELTA), mmio + IFS1); 
482
483         if(lp->options & OPTION_JUMBO_ENABLE){
484                 writel((u32)VAL2|JUMBO, mmio + CMD3);
485                 /* Reset REX_UFLO */
486                 writel( REX_UFLO, mmio + CMD2);
487                 /* Should not set REX_UFLO for jumbo frames */
488                 writel( VAL0 | APAD_XMT|REX_RTRY , mmio + CMD2);
489         }else{
490                 writel( VAL0 | APAD_XMT | REX_RTRY|REX_UFLO, mmio + CMD2);
491                 writel((u32)JUMBO, mmio + CMD3);
492         }
493
494 #if AMD8111E_VLAN_TAG_USED
495         writel((u32) VAL2|VSIZE|VL_TAG_DEL, mmio + CMD3);
496 #endif
497         writel( VAL0 | APAD_XMT | REX_RTRY, mmio + CMD2 );
498         
499         /* Setting the MAC address to the device */
500         for(i = 0; i < ETH_ADDR_LEN; i++)
501                 writeb( dev->dev_addr[i], mmio + PADR + i ); 
502
503         /* Enable interrupt coalesce */
504         if(lp->options & OPTION_INTR_COAL_ENABLE){
505                 printk(KERN_INFO "%s: Interrupt Coalescing Enabled.\n",
506                                                                 dev->name);
507                 amd8111e_set_coalesce(dev,ENABLE_COAL);
508         }
509         
510         /* set RUN bit to start the chip */
511         writel(VAL2 | RDMD0, mmio + CMD0);
512         writel(VAL0 | INTREN | RUN, mmio + CMD0);
513         
514         /* To avoid PCI posting bug */
515         readl(mmio+CMD0);
516         return 0;
517 }
518 /* 
519 This function clears necessary the device registers. 
520 */      
521 static void amd8111e_init_hw_default( struct amd8111e_priv* lp)
522 {
523         unsigned int reg_val;
524         unsigned int logic_filter[2] ={0,};
525         void * mmio = lp->mmio;
526
527
528         /* stop the chip */
529         writel(RUN, mmio + CMD0);
530
531         /* AUTOPOLL0 Register *//*TBD default value is 8100 in FPS */
532         writew( 0x8101, mmio + AUTOPOLL0);
533
534         /* Clear RCV_RING_BASE_ADDR */
535         writel(0, mmio + RCV_RING_BASE_ADDR0);
536
537         /* Clear XMT_RING_BASE_ADDR */
538         writel(0, mmio + XMT_RING_BASE_ADDR0);
539         writel(0, mmio + XMT_RING_BASE_ADDR1);
540         writel(0, mmio + XMT_RING_BASE_ADDR2);
541         writel(0, mmio + XMT_RING_BASE_ADDR3);
542
543         /* Clear CMD0  */
544         writel(CMD0_CLEAR,mmio + CMD0);
545         
546         /* Clear CMD2 */
547         writel(CMD2_CLEAR, mmio +CMD2);
548
549         /* Clear CMD7 */
550         writel(CMD7_CLEAR , mmio + CMD7);
551
552         /* Clear DLY_INT_A and DLY_INT_B */
553         writel(0x0, mmio + DLY_INT_A);
554         writel(0x0, mmio + DLY_INT_B);
555
556         /* Clear FLOW_CONTROL */
557         writel(0x0, mmio + FLOW_CONTROL);
558
559         /* Clear INT0  write 1 to clear register */
560         reg_val = readl(mmio + INT0);
561         writel(reg_val, mmio + INT0);
562
563         /* Clear STVAL */
564         writel(0x0, mmio + STVAL);
565
566         /* Clear INTEN0 */
567         writel( INTEN0_CLEAR, mmio + INTEN0);
568
569         /* Clear LADRF */
570         writel(0x0 , mmio + LADRF);
571
572         /* Set SRAM_SIZE & SRAM_BOUNDARY registers  */
573         writel( 0x80010,mmio + SRAM_SIZE);
574
575         /* Clear RCV_RING0_LEN */
576         writel(0x0, mmio +  RCV_RING_LEN0);
577
578         /* Clear XMT_RING0/1/2/3_LEN */
579         writel(0x0, mmio +  XMT_RING_LEN0);
580         writel(0x0, mmio +  XMT_RING_LEN1);
581         writel(0x0, mmio +  XMT_RING_LEN2);
582         writel(0x0, mmio +  XMT_RING_LEN3);
583
584         /* Clear XMT_RING_LIMIT */
585         writel(0x0, mmio + XMT_RING_LIMIT);
586
587         /* Clear MIB */
588         writew(MIB_CLEAR, mmio + MIB_ADDR);
589
590         /* Clear LARF */
591         amd8111e_writeq(*(u64*)logic_filter,mmio+LADRF);
592
593         /* SRAM_SIZE register */
594         reg_val = readl(mmio + SRAM_SIZE);
595         
596         if(lp->options & OPTION_JUMBO_ENABLE)
597                 writel( VAL2|JUMBO, mmio + CMD3);
598 #if AMD8111E_VLAN_TAG_USED
599         writel(VAL2|VSIZE|VL_TAG_DEL, mmio + CMD3 );
600 #endif
601         /* Set default value to CTRL1 Register */
602         writel(CTRL1_DEFAULT, mmio + CTRL1);
603
604         /* To avoid PCI posting bug */
605         readl(mmio + CMD2);
606
607 }
608
609 /* 
610 This function disables the interrupt and clears all the pending 
611 interrupts in INT0
612  */
613 static void amd8111e_disable_interrupt(struct amd8111e_priv* lp)
614 {       
615         u32 intr0;
616
617         /* Disable interrupt */
618         writel(INTREN, lp->mmio + CMD0);
619         
620         /* Clear INT0 */
621         intr0 = readl(lp->mmio + INT0);
622         writel(intr0, lp->mmio + INT0);
623         
624         /* To avoid PCI posting bug */
625         readl(lp->mmio + INT0);
626
627 }
628
629 /*
630 This function stops the chip. 
631 */
632 static void amd8111e_stop_chip(struct amd8111e_priv* lp)
633 {
634         writel(RUN, lp->mmio + CMD0);
635         
636         /* To avoid PCI posting bug */
637         readl(lp->mmio + CMD0);
638 }
639
640 /* 
641 This function frees the  transmiter and receiver descriptor rings.
642 */
643 static void amd8111e_free_ring(struct amd8111e_priv* lp)
644 {       
645
646         /* Free transmit and receive skbs */
647         amd8111e_free_skbs(lp->amd8111e_net_dev);
648
649         /* Free transmit and receive descriptor rings */
650         if(lp->rx_ring){
651                 pci_free_consistent(lp->pci_dev, 
652                         sizeof(struct amd8111e_rx_dr)*NUM_RX_RING_DR,
653                         lp->rx_ring, lp->rx_ring_dma_addr);
654                 lp->rx_ring = NULL;
655         }
656         
657         if(lp->tx_ring){
658                 pci_free_consistent(lp->pci_dev, 
659                         sizeof(struct amd8111e_tx_dr)*NUM_TX_RING_DR,
660                         lp->tx_ring, lp->tx_ring_dma_addr);
661
662                 lp->tx_ring = NULL;
663         }
664
665 }
666 #if AMD8111E_VLAN_TAG_USED      
667 /* 
668 This is the receive indication function for packets with vlan tag.
669 */      
670 static int amd8111e_vlan_rx(struct amd8111e_priv *lp, struct sk_buff *skb, u16 vlan_tag)
671 {
672 #ifdef CONFIG_AMD8111E_NAPI
673         return vlan_hwaccel_receive_skb(skb, lp->vlgrp,vlan_tag);
674 #else
675         return vlan_hwaccel_rx(skb, lp->vlgrp, vlan_tag);
676 #endif /* CONFIG_AMD8111E_NAPI */
677 }
678 #endif
679
680 /*
681 This function will free all the transmit skbs that are actually transmitted by the device. It will check the ownership of the skb before freeing the skb. 
682 */
683 static int amd8111e_tx(struct net_device *dev)
684 {
685         struct amd8111e_priv* lp = netdev_priv(dev);
686         int tx_index = lp->tx_complete_idx & TX_RING_DR_MOD_MASK;
687         int status;
688         /* Complete all the transmit packet */
689         while (lp->tx_complete_idx != lp->tx_idx){
690                 tx_index =  lp->tx_complete_idx & TX_RING_DR_MOD_MASK;
691                 status = le16_to_cpu(lp->tx_ring[tx_index].tx_flags);
692
693                 if(status & OWN_BIT)
694                         break;  /* It still hasn't been Txed */
695
696                 lp->tx_ring[tx_index].buff_phy_addr = 0;
697
698                 /* We must free the original skb */
699                 if (lp->tx_skbuff[tx_index]) {
700                         pci_unmap_single(lp->pci_dev, lp->tx_dma_addr[tx_index],
701                                         lp->tx_skbuff[tx_index]->len,
702                                         PCI_DMA_TODEVICE);
703                         dev_kfree_skb_irq (lp->tx_skbuff[tx_index]);
704                         lp->tx_skbuff[tx_index] = 0;
705                         lp->tx_dma_addr[tx_index] = 0;
706                 }
707                 lp->tx_complete_idx++;
708                 /*COAL update tx coalescing parameters */
709                 lp->coal_conf.tx_packets++;
710                 lp->coal_conf.tx_bytes += lp->tx_ring[tx_index].buff_count;     
711
712                 if (netif_queue_stopped(dev) &&
713                         lp->tx_complete_idx > lp->tx_idx - NUM_TX_BUFFERS +2){
714                         /* The ring is no longer full, clear tbusy. */
715                         /* lp->tx_full = 0; */
716                         netif_wake_queue (dev);
717                 }
718         }
719         return 0;
720 }
721
722 #if CONFIG_AMD8111E_NAPI 
723 /* This function handles the driver receive operation in polling mode */
724 static int amd8111e_rx_poll(struct net_device *dev, int * budget)
725 {
726         struct amd8111e_priv *lp = dev->priv;
727         int rx_index = lp->rx_idx & RX_RING_DR_MOD_MASK;
728         void * mmio = lp->mmio;
729         struct sk_buff *skb,*new_skb;
730         int min_pkt_len, status;
731         unsigned int intr0;
732         int num_rx_pkt = 0;
733         /*int max_rx_pkt = NUM_RX_BUFFERS;*/
734         short pkt_len;
735 #if AMD8111E_VLAN_TAG_USED              
736         short vtag;
737 #endif
738         int rx_pkt_limit = dev->quota;
739         
740         do{   
741                 /* process receive packets until we use the quota*/
742                 /* If we own the next entry, it's a new packet. Send it up. */
743                 while(!(lp->rx_ring[rx_index].rx_flags & OWN_BIT)){
744                
745                         /* check if err summary bit is set */ 
746                         if(le16_to_cpu(lp->rx_ring[rx_index].rx_flags) 
747                                                                 & ERR_BIT){
748                         /* 
749                          * There is a tricky error noted by John Murphy,
750                          * <murf@perftech.com> to Russ Nelson: Even with
751                          * full-sized * buffers it's possible for a  
752                          * jabber packet to use two buffers, with only 
753                          * the last correctly noting the error.
754                          */
755
756                         /* reseting flags */
757                         lp->rx_ring[rx_index].rx_flags &=RESET_RX_FLAGS;
758                         goto err_next_pkt;
759
760                         }
761                         /* check for STP and ENP */
762                 status = le16_to_cpu(lp->rx_ring[rx_index].rx_flags);
763                 if(!((status & STP_BIT) && (status & ENP_BIT))){
764                         /* reseting flags */
765                         lp->rx_ring[rx_index].rx_flags &=RESET_RX_FLAGS;
766                         goto err_next_pkt;
767                 }
768                 pkt_len = le16_to_cpu(lp->rx_ring[rx_index].msg_count) - 4;
769
770 #if AMD8111E_VLAN_TAG_USED              
771                 vtag = le16_to_cpu(lp->rx_ring[rx_index].rx_flags) & TT_MASK;
772                 /*MAC will strip vlan tag*/ 
773                 if(lp->vlgrp != NULL && vtag !=0)
774                         min_pkt_len =MIN_PKT_LEN - 4;
775                 else
776 #endif
777                         min_pkt_len =MIN_PKT_LEN;
778
779                 if (pkt_len < min_pkt_len) {
780                         lp->rx_ring[rx_index].rx_flags &= RESET_RX_FLAGS;
781                         lp->drv_rx_errors++;
782                         goto err_next_pkt;
783                 }
784                 if(--rx_pkt_limit < 0)
785                         goto rx_not_empty;
786                 if(!(new_skb = dev_alloc_skb(lp->rx_buff_len))){
787                         /* if allocation fail, 
788                                 ignore that pkt and go to next one */
789                         lp->rx_ring[rx_index].rx_flags &= RESET_RX_FLAGS;
790                         lp->drv_rx_errors++;
791                         goto err_next_pkt;
792                 }
793                 
794                 skb_reserve(new_skb, 2);
795                 skb = lp->rx_skbuff[rx_index];
796                 pci_unmap_single(lp->pci_dev,lp->rx_dma_addr[rx_index],
797                         lp->rx_buff_len-2, PCI_DMA_FROMDEVICE);
798                 skb_put(skb, pkt_len);
799                 skb->dev = dev;
800                 lp->rx_skbuff[rx_index] = new_skb;
801                 new_skb->dev = dev;
802                 lp->rx_dma_addr[rx_index] = pci_map_single(lp->pci_dev,
803                         new_skb->data, lp->rx_buff_len-2,PCI_DMA_FROMDEVICE);
804         
805                 skb->protocol = eth_type_trans(skb, dev);
806
807 #if AMD8111E_VLAN_TAG_USED              
808                 
809                 vtag = lp->rx_ring[rx_index].rx_flags & TT_MASK;
810                 if(lp->vlgrp != NULL && (vtag == TT_VLAN_TAGGED)){
811                         amd8111e_vlan_rx(lp, skb,
812                                     lp->rx_ring[rx_index].tag_ctrl_info);
813                 } else
814 #endif
815                         
816                         netif_receive_skb(skb);
817                 /*COAL update rx coalescing parameters*/
818                 lp->coal_conf.rx_packets++;
819                 lp->coal_conf.rx_bytes += pkt_len;      
820                 num_rx_pkt++;
821                 dev->last_rx = jiffies;
822         
823 err_next_pkt:   
824                 lp->rx_ring[rx_index].buff_phy_addr
825                          = cpu_to_le32(lp->rx_dma_addr[rx_index]);
826                 lp->rx_ring[rx_index].buff_count = 
827                                 cpu_to_le16(lp->rx_buff_len-2);
828                 lp->rx_ring[rx_index].rx_flags |= cpu_to_le16(OWN_BIT);
829                 rx_index = (++lp->rx_idx) & RX_RING_DR_MOD_MASK;
830         }
831         /* Check the interrupt status register for more packets in the 
832         mean time. Process them since we have not used up our quota.*/
833
834         intr0 = readl(mmio + INT0);
835         /*Ack receive packets */
836         writel(intr0 & RINT0,mmio + INT0);
837
838         }while(intr0 & RINT0);
839
840         /* Receive descriptor is empty now */
841         dev->quota -= num_rx_pkt;
842         *budget -= num_rx_pkt;
843         netif_rx_complete(dev);
844         /* enable receive interrupt */
845         writel(VAL0|RINTEN0, mmio + INTEN0);
846         writel(VAL2 | RDMD0, mmio + CMD0);
847         return 0;
848 rx_not_empty:
849         /* Do not call a netif_rx_complete */
850         dev->quota -= num_rx_pkt;       
851         *budget -= num_rx_pkt;
852         return 1;
853
854         
855 }
856
857 #else
/*
 * This function checks the ownership of receive buffers and descriptors,
 * indicating up to NUM_RX_BUFFERS received packets to the kernel per call,
 * and replenishes the descriptors with new skbs.
 */
static int amd8111e_rx(struct net_device *dev)
{
	struct amd8111e_priv *lp = netdev_priv(dev);
	struct sk_buff *skb,*new_skb;
	/* Position of the next descriptor the driver expects back. */
	int rx_index = lp->rx_idx & RX_RING_DR_MOD_MASK;
	int min_pkt_len, status;
	int num_rx_pkt = 0;
	/* Upper bound on packets handled per call, to bound interrupt work. */
	int max_rx_pkt = NUM_RX_BUFFERS;
	short pkt_len;
#if AMD8111E_VLAN_TAG_USED
	short vtag;
#endif

	/* If we own the next entry, it's a new packet. Send it up. */
	while(++num_rx_pkt <= max_rx_pkt){
		/* Descriptor still owned by the controller: ring is drained.
		   NOTE(review): OWN_BIT is tested without le16_to_cpu here,
		   unlike the ERR/STP/ENP tests below — confirm this is safe
		   on big-endian machines. */
		if(lp->rx_ring[rx_index].rx_flags & OWN_BIT)
			return 0;

		/* check if err summary bit is set */
		if(le16_to_cpu(lp->rx_ring[rx_index].rx_flags) & ERR_BIT){
			/*
			 * There is a tricky error noted by John Murphy,
			 * <murf@perftech.com> to Russ Nelson: Even with
			 * full-sized buffers it's possible for a jabber packet
			 * to use two buffers, with only the last correctly
			 * noting the error.
			 */
			/* resetting flags */
			lp->rx_ring[rx_index].rx_flags &= RESET_RX_FLAGS;
			goto err_next_pkt;
		}
		/* check for STP and ENP: frame must fit in a single buffer */
		status = le16_to_cpu(lp->rx_ring[rx_index].rx_flags);
		if(!((status & STP_BIT) && (status & ENP_BIT))){
			/* resetting flags */
			lp->rx_ring[rx_index].rx_flags &= RESET_RX_FLAGS;
			goto err_next_pkt;
		}
		/* hardware byte count minus 4 (presumably the trailing FCS
		   — confirm against the controller datasheet) */
		pkt_len = le16_to_cpu(lp->rx_ring[rx_index].msg_count) - 4;

#if AMD8111E_VLAN_TAG_USED
		vtag = le16_to_cpu(lp->rx_ring[rx_index].rx_flags) & TT_MASK;
		/* MAC will strip the vlan tag, so tagged frames can be
		   4 bytes under the usual minimum */
		if(lp->vlgrp != NULL && vtag !=0)
			min_pkt_len =MIN_PKT_LEN - 4;
		else
#endif
			min_pkt_len =MIN_PKT_LEN;

		/* Runt frame: drop and count it as a driver rx error. */
		if (pkt_len < min_pkt_len) {
			lp->rx_ring[rx_index].rx_flags &= RESET_RX_FLAGS;
			lp->drv_rx_errors++;
			goto err_next_pkt;
		}
		if(!(new_skb = dev_alloc_skb(lp->rx_buff_len))){
			/* if allocation fails,
				ignore that pkt and go to next one;
				the old skb stays attached to the ring */
			lp->rx_ring[rx_index].rx_flags &= RESET_RX_FLAGS;
			lp->drv_rx_errors++;
			goto err_next_pkt;
		}

		/* Reserve 2 bytes on the replacement buffer (IP alignment). */
		skb_reserve(new_skb, 2);
		skb = lp->rx_skbuff[rx_index];
		pci_unmap_single(lp->pci_dev,lp->rx_dma_addr[rx_index],
			lp->rx_buff_len-2, PCI_DMA_FROMDEVICE);
		skb_put(skb, pkt_len);
		skb->dev = dev;
		/* Swap the received skb out for the newly allocated one. */
		lp->rx_skbuff[rx_index] = new_skb;
		new_skb->dev = dev;
		lp->rx_dma_addr[rx_index] = pci_map_single(lp->pci_dev,
			new_skb->data, lp->rx_buff_len-2,PCI_DMA_FROMDEVICE);

		skb->protocol = eth_type_trans(skb, dev);

#if AMD8111E_VLAN_TAG_USED

		vtag = lp->rx_ring[rx_index].rx_flags & TT_MASK;
		if(lp->vlgrp != NULL && (vtag == TT_VLAN_TAGGED)){
			amd8111e_vlan_rx(lp, skb,
				    lp->rx_ring[rx_index].tag_ctrl_info);
		} else
#endif

			netif_rx (skb);
			/* NOTE(review): despite the indentation, the
			   statements below are NOT part of the else branch
			   above — they execute for every delivered packet. */
			/*COAL update rx coalescing parameters*/
			lp->coal_conf.rx_packets++;
			lp->coal_conf.rx_bytes += pkt_len;

			dev->last_rx = jiffies;

err_next_pkt:
		/* Re-arm this descriptor (fresh or reused buffer) and hand
		   ownership back to the controller. */
		lp->rx_ring[rx_index].buff_phy_addr
			 = cpu_to_le32(lp->rx_dma_addr[rx_index]);
		lp->rx_ring[rx_index].buff_count =
				cpu_to_le16(lp->rx_buff_len-2);
		lp->rx_ring[rx_index].rx_flags |= cpu_to_le16(OWN_BIT);
		rx_index = (++lp->rx_idx) & RX_RING_DR_MOD_MASK;
	}

	return 0;
}
961 #endif /* CONFIG_AMD8111E_NAPI */
962 /* 
963 This function will indicate the link status to the kernel.
964 */
965 static int amd8111e_link_change(struct net_device* dev)
966 {       
967         struct amd8111e_priv *lp = netdev_priv(dev);
968         int status0,speed;
969
970         /* read the link change */
971         status0 = readl(lp->mmio + STAT0);
972         
973         if(status0 & LINK_STATS){
974                 if(status0 & AUTONEG_COMPLETE)
975                         lp->link_config.autoneg = AUTONEG_ENABLE;
976                 else 
977                         lp->link_config.autoneg = AUTONEG_DISABLE;
978
979                 if(status0 & FULL_DPLX)
980                         lp->link_config.duplex = DUPLEX_FULL;
981                 else 
982                         lp->link_config.duplex = DUPLEX_HALF;
983                 speed = (status0 & SPEED_MASK) >> 7;
984                 if(speed == PHY_SPEED_10)
985                         lp->link_config.speed = SPEED_10;
986                 else if(speed == PHY_SPEED_100)
987                         lp->link_config.speed = SPEED_100;
988
989                 printk(KERN_INFO "%s: Link is Up. Speed is %s Mbps %s Duplex\n",                        dev->name,
990                        (lp->link_config.speed == SPEED_100) ? "100": "10", 
991                        (lp->link_config.duplex == DUPLEX_FULL)? "Full": "Half"); 
992                 netif_carrier_on(dev);
993         }
994         else{   
995                 lp->link_config.speed = SPEED_INVALID;
996                 lp->link_config.duplex = DUPLEX_INVALID;
997                 lp->link_config.autoneg = AUTONEG_INVALID;
998                 printk(KERN_INFO "%s: Link is Down.\n",dev->name);
999                 netif_carrier_off(dev);
1000         }
1001                 
1002         return 0;
1003 }
1004 /*
1005 This function reads the mib counters.    
1006 */
1007 static int amd8111e_read_mib(void* mmio, u8 MIB_COUNTER)
1008 {
1009         unsigned int  status;
1010         unsigned  int data;
1011         unsigned int repeat = REPEAT_CNT;
1012
1013         writew( MIB_RD_CMD | MIB_COUNTER, mmio + MIB_ADDR);
1014         do {
1015                 status = readw(mmio + MIB_ADDR);
1016                 udelay(2);      /* controller takes MAX 2 us to get mib data */
1017         }
1018         while (--repeat && (status & MIB_CMD_ACTIVE));
1019
1020         data = readl(mmio + MIB_DATA);
1021         return data;
1022 }
1023
1024 /*
1025 This function reads the mib registers and returns the hardware statistics. It  updates previous internal driver statistics with new values.
1026 */ 
1027 static struct net_device_stats *amd8111e_get_stats(struct net_device * dev)
1028 {
1029         struct amd8111e_priv *lp = netdev_priv(dev);
1030         void * mmio = lp->mmio;
1031         unsigned long flags;
1032         /* struct net_device_stats *prev_stats = &lp->prev_stats; */
1033         struct net_device_stats* new_stats = &lp->stats;
1034         
1035         if(!lp->opened)
1036                 return &lp->stats;      
1037         spin_lock_irqsave (&lp->lock, flags);
1038
1039         /* stats.rx_packets */
1040         new_stats->rx_packets = amd8111e_read_mib(mmio, rcv_broadcast_pkts)+
1041                                 amd8111e_read_mib(mmio, rcv_multicast_pkts)+
1042                                 amd8111e_read_mib(mmio, rcv_unicast_pkts);
1043
1044         /* stats.tx_packets */
1045         new_stats->tx_packets = amd8111e_read_mib(mmio, xmt_packets);
1046
1047         /*stats.rx_bytes */
1048         new_stats->rx_bytes = amd8111e_read_mib(mmio, rcv_octets);
1049
1050         /* stats.tx_bytes */
1051         new_stats->tx_bytes = amd8111e_read_mib(mmio, xmt_octets);
1052
1053         /* stats.rx_errors */
1054         /* hw errors + errors driver reported */
1055         new_stats->rx_errors = amd8111e_read_mib(mmio, rcv_undersize_pkts)+
1056                                 amd8111e_read_mib(mmio, rcv_fragments)+
1057                                 amd8111e_read_mib(mmio, rcv_jabbers)+
1058                                 amd8111e_read_mib(mmio, rcv_alignment_errors)+
1059                                 amd8111e_read_mib(mmio, rcv_fcs_errors)+
1060                                 amd8111e_read_mib(mmio, rcv_miss_pkts)+
1061                                 lp->drv_rx_errors;
1062
1063         /* stats.tx_errors */
1064         new_stats->tx_errors = amd8111e_read_mib(mmio, xmt_underrun_pkts);
1065
1066         /* stats.rx_dropped*/
1067         new_stats->rx_dropped = amd8111e_read_mib(mmio, rcv_miss_pkts);
1068
1069         /* stats.tx_dropped*/
1070         new_stats->tx_dropped = amd8111e_read_mib(mmio,  xmt_underrun_pkts);
1071
1072         /* stats.multicast*/
1073         new_stats->multicast = amd8111e_read_mib(mmio, rcv_multicast_pkts);
1074
1075         /* stats.collisions*/
1076         new_stats->collisions = amd8111e_read_mib(mmio, xmt_collisions);
1077
1078         /* stats.rx_length_errors*/
1079         new_stats->rx_length_errors = 
1080                 amd8111e_read_mib(mmio, rcv_undersize_pkts)+
1081                 amd8111e_read_mib(mmio, rcv_oversize_pkts);
1082
1083         /* stats.rx_over_errors*/
1084         new_stats->rx_over_errors = amd8111e_read_mib(mmio, rcv_miss_pkts);
1085
1086         /* stats.rx_crc_errors*/
1087         new_stats->rx_crc_errors = amd8111e_read_mib(mmio, rcv_fcs_errors);
1088
1089         /* stats.rx_frame_errors*/
1090         new_stats->rx_frame_errors =
1091                 amd8111e_read_mib(mmio, rcv_alignment_errors);
1092
1093         /* stats.rx_fifo_errors */
1094         new_stats->rx_fifo_errors = amd8111e_read_mib(mmio, rcv_miss_pkts);
1095
1096         /* stats.rx_missed_errors */
1097         new_stats->rx_missed_errors = amd8111e_read_mib(mmio, rcv_miss_pkts);
1098
1099         /* stats.tx_aborted_errors*/
1100         new_stats->tx_aborted_errors = 
1101                 amd8111e_read_mib(mmio, xmt_excessive_collision);
1102
1103         /* stats.tx_carrier_errors*/
1104         new_stats->tx_carrier_errors = 
1105                 amd8111e_read_mib(mmio, xmt_loss_carrier);
1106
1107         /* stats.tx_fifo_errors*/
1108         new_stats->tx_fifo_errors = amd8111e_read_mib(mmio, xmt_underrun_pkts);
1109
1110         /* stats.tx_window_errors*/
1111         new_stats->tx_window_errors =
1112                 amd8111e_read_mib(mmio, xmt_late_collision);
1113
1114         /* Reset the mibs for collecting new statistics */
1115         /* writew(MIB_CLEAR, mmio + MIB_ADDR);*/
1116                 
1117         spin_unlock_irqrestore (&lp->lock, flags);
1118
1119         return new_stats;
1120 }
/* This function recalculates the interrupt coalescing mode on every
hardware-timer interrupt, according to the data rate and the packet rate.
*/
1124 static int amd8111e_calc_coalesce(struct net_device *dev)
1125 {
1126         struct amd8111e_priv *lp = netdev_priv(dev);
1127         struct amd8111e_coalesce_conf * coal_conf = &lp->coal_conf;
1128         int tx_pkt_rate;
1129         int rx_pkt_rate;
1130         int tx_data_rate;
1131         int rx_data_rate;
1132         int rx_pkt_size;
1133         int tx_pkt_size;
1134
1135         tx_pkt_rate = coal_conf->tx_packets - coal_conf->tx_prev_packets;
1136         coal_conf->tx_prev_packets =  coal_conf->tx_packets;
1137         
1138         tx_data_rate = coal_conf->tx_bytes - coal_conf->tx_prev_bytes;
1139         coal_conf->tx_prev_bytes =  coal_conf->tx_bytes;
1140         
1141         rx_pkt_rate = coal_conf->rx_packets - coal_conf->rx_prev_packets;
1142         coal_conf->rx_prev_packets =  coal_conf->rx_packets;
1143         
1144         rx_data_rate = coal_conf->rx_bytes - coal_conf->rx_prev_bytes;
1145         coal_conf->rx_prev_bytes =  coal_conf->rx_bytes;
1146         
1147         if(rx_pkt_rate < 800){
1148                 if(coal_conf->rx_coal_type != NO_COALESCE){
1149                         
1150                         coal_conf->rx_timeout = 0x0;
1151                         coal_conf->rx_event_count = 0;
1152                         amd8111e_set_coalesce(dev,RX_INTR_COAL);
1153                         coal_conf->rx_coal_type = NO_COALESCE;
1154                 }
1155         }
1156         else{
1157         
1158                 rx_pkt_size = rx_data_rate/rx_pkt_rate;
1159                 if (rx_pkt_size < 128){
1160                         if(coal_conf->rx_coal_type != NO_COALESCE){
1161                         
1162                                 coal_conf->rx_timeout = 0;
1163                                 coal_conf->rx_event_count = 0;
1164                                 amd8111e_set_coalesce(dev,RX_INTR_COAL);
1165                                 coal_conf->rx_coal_type = NO_COALESCE;
1166                         }
1167
1168                 }
1169                 else if ( (rx_pkt_size >= 128) && (rx_pkt_size < 512) ){
1170         
1171                         if(coal_conf->rx_coal_type !=  LOW_COALESCE){
1172                                 coal_conf->rx_timeout = 1;
1173                                 coal_conf->rx_event_count = 4;
1174                                 amd8111e_set_coalesce(dev,RX_INTR_COAL);
1175                                 coal_conf->rx_coal_type = LOW_COALESCE;
1176                         }
1177                 }
1178                 else if ((rx_pkt_size >= 512) && (rx_pkt_size < 1024)){
1179                         
1180                         if(coal_conf->rx_coal_type !=  MEDIUM_COALESCE){
1181                                 coal_conf->rx_timeout = 1;
1182                                 coal_conf->rx_event_count = 4;
1183                                 amd8111e_set_coalesce(dev,RX_INTR_COAL);
1184                                 coal_conf->rx_coal_type = MEDIUM_COALESCE;
1185                         }               
1186                                 
1187                 }
1188                 else if(rx_pkt_size >= 1024){
1189                         if(coal_conf->rx_coal_type !=  HIGH_COALESCE){
1190                                 coal_conf->rx_timeout = 2;
1191                                 coal_conf->rx_event_count = 3;
1192                                 amd8111e_set_coalesce(dev,RX_INTR_COAL);
1193                                 coal_conf->rx_coal_type = HIGH_COALESCE;
1194                         }               
1195                 }
1196         }
1197         /* NOW FOR TX INTR COALESC */
1198         if(tx_pkt_rate < 800){
1199                 if(coal_conf->tx_coal_type != NO_COALESCE){
1200                         
1201                         coal_conf->tx_timeout = 0x0;
1202                         coal_conf->tx_event_count = 0;
1203                         amd8111e_set_coalesce(dev,TX_INTR_COAL);
1204                         coal_conf->tx_coal_type = NO_COALESCE;
1205                 }
1206         }
1207         else{
1208         
1209                 tx_pkt_size = tx_data_rate/tx_pkt_rate;
1210                 if (tx_pkt_size < 128){
1211                 
1212                         if(coal_conf->tx_coal_type != NO_COALESCE){
1213                         
1214                                 coal_conf->tx_timeout = 0;
1215                                 coal_conf->tx_event_count = 0;
1216                                 amd8111e_set_coalesce(dev,TX_INTR_COAL);
1217                                 coal_conf->tx_coal_type = NO_COALESCE;
1218                         }
1219
1220                 }
1221                 else if ( (tx_pkt_size >= 128) && (tx_pkt_size < 512) ){
1222         
1223                         if(coal_conf->tx_coal_type !=  LOW_COALESCE){
1224                                 coal_conf->tx_timeout = 1;
1225                                 coal_conf->tx_event_count = 2;
1226                                 amd8111e_set_coalesce(dev,TX_INTR_COAL);
1227                                 coal_conf->tx_coal_type = LOW_COALESCE;
1228
1229                         }
1230                 }
1231                 else if ((tx_pkt_size >= 512) && (tx_pkt_size < 1024)){
1232                         
1233                         if(coal_conf->tx_coal_type !=  MEDIUM_COALESCE){
1234                                 coal_conf->tx_timeout = 2;
1235                                 coal_conf->tx_event_count = 5;
1236                                 amd8111e_set_coalesce(dev,TX_INTR_COAL);
1237                                 coal_conf->tx_coal_type = MEDIUM_COALESCE;
1238                         }               
1239                                 
1240                 }
1241                 else if(tx_pkt_size >= 1024){
1242                         if (tx_pkt_size >= 1024){
1243                                 if(coal_conf->tx_coal_type !=  HIGH_COALESCE){
1244                                         coal_conf->tx_timeout = 4;
1245                                         coal_conf->tx_event_count = 8;
1246                                         amd8111e_set_coalesce(dev,TX_INTR_COAL);
1247                                         coal_conf->tx_coal_type = HIGH_COALESCE;
1248                                 }               
1249                         }
1250                 }
1251         }
1252         return 0;
1253
1254 }
/*
This is the device interrupt handler. It handles transmit, receive, link change and hardware timer interrupts.
*/
static irqreturn_t amd8111e_interrupt(int irq, void *dev_id, struct pt_regs *regs)
{

	struct net_device * dev = (struct net_device *) dev_id;
	struct amd8111e_priv *lp = netdev_priv(dev);
	void * mmio = lp->mmio;
	unsigned int intr0;
	unsigned int handled = 1;

	if(dev == NULL)
		return IRQ_NONE;

	/* regs == NULL only for the netpoll path (amd8111e_poll), which
	   already runs with local interrupts disabled, so the lock is
	   skipped there. */
	if (regs) spin_lock (&lp->lock);
	/* disabling interrupt */
	writel(INTREN, mmio + CMD0);

	/* Read interrupt status */
	intr0 = readl(mmio + INT0);

	/* Process all the INT event until INTR bit is clear. */

	if (!(intr0 & INTR)){
		/* Not our interrupt (the line is shared). */
		handled = 0;
		goto err_no_interrupt;
	}

	/* Current driver processes 4 interrupts : RINT,TINT,LCINT,STINT.
	   Ack everything we observed in one write. */
	writel(intr0, mmio + INT0);

	/* Check if Receive Interrupt has occurred. */
	/* NOTE(review): '#ifdef CONFIG_AMD8111E_NAPI' is the conventional
	   test for a kernel config symbol; '#if' only works because an
	   undefined macro evaluates to 0 in preprocessor arithmetic. */
#if CONFIG_AMD8111E_NAPI
	if(intr0 & RINT0){
		if(netif_rx_schedule_prep(dev)){
			/* Disable receive interupts */
			writel(RINTEN0, mmio + INTEN0);
			/* Schedule a polling routine */
			__netif_rx_schedule(dev);
		}
		else {
			printk("************Driver bug! \
				interrupt while in poll\n");
			/* Fix by disabling interrupts */
			writel(RINT0, mmio + INT0);
		}
	}
#else
	if(intr0 & RINT0){
		amd8111e_rx(dev);
		/* Kick receive DMA so it picks up the re-armed ring. */
		writel(VAL2 | RDMD0, mmio + CMD0);
	}
#endif /* CONFIG_AMD8111E_NAPI */
	/* Check if  Transmit Interrupt has occurred. */
	if(intr0 & TINT0)
		amd8111e_tx(dev);

	/* Check if  Link Change Interrupt has occurred. */
	if (intr0 & LCINT)
		amd8111e_link_change(dev);

	/* Check if Hardware Timer Interrupt has occurred. */
	if (intr0 & STINT)
		amd8111e_calc_coalesce(dev);

err_no_interrupt:
	/* Re-enable device interrupts. */
	writel( VAL0 | INTREN,mmio + CMD0);

	if (regs) spin_unlock(&lp->lock);

	return IRQ_RETVAL(handled);
}
1328
1329 #ifdef CONFIG_NET_POLL_CONTROLLER
1330 static void amd8111e_poll(struct net_device *dev)
1331
1332         unsigned long flags;
1333         local_save_flags(flags); 
1334         local_irq_disable();
1335         amd8111e_interrupt(0, dev, NULL);
1336         local_irq_restore(flags); 
1337
1338 #endif
1339
1340
1341 /*
1342 This function closes the network interface and updates the statistics so that most recent statistics will be available after the interface is down.
1343 */
static int amd8111e_close(struct net_device * dev)
{
	struct amd8111e_priv *lp = netdev_priv(dev);
	/* Stop the stack from queueing further transmits first. */
	netif_stop_queue(dev);

	spin_lock_irq(&lp->lock);

	/* Quiesce the hardware before tearing down the rings. */
	amd8111e_disable_interrupt(lp);
	amd8111e_stop_chip(lp);
	amd8111e_free_ring(lp);

	netif_carrier_off(lp->amd8111e_net_dev);

	/* Delete ipg timer.
	   NOTE(review): del_timer_sync() while holding lp->lock can
	   deadlock if the timer handler ever takes the same lock —
	   confirm the ipg timer handler does not. */
	if(lp->options & OPTION_DYN_IPG_ENABLE)
		del_timer_sync(&lp->ipg_data.ipg_timer);

	spin_unlock_irq(&lp->lock);
	free_irq(dev->irq, dev);

	/* Update the statistics before closing; this must run while
	   lp->opened is still set so live MIB counters are read. */
	amd8111e_get_stats(dev);
	lp->opened = 0;
	return 0;
}
/* This function opens a new interface. It requests an irq for the device, initializes the device, buffers and descriptors, and starts the device.
*/
static int amd8111e_open(struct net_device * dev )
{
	struct amd8111e_priv *lp = netdev_priv(dev);

	/* The irq line is shared (SA_SHIRQ); fail if no irq was assigned
	   or the request is refused. */
	if(dev->irq ==0 || request_irq(dev->irq, amd8111e_interrupt, SA_SHIRQ,
					 dev->name, dev))
		return -EAGAIN;

	spin_lock_irq(&lp->lock);

	/* Program default hardware register values before (re)starting. */
	amd8111e_init_hw_default(lp);

	/* Allocate rings and start the chip.
	   NOTE(review): on failure the irq requested above is not freed
	   on this path — confirm whether that leak is intended. */
	if(amd8111e_restart(dev)){
		spin_unlock_irq(&lp->lock);
		return -ENOMEM;
	}
	/* Start ipg timer */
	if(lp->options & OPTION_DYN_IPG_ENABLE){
		add_timer(&lp->ipg_data.ipg_timer);
		printk(KERN_INFO "%s: Dynamic IPG Enabled.\n",dev->name);
	}

	/* Mark the device open so amd8111e_get_stats reads live MIB
	   counters instead of the cached copy. */
	lp->opened = 1;

	spin_unlock_irq(&lp->lock);

	netif_start_queue(dev);

	return 0;
}
/*
This function checks if there is any transmit descriptor available to queue more packets.
*/
1404 static int amd8111e_tx_queue_avail(struct amd8111e_priv* lp )
1405 {       
1406         int tx_index = lp->tx_idx & TX_BUFF_MOD_MASK;
1407         if(lp->tx_skbuff[tx_index] != 0)
1408                 return -1;
1409         else
1410                 return 0;
1411         
1412 }
1413 /* 
1414 This function will queue the transmit packets to the descriptors and will trigger the send operation. It also initializes the transmit descriptors with buffer physical address, byte count, ownership to hardware etc.
1415 */
1416
static int amd8111e_start_xmit(struct sk_buff *skb, struct net_device * dev)
{
	struct amd8111e_priv *lp = netdev_priv(dev);
	int tx_index;
	unsigned long flags;

	spin_lock_irqsave(&lp->lock, flags);

	tx_index = lp->tx_idx & TX_RING_DR_MOD_MASK;

	lp->tx_ring[tx_index].buff_count = cpu_to_le16(skb->len);

	/* Remember the skb so the tx-complete path can unmap and free it. */
	lp->tx_skbuff[tx_index] = skb;
	lp->tx_ring[tx_index].tx_flags = 0;

#if AMD8111E_VLAN_TAG_USED
	/* Ask the MAC to insert the vlan tag on the wire. */
	if((lp->vlgrp != NULL) && vlan_tx_tag_present(skb)){
		lp->tx_ring[tx_index].tag_ctrl_cmd |=
				cpu_to_le32(TCC_VLAN_INSERT);
		lp->tx_ring[tx_index].tag_ctrl_info =
				cpu_to_le16(vlan_tx_tag_get(skb));

	}
#endif
	lp->tx_dma_addr[tx_index] =
	    pci_map_single(lp->pci_dev, skb->data, skb->len, PCI_DMA_TODEVICE);
	lp->tx_ring[tx_index].buff_phy_addr =
	    (u32) cpu_to_le32(lp->tx_dma_addr[tx_index]);

	/*  Set FCS and LTINT bits.  OWN_BIT is set only here, after the
	    rest of the descriptor is populated, so the hardware never sees
	    a half-built descriptor. */
	lp->tx_ring[tx_index].tx_flags |=
	    cpu_to_le16(OWN_BIT | STP_BIT | ENP_BIT|ADD_FCS_BIT|LTINT_BIT);

	lp->tx_idx++;

	/* Trigger an immediate send poll. */
	writel( VAL1 | TDMD0, lp->mmio + CMD0);
	writel( VAL2 | RDMD0,lp->mmio + CMD0);

	dev->trans_start = jiffies;

	/* Throttle the stack while the next descriptor is still in use. */
	if(amd8111e_tx_queue_avail(lp) < 0){
		netif_stop_queue(dev);
	}
	spin_unlock_irqrestore(&lp->lock, flags);
	return 0;
}
1464 /*
1465 This function returns all the memory mapped registers of the device.
1466 */
1467 static char* amd8111e_read_regs(struct amd8111e_priv* lp)
1468 {       
1469         void * mmio = lp->mmio;
1470         u32 * reg_buff;
1471
1472         reg_buff = kmalloc( AMD8111E_REG_DUMP_LEN,GFP_KERNEL);
1473         if(NULL == reg_buff)
1474                 return NULL;
1475
1476         /* Read only necessary registers */
1477         reg_buff[0] = readl(mmio + XMT_RING_BASE_ADDR0);
1478         reg_buff[1] = readl(mmio + XMT_RING_LEN0);
1479         reg_buff[2] = readl(mmio + RCV_RING_BASE_ADDR0);
1480         reg_buff[3] = readl(mmio + RCV_RING_LEN0);
1481         reg_buff[4] = readl(mmio + CMD0);
1482         reg_buff[5] = readl(mmio + CMD2);
1483         reg_buff[6] = readl(mmio + CMD3);
1484         reg_buff[7] = readl(mmio + CMD7);
1485         reg_buff[8] = readl(mmio + INT0);
1486         reg_buff[9] = readl(mmio + INTEN0);
1487         reg_buff[10] = readl(mmio + LADRF);
1488         reg_buff[11] = readl(mmio + LADRF+4);
1489         reg_buff[12] = readl(mmio + STAT0);
1490
1491         return (char *)reg_buff;
1492 }
1493 /*
1494 amd8111e crc generator implementation is different from the kernel
1495 ether_crc() function.
1496 */
1497 int amd8111e_ether_crc(int len, char* mac_addr)
1498 {
1499         int i,byte;
1500         unsigned char octet;
1501         u32 crc= INITCRC;
1502
1503         for(byte=0; byte < len; byte++){
1504                 octet = mac_addr[byte];
1505                 for( i=0;i < 8; i++){
1506                         /*If the next bit form the input stream is 1,subtract                            the divisor (CRC32) from the dividend(crc).*/
1507                         if( (octet & 0x1) ^ (crc & 0x1) ){
1508                                 crc >>= 1;
1509                                 crc ^= CRC32;
1510                         }
1511                         else
1512                                 crc >>= 1;
1513                         
1514                         octet >>= 1;
1515                 }
1516         }       
1517         return crc; 
1518 }
/*
This function sets promiscuous mode, all-multi mode or the multicast address
list to the device.
*/
static void amd8111e_set_multicast_list(struct net_device *dev)
{
	struct dev_mc_list* mc_ptr;
	struct amd8111e_priv *lp = netdev_priv(dev);
	u32 mc_filter[2] ;
	int i,bit_num;
	/* Promiscuous: accept everything; the hash filter is irrelevant. */
	if(dev->flags & IFF_PROMISC){
		printk(KERN_INFO "%s: Setting  promiscuous mode.\n",dev->name);
		writel( VAL2 | PROM, lp->mmio + CMD2);
		return;
	}
	else
		/* Clear the PROM bit (write without VAL2). */
		writel( PROM, lp->mmio + CMD2);
	if(dev->flags & IFF_ALLMULTI || dev->mc_count > MAX_FILTER_SIZE){
		/* get all multicast packet: open the hash filter fully */
		mc_filter[1] = mc_filter[0] = 0xffffffff;
		lp->mc_list = dev->mc_list;
		lp->options |= OPTION_MULTICAST_ENABLE;
		/* NOTE(review): *(u64*)mc_filter type-puns the u32 pair;
		   this relies on the compiler not exploiting strict
		   aliasing and on a matching endianness layout — confirm. */
		amd8111e_writeq(*(u64*)mc_filter,lp->mmio + LADRF);
		return;
	}
	if( dev->mc_count == 0 ){
		/* get only own packets: clear the whole hash filter */
		mc_filter[1] = mc_filter[0] = 0;
		lp->mc_list = 0;
		lp->options &= ~OPTION_MULTICAST_ENABLE;
		amd8111e_writeq(*(u64*)mc_filter,lp->mmio + LADRF);
		/* disable promiscuous mode */
		writel(PROM, lp->mmio + CMD2);
		return;
	}
	/* load all the multicast addresses in the logic filter: each
	   address is hashed via the controller's CRC into one of the
	   64 filter bits */
	lp->options |= OPTION_MULTICAST_ENABLE;
	lp->mc_list = dev->mc_list;
	mc_filter[1] = mc_filter[0] = 0;
	for (i = 0, mc_ptr = dev->mc_list; mc_ptr && i < dev->mc_count;
		     i++, mc_ptr = mc_ptr->next) {
		bit_num = ( amd8111e_ether_crc(ETH_ALEN,mc_ptr->dmi_addr)							>> 26 ) & 0x3f;
		mc_filter[bit_num >> 5] |= 1 << (bit_num & 31);
	}
	amd8111e_writeq(*(u64*)mc_filter,lp->mmio+ LADRF);

	/* Read back to flush posted PCI writes. */
	readl(lp->mmio + CMD2);

}
1569
1570 /*
1571 This function handles all the  ethtool ioctls. It gives driver info, gets/sets driver speed, gets memory mapped register values, forces auto negotiation, sets/gets WOL options for ethtool application. 
1572 */
1573         
/*
 * amd8111e_ethtool_ioctl - dispatch one SIOCETHTOOL sub-command.
 * @dev:      net device the ioctl was issued against
 * @useraddr: user-space buffer; first u32 selects the sub-command
 *
 * Returns 0 on success, -EINVAL/-EFAULT/-ENOMEM on error, or
 * -EOPNOTSUPP for sub-commands this driver does not implement.
 */
static int amd8111e_ethtool_ioctl(struct net_device* dev, void __user *useraddr)
{
	struct amd8111e_priv *lp = netdev_priv(dev);
	struct pci_dev *pci_dev = lp->pci_dev;
	u32 ethcmd;
	
	if( useraddr == NULL) 
		return -EINVAL;
	/* The first word of the user buffer is the ethtool command code. */
	if(copy_from_user (&ethcmd, useraddr, sizeof (ethcmd)))
		return -EFAULT;
	
	switch(ethcmd){
	
	/* report driver name, version, bus info and register-dump size */
	case ETHTOOL_GDRVINFO:{
		struct ethtool_drvinfo info = { ETHTOOL_GDRVINFO };
		strcpy (info.driver, MODULE_NAME);
		strcpy (info.version, MODULE_VERS);
		/* no real firmware; report the chip revision instead */
		memset(&info.fw_version, 0, sizeof(info.fw_version));
		sprintf(info.fw_version,"%u",chip_version);
		strcpy (info.bus_info, pci_name(pci_dev));
		info.eedump_len = 0;
		info.regdump_len = AMD8111E_REG_DUMP_LEN;
		if (copy_to_user (useraddr, &info, sizeof(info)))
			return -EFAULT;
		return 0;
	}
	/* get settings */
	case ETHTOOL_GSET: {
		struct ethtool_cmd ecmd = { ETHTOOL_GSET };
		/* MII state is shared with the interrupt path; hold the lock */
		spin_lock_irq(&lp->lock);
		mii_ethtool_gset(&lp->mii_if, &ecmd);
		spin_unlock_irq(&lp->lock);
		if (copy_to_user(useraddr, &ecmd, sizeof(ecmd)))
			return -EFAULT;
		return 0;
	}
	/* set settings */
	case ETHTOOL_SSET: {
		int r;
		struct ethtool_cmd ecmd;
		if (copy_from_user(&ecmd, useraddr, sizeof(ecmd)))
			return -EFAULT;

		spin_lock_irq(&lp->lock);
		r = mii_ethtool_sset(&lp->mii_if, &ecmd);
		spin_unlock_irq(&lp->lock);
		return r;
	}
	/* dump the memory-mapped registers into the user buffer */
	case ETHTOOL_GREGS: {
		struct ethtool_regs regs;
		u8 *regbuf;
		int ret;

		if (copy_from_user(&regs, useraddr, sizeof(regs)))
			return -EFAULT;
		/* clamp the requested length to what we can provide */
		if (regs.len > AMD8111E_REG_DUMP_LEN)
			regs.len = AMD8111E_REG_DUMP_LEN;
		regs.version = 0;
		/* write back the (possibly clamped) header first */
		if (copy_to_user(useraddr, &regs, sizeof(regs)))
			return -EFAULT;

		/* amd8111e_read_regs() allocates; we own and free regbuf */
		regbuf = amd8111e_read_regs(lp);
		if (!regbuf)
			return -ENOMEM;

		useraddr += offsetof(struct ethtool_regs, data);
		ret = 0;
		if (copy_to_user(useraddr, regbuf, regs.len))
			ret = -EFAULT;
		kfree(regbuf);
		return ret;
	}
	/* restart autonegotiation */
	case ETHTOOL_NWAY_RST: {
		return mii_nway_restart(&lp->mii_if);
	}
	/* get link status */
	case ETHTOOL_GLINK: {
		struct ethtool_value val = {ETHTOOL_GLINK};
		val.data = mii_link_ok(&lp->mii_if);
		if (copy_to_user(useraddr, &val, sizeof(val)))
			return -EFAULT;
		return 0;
	}
	/* report Wake-on-LAN capability and current setting */
	case ETHTOOL_GWOL: {
		struct ethtool_wolinfo wol_info = { ETHTOOL_GWOL };

		wol_info.supported = WAKE_MAGIC|WAKE_PHY;
		wol_info.wolopts = 0;
		if (lp->options & OPTION_WOL_ENABLE)
			wol_info.wolopts = WAKE_MAGIC;
		memset(&wol_info.sopass, 0, sizeof(wol_info.sopass));
		if (copy_to_user(useraddr, &wol_info, sizeof(wol_info)))
			return -EFAULT;
		return 0;
	}
	/* set Wake-on-LAN options; only MAGIC and PHY wake are supported.
	 * Note: if both are requested, WAKE_MAGIC takes precedence. */
	case ETHTOOL_SWOL: {
		struct ethtool_wolinfo wol_info;

		if (copy_from_user(&wol_info, useraddr, sizeof(wol_info)))
			return -EFAULT;
		if (wol_info.wolopts & ~(WAKE_MAGIC |WAKE_PHY))
			return -EINVAL;
		spin_lock_irq(&lp->lock);
		if(wol_info.wolopts & WAKE_MAGIC)
			lp->options |= 
				(OPTION_WOL_ENABLE | OPTION_WAKE_MAGIC_ENABLE);
		else if(wol_info.wolopts & WAKE_PHY)
			lp->options |= 
				(OPTION_WOL_ENABLE | OPTION_WAKE_PHY_ENABLE);
		else
			lp->options &= ~OPTION_WOL_ENABLE; 
		spin_unlock_irq(&lp->lock);
		return 0;
	}
	
	default:
		break;
	}
		return -EOPNOTSUPP;
}
1695 static int amd8111e_ioctl(struct net_device * dev , struct ifreq *ifr, int cmd)
1696 {
1697         struct mii_ioctl_data *data = if_mii(ifr);
1698         struct amd8111e_priv *lp = netdev_priv(dev);
1699         int err;
1700         u32 mii_regval;
1701
1702         if (!capable(CAP_NET_ADMIN))
1703                 return -EPERM;
1704
1705         switch(cmd) {
1706         case SIOCETHTOOL:
1707                 return amd8111e_ethtool_ioctl(dev, ifr->ifr_data);
1708         case SIOCGMIIPHY:
1709                 data->phy_id = PHY_ID;
1710
1711         /* fallthru */
1712         case SIOCGMIIREG: 
1713
1714                 spin_lock_irq(&lp->lock);
1715                 err = amd8111e_read_phy(lp, data->phy_id,
1716                         data->reg_num & PHY_REG_ADDR_MASK, &mii_regval);
1717                 spin_unlock_irq(&lp->lock);
1718
1719                 data->val_out = mii_regval;
1720                 return err;
1721
1722         case SIOCSMIIREG:
1723
1724                 spin_lock_irq(&lp->lock);
1725                 err = amd8111e_write_phy(lp, data->phy_id,
1726                         data->reg_num & PHY_REG_ADDR_MASK, data->val_in);
1727                 spin_unlock_irq(&lp->lock);
1728
1729                 return err;
1730
1731         default:
1732                 /* do nothing */
1733                 break;
1734         }
1735         return -EOPNOTSUPP;
1736 }
1737 static int amd8111e_set_mac_address(struct net_device *dev, void *p)
1738 {
1739         struct amd8111e_priv *lp = dev->priv;
1740         int i;
1741         struct sockaddr *addr = p;
1742
1743         memcpy(dev->dev_addr, addr->sa_data, dev->addr_len);
1744         spin_lock_irq(&lp->lock);
1745         /* Setting the MAC address to the device */
1746         for(i = 0; i < ETH_ADDR_LEN; i++)
1747                 writeb( dev->dev_addr[i], lp->mmio + PADR + i ); 
1748                 
1749         spin_unlock_irq(&lp->lock);
1750
1751         return 0;
1752 }
1753
/*
 * This function changes the MTU of the device. It restarts the device to
 * re-initialize the descriptor rings with receive buffers of the new size.
 */
1757 int amd8111e_change_mtu(struct net_device *dev, int new_mtu)
1758 {
1759         struct amd8111e_priv *lp = netdev_priv(dev);
1760         int err;
1761
1762         if ((new_mtu < AMD8111E_MIN_MTU) || (new_mtu > AMD8111E_MAX_MTU))
1763                 return -EINVAL;
1764
1765         if (!netif_running(dev)) {
1766                 /* new_mtu will be used
1767                    when device starts netxt time */ 
1768                 dev->mtu = new_mtu;
1769                 return 0;
1770         }
1771
1772         spin_lock_irq(&lp->lock);
1773
1774         /* stop the chip */
1775         writel(RUN, lp->mmio + CMD0);
1776
1777         dev->mtu = new_mtu;
1778
1779         err = amd8111e_restart(dev);
1780         spin_unlock_irq(&lp->lock);
1781         if(!err)
1782                 netif_start_queue(dev);
1783         return err;
1784 }
1785
1786 #if AMD8111E_VLAN_TAG_USED
1787 static void amd8111e_vlan_rx_register(struct net_device *dev, struct vlan_group *grp)
1788 {
1789         struct  amd8111e_priv *lp = netdev_priv(dev);
1790         spin_lock_irq(&lp->lock);
1791         lp->vlgrp = grp;
1792         spin_unlock_irq(&lp->lock);
1793 }
1794         
1795 static void amd8111e_vlan_rx_kill_vid(struct net_device *dev, unsigned short vid)
1796 {
1797         struct amd8111e_priv *lp = netdev_priv(dev);
1798         spin_lock_irq(&lp->lock);
1799         if (lp->vlgrp)
1800                 lp->vlgrp->vlan_devices[vid] = NULL;
1801         spin_unlock_irq(&lp->lock);
1802 }
1803 #endif
/*
 * amd8111e_enable_magicpkt - arm magic-packet Wake-on-LAN in hardware.
 * Sets MPPLBA in CMD3 and MPEN_SW in CMD7 (VAL1/VAL0 are the write-enable
 * bits for the respective register halves). Called with the chip already
 * stopped, just before suspend. Always returns 0.
 */
static int amd8111e_enable_magicpkt(struct amd8111e_priv* lp)
{
	writel( VAL1|MPPLBA, lp->mmio + CMD3);
	writel( VAL0|MPEN_SW, lp->mmio + CMD7);

	/* To eliminate PCI posting bug */
	readl(lp->mmio + CMD7);
	return 0;
}
1813
/*
 * amd8111e_enable_link_change - arm wake-on-link-change in hardware.
 * Sets LCMODE_SW in CMD7 so a PHY link transition can wake the system.
 * Always returns 0.
 */
static int amd8111e_enable_link_change(struct amd8111e_priv* lp)
{

	/* Adapter is already stoped/suspended/interrupt-disabled */
	writel(VAL0|LCMODE_SW,lp->mmio + CMD7);
	
	/* To eliminate PCI posting bug */
	readl(lp->mmio + CMD7);
	return 0;
}	
/* This function is called when a packet transmission fails to complete
 * within a reasonable period, on the assumption that interrupts have
 * failed or the interface is locked up. This function will reinitialize
 * the hardware. */
1826 static void amd8111e_tx_timeout(struct net_device *dev)
1827 {
1828         struct amd8111e_priv* lp = netdev_priv(dev);
1829         int err;
1830
1831         printk(KERN_ERR "%s: transmit timed out, resetting\n",
1832                                                       dev->name);
1833         spin_lock_irq(&lp->lock);
1834         err = amd8111e_restart(dev);
1835         spin_unlock_irq(&lp->lock);
1836         if(!err)
1837                 netif_wake_queue(dev);
1838 }
/*
 * amd8111e_suspend - PCI power-management suspend hook.
 * Quiesces the interface (interrupts off, queue detached, chip stopped),
 * arms Wake-on-LAN if the user enabled it, then saves PCI state and
 * drops the device into D3. Returns 0.
 */
static int amd8111e_suspend(struct pci_dev *pci_dev, u32 state)
{	
	struct net_device *dev = pci_get_drvdata(pci_dev);
	struct amd8111e_priv *lp = netdev_priv(dev);
	
	if (!netif_running(dev))
		return 0;

	/* disable the interrupt */
	spin_lock_irq(&lp->lock);
	amd8111e_disable_interrupt(lp);
	spin_unlock_irq(&lp->lock);

	netif_device_detach(dev);
	
	/* stop chip */
	spin_lock_irq(&lp->lock);
	/* the dynamic-IPG timer rearms itself; kill it before the MAC stops.
	 * NOTE(review): del_timer_sync() with this spinlock held and irqs off
	 * relies on the timer handler never taking lp->lock — verify. */
	if(lp->options & OPTION_DYN_IPG_ENABLE)		
		del_timer_sync(&lp->ipg_data.ipg_timer);
	amd8111e_stop_chip(lp);
	spin_unlock_irq(&lp->lock);

	if(lp->options & OPTION_WOL_ENABLE){
		 /* enable wol */
		if(lp->options & OPTION_WAKE_MAGIC_ENABLE)
			amd8111e_enable_magicpkt(lp);	
		if(lp->options & OPTION_WAKE_PHY_ENABLE)
			amd8111e_enable_link_change(lp);	
		
		/* allow wake from D3hot and D3cold */
		pci_enable_wake(pci_dev, 3, 1);
		pci_enable_wake(pci_dev, 4, 1); /* D3 cold */

	}
	else{		
		pci_enable_wake(pci_dev, 3, 0);
		pci_enable_wake(pci_dev, 4, 0); /* 4 == D3 cold */
	}
	
	pci_save_state(pci_dev, lp->pm_state);
	/* enter D3hot */
	pci_set_power_state(pci_dev, 3);

	return 0;
}
/*
 * amd8111e_resume - PCI power-management resume hook.
 * Restores PCI state, disarms wake events, reattaches the interface and
 * restarts the chip (and the dynamic-IPG timer, if enabled). Returns 0.
 */
static int amd8111e_resume(struct pci_dev *pci_dev)
{
	struct net_device *dev = pci_get_drvdata(pci_dev);
	struct amd8111e_priv *lp = netdev_priv(dev);
	
	if (!netif_running(dev))
		return 0;

	/* back to D0 and restore config space saved at suspend */
	pci_set_power_state(pci_dev, 0);
	pci_restore_state(pci_dev, lp->pm_state);

	pci_enable_wake(pci_dev, 3, 0);
	pci_enable_wake(pci_dev, 4, 0); /* D3 cold */

	netif_device_attach(dev);

	spin_lock_irq(&lp->lock);
	amd8111e_restart(dev);
	/* Restart ipg timer */
	if(lp->options & OPTION_DYN_IPG_ENABLE)		
		mod_timer(&lp->ipg_data.ipg_timer, 
				jiffies + IPG_CONVERGE_JIFFIES);
	spin_unlock_irq(&lp->lock);

	return 0;
}
1908
1909
1910 static void __devexit amd8111e_remove_one(struct pci_dev *pdev)
1911 {
1912         struct net_device *dev = pci_get_drvdata(pdev);
1913         if (dev) {
1914                 unregister_netdev(dev);
1915                 iounmap((void *) ((struct amd8111e_priv *)(dev->priv))->mmio);
1916                 free_netdev(dev);
1917                 pci_release_regions(pdev);
1918                 pci_disable_device(pdev);
1919                 pci_set_drvdata(pdev, NULL);
1920         }
1921 }
/*
 * amd8111e_config_ipg - dynamic inter-packet-gap tuning (timer callback).
 * Runs every IPG_CONVERGE_JIFFIES. In half-duplex mode it sweeps the IPG
 * from MIN_IPG to MAX_IPG (CSTATE), keeping the value that produced the
 * fewest new collisions, then holds it for IPG_STABLE_TIME ticks (SSTATE)
 * before sweeping again. Full duplex has no collisions, so the default
 * IPG is used and the sweep is skipped.
 */
static void amd8111e_config_ipg(struct net_device* dev)
{
	struct amd8111e_priv *lp = netdev_priv(dev);
	struct ipg_info* ipg_data = &lp->ipg_data;
	void * mmio = lp->mmio;
	unsigned int prev_col_cnt = ipg_data->col_cnt;
	unsigned int total_col_cnt;
	unsigned int tmp_ipg;
	
	if(lp->link_config.duplex == DUPLEX_FULL){
		ipg_data->ipg = DEFAULT_IPG;
		return;
	}

	/* stable state: count ticks; after IPG_STABLE_TIME start a new sweep */
	if(ipg_data->ipg_state == SSTATE){
		
		if(ipg_data->timer_tick == IPG_STABLE_TIME){
			
			ipg_data->timer_tick = 0;
			/* prime the sweep: first CSTATE pass steps up to MIN_IPG */
			ipg_data->ipg = MIN_IPG - IPG_STEP;
			ipg_data->current_ipg = MIN_IPG;
			ipg_data->diff_col_cnt = 0xFFFFFFFF;
			ipg_data->ipg_state = CSTATE;
		}
		else
			ipg_data->timer_tick++;
	}

	/* converge state: try the next IPG value, remember the best one */
	if(ipg_data->ipg_state == CSTATE){
		
		/* Get the current collision count */

		total_col_cnt = ipg_data->col_cnt = 
				amd8111e_read_mib(mmio, xmt_collisions);

		/* fewer collisions than the best so far? keep this IPG */
		if ((total_col_cnt - prev_col_cnt) < 
				(ipg_data->diff_col_cnt)){
			
			ipg_data->diff_col_cnt =
				total_col_cnt - prev_col_cnt ;

			ipg_data->ipg = ipg_data->current_ipg;
		}

		ipg_data->current_ipg += IPG_STEP;

		if (ipg_data->current_ipg <= MAX_IPG)
			tmp_ipg = ipg_data->current_ipg;
		else{
			/* sweep done: program the winner and go stable */
			tmp_ipg = ipg_data->ipg;
			ipg_data->ipg_state = SSTATE;
		}
		writew((u32)tmp_ipg, mmio + IPG); 
		writew((u32)(tmp_ipg - IFS1_DELTA), mmio + IFS1); 
	}
	 mod_timer(&lp->ipg_data.ipg_timer, jiffies + IPG_CONVERGE_JIFFIES);
	return;

}
1981
1982 static int __devinit amd8111e_probe_one(struct pci_dev *pdev,
1983                                   const struct pci_device_id *ent)
1984 {
1985         int err,i,pm_cap;
1986         unsigned long reg_addr,reg_len;
1987         struct amd8111e_priv* lp;
1988         struct net_device* dev;
1989
1990         err = pci_enable_device(pdev);
1991         if(err){
1992                 printk(KERN_ERR "amd8111e: Cannot enable new PCI device,"
1993                         "exiting.\n");
1994                 return err;
1995         }
1996
1997         if(!(pci_resource_flags(pdev, 0) & IORESOURCE_MEM)){
1998                 printk(KERN_ERR "amd8111e: Cannot find PCI base address"
1999                        "exiting.\n");
2000                 err = -ENODEV;
2001                 goto err_disable_pdev;
2002         }
2003
2004         err = pci_request_regions(pdev, MODULE_NAME);
2005         if(err){
2006                 printk(KERN_ERR "amd8111e: Cannot obtain PCI resources, "
2007                        "exiting.\n");
2008                 goto err_disable_pdev;
2009         }
2010
2011         pci_set_master(pdev);
2012
2013         /* Find power-management capability. */
2014         if((pm_cap = pci_find_capability(pdev, PCI_CAP_ID_PM))==0){
2015                 printk(KERN_ERR "amd8111e: No Power Management capability, "
2016                        "exiting.\n");
2017                 goto err_free_reg;
2018         }
2019
2020         /* Initialize DMA */
2021         if(!pci_dma_supported(pdev, 0xffffffff)){
2022                 printk(KERN_ERR "amd8111e: DMA not supported,"
2023                         "exiting.\n");
2024                 goto  err_free_reg;
2025         } else
2026                 pdev->dma_mask = 0xffffffff;
2027         
2028         reg_addr = pci_resource_start(pdev, 0);
2029         reg_len = pci_resource_len(pdev, 0);
2030
2031         dev = alloc_etherdev(sizeof(struct amd8111e_priv));
2032         if (!dev) {
2033                 printk(KERN_ERR "amd8111e: Etherdev alloc failed, exiting.\n");
2034                 err = -ENOMEM;
2035                 goto err_free_reg;
2036         }
2037
2038         SET_MODULE_OWNER(dev);
2039         SET_NETDEV_DEV(dev, &pdev->dev);
2040
2041 #if AMD8111E_VLAN_TAG_USED
2042         dev->features |= NETIF_F_HW_VLAN_TX | NETIF_F_HW_VLAN_RX ;
2043         dev->vlan_rx_register =amd8111e_vlan_rx_register;
2044         dev->vlan_rx_kill_vid = amd8111e_vlan_rx_kill_vid;
2045 #endif  
2046         
2047         lp = netdev_priv(dev);
2048         lp->pci_dev = pdev;
2049         lp->amd8111e_net_dev = dev;
2050         lp->pm_cap = pm_cap;
2051
2052         /* setting mii default values */
2053         lp->mii_if.dev = dev;
2054         lp->mii_if.mdio_read = amd8111e_mdio_read;
2055         lp->mii_if.mdio_write = amd8111e_mdio_write;
2056         lp->mii_if.phy_id = PHY_ID;
2057
2058         spin_lock_init(&lp->lock);
2059
2060         lp->mmio = ioremap(reg_addr, reg_len);
2061         if (lp->mmio == 0) {
2062                 printk(KERN_ERR "amd8111e: Cannot map device registers, "
2063                        "exiting\n");
2064                 err = -ENOMEM;
2065                 goto err_free_dev;
2066         }
2067         
2068         /* Initializing MAC address */
2069         for(i = 0; i < ETH_ADDR_LEN; i++)
2070                         dev->dev_addr[i] =readb(lp->mmio + PADR + i);
2071         
2072         /* Setting user defined parametrs */
2073         lp->ext_phy_option = speed_duplex[card_idx];
2074         if(coalesce[card_idx])
2075                 lp->options |= OPTION_INTR_COAL_ENABLE;         
2076         if(dynamic_ipg[card_idx++])
2077                 lp->options |= OPTION_DYN_IPG_ENABLE;                   
2078
2079         /* Initialize driver entry points */
2080         dev->open = amd8111e_open;
2081         dev->hard_start_xmit = amd8111e_start_xmit;
2082         dev->stop = amd8111e_close;
2083         dev->get_stats = amd8111e_get_stats;
2084         dev->set_multicast_list = amd8111e_set_multicast_list;
2085         dev->set_mac_address = amd8111e_set_mac_address;
2086         dev->do_ioctl = amd8111e_ioctl;
2087         dev->change_mtu = amd8111e_change_mtu;
2088         dev->irq =pdev->irq;
2089         dev->tx_timeout = amd8111e_tx_timeout; 
2090         dev->watchdog_timeo = AMD8111E_TX_TIMEOUT; 
2091 #ifdef CONFIG_AMD8111E_NAPI
2092         dev->poll = amd8111e_rx_poll;
2093         dev->weight = 32;
2094 #endif
2095 #ifdef CONFIG_NET_POLL_CONTROLLER
2096         dev->poll_controller = amd8111e_poll; 
2097 #endif
2098
2099 #if AMD8111E_VLAN_TAG_USED
2100         dev->features |= NETIF_F_HW_VLAN_TX | NETIF_F_HW_VLAN_RX;
2101         dev->vlan_rx_register =amd8111e_vlan_rx_register;
2102         dev->vlan_rx_kill_vid = amd8111e_vlan_rx_kill_vid;
2103 #endif  
2104         
2105         /* Set receive buffer length and set jumbo option*/
2106         amd8111e_set_rx_buff_len(dev);
2107
2108
2109         err = register_netdev(dev);
2110         if (err) {
2111                 printk(KERN_ERR "amd8111e: Cannot register net device, "
2112                        "exiting.\n");
2113                 goto err_iounmap;
2114         }
2115
2116         pci_set_drvdata(pdev, dev);
2117         
2118         /* Initialize software ipg timer */
2119         if(lp->options & OPTION_DYN_IPG_ENABLE){                
2120                 init_timer(&lp->ipg_data.ipg_timer);
2121                 lp->ipg_data.ipg_timer.data = (unsigned long) dev;
2122                 lp->ipg_data.ipg_timer.function = (void *)&amd8111e_config_ipg;
2123                 lp->ipg_data.ipg_timer.expires = jiffies + 
2124                                                  IPG_CONVERGE_JIFFIES;
2125                 lp->ipg_data.ipg = DEFAULT_IPG;
2126                 lp->ipg_data.ipg_state = CSTATE;
2127         };
2128
2129         /*  display driver and device information */
2130
2131         chip_version = (readl(lp->mmio + CHIPID) & 0xf0000000)>>28;
2132         printk(KERN_INFO "%s: AMD-8111e Driver Version: %s\n",                                                           dev->name,MODULE_VERS);
2133         printk(KERN_INFO "%s: [ Rev %x ] PCI 10/100BaseT Ethernet ",                                                    dev->name, chip_version);
2134         for (i = 0; i < 6; i++)
2135                 printk("%2.2x%c",dev->dev_addr[i],i == 5 ? ' ' : ':');
2136         printk( "\n");  
2137         return 0;
2138 err_iounmap:
2139         iounmap((void *) lp->mmio);
2140
2141 err_free_dev:
2142         free_netdev(dev);
2143
2144 err_free_reg:
2145         pci_release_regions(pdev);
2146
2147 err_disable_pdev:
2148         pci_disable_device(pdev);
2149         pci_set_drvdata(pdev, NULL);
2150         return err;
2151
2152 }
2153
/* PCI driver descriptor: binds this driver's probe/remove and
 * power-management callbacks to the IDs in amd8111e_pci_tbl. */
static struct pci_driver amd8111e_driver = {
	.name		= MODULE_NAME,
	.id_table	= amd8111e_pci_tbl,
	.probe		= amd8111e_probe_one,
	.remove		= __devexit_p(amd8111e_remove_one),
	.suspend	= amd8111e_suspend,
	.resume		= amd8111e_resume
};
2162
/* Module entry point: register the PCI driver with the PCI core.
 * Returns 0 on success or a negative errno from pci_module_init(). */
static int __init amd8111e_init(void)
{
	return pci_module_init(&amd8111e_driver);
}
2167
/* Module exit point: unregister the PCI driver; the PCI core then
 * detaches every bound device through the driver's remove hook. */
static void __exit amd8111e_cleanup(void)
{
	pci_unregister_driver(&amd8111e_driver);
}
2172
/* Wire the module load/unload hooks to the kernel module loader. */
module_init(amd8111e_init);
module_exit(amd8111e_cleanup);