Back-ported version 1.4.43f of the bnx2 driver
[linux-2.6.git] / drivers / net / bnx2.c
1 /* bnx2.c: Broadcom NX2 network driver.
2  *
3  * Copyright (c) 2004, 2005, 2006 Broadcom Corporation
4  *
5  * This program is free software; you can redistribute it and/or modify
6  * it under the terms of the GNU General Public License as published by
7  * the Free Software Foundation.
8  *
9  * Written by: Michael Chan  (mchan@broadcom.com)
10  */
11
12 #include <linux/config.h>
13
14 #include <linux/version.h>
15 #if (LINUX_VERSION_CODE < 0x020500)
16 #if defined(CONFIG_MODVERSIONS) && defined(MODULE) && ! defined(MODVERSIONS)
17 #define MODVERSIONS
18 #include <linux/modversions.h>
19 #endif
20 #endif
21
22 #include <linux/module.h>
23 #if (LINUX_VERSION_CODE >= 0x020600)
24 #include <linux/moduleparam.h>
25 #endif
26
27 #include <linux/kernel.h>
28 #include <linux/timer.h>
29 #include <linux/errno.h>
30 #include <linux/ioport.h>
31 #include <linux/slab.h>
32 #include <linux/vmalloc.h>
33 #include <linux/interrupt.h>
34 #include <linux/pci.h>
35 #include <linux/init.h>
36 #include <linux/netdevice.h>
37 #include <linux/etherdevice.h>
38 #include <linux/skbuff.h>
39 #if (LINUX_VERSION_CODE >= 0x020600)
40 #include <linux/dma-mapping.h>
41 #endif
42 #include <asm/bitops.h>
43 #include <asm/io.h>
44 #include <asm/irq.h>
45 #include <linux/delay.h>
46 #include <asm/byteorder.h>
47 #include <asm/page.h>
48 #include <linux/time.h>
49 #include <linux/ethtool.h>
50 #include <linux/mii.h>
51 #ifdef NETIF_F_HW_VLAN_TX
52 #include <linux/if_vlan.h>
53 #define BCM_VLAN 1
54 #endif
55 #ifdef NETIF_F_TSO
56 #include <net/ip.h>
57 #include <net/tcp.h>
58 #include <net/checksum.h>
59 #define BCM_TSO 1
60 #ifndef NETIF_F_GSO
61 #define gso_size tso_size
62 #endif
63 #endif
64 #if (LINUX_VERSION_CODE >= 0x020600)
65 #include <linux/workqueue.h>
66 #endif
67 #ifndef BNX2_BOOT_DISK
68 #include <linux/crc32.h>
69 #endif
70 #include <linux/prefetch.h>
71 #include <linux/cache.h>
72 #include <linux/zlib.h>
73
74 #include "bnx2.h"
75 #include "bnx2_fw.h"
76
#define DRV_MODULE_NAME         "bnx2"
#define PFX DRV_MODULE_NAME     ": "	/* log-message prefix: "bnx2: " */
#define DRV_MODULE_VERSION      "1.4.43f"
#define DRV_MODULE_RELDATE      "June 26, 2006"

/* Convert a relative delay in jiffies into an absolute expiry time. */
#define RUN_AT(x) (jiffies + (x))

/* Time in jiffies before concluding the transmitter is hung. */
#define TX_TIMEOUT  (5*HZ)

static const char version[] __devinitdata =
        "Broadcom NetXtreme II Gigabit Ethernet Driver " DRV_MODULE_NAME " v" DRV_MODULE_VERSION " (" DRV_MODULE_RELDATE ")\n";

MODULE_AUTHOR("Michael Chan <mchan@broadcom.com>");
MODULE_DESCRIPTION("Broadcom NetXtreme II BCM5706/5708 Driver");
MODULE_LICENSE("GPL");
MODULE_VERSION(DRV_MODULE_VERSION);

/* module_param() only exists on 2.6; this backport also targets 2.4. */
#if (LINUX_VERSION_CODE >= 0x20600)
static int disable_msi = 0;

module_param(disable_msi, int, 0);
MODULE_PARM_DESC(disable_msi, "Disable Message Signaled Interrupt (MSI)");
#endif
101
/* Board variants supported by this driver.  The values are used as the
 * driver_data field of bnx2_pci_tbl[] and index board_info[], so the
 * order here must match board_info[] below.
 */
typedef enum {
        BCM5706 = 0,
        NC370T,
        NC370I,
        BCM5706S,
        NC370F,
        BCM5708,
        BCM5708S,
} board_t;
111
/* Human-readable board names, indexed by board_t, above.  Keep the
 * entry order in sync with the board_t enum.
 */
static const struct {
        char *name;
} board_info[] __devinitdata = {
        { "Broadcom NetXtreme II BCM5706 1000Base-T" },
        { "HP NC370T Multifunction Gigabit Server Adapter" },
        { "HP NC370i Multifunction Gigabit Server Adapter" },
        { "Broadcom NetXtreme II BCM5706 1000Base-SX" },
        { "HP NC370F Multifunction Gigabit Server Adapter" },
        { "Broadcom NetXtreme II BCM5708 1000Base-T" },
        { "Broadcom NetXtreme II BCM5708 1000Base-SX" },
        };
124
/* PCI IDs this driver binds to.  The HP OEM variants (matched by
 * subsystem vendor/device) must precede the PCI_ANY_ID catch-all
 * entries for the same device ID, since the table is scanned in
 * order.  The last field is the board_t index into board_info[].
 */
static struct pci_device_id bnx2_pci_tbl[] = {
        { PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_NX2_5706,
          PCI_VENDOR_ID_HP, 0x3101, 0, 0, NC370T },
        { PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_NX2_5706,
          PCI_VENDOR_ID_HP, 0x3106, 0, 0, NC370I },
        { PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_NX2_5706,
          PCI_ANY_ID, PCI_ANY_ID, 0, 0, BCM5706 },
        { PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_NX2_5708,
          PCI_ANY_ID, PCI_ANY_ID, 0, 0, BCM5708 },
        { PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_NX2_5706S,
          PCI_VENDOR_ID_HP, 0x3102, 0, 0, NC370F },
        { PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_NX2_5706S,
          PCI_ANY_ID, PCI_ANY_ID, 0, 0, BCM5706S },
        { PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_NX2_5708S,
          PCI_ANY_ID, PCI_ANY_ID, 0, 0, BCM5708S },
        { 0, }
};
142
/* Supported NVRAM parts.  Field meanings are defined by struct
 * flash_spec in bnx2.h: five NVRAM-controller setup words, a
 * buffered-flash flag, page geometry (bits/size), byte address mask,
 * total size (0 for the unnamed "Expansion" placeholder entries), and
 * a descriptive name.
 * NOTE(review): the first word appears to be the flash strapping value
 * used to select an entry at probe time -- confirm against the NVRAM
 * init code.
 */
static struct flash_spec flash_table[] =
{
        /* Slow EEPROM */
        {0x00000000, 0x40830380, 0x009f0081, 0xa184a053, 0xaf000400,
         1, SEEPROM_PAGE_BITS, SEEPROM_PAGE_SIZE,
         SEEPROM_BYTE_ADDR_MASK, SEEPROM_TOTAL_SIZE,
         "EEPROM - slow"},
        /* Expansion entry 0001 */
        {0x08000002, 0x4b808201, 0x00050081, 0x03840253, 0xaf020406,
         0, SAIFUN_FLASH_PAGE_BITS, SAIFUN_FLASH_PAGE_SIZE,
         SAIFUN_FLASH_BYTE_ADDR_MASK, 0,
         "Entry 0001"},
        /* Saifun SA25F010 (non-buffered flash) */
        /* strap, cfg1, & write1 need updates */
        {0x04000001, 0x47808201, 0x00050081, 0x03840253, 0xaf020406,
         0, SAIFUN_FLASH_PAGE_BITS, SAIFUN_FLASH_PAGE_SIZE,
         SAIFUN_FLASH_BYTE_ADDR_MASK, SAIFUN_FLASH_BASE_TOTAL_SIZE*2,
         "Non-buffered flash (128kB)"},
        /* Saifun SA25F020 (non-buffered flash) */
        /* strap, cfg1, & write1 need updates */
        {0x0c000003, 0x4f808201, 0x00050081, 0x03840253, 0xaf020406,
         0, SAIFUN_FLASH_PAGE_BITS, SAIFUN_FLASH_PAGE_SIZE,
         SAIFUN_FLASH_BYTE_ADDR_MASK, SAIFUN_FLASH_BASE_TOTAL_SIZE*4,
         "Non-buffered flash (256kB)"},
        /* Expansion entry 0100 */
        {0x11000000, 0x53808201, 0x00050081, 0x03840253, 0xaf020406,
         0, SAIFUN_FLASH_PAGE_BITS, SAIFUN_FLASH_PAGE_SIZE,
         SAIFUN_FLASH_BYTE_ADDR_MASK, 0,
         "Entry 0100"},
        /* Entry 0101: ST M45PE10 (non-buffered flash, TetonII B0) */
        {0x19000002, 0x5b808201, 0x000500db, 0x03840253, 0xaf020406,
         0, ST_MICRO_FLASH_PAGE_BITS, ST_MICRO_FLASH_PAGE_SIZE,
         ST_MICRO_FLASH_BYTE_ADDR_MASK, ST_MICRO_FLASH_BASE_TOTAL_SIZE*2,
         "Entry 0101: ST M45PE10 (128kB non-bufferred)"},
        /* Entry 0110: ST M45PE20 (non-buffered flash)*/
        {0x15000001, 0x57808201, 0x000500db, 0x03840253, 0xaf020406,
         0, ST_MICRO_FLASH_PAGE_BITS, ST_MICRO_FLASH_PAGE_SIZE,
         ST_MICRO_FLASH_BYTE_ADDR_MASK, ST_MICRO_FLASH_BASE_TOTAL_SIZE*4,
         "Entry 0110: ST M45PE20 (256kB non-bufferred)"},
        /* Saifun SA25F005 (non-buffered flash) */
        /* strap, cfg1, & write1 need updates */
        {0x1d000003, 0x5f808201, 0x00050081, 0x03840253, 0xaf020406,
         0, SAIFUN_FLASH_PAGE_BITS, SAIFUN_FLASH_PAGE_SIZE,
         SAIFUN_FLASH_BYTE_ADDR_MASK, SAIFUN_FLASH_BASE_TOTAL_SIZE,
         "Non-buffered flash (64kB)"},
        /* Fast EEPROM */
        {0x22000000, 0x62808380, 0x009f0081, 0xa184a053, 0xaf000400,
         1, SEEPROM_PAGE_BITS, SEEPROM_PAGE_SIZE,
         SEEPROM_BYTE_ADDR_MASK, SEEPROM_TOTAL_SIZE,
         "EEPROM - fast"},
        /* Expansion entry 1001 */
        {0x2a000002, 0x6b808201, 0x00050081, 0x03840253, 0xaf020406,
         0, SAIFUN_FLASH_PAGE_BITS, SAIFUN_FLASH_PAGE_SIZE,
         SAIFUN_FLASH_BYTE_ADDR_MASK, 0,
         "Entry 1001"},
        /* Expansion entry 1010 */
        {0x26000001, 0x67808201, 0x00050081, 0x03840253, 0xaf020406,
         0, SAIFUN_FLASH_PAGE_BITS, SAIFUN_FLASH_PAGE_SIZE,
         SAIFUN_FLASH_BYTE_ADDR_MASK, 0,
         "Entry 1010"},
        /* ATMEL AT45DB011B (buffered flash) */
        {0x2e000003, 0x6e808273, 0x00570081, 0x68848353, 0xaf000400,
         1, BUFFERED_FLASH_PAGE_BITS, BUFFERED_FLASH_PAGE_SIZE,
         BUFFERED_FLASH_BYTE_ADDR_MASK, BUFFERED_FLASH_TOTAL_SIZE,
         "Buffered flash (128kB)"},
        /* Expansion entry 1100 */
        {0x33000000, 0x73808201, 0x00050081, 0x03840253, 0xaf020406,
         0, SAIFUN_FLASH_PAGE_BITS, SAIFUN_FLASH_PAGE_SIZE,
         SAIFUN_FLASH_BYTE_ADDR_MASK, 0,
         "Entry 1100"},
        /* Expansion entry 1101 */
        {0x3b000002, 0x7b808201, 0x00050081, 0x03840253, 0xaf020406,
         0, SAIFUN_FLASH_PAGE_BITS, SAIFUN_FLASH_PAGE_SIZE,
         SAIFUN_FLASH_BYTE_ADDR_MASK, 0,
         "Entry 1101"},
        /* Ateml Expansion entry 1110 */
        {0x37000001, 0x76808273, 0x00570081, 0x68848353, 0xaf000400,
         1, BUFFERED_FLASH_PAGE_BITS, BUFFERED_FLASH_PAGE_SIZE,
         BUFFERED_FLASH_BYTE_ADDR_MASK, 0,
         "Entry 1110 (Atmel)"},
        /* ATMEL AT45DB021B (buffered flash) */
        {0x3f000003, 0x7e808273, 0x00570081, 0x68848353, 0xaf000400,
         1, BUFFERED_FLASH_PAGE_BITS, BUFFERED_FLASH_PAGE_SIZE,
         BUFFERED_FLASH_BYTE_ADDR_MASK, BUFFERED_FLASH_TOTAL_SIZE*2,
         "Buffered flash (256kB)"},
};
229
230 MODULE_DEVICE_TABLE(pci, bnx2_pci_tbl);
231
#ifdef BNX2_BOOT_DISK
/* Bitwise little-endian CRC-32 (polynomial 0xedb88320) over len bytes
 * at p.  Local fallback for the boot-disk build, which does not link
 * against the kernel's crc32 library.
 */
u32 ether_crc_le(size_t len, unsigned char const *p)
{
	u32 crc = ~0;
	int bit;
#define CRCPOLY_LE 0xedb88320

	for (; len; len--) {
		crc ^= *p++;
		for (bit = 0; bit < 8; bit++) {
			u32 lsb = crc & 1;

			crc >>= 1;
			if (lsb)
				crc ^= CRCPOLY_LE;
		}
	}
	return crc;
}
#endif
247
248 static inline u32 bnx2_tx_avail(struct bnx2 *bp)
249 {
250         u32 diff = TX_RING_IDX(bp->tx_prod) - TX_RING_IDX(bp->tx_cons);
251
252         if (diff > MAX_TX_DESC_CNT)
253                 diff = (diff & MAX_TX_DESC_CNT) - 1;
254         return (bp->tx_ring_size - diff);
255 }
256
/* Indirect register read: select the target offset through the PCI
 * config window register, then read back through the window.  The
 * select + read pair is not atomic; callers must serialize access.
 */
static u32
bnx2_reg_rd_ind(struct bnx2 *bp, u32 offset)
{
        REG_WR(bp, BNX2_PCICFG_REG_WINDOW_ADDRESS, offset);
        return (REG_RD(bp, BNX2_PCICFG_REG_WINDOW));
}
263
/* Indirect register write: select the target offset through the PCI
 * config window register, then write the value through the window.
 * Not atomic; callers must serialize access, as with bnx2_reg_rd_ind().
 */
static void
bnx2_reg_wr_ind(struct bnx2 *bp, u32 offset, u32 val)
{
        REG_WR(bp, BNX2_PCICFG_REG_WINDOW_ADDRESS, offset);
        REG_WR(bp, BNX2_PCICFG_REG_WINDOW, val);
}
270
/* Write one 32-bit word into on-chip context memory at
 * cid_addr + offset, using the CTX address/data register pair.
 */
static void
bnx2_ctx_wr(struct bnx2 *bp, u32 cid_addr, u32 offset, u32 val)
{
        offset += cid_addr;
        REG_WR(bp, BNX2_CTX_DATA_ADR, offset);
        REG_WR(bp, BNX2_CTX_DATA, val);
}
278
/* Read PHY register @reg over the EMAC MDIO interface into *val.
 *
 * If the MDIO block is auto-polling the PHY, polling is paused around
 * the manual transaction and restored afterwards so the two cannot
 * collide.  Returns 0 on success, or -EBUSY (with *val set to 0) if
 * the transaction does not complete within the polling window.
 */
static int
bnx2_read_phy(struct bnx2 *bp, u32 reg, u32 *val)
{
        u32 val1;
        int i, ret;

        if (bp->phy_flags & PHY_INT_MODE_AUTO_POLLING_FLAG) {
                /* Pause hardware auto-polling of the PHY. */
                val1 = REG_RD(bp, BNX2_EMAC_MDIO_MODE);
                val1 &= ~BNX2_EMAC_MDIO_MODE_AUTO_POLL;

                REG_WR(bp, BNX2_EMAC_MDIO_MODE, val1);
                REG_RD(bp, BNX2_EMAC_MDIO_MODE);	/* read back before delaying */

                udelay(40);
        }

        /* Launch the read: PHY address, register, and START_BUSY kick. */
        val1 = (bp->phy_addr << 21) | (reg << 16) |
                BNX2_EMAC_MDIO_COMM_COMMAND_READ | BNX2_EMAC_MDIO_COMM_DISEXT |
                BNX2_EMAC_MDIO_COMM_START_BUSY;
        REG_WR(bp, BNX2_EMAC_MDIO_COMM, val1);

        /* Poll for completion: up to 50 iterations of 10us. */
        for (i = 0; i < 50; i++) {
                udelay(10);

                val1 = REG_RD(bp, BNX2_EMAC_MDIO_COMM);
                if (!(val1 & BNX2_EMAC_MDIO_COMM_START_BUSY)) {
                        udelay(5);

                        /* Re-read to pick up the returned data bits. */
                        val1 = REG_RD(bp, BNX2_EMAC_MDIO_COMM);
                        val1 &= BNX2_EMAC_MDIO_COMM_DATA;

                        break;
                }
        }

        if (val1 & BNX2_EMAC_MDIO_COMM_START_BUSY) {
                /* Transaction never completed. */
                *val = 0x0;
                ret = -EBUSY;
        }
        else {
                *val = val1;
                ret = 0;
        }

        if (bp->phy_flags & PHY_INT_MODE_AUTO_POLLING_FLAG) {
                /* Restore hardware auto-polling. */
                val1 = REG_RD(bp, BNX2_EMAC_MDIO_MODE);
                val1 |= BNX2_EMAC_MDIO_MODE_AUTO_POLL;

                REG_WR(bp, BNX2_EMAC_MDIO_MODE, val1);
                REG_RD(bp, BNX2_EMAC_MDIO_MODE);

                udelay(40);
        }

        return ret;
}
335
/* Write @val to PHY register @reg over the EMAC MDIO interface.
 *
 * As in bnx2_read_phy(), hardware auto-polling of the PHY is paused
 * around the manual transaction and restored afterwards.  Returns 0
 * on success or -EBUSY if the transaction does not complete within
 * the polling window.
 */
static int
bnx2_write_phy(struct bnx2 *bp, u32 reg, u32 val)
{
        u32 val1;
        int i, ret;

        if (bp->phy_flags & PHY_INT_MODE_AUTO_POLLING_FLAG) {
                /* Pause hardware auto-polling of the PHY. */
                val1 = REG_RD(bp, BNX2_EMAC_MDIO_MODE);
                val1 &= ~BNX2_EMAC_MDIO_MODE_AUTO_POLL;

                REG_WR(bp, BNX2_EMAC_MDIO_MODE, val1);
                REG_RD(bp, BNX2_EMAC_MDIO_MODE);	/* read back before delaying */

                udelay(40);
        }

        /* Launch the write: PHY address, register, data, START_BUSY kick. */
        val1 = (bp->phy_addr << 21) | (reg << 16) | val |
                BNX2_EMAC_MDIO_COMM_COMMAND_WRITE |
                BNX2_EMAC_MDIO_COMM_START_BUSY | BNX2_EMAC_MDIO_COMM_DISEXT;
        REG_WR(bp, BNX2_EMAC_MDIO_COMM, val1);

        /* Poll for completion: up to 50 iterations of 10us. */
        for (i = 0; i < 50; i++) {
                udelay(10);

                val1 = REG_RD(bp, BNX2_EMAC_MDIO_COMM);
                if (!(val1 & BNX2_EMAC_MDIO_COMM_START_BUSY)) {
                        udelay(5);
                        break;
                }
        }

        if (val1 & BNX2_EMAC_MDIO_COMM_START_BUSY)
                ret = -EBUSY;
        else
                ret = 0;

        if (bp->phy_flags & PHY_INT_MODE_AUTO_POLLING_FLAG) {
                /* Restore hardware auto-polling. */
                val1 = REG_RD(bp, BNX2_EMAC_MDIO_MODE);
                val1 |= BNX2_EMAC_MDIO_MODE_AUTO_POLL;

                REG_WR(bp, BNX2_EMAC_MDIO_MODE, val1);
                REG_RD(bp, BNX2_EMAC_MDIO_MODE);

                udelay(40);
        }

        return ret;
}
384
/* Mask chip interrupts.  The read-back flushes the posted write so
 * the mask is guaranteed to be in effect when we return.
 */
static void
bnx2_disable_int(struct bnx2 *bp)
{
        REG_WR(bp, BNX2_PCICFG_INT_ACK_CMD,
               BNX2_PCICFG_INT_ACK_CMD_MASK_INT);
        REG_RD(bp, BNX2_PCICFG_INT_ACK_CMD);
}
392
/* Unmask chip interrupts.  The first write acknowledges events up to
 * last_status_idx while still masked; the second removes the mask.
 * The final COAL_NOW command forces an immediate host-coalescing
 * pass, presumably so events that arrived while masked still raise
 * an interrupt -- confirm against the HC register spec.
 */
static void
bnx2_enable_int(struct bnx2 *bp)
{
        REG_WR(bp, BNX2_PCICFG_INT_ACK_CMD,
               BNX2_PCICFG_INT_ACK_CMD_INDEX_VALID |
               BNX2_PCICFG_INT_ACK_CMD_MASK_INT | bp->last_status_idx);

        REG_WR(bp, BNX2_PCICFG_INT_ACK_CMD,
               BNX2_PCICFG_INT_ACK_CMD_INDEX_VALID | bp->last_status_idx);

        REG_WR(bp, BNX2_HC_COMMAND, bp->hc_cmd | BNX2_HC_COMMAND_COAL_NOW);
}
405
/* Mask interrupts and wait for any in-flight interrupt handler to
 * complete.  intr_sem is incremented first (paired with the
 * atomic_dec_and_test() in bnx2_netif_start()).  Kernels older than
 * 2.5.28 take no IRQ argument for synchronize_irq().
 */
static void
bnx2_disable_int_sync(struct bnx2 *bp)
{
        atomic_inc(&bp->intr_sem);
        bnx2_disable_int(bp);
#if (LINUX_VERSION_CODE >= 0x2051c)
        synchronize_irq(bp->pdev->irq);
#else
        synchronize_irq();
#endif
}
417
418 static void
419 bnx2_netif_stop(struct bnx2 *bp)
420 {
421         bnx2_disable_int_sync(bp);
422         if (netif_running(bp->dev)) {
423                 netif_poll_disable(bp->dev);
424                 netif_tx_disable(bp->dev);
425                 bp->dev->trans_start = jiffies; /* prevent tx timeout */
426         }
427 }
428
429 static void
430 bnx2_netif_start(struct bnx2 *bp)
431 {
432         if (atomic_dec_and_test(&bp->intr_sem)) {
433                 if (netif_running(bp->dev)) {
434                         netif_wake_queue(bp->dev);
435                         netif_poll_enable(bp->dev);
436                         bnx2_enable_int(bp);
437                 }
438         }
439 }
440
/* Free all descriptor rings, the software shadow rings, and the
 * combined status/statistics block.  Also serves as the cleanup path
 * when bnx2_alloc_mem() fails part-way: every pointer is checked and
 * cleared afterwards, so the function is safe to call repeatedly.
 */
static void
bnx2_free_mem(struct bnx2 *bp)
{
        int i;

        if (bp->status_blk) {
                /* status_blk and stats_blk share one DMA allocation. */
                pci_free_consistent(bp->pdev, bp->status_stats_size,
                                    bp->status_blk, bp->status_blk_mapping);
                bp->status_blk = NULL;
                bp->stats_blk = NULL;
        }
        if (bp->tx_desc_ring) {
                pci_free_consistent(bp->pdev,
                                    sizeof(struct tx_bd) * TX_DESC_CNT,
                                    bp->tx_desc_ring, bp->tx_desc_mapping);
                bp->tx_desc_ring = NULL;
        }
        kfree(bp->tx_buf_ring);		/* kfree(NULL) is a no-op */
        bp->tx_buf_ring = NULL;
        for (i = 0; i < bp->rx_max_ring; i++) {
                if (bp->rx_desc_ring[i])
                        pci_free_consistent(bp->pdev,
                                            sizeof(struct rx_bd) * RX_DESC_CNT,
                                            bp->rx_desc_ring[i],
                                            bp->rx_desc_mapping[i]);
                bp->rx_desc_ring[i] = NULL;
        }
        vfree(bp->rx_buf_ring);		/* vfree(NULL) is a no-op */
        bp->rx_buf_ring = NULL;
}
471
/* Allocate all ring memory: DMA-coherent TX/RX descriptor rings, the
 * kmalloc'd/vmalloc'd software shadow rings, and a single allocation
 * holding the status block followed by the statistics block.
 *
 * Returns 0 on success or -ENOMEM; on failure everything already
 * allocated is released through bnx2_free_mem().
 */
static int
bnx2_alloc_mem(struct bnx2 *bp)
{
        int i, status_blk_size;

        bp->tx_buf_ring = kmalloc(sizeof(struct sw_bd) * TX_DESC_CNT,
                                  GFP_KERNEL);
        if (bp->tx_buf_ring == NULL)
                return -ENOMEM;

        memset(bp->tx_buf_ring, 0, sizeof(struct sw_bd) * TX_DESC_CNT);
        bp->tx_desc_ring = pci_alloc_consistent(bp->pdev,
                                                sizeof(struct tx_bd) *
                                                TX_DESC_CNT,
                                                &bp->tx_desc_mapping);
        if (bp->tx_desc_ring == NULL)
                goto alloc_mem_err;

        /* The rx shadow ring covers up to rx_max_ring pages worth of
         * entries, so use vmalloc rather than kmalloc.
         */
        bp->rx_buf_ring = vmalloc(sizeof(struct sw_bd) * RX_DESC_CNT *
                                  bp->rx_max_ring);
        if (bp->rx_buf_ring == NULL)
                goto alloc_mem_err;

        memset(bp->rx_buf_ring, 0, sizeof(struct sw_bd) * RX_DESC_CNT *
                                   bp->rx_max_ring);

        for (i = 0; i < bp->rx_max_ring; i++) {
                bp->rx_desc_ring[i] =
                        pci_alloc_consistent(bp->pdev,
                                             sizeof(struct rx_bd) * RX_DESC_CNT,
                                             &bp->rx_desc_mapping[i]);
                if (bp->rx_desc_ring[i] == NULL)
                        goto alloc_mem_err;

        }

        /* Combine status and statistics blocks into one allocation. */
        status_blk_size = L1_CACHE_ALIGN(sizeof(struct status_block));
        bp->status_stats_size = status_blk_size +
                                sizeof(struct statistics_block);

        bp->status_blk = pci_alloc_consistent(bp->pdev, bp->status_stats_size,
                                              &bp->status_blk_mapping);
        if (bp->status_blk == NULL)
                goto alloc_mem_err;

        memset(bp->status_blk, 0, bp->status_stats_size);

        /* stats_blk lives immediately after the cache-aligned status
         * block within the same allocation.
         */
        bp->stats_blk = (void *) ((unsigned long) bp->status_blk +
                                  status_blk_size);

        bp->stats_blk_mapping = bp->status_blk_mapping + status_blk_size;

        return 0;

alloc_mem_err:
        bnx2_free_mem(bp);
        return -ENOMEM;
}
531
/* Publish the current link state (speed/duplex/autoneg result) to the
 * firmware through the shared-memory LINK_STATUS word.
 */
static void
bnx2_report_fw_link(struct bnx2 *bp)
{
        u32 fw_link_status = 0;

        if (bp->link_up) {
                u32 bmsr;

                switch (bp->line_speed) {
                case SPEED_10:
                        if (bp->duplex == DUPLEX_HALF)
                                fw_link_status = BNX2_LINK_STATUS_10HALF;
                        else
                                fw_link_status = BNX2_LINK_STATUS_10FULL;
                        break;
                case SPEED_100:
                        if (bp->duplex == DUPLEX_HALF)
                                fw_link_status = BNX2_LINK_STATUS_100HALF;
                        else
                                fw_link_status = BNX2_LINK_STATUS_100FULL;
                        break;
                case SPEED_1000:
                        if (bp->duplex == DUPLEX_HALF)
                                fw_link_status = BNX2_LINK_STATUS_1000HALF;
                        else
                                fw_link_status = BNX2_LINK_STATUS_1000FULL;
                        break;
                case SPEED_2500:
                        if (bp->duplex == DUPLEX_HALF)
                                fw_link_status = BNX2_LINK_STATUS_2500HALF;
                        else
                                fw_link_status = BNX2_LINK_STATUS_2500FULL;
                        break;
                }

                fw_link_status |= BNX2_LINK_STATUS_LINK_UP;

                if (bp->autoneg) {
                        fw_link_status |= BNX2_LINK_STATUS_AN_ENABLED;

                        /* BMSR latches events; read twice so the second
                         * read reflects current status.
                         */
                        bnx2_read_phy(bp, MII_BMSR, &bmsr);
                        bnx2_read_phy(bp, MII_BMSR, &bmsr);

                        if (!(bmsr & BMSR_ANEGCOMPLETE) ||
                            bp->phy_flags & PHY_PARALLEL_DETECT_FLAG)
                                fw_link_status |= BNX2_LINK_STATUS_PARALLEL_DET;
                        else
                                fw_link_status |= BNX2_LINK_STATUS_AN_COMPLETE;
                }
        }
        else
                fw_link_status = BNX2_LINK_STATUS_LINK_DOWN;

        REG_WR_IND(bp, bp->shmem_base + BNX2_LINK_STATUS, fw_link_status);
}
587
/* Update the carrier flag, log the new link state (speed, duplex and
 * pause configuration) to the console, and forward the state to the
 * firmware via bnx2_report_fw_link().
 */
static void
bnx2_report_link(struct bnx2 *bp)
{
        if (bp->link_up) {
                netif_carrier_on(bp->dev);
                printk(KERN_INFO PFX "%s NIC Link is Up, ", bp->dev->name);

                printk("%d Mbps ", bp->line_speed);

                if (bp->duplex == DUPLEX_FULL)
                        printk("full duplex");
                else
                        printk("half duplex");

                /* Continuation printks build one logical log line. */
                if (bp->flow_ctrl) {
                        if (bp->flow_ctrl & FLOW_CTRL_RX) {
                                printk(", receive ");
                                if (bp->flow_ctrl & FLOW_CTRL_TX)
                                        printk("& transmit ");
                        }
                        else {
                                printk(", transmit ");
                        }
                        printk("flow control ON");
                }
                printk("\n");
        }
        else {
                netif_carrier_off(bp->dev);
                printk(KERN_ERR PFX "%s NIC Link is Down\n", bp->dev->name);
        }

        bnx2_report_fw_link(bp);
}
622
/* Resolve bp->flow_ctrl (TX/RX pause enables) from the negotiated
 * pause advertisements, following the resolution table in IEEE 802.3
 * Annex 28B (Table 28B-3).  When pause autonegotiation was not
 * requested, the user-configured value is applied instead (full
 * duplex only -- pause is undefined at half duplex).
 */
static void
bnx2_resolve_flow_ctrl(struct bnx2 *bp)
{
        u32 local_adv, remote_adv;

        bp->flow_ctrl = 0;
        if ((bp->autoneg & (AUTONEG_SPEED | AUTONEG_FLOW_CTRL)) != 
                (AUTONEG_SPEED | AUTONEG_FLOW_CTRL)) {

                /* Pause not autonegotiated: use the forced setting. */
                if (bp->duplex == DUPLEX_FULL) {
                        bp->flow_ctrl = bp->req_flow_ctrl;
                }
                return;
        }

        if (bp->duplex != DUPLEX_FULL) {
                return;
        }

        if ((bp->phy_flags & PHY_SERDES_FLAG) &&
            (CHIP_NUM(bp) == CHIP_NUM_5708)) {
                /* The 5708 SerDes PHY reports the already-resolved
                 * pause result directly in its 1000X status register.
                 */
                u32 val;

                bnx2_read_phy(bp, BCM5708S_1000X_STAT1, &val);
                if (val & BCM5708S_1000X_STAT1_TX_PAUSE)
                        bp->flow_ctrl |= FLOW_CTRL_TX;
                if (val & BCM5708S_1000X_STAT1_RX_PAUSE)
                        bp->flow_ctrl |= FLOW_CTRL_RX;
                return;
        }

        bnx2_read_phy(bp, MII_ADVERTISE, &local_adv);
        bnx2_read_phy(bp, MII_LPA, &remote_adv);

        if (bp->phy_flags & PHY_SERDES_FLAG) {
                /* Remap the 1000BASE-X pause bits onto the copper-style
                 * PAUSE_CAP/PAUSE_ASYM bits so one resolution table
                 * below handles both PHY types.
                 */
                u32 new_local_adv = 0;
                u32 new_remote_adv = 0;

                if (local_adv & ADVERTISE_1000XPAUSE)
                        new_local_adv |= ADVERTISE_PAUSE_CAP;
                if (local_adv & ADVERTISE_1000XPSE_ASYM)
                        new_local_adv |= ADVERTISE_PAUSE_ASYM;
                if (remote_adv & ADVERTISE_1000XPAUSE)
                        new_remote_adv |= ADVERTISE_PAUSE_CAP;
                if (remote_adv & ADVERTISE_1000XPSE_ASYM)
                        new_remote_adv |= ADVERTISE_PAUSE_ASYM;

                local_adv = new_local_adv;
                remote_adv = new_remote_adv;
        }

        /* See Table 28B-3 of 802.3ab-1999 spec. */
        if (local_adv & ADVERTISE_PAUSE_CAP) {
                if(local_adv & ADVERTISE_PAUSE_ASYM) {
                        if (remote_adv & ADVERTISE_PAUSE_CAP) {
                                bp->flow_ctrl = FLOW_CTRL_TX | FLOW_CTRL_RX;
                        }
                        else if (remote_adv & ADVERTISE_PAUSE_ASYM) {
                                bp->flow_ctrl = FLOW_CTRL_RX;
                        }
                }
                else {
                        if (remote_adv & ADVERTISE_PAUSE_CAP) {
                                bp->flow_ctrl = FLOW_CTRL_TX | FLOW_CTRL_RX;
                        }
                }
        }
        else if (local_adv & ADVERTISE_PAUSE_ASYM) {
                if ((remote_adv & ADVERTISE_PAUSE_CAP) &&
                        (remote_adv & ADVERTISE_PAUSE_ASYM)) {

                        bp->flow_ctrl = FLOW_CTRL_TX;
                }
        }
}
698
/* Record link-up state for the 5708 SerDes PHY: speed and duplex are
 * read back from the resolved values in the 1000X status register.
 * Always returns 0.
 */
static int
bnx2_5708s_linkup(struct bnx2 *bp)
{
        u32 val;

        bp->link_up = 1;
        bnx2_read_phy(bp, BCM5708S_1000X_STAT1, &val);
        switch (val & BCM5708S_1000X_STAT1_SPEED_MASK) {
                case BCM5708S_1000X_STAT1_SPEED_10:
                        bp->line_speed = SPEED_10;
                        break;
                case BCM5708S_1000X_STAT1_SPEED_100:
                        bp->line_speed = SPEED_100;
                        break;
                case BCM5708S_1000X_STAT1_SPEED_1G:
                        bp->line_speed = SPEED_1000;
                        break;
                case BCM5708S_1000X_STAT1_SPEED_2G5:
                        bp->line_speed = SPEED_2500;
                        break;
        }
        if (val & BCM5708S_1000X_STAT1_FD)
                bp->duplex = DUPLEX_FULL;
        else
                bp->duplex = DUPLEX_HALF;

        return 0;
}
727
728 static int
729 bnx2_5706s_linkup(struct bnx2 *bp)
730 {
731         u32 bmcr, local_adv, remote_adv, common;
732
733         bp->link_up = 1;
734         bp->line_speed = SPEED_1000;
735
736         bnx2_read_phy(bp, MII_BMCR, &bmcr);
737         if (bmcr & BMCR_FULLDPLX) {
738                 bp->duplex = DUPLEX_FULL;
739         }
740         else {
741                 bp->duplex = DUPLEX_HALF;
742         }
743
744         if (!(bmcr & BMCR_ANENABLE)) {
745                 return 0;
746         }
747
748         bnx2_read_phy(bp, MII_ADVERTISE, &local_adv);
749         bnx2_read_phy(bp, MII_LPA, &remote_adv);
750
751         common = local_adv & remote_adv;
752         if (common & (ADVERTISE_1000XHALF | ADVERTISE_1000XFULL)) {
753
754                 if (common & ADVERTISE_1000XFULL) {
755                         bp->duplex = DUPLEX_FULL;
756                 }
757                 else {
758                         bp->duplex = DUPLEX_HALF;
759                 }
760         }
761
762         return 0;
763 }
764
/* Record link-up state for a copper PHY.  With autoneg enabled, the
 * highest common speed/duplex is resolved from the gigabit registers
 * first, then the 10/100 advertisement registers; with autoneg off,
 * BMCR's forced speed/duplex bits are used.  Always returns 0, but
 * may clear bp->link_up if no common ability is found.
 */
static int
bnx2_copper_linkup(struct bnx2 *bp)
{
        u32 bmcr;

        bnx2_read_phy(bp, MII_BMCR, &bmcr);
        if (bmcr & BMCR_ANENABLE) {
                u32 local_adv, remote_adv, common;

                bnx2_read_phy(bp, MII_CTRL1000, &local_adv);
                bnx2_read_phy(bp, MII_STAT1000, &remote_adv);

                /* Shift aligns the partner's 1000BASE-T ability bits in
                 * MII_STAT1000 with our MII_CTRL1000 advertisement bits.
                 */
                common = local_adv & (remote_adv >> 2);
                if (common & ADVERTISE_1000FULL) {
                        bp->line_speed = SPEED_1000;
                        bp->duplex = DUPLEX_FULL;
                }
                else if (common & ADVERTISE_1000HALF) {
                        bp->line_speed = SPEED_1000;
                        bp->duplex = DUPLEX_HALF;
                }
                else {
                        /* No gigabit match: fall back to 10/100 resolution,
                         * best ability first.
                         */
                        bnx2_read_phy(bp, MII_ADVERTISE, &local_adv);
                        bnx2_read_phy(bp, MII_LPA, &remote_adv);

                        common = local_adv & remote_adv;
                        if (common & ADVERTISE_100FULL) {
                                bp->line_speed = SPEED_100;
                                bp->duplex = DUPLEX_FULL;
                        }
                        else if (common & ADVERTISE_100HALF) {
                                bp->line_speed = SPEED_100;
                                bp->duplex = DUPLEX_HALF;
                        }
                        else if (common & ADVERTISE_10FULL) {
                                bp->line_speed = SPEED_10;
                                bp->duplex = DUPLEX_FULL;
                        }
                        else if (common & ADVERTISE_10HALF) {
                                bp->line_speed = SPEED_10;
                                bp->duplex = DUPLEX_HALF;
                        }
                        else {
                                /* No common ability at all. */
                                bp->line_speed = 0;
                                bp->link_up = 0;
                        }
                }
        }
        else {
                /* Autoneg disabled: use the forced BMCR settings. */
                if (bmcr & BMCR_SPEED100) {
                        bp->line_speed = SPEED_100;
                }
                else {
                        bp->line_speed = SPEED_10;
                }
                if (bmcr & BMCR_FULLDPLX) {
                        bp->duplex = DUPLEX_FULL;
                }
                else {
                        bp->duplex = DUPLEX_HALF;
                }
        }

        return 0;
}
830
/* Program the EMAC to match the resolved link parameters: TX lengths,
 * port mode/speed, duplex, and RX/TX pause enables.  Always returns 0.
 */
static int
bnx2_set_mac_link(struct bnx2 *bp)
{
        u32 val;

        /* NOTE(review): 0x2620 is the default TX_LENGTHS value; 0x26ff
         * is used for half-duplex gigabit -- confirm the field meanings
         * against the register spec.
         */
        REG_WR(bp, BNX2_EMAC_TX_LENGTHS, 0x2620);
        if (bp->link_up && (bp->line_speed == SPEED_1000) &&
                (bp->duplex == DUPLEX_HALF)) {
                REG_WR(bp, BNX2_EMAC_TX_LENGTHS, 0x26ff);
        }

        /* Configure the EMAC mode register. */
        val = REG_RD(bp, BNX2_EMAC_MODE);

        val &= ~(BNX2_EMAC_MODE_PORT | BNX2_EMAC_MODE_HALF_DUPLEX |
                BNX2_EMAC_MODE_MAC_LOOP | BNX2_EMAC_MODE_FORCE_LINK |
                BNX2_EMAC_MODE_25G);

        if (bp->link_up) {
                switch (bp->line_speed) {
                        case SPEED_10:
                                /* Only the 5708 has a dedicated 10M mode. */
                                if (CHIP_NUM(bp) == CHIP_NUM_5708) {
                                        val |= BNX2_EMAC_MODE_PORT_MII_10;
                                        break;
                                }
                                /* fall through */
                        case SPEED_100:
                                val |= BNX2_EMAC_MODE_PORT_MII;
                                break;
                        case SPEED_2500:
                                val |= BNX2_EMAC_MODE_25G;
                                /* fall through */
                        case SPEED_1000:
                                val |= BNX2_EMAC_MODE_PORT_GMII;
                                break;
                }
        }
        else {
                val |= BNX2_EMAC_MODE_PORT_GMII;
        }

        /* Set the MAC to operate in the appropriate duplex mode. */
        if (bp->duplex == DUPLEX_HALF)
                val |= BNX2_EMAC_MODE_HALF_DUPLEX;
        REG_WR(bp, BNX2_EMAC_MODE, val);

        /* Enable/disable rx PAUSE. */
        bp->rx_mode &= ~BNX2_EMAC_RX_MODE_FLOW_EN;

        if (bp->flow_ctrl & FLOW_CTRL_RX)
                bp->rx_mode |= BNX2_EMAC_RX_MODE_FLOW_EN;
        REG_WR(bp, BNX2_EMAC_RX_MODE, bp->rx_mode);

        /* Enable/disable tx PAUSE. */
        val = REG_RD(bp, BNX2_EMAC_TX_MODE);
        val &= ~BNX2_EMAC_TX_MODE_FLOW_EN;

        if (bp->flow_ctrl & FLOW_CTRL_TX)
                val |= BNX2_EMAC_TX_MODE_FLOW_EN;
        REG_WR(bp, BNX2_EMAC_TX_MODE, val);

        /* Acknowledge the interrupt. */
        REG_WR(bp, BNX2_EMAC_STATUS, BNX2_EMAC_STATUS_LINK_CHANGE);

        return 0;
}
897
/* Re-evaluate the link after a link attention or configuration change.
 * Reads BMSR, updates bp->link_up and the speed/duplex/flow-control state
 * via the per-PHY linkup helpers, reports any transition, and reprograms
 * the MAC.  Presumably called with bp->phy_lock held (PHY registers are
 * accessed) -- verify against callers.  Always returns 0.
 */
static int
bnx2_set_link(struct bnx2 *bp)
{
	u32 bmsr;
	u8 link_up;

	/* In MAC loopback the link is forced up; nothing to negotiate. */
	if (bp->loopback == MAC_LOOPBACK) {
		bp->link_up = 1;
		return 0;
	}

	/* Remember the previous state so we only report real transitions. */
	link_up = bp->link_up;

	/* Read twice: BMSR link status is latched-low, so the first read
	 * may return a stale "down" indication.
	 */
	bnx2_read_phy(bp, MII_BMSR, &bmsr);
	bnx2_read_phy(bp, MII_BMSR, &bmsr);

	/* On 5706 SerDes, override the PHY's link bit with the EMAC's
	 * link indication.
	 */
	if ((bp->phy_flags & PHY_SERDES_FLAG) &&
	    (CHIP_NUM(bp) == CHIP_NUM_5706)) {
		u32 val;

		val = REG_RD(bp, BNX2_EMAC_STATUS);
		if (val & BNX2_EMAC_STATUS_LINK)
			bmsr |= BMSR_LSTATUS;
		else
			bmsr &= ~BMSR_LSTATUS;
	}

	if (bmsr & BMSR_LSTATUS) {
		bp->link_up = 1;

		/* Resolve speed/duplex with the chip-appropriate helper. */
		if (bp->phy_flags & PHY_SERDES_FLAG) {
			if (CHIP_NUM(bp) == CHIP_NUM_5706)
				bnx2_5706s_linkup(bp);
			else if (CHIP_NUM(bp) == CHIP_NUM_5708)
				bnx2_5708s_linkup(bp);
		}
		else {
			bnx2_copper_linkup(bp);
		}
		bnx2_resolve_flow_ctrl(bp);
	}
	else {
		/* Link down.  For SerDes with autoneg requested, make sure
		 * autoneg is re-enabled in BMCR if it was turned off.
		 */
		if ((bp->phy_flags & PHY_SERDES_FLAG) &&
			(bp->autoneg & AUTONEG_SPEED)) {

			u32 bmcr;

			bnx2_read_phy(bp, MII_BMCR, &bmcr);
			if (!(bmcr & BMCR_ANENABLE)) {
				bnx2_write_phy(bp, MII_BMCR, bmcr |
					BMCR_ANENABLE);
			}
		}
		bp->phy_flags &= ~PHY_PARALLEL_DETECT_FLAG;
		bp->link_up = 0;
	}

	/* Log only actual link transitions. */
	if (bp->link_up != link_up) {
		bnx2_report_link(bp);
	}

	bnx2_set_mac_link(bp);

	return 0;
}
963
964 static int
965 bnx2_reset_phy(struct bnx2 *bp)
966 {
967         int i;
968         u32 reg;
969
970         bnx2_write_phy(bp, MII_BMCR, BMCR_RESET);
971
972 #define PHY_RESET_MAX_WAIT 100
973         for (i = 0; i < PHY_RESET_MAX_WAIT; i++) {
974                 udelay(10);
975
976                 bnx2_read_phy(bp, MII_BMCR, &reg);
977                 if (!(reg & BMCR_RESET)) {
978                         udelay(20);
979                         break;
980                 }
981         }
982         if (i == PHY_RESET_MAX_WAIT) {
983                 return -EBUSY;
984         }
985         return 0;
986 }
987
988 static u32
989 bnx2_phy_get_pause_adv(struct bnx2 *bp)
990 {
991         u32 adv = 0;
992
993         if ((bp->req_flow_ctrl & (FLOW_CTRL_RX | FLOW_CTRL_TX)) ==
994                 (FLOW_CTRL_RX | FLOW_CTRL_TX)) {
995
996                 if (bp->phy_flags & PHY_SERDES_FLAG) {
997                         adv = ADVERTISE_1000XPAUSE;
998                 }
999                 else {
1000                         adv = ADVERTISE_PAUSE_CAP;
1001                 }
1002         }
1003         else if (bp->req_flow_ctrl & FLOW_CTRL_TX) {
1004                 if (bp->phy_flags & PHY_SERDES_FLAG) {
1005                         adv = ADVERTISE_1000XPSE_ASYM;
1006                 }
1007                 else {
1008                         adv = ADVERTISE_PAUSE_ASYM;
1009                 }
1010         }
1011         else if (bp->req_flow_ctrl & FLOW_CTRL_RX) {
1012                 if (bp->phy_flags & PHY_SERDES_FLAG) {
1013                         adv = ADVERTISE_1000XPAUSE | ADVERTISE_1000XPSE_ASYM;
1014                 }
1015                 else {
1016                         adv = ADVERTISE_PAUSE_CAP | ADVERTISE_PAUSE_ASYM;
1017                 }
1018         }
1019         return adv;
1020 }
1021
/* Set up a SerDes PHY according to the requested settings in *bp.
 * Forced-speed path: program BMCR/advertisement directly, dropping the
 * link first if anything changed so the partner notices.  Autoneg path:
 * rewrite the advertisement and restart autoneg only when needed; on the
 * 5706, also arm the SerDes autoneg timer (see comment below).
 * Always returns 0.
 */
static int
bnx2_setup_serdes_phy(struct bnx2 *bp)
{
	u32 adv, bmcr, up1;
	u32 new_adv = 0;

	if (!(bp->autoneg & AUTONEG_SPEED)) {
		u32 new_bmcr;
		int force_link_down = 0;

		/* Forced speed: 2.5G mode must be turned off on the 5708. */
		if (CHIP_NUM(bp) == CHIP_NUM_5708) {
			bnx2_read_phy(bp, BCM5708S_UP1, &up1);
			if (up1 & BCM5708S_UP1_2G5) {
				up1 &= ~BCM5708S_UP1_2G5;
				bnx2_write_phy(bp, BCM5708S_UP1, up1);
				force_link_down = 1;
			}
		}

		bnx2_read_phy(bp, MII_ADVERTISE, &adv);
		adv &= ~(ADVERTISE_1000XFULL | ADVERTISE_1000XHALF);

		/* Build the forced-mode BMCR: autoneg off, 1000 Mbps,
		 * requested duplex.
		 */
		bnx2_read_phy(bp, MII_BMCR, &bmcr);
		new_bmcr = bmcr & ~BMCR_ANENABLE;
		new_bmcr |= BMCR_SPEED1000;
		if (bp->req_duplex == DUPLEX_FULL) {
			adv |= ADVERTISE_1000XFULL;
			new_bmcr |= BMCR_FULLDPLX;
		}
		else {
			adv |= ADVERTISE_1000XHALF;
			new_bmcr &= ~BMCR_FULLDPLX;
		}
		if ((new_bmcr != bmcr) || (force_link_down)) {
			/* Force a link down visible on the other side */
			if (bp->link_up) {
				/* Advertise nothing and restart autoneg so
				 * the partner drops the link, then apply
				 * the forced mode.
				 */
				bnx2_write_phy(bp, MII_ADVERTISE, adv &
					       ~(ADVERTISE_1000XFULL |
						 ADVERTISE_1000XHALF));
				bnx2_write_phy(bp, MII_BMCR, bmcr |
					BMCR_ANRESTART | BMCR_ANENABLE);

				bp->link_up = 0;
				netif_carrier_off(bp->dev);
				bnx2_write_phy(bp, MII_BMCR, new_bmcr);
			}
			bnx2_write_phy(bp, MII_ADVERTISE, adv);
			bnx2_write_phy(bp, MII_BMCR, new_bmcr);
		}
		return 0;
	}

	/* Autoneg path.  Enable 2.5G advertisement if the PHY supports it. */
	if (bp->phy_flags & PHY_2_5G_CAPABLE_FLAG) {
		bnx2_read_phy(bp, BCM5708S_UP1, &up1);
		up1 |= BCM5708S_UP1_2G5;
		bnx2_write_phy(bp, BCM5708S_UP1, up1);
	}

	if (bp->advertising & ADVERTISED_1000baseT_Full)
		new_adv |= ADVERTISE_1000XFULL;

	new_adv |= bnx2_phy_get_pause_adv(bp);

	bnx2_read_phy(bp, MII_ADVERTISE, &adv);
	bnx2_read_phy(bp, MII_BMCR, &bmcr);

	bp->serdes_an_pending = 0;
	/* Restart autoneg only if the advertisement changed or autoneg
	 * was disabled.
	 */
	if ((adv != new_adv) || ((bmcr & BMCR_ANENABLE) == 0)) {
		/* Force a link down visible on the other side */
		if (bp->link_up) {
			int i;

			/* Loopback drops the link; hold it ~11 ms so the
			 * partner sees the drop.
			 */
			bnx2_write_phy(bp, MII_BMCR, BMCR_LOOPBACK);
			for (i = 0; i < 110; i++) {
				udelay(100);
			}
		}

		bnx2_write_phy(bp, MII_ADVERTISE, new_adv);
		bnx2_write_phy(bp, MII_BMCR, bmcr | BMCR_ANRESTART |
			BMCR_ANENABLE);
		if (CHIP_NUM(bp) == CHIP_NUM_5706) {
			/* Speed up link-up time when the link partner
			 * does not autonegotiate which is very common
			 * in blade servers. Some blade servers use
			 * IPMI for keyboard input and it's important
			 * to minimize link disruptions. Autoneg. involves
			 * exchanging base pages plus 3 next pages and
			 * normally completes in about 120 msec.
			 */
			bp->current_interval = SERDES_AN_TIMEOUT;
			bp->serdes_an_pending = 1;
			mod_timer(&bp->timer, jiffies + bp->current_interval);
		}
	}

	return 0;
}
1120
/* All fibre speeds this driver can advertise (ethtool ADVERTISED_* bits). */
#define ETHTOOL_ALL_FIBRE_SPEED						\
	(ADVERTISED_1000baseT_Full)

/* All copper speeds this driver can advertise (ethtool ADVERTISED_* bits). */
#define ETHTOOL_ALL_COPPER_SPEED					\
	(ADVERTISED_10baseT_Half | ADVERTISED_10baseT_Full |		\
	ADVERTISED_100baseT_Half | ADVERTISED_100baseT_Full |		\
	ADVERTISED_1000baseT_Full)

/* MII advertisement register bits for all 10/100 modes (plus CSMA). */
#define PHY_ALL_10_100_SPEED (ADVERTISE_10HALF | ADVERTISE_10FULL | \
	ADVERTISE_100HALF | ADVERTISE_100FULL | ADVERTISE_CSMA)

/* MII 1000BASE-T control register bits for all gigabit modes. */
#define PHY_ALL_1000_SPEED (ADVERTISE_1000HALF | ADVERTISE_1000FULL)
1133
/* Set up a copper PHY according to the requested settings in *bp.
 * With autoneg enabled: reprogram the advertisement registers and restart
 * autonegotiation only if something actually changed.  With forced
 * speed/duplex: write BMCR directly, bouncing the link via loopback first
 * so the partner sees it drop.  Always returns 0.
 */
static int
bnx2_setup_copper_phy(struct bnx2 *bp)
{
	u32 bmcr;
	u32 new_bmcr;

	bnx2_read_phy(bp, MII_BMCR, &bmcr);

	if (bp->autoneg & AUTONEG_SPEED) {
		u32 adv_reg, adv1000_reg;
		u32 new_adv_reg = 0;
		u32 new_adv1000_reg = 0;

		/* Keep only the speed and pause bits of the current
		 * advertisement for the comparison below.
		 */
		bnx2_read_phy(bp, MII_ADVERTISE, &adv_reg);
		adv_reg &= (PHY_ALL_10_100_SPEED | ADVERTISE_PAUSE_CAP |
			ADVERTISE_PAUSE_ASYM);

		bnx2_read_phy(bp, MII_CTRL1000, &adv1000_reg);
		adv1000_reg &= PHY_ALL_1000_SPEED;

		/* Translate ethtool ADVERTISED_* bits into MII bits. */
		if (bp->advertising & ADVERTISED_10baseT_Half)
			new_adv_reg |= ADVERTISE_10HALF;
		if (bp->advertising & ADVERTISED_10baseT_Full)
			new_adv_reg |= ADVERTISE_10FULL;
		if (bp->advertising & ADVERTISED_100baseT_Half)
			new_adv_reg |= ADVERTISE_100HALF;
		if (bp->advertising & ADVERTISED_100baseT_Full)
			new_adv_reg |= ADVERTISE_100FULL;
		if (bp->advertising & ADVERTISED_1000baseT_Full)
			new_adv1000_reg |= ADVERTISE_1000FULL;

		new_adv_reg |= ADVERTISE_CSMA;

		new_adv_reg |= bnx2_phy_get_pause_adv(bp);

		/* Restart autoneg only if the advertisement changed or
		 * autoneg was disabled.
		 */
		if ((adv1000_reg != new_adv1000_reg) ||
			(adv_reg != new_adv_reg) ||
			((bmcr & BMCR_ANENABLE) == 0)) {

			bnx2_write_phy(bp, MII_ADVERTISE, new_adv_reg);
			bnx2_write_phy(bp, MII_CTRL1000, new_adv1000_reg);
			bnx2_write_phy(bp, MII_BMCR, BMCR_ANRESTART |
				BMCR_ANENABLE);
		}
		else if (bp->link_up) {
			/* Flow ctrl may have changed from auto to forced */
			/* or vice-versa. */

			bnx2_resolve_flow_ctrl(bp);
			bnx2_set_mac_link(bp);
		}
		return 0;
	}

	/* Forced speed/duplex path. */
	new_bmcr = 0;
	if (bp->req_line_speed == SPEED_100) {
		new_bmcr |= BMCR_SPEED100;
	}
	if (bp->req_duplex == DUPLEX_FULL) {
		new_bmcr |= BMCR_FULLDPLX;
	}
	if (new_bmcr != bmcr) {
		u32 bmsr;
		int i = 0;

		/* Two reads: BMSR link status is latched-low. */
		bnx2_read_phy(bp, MII_BMSR, &bmsr);
		bnx2_read_phy(bp, MII_BMSR, &bmsr);

		if (bmsr & BMSR_LSTATUS) {
			/* Force link down */
			bnx2_write_phy(bp, MII_BMCR, BMCR_LOOPBACK);
			/* Poll (up to ~62 ms) until the link drops. */
			do {
				udelay(100);
				bnx2_read_phy(bp, MII_BMSR, &bmsr);
				bnx2_read_phy(bp, MII_BMSR, &bmsr);
				i++;
			} while ((bmsr & BMSR_LSTATUS) && (i < 620));
		}

		bnx2_write_phy(bp, MII_BMCR, new_bmcr);

		/* Normally, the new speed is setup after the link has
		 * gone down and up again. In some cases, link will not go
		 * down so we need to set up the new speed here.
		 */
		if (bmsr & BMSR_LSTATUS) {
			bp->line_speed = bp->req_line_speed;
			bp->duplex = bp->req_duplex;
			bnx2_resolve_flow_ctrl(bp);
			bnx2_set_mac_link(bp);
		}
	}
	return 0;
}
1228
1229 static int
1230 bnx2_setup_phy(struct bnx2 *bp)
1231 {
1232         if (bp->loopback == MAC_LOOPBACK)
1233                 return 0;
1234
1235         if (bp->phy_flags & PHY_SERDES_FLAG) {
1236                 return (bnx2_setup_serdes_phy(bp));
1237         }
1238         else {
1239                 return (bnx2_setup_copper_phy(bp));
1240         }
1241 }
1242
/* One-time init for the BCM5708 SerDes PHY: select IEEE-compliant digital
 * mode, enable fiber autodetect and PLL early detect, optionally enable
 * 2.5G, and apply board/revision-specific TX amplitude tweaks.  Always
 * returns 0.
 */
static int
bnx2_init_5708s_phy(struct bnx2 *bp)
{
	u32 val;

	/* The 5708S registers appear banked: BCM5708S_BLK_ADDR selects the
	 * block before the block-relative accesses, and is restored to the
	 * DIG block afterwards.
	 */
	bnx2_write_phy(bp, BCM5708S_BLK_ADDR, BCM5708S_BLK_ADDR_DIG3);
	bnx2_write_phy(bp, BCM5708S_DIG_3_0, BCM5708S_DIG_3_0_USE_IEEE);
	bnx2_write_phy(bp, BCM5708S_BLK_ADDR, BCM5708S_BLK_ADDR_DIG);

	bnx2_read_phy(bp, BCM5708S_1000X_CTL1, &val);
	val |= BCM5708S_1000X_CTL1_FIBER_MODE | BCM5708S_1000X_CTL1_AUTODET_EN;
	bnx2_write_phy(bp, BCM5708S_1000X_CTL1, val);

	bnx2_read_phy(bp, BCM5708S_1000X_CTL2, &val);
	val |= BCM5708S_1000X_CTL2_PLLEL_DET_EN;
	bnx2_write_phy(bp, BCM5708S_1000X_CTL2, val);

	/* Enable 2.5G operation when the PHY supports it. */
	if (bp->phy_flags & PHY_2_5G_CAPABLE_FLAG) {
		bnx2_read_phy(bp, BCM5708S_UP1, &val);
		val |= BCM5708S_UP1_2G5;
		bnx2_write_phy(bp, BCM5708S_UP1, val);
	}

	if ((CHIP_ID(bp) == CHIP_ID_5708_A0) ||
	    (CHIP_ID(bp) == CHIP_ID_5708_B0) ||
	    (CHIP_ID(bp) == CHIP_ID_5708_B1)) {
		/* increase tx signal amplitude */
		bnx2_write_phy(bp, BCM5708S_BLK_ADDR,
			       BCM5708S_BLK_ADDR_TX_MISC);
		bnx2_read_phy(bp, BCM5708S_TX_ACTL1, &val);
		val &= ~BCM5708S_TX_ACTL1_DRIVER_VCM;
		bnx2_write_phy(bp, BCM5708S_TX_ACTL1, val);
		bnx2_write_phy(bp, BCM5708S_BLK_ADDR, BCM5708S_BLK_ADDR_DIG);
	}

	/* Board-specific TX control value from shared firmware config;
	 * applied only on backplane designs.
	 */
	val = REG_RD_IND(bp, bp->shmem_base + BNX2_PORT_HW_CFG_CONFIG) &
	      BNX2_PORT_HW_CFG_CFG_TXCTL3_MASK;

	if (val) {
		u32 is_backplane;

		is_backplane = REG_RD_IND(bp, bp->shmem_base +
					  BNX2_SHARED_HW_CFG_CONFIG);
		if (is_backplane & BNX2_SHARED_HW_CFG_PHY_BACKPLANE) {
			bnx2_write_phy(bp, BCM5708S_BLK_ADDR,
				       BCM5708S_BLK_ADDR_TX_MISC);
			bnx2_write_phy(bp, BCM5708S_TX_ACTL3, val);
			bnx2_write_phy(bp, BCM5708S_BLK_ADDR,
				       BCM5708S_BLK_ADDR_DIG);
		}
	}
	return 0;
}
1296
/* One-time init for the BCM5706 SerDes PHY.  Programs vendor-specific PHY
 * registers (raw addresses 0x18 and 0x1c) differently for jumbo vs.
 * standard MTU; the magic values are Broadcom-supplied -- do not change.
 * Always returns 0.
 */
static int
bnx2_init_5706s_phy(struct bnx2 *bp)
{
	bp->phy_flags &= ~PHY_PARALLEL_DETECT_FLAG;

	if (CHIP_NUM(bp) == CHIP_NUM_5706) {
		/* Raw write to a chip register; purpose undocumented here --
		 * value comes from the vendor.
		 */
		REG_WR(bp, BNX2_MISC_UNUSED0, 0x300);
	}

	if (bp->dev->mtu > 1500) {
		u32 val;

		/* Set extended packet length bit */
		bnx2_write_phy(bp, 0x18, 0x7);
		bnx2_read_phy(bp, 0x18, &val);
		bnx2_write_phy(bp, 0x18, (val & 0xfff8) | 0x4000);

		bnx2_write_phy(bp, 0x1c, 0x6c00);
		bnx2_read_phy(bp, 0x1c, &val);
		bnx2_write_phy(bp, 0x1c, (val & 0x3ff) | 0xec02);
	}
	else {
		u32 val;

		/* Clear the extended packet length bit for standard MTU. */
		bnx2_write_phy(bp, 0x18, 0x7);
		bnx2_read_phy(bp, 0x18, &val);
		bnx2_write_phy(bp, 0x18, val & ~0x4007);

		bnx2_write_phy(bp, 0x1c, 0x6c00);
		bnx2_read_phy(bp, 0x1c, &val);
		bnx2_write_phy(bp, 0x1c, (val & 0x3fd) | 0xec00);
	}

	return 0;
}
1332
/* One-time init for a copper PHY: apply a CRC workaround write sequence,
 * set or clear the extended packet length bits based on MTU, and enable
 * ethernet@wirespeed.  The raw 0x10/0x15/0x17/0x18 register writes target
 * vendor shadow/expansion registers with Broadcom-supplied values.
 * Always returns 0.
 */
static int
bnx2_init_copper_phy(struct bnx2 *bp)
{
	u32 val;

	bp->phy_flags |= PHY_CRC_FIX_FLAG;

	/* NOTE(review): the flag is set unconditionally just above, so this
	 * test is always true here -- confirm whether it was meant to be
	 * gated on chip/PHY revision.
	 */
	if (bp->phy_flags & PHY_CRC_FIX_FLAG) {
		bnx2_write_phy(bp, 0x18, 0x0c00);
		bnx2_write_phy(bp, 0x17, 0x000a);
		bnx2_write_phy(bp, 0x15, 0x310b);
		bnx2_write_phy(bp, 0x17, 0x201f);
		bnx2_write_phy(bp, 0x15, 0x9506);
		bnx2_write_phy(bp, 0x17, 0x401f);
		bnx2_write_phy(bp, 0x15, 0x14e2);
		bnx2_write_phy(bp, 0x18, 0x0400);
	}

	if (bp->dev->mtu > 1500) {
		/* Set extended packet length bit */
		bnx2_write_phy(bp, 0x18, 0x7);
		bnx2_read_phy(bp, 0x18, &val);
		bnx2_write_phy(bp, 0x18, val | 0x4000);

		bnx2_read_phy(bp, 0x10, &val);
		bnx2_write_phy(bp, 0x10, val | 0x1);
	}
	else {
		/* Clear the extended packet length bits for standard MTU. */
		bnx2_write_phy(bp, 0x18, 0x7);
		bnx2_read_phy(bp, 0x18, &val);
		bnx2_write_phy(bp, 0x18, val & ~0x4007);

		bnx2_read_phy(bp, 0x10, &val);
		bnx2_write_phy(bp, 0x10, val & ~0x1);
	}

	/* ethernet@wirespeed */
	bnx2_write_phy(bp, 0x18, 0x7007);
	bnx2_read_phy(bp, 0x18, &val);
	bnx2_write_phy(bp, 0x18, val | (1 << 15) | (1 << 4));
	return 0;
}
1375
1376
/* Reset and initialize the PHY.  Selects link-ready interrupt mode, reads
 * the 32-bit PHY ID into bp->phy_id, runs the chip-specific init routine,
 * then applies the current link settings via bnx2_setup_phy().  Returns
 * the chip-specific init status; the reset and setup results are not
 * propagated.
 */
static int
bnx2_init_phy(struct bnx2 *bp)
{
	u32 val;
	int rc = 0;

	bp->phy_flags &= ~PHY_INT_MODE_MASK_FLAG;
	bp->phy_flags |= PHY_INT_MODE_LINK_READY_FLAG;

	/* Only link attentions are enabled from the EMAC. */
	REG_WR(bp, BNX2_EMAC_ATTENTION_ENA, BNX2_EMAC_ATTENTION_ENA_LINK);

	/* NOTE(review): the reset result is ignored; a PHY stuck in reset
	 * would go unreported here.
	 */
	bnx2_reset_phy(bp);

	/* PHY ID: high word from PHYSID1, low word from PHYSID2. */
	bnx2_read_phy(bp, MII_PHYSID1, &val);
	bp->phy_id = val << 16;
	bnx2_read_phy(bp, MII_PHYSID2, &val);
	bp->phy_id |= val & 0xffff;

	/* Chip-specific PHY initialization. */
	if (bp->phy_flags & PHY_SERDES_FLAG) {
		if (CHIP_NUM(bp) == CHIP_NUM_5706)
			rc = bnx2_init_5706s_phy(bp);
		else if (CHIP_NUM(bp) == CHIP_NUM_5708)
			rc = bnx2_init_5708s_phy(bp);
	}
	else {
		rc = bnx2_init_copper_phy(bp);
	}

	bnx2_setup_phy(bp);

	return rc;
}
1409
1410 static int
1411 bnx2_set_mac_loopback(struct bnx2 *bp)
1412 {
1413         u32 mac_mode;
1414
1415         mac_mode = REG_RD(bp, BNX2_EMAC_MODE);
1416         mac_mode &= ~BNX2_EMAC_MODE_PORT;
1417         mac_mode |= BNX2_EMAC_MODE_MAC_LOOP | BNX2_EMAC_MODE_FORCE_LINK;
1418         REG_WR(bp, BNX2_EMAC_MODE, mac_mode);
1419         bp->link_up = 1;
1420         return 0;
1421 }
1422
1423 static int bnx2_test_link(struct bnx2 *);
1424
/* Put the PHY into loopback at 1000/full and configure the EMAC to match.
 * Polls bnx2_test_link() up to 10 times for the link to come up in
 * loopback.  Returns 0 on success, or the PHY write error.
 */
static int
bnx2_set_phy_loopback(struct bnx2 *bp)
{
	u32 mac_mode;
	int rc, i;

	/* PHY register access is serialized by phy_lock. */
	spin_lock_bh(&bp->phy_lock);
	rc = bnx2_write_phy(bp, MII_BMCR, BMCR_LOOPBACK | BMCR_FULLDPLX |
			    BMCR_SPEED1000);
	spin_unlock_bh(&bp->phy_lock);
	if (rc)
		return rc;

	/* Give the PHY a moment to report link in loopback mode. */
	for (i = 0; i < 10; i++) {
		if (bnx2_test_link(bp) == 0)
			break;
		udelay(10);
	}

	/* Clear all loopback/port-mode/force bits, then select GMII. */
	mac_mode = REG_RD(bp, BNX2_EMAC_MODE);
	mac_mode &= ~(BNX2_EMAC_MODE_PORT | BNX2_EMAC_MODE_HALF_DUPLEX |
		      BNX2_EMAC_MODE_MAC_LOOP | BNX2_EMAC_MODE_FORCE_LINK |
		      BNX2_EMAC_MODE_25G);

	mac_mode |= BNX2_EMAC_MODE_PORT_GMII;
	REG_WR(bp, BNX2_EMAC_MODE, mac_mode);
	bp->link_up = 1;
	return 0;
}
1454
/* Post a message to the bootcode through the shared-memory driver mailbox
 * and wait for the firmware to acknowledge it.  A per-device sequence
 * number is folded into msg_data so stale acks are ignored.  Returns 0 on
 * ack (or unconditionally for WAIT0-type messages), -EBUSY on ack timeout
 * (after informing the firmware of the timeout), -EIO if the firmware
 * reported a non-OK status.  Sleeps between polls, so process context only.
 */
static int
bnx2_fw_sync(struct bnx2 *bp, u32 msg_data, int silent)
{
	int i;
	u32 val;

	bp->fw_wr_seq++;
	msg_data |= bp->fw_wr_seq;

	REG_WR_IND(bp, bp->shmem_base + BNX2_DRV_MB, msg_data);

	/* wait for an acknowledgement. */
	for (i = 0; i < (FW_ACK_TIME_OUT_MS / 10); i++) {
		/* Sleep ~10 ms between polls. */
		current->state = TASK_UNINTERRUPTIBLE;
		schedule_timeout(HZ / 100);

		val = REG_RD_IND(bp, bp->shmem_base + BNX2_FW_MB);

		/* Ack matches when the firmware echoes our sequence number. */
		if ((val & BNX2_FW_MSG_ACK) == (msg_data & BNX2_DRV_MSG_SEQ))
			break;
	}
	/* WAIT0 requests do not care about the ack result. */
	if ((msg_data & BNX2_DRV_MSG_DATA) == BNX2_DRV_MSG_DATA_WAIT0)
		return 0;

	/* If we timed out, inform the firmware that this is the case. */
	if ((val & BNX2_FW_MSG_ACK) != (msg_data & BNX2_DRV_MSG_SEQ)) {
		if (!silent)
			printk(KERN_ERR PFX "fw sync timeout, reset code = "
					    "%x\n", msg_data);

		msg_data &= ~BNX2_DRV_MSG_CODE;
		msg_data |= BNX2_DRV_MSG_CODE_FW_TIMEOUT;

		REG_WR_IND(bp, bp->shmem_base + BNX2_DRV_MB, msg_data);

		return -EBUSY;
	}

	if ((val & BNX2_FW_MSG_STATUS_MASK) != BNX2_FW_MSG_STATUS_OK)
		return -EIO;

	return 0;
}
1498
/* Zero out the on-chip context memory for all 96 connection IDs, iterating
 * vcid from 95 down to 0 and clearing PHY_CTX_SIZE bytes of each context
 * through the chip's context window registers.
 */
static void
bnx2_init_context(struct bnx2 *bp)
{
	u32 vcid;

	vcid = 96;
	while (vcid) {
		u32 vcid_addr, pcid_addr, offset;

		vcid--;

		if (CHIP_ID(bp) == CHIP_ID_5706_A0) {
			u32 new_vcid;

			/* 5706 A0 remaps CIDs with bit 3 set to a different
			 * physical CID range -- presumably a silicon
			 * workaround; TODO confirm against the A0 errata.
			 */
			vcid_addr = GET_PCID_ADDR(vcid);
			if (vcid & 0x8) {
				new_vcid = 0x60 + (vcid & 0xf0) + (vcid & 0x7);
			}
			else {
				new_vcid = vcid;
			}
			pcid_addr = GET_PCID_ADDR(new_vcid);
		}
		else {
			vcid_addr = GET_CID_ADDR(vcid);
			pcid_addr = vcid_addr;
		}

		/* Map the context page at virtual offset 0. */
		REG_WR(bp, BNX2_CTX_VIRT_ADDR, 0x00);
		REG_WR(bp, BNX2_CTX_PAGE_TBL, pcid_addr);

		/* Zero out the context. */
		for (offset = 0; offset < PHY_CTX_SIZE; offset += 4) {
			CTX_WR(bp, 0x00, offset, 0);
		}

		/* Restore the mapping at the context's own address. */
		REG_WR(bp, BNX2_CTX_VIRT_ADDR, vcid_addr);
		REG_WR(bp, BNX2_CTX_PAGE_TBL, pcid_addr);
	}
}
1539
/* Work around bad on-chip RX buffer memory: allocate every free mbuf from
 * the RX buffer pool, remember the good ones (bit 9 clear), then free only
 * the good ones back -- permanently retiring the bad blocks from the pool.
 * Returns 0 on success, -ENOMEM if the scratch array cannot be allocated.
 */
static int
bnx2_alloc_bad_rbuf(struct bnx2 *bp)
{
	u16 *good_mbuf;
	u32 good_mbuf_cnt;
	u32 val;

	/* Scratch array of good mbuf handles.  NOTE(review): no bounds
	 * check on good_mbuf_cnt below -- assumes the hardware pool never
	 * yields more than 512 good mbufs; confirm against the chip spec.
	 */
	good_mbuf = kmalloc(512 * sizeof(u16), GFP_KERNEL);
	if (good_mbuf == NULL) {
		printk(KERN_ERR PFX "Failed to allocate memory in "
				    "bnx2_alloc_bad_rbuf\n");
		return -ENOMEM;
	}

	REG_WR(bp, BNX2_MISC_ENABLE_SET_BITS,
		BNX2_MISC_ENABLE_SET_BITS_RX_MBUF_ENABLE);

	good_mbuf_cnt = 0;

	/* Allocate a bunch of mbufs and save the good ones in an array. */
	val = REG_RD_IND(bp, BNX2_RBUF_STATUS1);
	while (val & BNX2_RBUF_STATUS1_FREE_COUNT) {
		REG_WR_IND(bp, BNX2_RBUF_COMMAND, BNX2_RBUF_COMMAND_ALLOC_REQ);

		val = REG_RD_IND(bp, BNX2_RBUF_FW_BUF_ALLOC);

		val &= BNX2_RBUF_FW_BUF_ALLOC_VALUE;

		/* The addresses with Bit 9 set are bad memory blocks. */
		if (!(val & (1 << 9))) {
			good_mbuf[good_mbuf_cnt] = (u16) val;
			good_mbuf_cnt++;
		}

		val = REG_RD_IND(bp, BNX2_RBUF_STATUS1);
	}

	/* Free the good ones back to the mbuf pool thus discarding
	 * all the bad ones. */
	while (good_mbuf_cnt) {
		good_mbuf_cnt--;

		val = good_mbuf[good_mbuf_cnt];
		/* Encoding expected by the BNX2_RBUF_FW_BUF_FREE register. */
		val = (val << 9) | val | 1;

		REG_WR_IND(bp, BNX2_RBUF_FW_BUF_FREE, val);
	}
	kfree(good_mbuf);
	return 0;
}
1590
1591 static void
1592 bnx2_set_mac_addr(struct bnx2 *bp) 
1593 {
1594         u32 val;
1595         u8 *mac_addr = bp->dev->dev_addr;
1596
1597         val = (mac_addr[0] << 8) | mac_addr[1];
1598
1599         REG_WR(bp, BNX2_EMAC_MAC_MATCH0, val);
1600
1601         val = (mac_addr[2] << 24) | (mac_addr[3] << 16) | 
1602                 (mac_addr[4] << 8) | mac_addr[5];
1603
1604         REG_WR(bp, BNX2_EMAC_MAC_MATCH1, val);
1605 }
1606
/* Allocate an skb for RX ring slot 'index', DMA-map its data area, and
 * fill in the corresponding rx_bd with the 64-bit bus address.  Advances
 * bp->rx_prod_bseq by the buffer size.  Returns 0, or -ENOMEM if the skb
 * allocation fails.
 */
static inline int
bnx2_alloc_rx_skb(struct bnx2 *bp, u16 index)
{
	struct sk_buff *skb;
	struct sw_bd *rx_buf = &bp->rx_buf_ring[index];
	dma_addr_t mapping;
	struct rx_bd *rxbd = &bp->rx_desc_ring[RX_RING(index)][RX_IDX(index)];
	unsigned long align;

	skb = dev_alloc_skb(bp->rx_buf_size);
	if (skb == NULL) {
		return -ENOMEM;
	}

	/* Align skb->data to an 8-byte boundary. */
	if (unlikely((align = (unsigned long) skb->data & 0x7))) {
		skb_reserve(skb, 8 - align);
	}

	skb->dev = bp->dev;
	mapping = pci_map_single(bp->pdev, skb->data, bp->rx_buf_use_size,
		PCI_DMA_FROMDEVICE);

	/* Remember skb and mapping so the completion path can unmap/free. */
	rx_buf->skb = skb;
	pci_unmap_addr_set(rx_buf, mapping, mapping);

	/* Split the bus address into the descriptor's hi/lo words. */
	rxbd->rx_bd_haddr_hi = (u64) mapping >> 32;
	rxbd->rx_bd_haddr_lo = (u64) mapping & 0xffffffff;

	bp->rx_prod_bseq += bp->rx_buf_use_size;

	return 0;
}
1639
1640 static void
1641 bnx2_phy_int(struct bnx2 *bp)
1642 {
1643         u32 new_link_state, old_link_state;
1644
1645         new_link_state = bp->status_blk->status_attn_bits &
1646                 STATUS_ATTN_BITS_LINK_STATE;
1647         old_link_state = bp->status_blk->status_attn_bits_ack &
1648                 STATUS_ATTN_BITS_LINK_STATE;
1649         if (new_link_state != old_link_state) {
1650                 if (new_link_state) {
1651                         REG_WR(bp, BNX2_PCICFG_STATUS_BIT_SET_CMD,
1652                                 STATUS_ATTN_BITS_LINK_STATE);
1653                 }
1654                 else {
1655                         REG_WR(bp, BNX2_PCICFG_STATUS_BIT_CLEAR_CMD,
1656                                 STATUS_ATTN_BITS_LINK_STATE);
1657                 }
1658                 bnx2_set_link(bp);
1659         }
1660 }
1661
/* Reclaim completed TX descriptors up to the hardware consumer index in
 * the status block: unmap each skb (head plus page fragments), free it,
 * and wake the TX queue if it was stopped and enough descriptors are now
 * free.
 */
static void
bnx2_tx_int(struct bnx2 *bp)
{
	struct status_block *sblk = bp->status_blk;
	u16 hw_cons, sw_cons, sw_ring_cons;
	int tx_free_bd = 0;

	/* Skip the last index of a ring page -- presumably it holds the
	 * next-page link BD rather than a packet BD, so the hardware index
	 * must be bumped past it for sw_cons to catch up.
	 */
	hw_cons = bp->hw_tx_cons = sblk->status_tx_quick_consumer_index0;
	if ((hw_cons & MAX_TX_DESC_CNT) == MAX_TX_DESC_CNT) {
		hw_cons++;
	}
	sw_cons = bp->tx_cons;

	while (sw_cons != hw_cons) {
		struct sw_bd *tx_buf;
		struct sk_buff *skb;
		int i, last;

		sw_ring_cons = TX_RING_IDX(sw_cons);

		tx_buf = &bp->tx_buf_ring[sw_ring_cons];
		skb = tx_buf->skb;
#ifdef BCM_TSO 
		/* partial BD completions possible with TSO packets */
		if (skb_shinfo(skb)->gso_size) {
			u16 last_idx, last_ring_idx;

			/* Don't reclaim until every BD of this TSO packet
			 * (head + all frags) has been consumed by hardware.
			 */
			last_idx = sw_cons +
				skb_shinfo(skb)->nr_frags + 1;
			last_ring_idx = sw_ring_cons +
				skb_shinfo(skb)->nr_frags + 1;
			if (unlikely(last_ring_idx >= MAX_TX_DESC_CNT)) {
				last_idx++;
			}
			/* Signed comparison handles 16-bit index wrap. */
			if (((s16) ((s16) last_idx - (s16) hw_cons)) > 0) {
				break;
			}
		}
#endif
		pci_unmap_single(bp->pdev, pci_unmap_addr(tx_buf, mapping),
			skb_headlen(skb), PCI_DMA_TODEVICE);

		tx_buf->skb = NULL;
		last = skb_shinfo(skb)->nr_frags;

		/* Unmap the page fragments that follow the head BD. */
		for (i = 0; i < last; i++) {
			sw_cons = NEXT_TX_BD(sw_cons);

			pci_unmap_page(bp->pdev,
				pci_unmap_addr(
					&bp->tx_buf_ring[TX_RING_IDX(sw_cons)],
					mapping),
				skb_shinfo(skb)->frags[i].size,
				PCI_DMA_TODEVICE);
		}

		sw_cons = NEXT_TX_BD(sw_cons);

		tx_free_bd += last + 1;

		dev_kfree_skb(skb);

		/* Re-sample the hardware index; more completions may have
		 * arrived while we were unmapping.
		 */
		hw_cons = bp->hw_tx_cons =
			sblk->status_tx_quick_consumer_index0;

		if ((hw_cons & MAX_TX_DESC_CNT) == MAX_TX_DESC_CNT) {
			hw_cons++;
		}
	}

	bp->tx_cons = sw_cons;

	/* Wake the queue under tx_lock to avoid racing with the xmit path;
	 * the stopped state is re-checked after taking the lock.
	 */
	if (unlikely(netif_queue_stopped(bp->dev))) {
		spin_lock(&bp->tx_lock);
		if ((netif_queue_stopped(bp->dev)) &&
		    (bnx2_tx_avail(bp) > MAX_SKB_FRAGS)) {

			netif_wake_queue(bp->dev);
		}
		spin_unlock(&bp->tx_lock);
	}

}
1745
1746 static inline void
1747 bnx2_reuse_rx_skb(struct bnx2 *bp, struct sk_buff *skb,
1748         u16 cons, u16 prod)
1749 {
1750         struct sw_bd *cons_rx_buf, *prod_rx_buf;
1751         struct rx_bd *cons_bd, *prod_bd;
1752
1753         cons_rx_buf = &bp->rx_buf_ring[cons];
1754         prod_rx_buf = &bp->rx_buf_ring[prod];
1755
1756         pci_dma_sync_single_for_device(bp->pdev,
1757                 pci_unmap_addr(cons_rx_buf, mapping),
1758                 bp->rx_offset + RX_COPY_THRESH, PCI_DMA_FROMDEVICE);
1759
1760         bp->rx_prod_bseq += bp->rx_buf_use_size;
1761
1762         prod_rx_buf->skb = skb;
1763
1764         if (cons == prod)
1765                 return;
1766
1767         pci_unmap_addr_set(prod_rx_buf, mapping,
1768                         pci_unmap_addr(cons_rx_buf, mapping));
1769
1770         cons_bd = &bp->rx_desc_ring[RX_RING(cons)][RX_IDX(cons)];
1771         prod_bd = &bp->rx_desc_ring[RX_RING(prod)][RX_IDX(prod)];
1772         prod_bd->rx_bd_haddr_hi = cons_bd->rx_bd_haddr_hi;
1773         prod_bd->rx_bd_haddr_lo = cons_bd->rx_bd_haddr_lo;
1774 }
1775
/* Service completed RX descriptors, up to @budget packets.
 *
 * Walks the RX ring from the driver's consumer index to the hardware
 * consumer index published in the status block, passing good frames up
 * the stack and recycling buffers for bad or unallocatable ones.
 * Returns the number of packets processed (<= budget).
 */
static int
bnx2_rx_int(struct bnx2 *bp, int budget)
{
	struct status_block *sblk = bp->status_blk;
	u16 hw_cons, sw_cons, sw_ring_cons, sw_prod, sw_ring_prod;
	struct l2_fhdr *rx_hdr;
	int rx_pkt = 0;

	/* The hardware never uses an index whose low bits equal
	 * MAX_RX_DESC_CNT (that slot holds the next-page pointer), so
	 * step over it when the reported consumer lands there.
	 */
	hw_cons = bp->hw_rx_cons = sblk->status_rx_quick_consumer_index0;
	if ((hw_cons & MAX_RX_DESC_CNT) == MAX_RX_DESC_CNT) {
		hw_cons++;
	}
	sw_cons = bp->rx_cons;
	sw_prod = bp->rx_prod;

	/* Memory barrier necessary as speculative reads of the rx
	 * buffer can be ahead of the index in the status block
	 */
	rmb();
	while (sw_cons != hw_cons) {
		unsigned int len;
		u32 status;
		struct sw_bd *rx_buf;
		struct sk_buff *skb;
		dma_addr_t dma_addr;

		sw_ring_cons = RX_RING_IDX(sw_cons);
		sw_ring_prod = RX_RING_IDX(sw_prod);

		rx_buf = &bp->rx_buf_ring[sw_ring_cons];
		skb = rx_buf->skb;

		rx_buf->skb = NULL;

		dma_addr = pci_unmap_addr(rx_buf, mapping);

		/* Sync only the header area for the CPU; enough to read the
		 * l2_fhdr and decide whether to copy or pass the buffer.
		 */
		pci_dma_sync_single_for_cpu(bp->pdev, dma_addr,
			bp->rx_offset + RX_COPY_THRESH, PCI_DMA_FROMDEVICE);

		/* The chip prepends an l2_fhdr to the frame data. */
		rx_hdr = (struct l2_fhdr *) skb->data;
		/* Drop the trailing 4 bytes (presumably the Ethernet FCS
		 * included in l2_fhdr_pkt_len — TODO confirm against the
		 * chip documentation).
		 */
		len = rx_hdr->l2_fhdr_pkt_len - 4;

		if ((status = rx_hdr->l2_fhdr_status) &
			(L2_FHDR_ERRORS_BAD_CRC |
			L2_FHDR_ERRORS_PHY_DECODE |
			L2_FHDR_ERRORS_ALIGNMENT |
			L2_FHDR_ERRORS_TOO_SHORT |
			L2_FHDR_ERRORS_GIANT_FRAME)) {

			goto reuse_rx;
		}

		/* Since we don't have a jumbo ring, copy small packets
		 * if mtu > 1500
		 */
		if ((bp->dev->mtu > 1500) && (len <= RX_COPY_THRESH)) {
			struct sk_buff *new_skb;

			new_skb = dev_alloc_skb(len + 2);
			if (new_skb == NULL)
				goto reuse_rx;

			/* aligned copy */
			memcpy(new_skb->data,
				skb->data + bp->rx_offset - 2,
				len + 2);

			skb_reserve(new_skb, 2);
			skb_put(new_skb, len);
			new_skb->dev = bp->dev;

			/* Original buffer stays on the ring. */
			bnx2_reuse_rx_skb(bp, skb,
				sw_ring_cons, sw_ring_prod);

			skb = new_skb;
		}
		else if (bnx2_alloc_rx_skb(bp, sw_ring_prod) == 0) {
			/* Replacement buffer posted; unmap and take this one. */
			pci_unmap_single(bp->pdev, dma_addr,
				bp->rx_buf_use_size, PCI_DMA_FROMDEVICE);

			skb_reserve(skb, bp->rx_offset);
			skb_put(skb, len);
		}
		else {
reuse_rx:
			/* Allocation failed or frame was bad: recycle the
			 * buffer and drop the packet.
			 */
			bnx2_reuse_rx_skb(bp, skb,
				sw_ring_cons, sw_ring_prod);
			goto next_rx;
		}

		skb->protocol = eth_type_trans(skb, bp->dev);

		/* Drop oversized frames unless VLAN-tagged (0x8100). */
		if ((len > (bp->dev->mtu + ETH_HLEN)) &&
			(ntohs(skb->protocol) != 0x8100)) {

			dev_kfree_skb(skb);
			goto next_rx;

		}

		/* Report hardware checksum status for TCP/UDP frames when
		 * RX checksum offload is enabled.
		 */
		skb->ip_summed = CHECKSUM_NONE;
		if (bp->rx_csum &&
			(status & (L2_FHDR_STATUS_TCP_SEGMENT |
			L2_FHDR_STATUS_UDP_DATAGRAM))) {

			if (likely((status & (L2_FHDR_ERRORS_TCP_XSUM |
					      L2_FHDR_ERRORS_UDP_XSUM)) == 0))
				skb->ip_summed = CHECKSUM_UNNECESSARY;
		}

#ifdef BCM_VLAN
		if ((status & L2_FHDR_STATUS_L2_VLAN_TAG) && (bp->vlgrp != 0)) {
			vlan_hwaccel_receive_skb(skb, bp->vlgrp,
				rx_hdr->l2_fhdr_vlan_tag);
		}
		else
#endif
			netif_receive_skb(skb);

		bp->dev->last_rx = jiffies;
		rx_pkt++;

next_rx:
		sw_cons = NEXT_RX_BD(sw_cons);
		sw_prod = NEXT_RX_BD(sw_prod);

		if ((rx_pkt == budget))
			break;

		/* Refresh hw_cons to see if there is new work */
		if (sw_cons == hw_cons) {
			hw_cons = bp->hw_rx_cons =
				sblk->status_rx_quick_consumer_index0;
			if ((hw_cons & MAX_RX_DESC_CNT) == MAX_RX_DESC_CNT)
				hw_cons++;
			rmb();
		}
	}
	bp->rx_cons = sw_cons;
	bp->rx_prod = sw_prod;

	/* Tell the chip about the new producer index and byte sequence. */
	REG_WR16(bp, MB_RX_CID_ADDR + BNX2_L2CTX_HOST_BDIDX, sw_prod);

	REG_WR(bp, MB_RX_CID_ADDR + BNX2_L2CTX_HOST_BSEQ, bp->rx_prod_bseq);

	mmiowb();

	return rx_pkt;

}
1926
1927 #ifdef CONFIG_PCI_MSI
/* MSI ISR - The only difference between this and the INTx ISR
 * is that the MSI interrupt is always serviced: with MSI the message
 * cannot race the status block write, so no "did we raise it?" check
 * is needed before acking and scheduling NAPI.
 */
static irqreturn_t
bnx2_msi(int irq, void *dev_instance, struct pt_regs *regs)
{
	struct net_device *dev = dev_instance;
	struct bnx2 *bp = netdev_priv(dev);

	/* Warm the status block cache line before the poll routine runs. */
	prefetch(bp->status_blk);
	/* Ack and mask further interrupts until NAPI re-enables them. */
	REG_WR(bp, BNX2_PCICFG_INT_ACK_CMD,
		BNX2_PCICFG_INT_ACK_CMD_USE_INT_HC_PARAM |
		BNX2_PCICFG_INT_ACK_CMD_MASK_INT);

	/* Return here if interrupt is disabled. */
	if (unlikely(atomic_read(&bp->intr_sem) != 0))
		return IRQ_HANDLED;

	netif_rx_schedule(dev);

	return IRQ_HANDLED;
}
1950 #endif
1951
/* INTx interrupt handler.  The line may be shared, so first verify that
 * this device actually asserted the interrupt before acking it and
 * scheduling NAPI polling.
 */
static irqreturn_t
bnx2_interrupt(int irq, void *dev_instance, struct pt_regs *regs)
{
	struct net_device *dev = dev_instance;
	struct bnx2 *bp = netdev_priv(dev);

	/* When using INTx, it is possible for the interrupt to arrive
	 * at the CPU before the status block posted prior to the
	 * interrupt. Reading a register will flush the status block.
	 * When using MSI, the MSI message will always complete after
	 * the status block write.
	 */
	if ((bp->status_blk->status_idx == bp->last_status_idx) &&
	    (REG_RD(bp, BNX2_PCICFG_MISC_STATUS) &
	     BNX2_PCICFG_MISC_STATUS_INTA_VALUE))
		return IRQ_NONE;

	/* Ack and mask further interrupts until NAPI polling completes. */
	REG_WR(bp, BNX2_PCICFG_INT_ACK_CMD,
		BNX2_PCICFG_INT_ACK_CMD_USE_INT_HC_PARAM |
		BNX2_PCICFG_INT_ACK_CMD_MASK_INT);

	/* Return here if interrupt is shared and is disabled. */
	if (unlikely(atomic_read(&bp->intr_sem) != 0))
		return IRQ_HANDLED;

	netif_rx_schedule(dev);

	return IRQ_HANDLED;
}
1981
1982 static inline int
1983 bnx2_has_work(struct bnx2 *bp)
1984 {
1985         struct status_block *sblk = bp->status_blk;
1986
1987         if ((sblk->status_rx_quick_consumer_index0 != bp->hw_rx_cons) ||
1988             (sblk->status_tx_quick_consumer_index0 != bp->hw_tx_cons))
1989                 return 1;
1990
1991         if (((sblk->status_attn_bits & STATUS_ATTN_BITS_LINK_STATE) != 0) !=
1992             bp->link_up)
1993                 return 1;
1994
1995         return 0;
1996 }
1997
/* NAPI poll routine.  Handles link-attention changes, completes TX work,
 * processes up to the smaller of *budget and dev->quota RX packets, and
 * re-enables interrupts when no work remains.  Returns 1 to stay on the
 * poll list, 0 when done.
 */
static int
bnx2_poll(struct net_device *dev, int *budget)
{
	struct bnx2 *bp = netdev_priv(dev);

	/* A mismatch between the attention bits and their ack copy means
	 * an unserviced link event.
	 */
	if ((bp->status_blk->status_attn_bits &
		STATUS_ATTN_BITS_LINK_STATE) !=
		(bp->status_blk->status_attn_bits_ack &
		STATUS_ATTN_BITS_LINK_STATE)) {

		spin_lock(&bp->phy_lock);
		bnx2_phy_int(bp);
		spin_unlock(&bp->phy_lock);

		/* This is needed to take care of transient status
		 * during link changes.
		 */
		REG_WR(bp, BNX2_HC_COMMAND,
		       bp->hc_cmd | BNX2_HC_COMMAND_COAL_NOW_WO_INT);
		REG_RD(bp, BNX2_HC_COMMAND);
	}

	if (bp->status_blk->status_tx_quick_consumer_index0 != bp->hw_tx_cons)
		bnx2_tx_int(bp);

	if (bp->status_blk->status_rx_quick_consumer_index0 != bp->hw_rx_cons) {
		int orig_budget = *budget;
		int work_done;

		if (orig_budget > dev->quota)
			orig_budget = dev->quota;

		work_done = bnx2_rx_int(bp, orig_budget);
		*budget -= work_done;
		dev->quota -= work_done;
	}

	/* Record the status index before re-checking for work so a new
	 * status block update after this point re-triggers an interrupt.
	 */
	bp->last_status_idx = bp->status_blk->status_idx;
	rmb();

	if (!bnx2_has_work(bp)) {
		netif_rx_complete(dev);
		if (likely(bp->flags & USING_MSI_FLAG)) {
			REG_WR(bp, BNX2_PCICFG_INT_ACK_CMD,
			       BNX2_PCICFG_INT_ACK_CMD_INDEX_VALID |
			       bp->last_status_idx);
			return 0;
		}
		/* INTx: first write keeps interrupts masked while updating
		 * the index, the second unmasks them — NOTE(review): looks
		 * like a workaround for a race when unmasking in one shot;
		 * preserve the two-write sequence.
		 */
		REG_WR(bp, BNX2_PCICFG_INT_ACK_CMD,
		       BNX2_PCICFG_INT_ACK_CMD_INDEX_VALID |
		       BNX2_PCICFG_INT_ACK_CMD_MASK_INT |
		       bp->last_status_idx);

		REG_WR(bp, BNX2_PCICFG_INT_ACK_CMD,
		       BNX2_PCICFG_INT_ACK_CMD_INDEX_VALID |
		       bp->last_status_idx);
		return 0;
	}

	return 1;
}
2059
2060 /* Called with rtnl_lock from vlan functions and also netif_tx_lock
2061  * from set_multicast.
2062  */
2063 static void
2064 bnx2_set_rx_mode(struct net_device *dev)
2065 {
2066         struct bnx2 *bp = netdev_priv(dev);
2067         u32 rx_mode, sort_mode;
2068         int i;
2069
2070         spin_lock_bh(&bp->phy_lock);
2071
2072         rx_mode = bp->rx_mode & ~(BNX2_EMAC_RX_MODE_PROMISCUOUS |
2073                                   BNX2_EMAC_RX_MODE_KEEP_VLAN_TAG);
2074         sort_mode = 1 | BNX2_RPM_SORT_USER0_BC_EN;
2075 #ifdef BCM_VLAN
2076         if (!bp->vlgrp && !(bp->flags & ASF_ENABLE_FLAG))
2077                 rx_mode |= BNX2_EMAC_RX_MODE_KEEP_VLAN_TAG;
2078 #else
2079         if (!(bp->flags & ASF_ENABLE_FLAG))
2080                 rx_mode |= BNX2_EMAC_RX_MODE_KEEP_VLAN_TAG;
2081 #endif
2082         if (dev->flags & IFF_PROMISC) {
2083                 /* Promiscuous mode. */
2084                 rx_mode |= BNX2_EMAC_RX_MODE_PROMISCUOUS;
2085                 sort_mode |= BNX2_RPM_SORT_USER0_PROM_EN;
2086         }
2087         else if (dev->flags & IFF_ALLMULTI) {
2088                 for (i = 0; i < NUM_MC_HASH_REGISTERS; i++) {
2089                         REG_WR(bp, BNX2_EMAC_MULTICAST_HASH0 + (i * 4),
2090                                0xffffffff);
2091                 }
2092                 sort_mode |= BNX2_RPM_SORT_USER0_MC_EN;
2093         }
2094         else {
2095                 /* Accept one or more multicast(s). */
2096                 struct dev_mc_list *mclist;
2097                 u32 mc_filter[NUM_MC_HASH_REGISTERS];
2098                 u32 regidx;
2099                 u32 bit;
2100                 u32 crc;
2101
2102                 memset(mc_filter, 0, 4 * NUM_MC_HASH_REGISTERS);
2103
2104                 for (i = 0, mclist = dev->mc_list; mclist && i < dev->mc_count;
2105                      i++, mclist = mclist->next) {
2106
2107                         crc = ether_crc_le(ETH_ALEN, mclist->dmi_addr);
2108                         bit = crc & 0xff;
2109                         regidx = (bit & 0xe0) >> 5;
2110                         bit &= 0x1f;
2111                         mc_filter[regidx] |= (1 << bit);
2112                 }
2113
2114                 for (i = 0; i < NUM_MC_HASH_REGISTERS; i++) {
2115                         REG_WR(bp, BNX2_EMAC_MULTICAST_HASH0 + (i * 4),
2116                                mc_filter[i]);
2117                 }
2118
2119                 sort_mode |= BNX2_RPM_SORT_USER0_MC_HSH_EN;
2120         }
2121
2122         if (rx_mode != bp->rx_mode) {
2123                 bp->rx_mode = rx_mode;
2124                 REG_WR(bp, BNX2_EMAC_RX_MODE, rx_mode);
2125         }
2126
2127         REG_WR(bp, BNX2_RPM_SORT_USER0, 0x0);
2128         REG_WR(bp, BNX2_RPM_SORT_USER0, sort_mode);
2129         REG_WR(bp, BNX2_RPM_SORT_USER0, sort_mode | BNX2_RPM_SORT_USER0_ENA);
2130
2131         spin_unlock_bh(&bp->phy_lock);
2132 }
2133
2134 #define FW_BUF_SIZE     0x8000
2135
2136 static int
2137 bnx2_gunzip_init(struct bnx2 *bp)
2138 {
2139         if ((bp->gunzip_buf = vmalloc(FW_BUF_SIZE)) == NULL)
2140                 goto gunzip_nomem1;
2141
2142         if ((bp->strm = kmalloc(sizeof(*bp->strm), GFP_KERNEL)) == NULL)
2143                 goto gunzip_nomem2;
2144
2145         bp->strm->workspace = kmalloc(zlib_inflate_workspacesize(), GFP_KERNEL);
2146         if (bp->strm->workspace == NULL)
2147                 goto gunzip_nomem3;
2148
2149         return 0;
2150
2151 gunzip_nomem3:
2152         kfree(bp->strm);
2153         bp->strm = NULL;
2154
2155 gunzip_nomem2:
2156         vfree(bp->gunzip_buf);
2157         bp->gunzip_buf = NULL;
2158
2159 gunzip_nomem1:
2160         printk(KERN_ERR PFX "%s: Cannot allocate firmware buffer for "
2161                             "uncompression.\n", bp->dev->name);
2162         return -ENOMEM;
2163 }
2164
2165 static void
2166 bnx2_gunzip_end(struct bnx2 *bp)
2167 {
2168         kfree(bp->strm->workspace);
2169
2170         kfree(bp->strm);
2171         bp->strm = NULL;
2172
2173         if (bp->gunzip_buf) {
2174                 vfree(bp->gunzip_buf);
2175                 bp->gunzip_buf = NULL;
2176         }
2177 }
2178
2179 static int
2180 bnx2_gunzip(struct bnx2 *bp, u8 *zbuf, int len, void **outbuf, int *outlen)
2181 {
2182         int n, rc;
2183
2184         /* check gzip header */
2185         if ((zbuf[0] != 0x1f) || (zbuf[1] != 0x8b) || (zbuf[2] != Z_DEFLATED))
2186                 return -EINVAL;
2187
2188         n = 10;
2189
2190 #define FNAME   0x8
2191         if (zbuf[3] & FNAME)
2192                 while ((zbuf[n++] != 0) && (n < len));
2193
2194         bp->strm->next_in = zbuf + n;
2195         bp->strm->avail_in = len - n;
2196         bp->strm->next_out = bp->gunzip_buf;
2197         bp->strm->avail_out = FW_BUF_SIZE;
2198
2199         rc = zlib_inflateInit2(bp->strm, -MAX_WBITS);
2200         if (rc != Z_OK)
2201                 return rc;
2202
2203         rc = zlib_inflate(bp->strm, Z_FINISH);
2204
2205         *outlen = FW_BUF_SIZE - bp->strm->avail_out;
2206         *outbuf = bp->gunzip_buf;
2207
2208         if ((rc != Z_OK) && (rc != Z_STREAM_END))
2209                 printk(KERN_ERR PFX "%s: Firmware decompression error: %s\n",
2210                        bp->dev->name, bp->strm->msg);
2211
2212         zlib_inflateEnd(bp->strm);
2213
2214         if (rc == Z_STREAM_END)
2215                 return 0;
2216
2217         return rc;
2218 }
2219
2220 static void
2221 load_rv2p_fw(struct bnx2 *bp, u32 *rv2p_code, u32 rv2p_code_len,
2222         u32 rv2p_proc)
2223 {
2224         int i;
2225         u32 val;
2226
2227
2228         for (i = 0; i < rv2p_code_len; i += 8) {
2229                 REG_WR(bp, BNX2_RV2P_INSTR_HIGH, cpu_to_le32(*rv2p_code));
2230                 rv2p_code++;
2231                 REG_WR(bp, BNX2_RV2P_INSTR_LOW, cpu_to_le32(*rv2p_code));
2232                 rv2p_code++;
2233
2234                 if (rv2p_proc == RV2P_PROC1) {
2235                         val = (i / 8) | BNX2_RV2P_PROC1_ADDR_CMD_RDWR;
2236                         REG_WR(bp, BNX2_RV2P_PROC1_ADDR_CMD, val);
2237                 }
2238                 else {
2239                         val = (i / 8) | BNX2_RV2P_PROC2_ADDR_CMD_RDWR;
2240                         REG_WR(bp, BNX2_RV2P_PROC2_ADDR_CMD, val);
2241                 }
2242         }
2243
2244         /* Reset the processor, un-stall is done later. */
2245         if (rv2p_proc == RV2P_PROC1) {
2246                 REG_WR(bp, BNX2_RV2P_COMMAND, BNX2_RV2P_COMMAND_PROC1_RESET);
2247         }
2248         else {
2249                 REG_WR(bp, BNX2_RV2P_COMMAND, BNX2_RV2P_COMMAND_PROC2_RESET);
2250         }
2251 }
2252
2253 static void
2254 load_cpu_fw(struct bnx2 *bp, struct cpu_reg *cpu_reg, struct fw_info *fw)
2255 {
2256         u32 offset;
2257         u32 val;
2258
2259         /* Halt the CPU. */
2260         val = REG_RD_IND(bp, cpu_reg->mode);
2261         val |= cpu_reg->mode_value_halt;
2262         REG_WR_IND(bp, cpu_reg->mode, val);
2263         REG_WR_IND(bp, cpu_reg->state, cpu_reg->state_value_clear);
2264
2265         /* Load the Text area. */
2266         offset = cpu_reg->spad_base + (fw->text_addr - cpu_reg->mips_view_base);
2267         if (fw->text) {
2268                 int j;
2269
2270                 for (j = 0; j < (fw->text_len / 4); j++, offset += 4) {
2271                         REG_WR_IND(bp, offset, cpu_to_le32(fw->text[j]));
2272                 }
2273         }
2274
2275         /* Load the Data area. */
2276         offset = cpu_reg->spad_base + (fw->data_addr - cpu_reg->mips_view_base);
2277         if (fw->data) {
2278                 int j;
2279
2280                 for (j = 0; j < (fw->data_len / 4); j++, offset += 4) {
2281                         REG_WR_IND(bp, offset, fw->data[j]);
2282                 }
2283         }
2284
2285         /* Load the SBSS area. */
2286         offset = cpu_reg->spad_base + (fw->sbss_addr - cpu_reg->mips_view_base);
2287         if (fw->sbss) {
2288                 int j;
2289
2290                 for (j = 0; j < (fw->sbss_len / 4); j++, offset += 4) {
2291                         REG_WR_IND(bp, offset, fw->sbss[j]);
2292                 }
2293         }
2294
2295         /* Load the BSS area. */
2296         offset = cpu_reg->spad_base + (fw->bss_addr - cpu_reg->mips_view_base);
2297         if (fw->bss) {
2298                 int j;
2299
2300                 for (j = 0; j < (fw->bss_len/4); j++, offset += 4) {
2301                         REG_WR_IND(bp, offset, fw->bss[j]);
2302                 }
2303         }
2304
2305         /* Load the Read-Only area. */
2306         offset = cpu_reg->spad_base +
2307                 (fw->rodata_addr - cpu_reg->mips_view_base);
2308         if (fw->rodata) {
2309                 int j;
2310
2311                 for (j = 0; j < (fw->rodata_len / 4); j++, offset += 4) {
2312                         REG_WR_IND(bp, offset, fw->rodata[j]);
2313                 }
2314         }
2315
2316         /* Clear the pre-fetch instruction. */
2317         REG_WR_IND(bp, cpu_reg->inst, 0);
2318         REG_WR_IND(bp, cpu_reg->pc, fw->start_addr);
2319
2320         /* Start the CPU. */
2321         val = REG_RD_IND(bp, cpu_reg->mode);
2322         val &= ~cpu_reg->mode_value_halt;
2323         REG_WR_IND(bp, cpu_reg->state, cpu_reg->state_value_clear);
2324         REG_WR_IND(bp, cpu_reg->mode, val);
2325 }
2326
/* Download firmware to all on-chip processors: both RV2P engines, then
 * the RX, TX, TX patch-up, and completion CPUs.  Each section is
 * decompressed with bnx2_gunzip() into the shared gunzip buffer, so it
 * must be handed to load_*_fw() before the next bnx2_gunzip() call
 * overwrites it.  Returns 0 on success or a negative error code.
 */
static int
bnx2_init_cpus(struct bnx2 *bp)
{
	struct cpu_reg cpu_reg;
	struct fw_info fw;
	int rc = 0;
	void *text;
	u32 text_len;

	if ((rc = bnx2_gunzip_init(bp)) != 0)
		return rc;

	/* Initialize the RV2P processor. */
	rc = bnx2_gunzip(bp, bnx2_rv2p_proc1, sizeof(bnx2_rv2p_proc1), &text,
			 &text_len);
	if (rc)
		goto init_cpu_err;

	load_rv2p_fw(bp, text, text_len, RV2P_PROC1);

	rc = bnx2_gunzip(bp, bnx2_rv2p_proc2, sizeof(bnx2_rv2p_proc2), &text,
			 &text_len);
	if (rc)
		goto init_cpu_err;

	load_rv2p_fw(bp, text, text_len, RV2P_PROC2);

	/* Initialize the RX Processor. */
	cpu_reg.mode = BNX2_RXP_CPU_MODE;
	cpu_reg.mode_value_halt = BNX2_RXP_CPU_MODE_SOFT_HALT;
	cpu_reg.mode_value_sstep = BNX2_RXP_CPU_MODE_STEP_ENA;
	cpu_reg.state = BNX2_RXP_CPU_STATE;
	cpu_reg.state_value_clear = 0xffffff;
	cpu_reg.gpr0 = BNX2_RXP_CPU_REG_FILE;
	cpu_reg.evmask = BNX2_RXP_CPU_EVENT_MASK;
	cpu_reg.pc = BNX2_RXP_CPU_PROGRAM_COUNTER;
	cpu_reg.inst = BNX2_RXP_CPU_INSTRUCTION;
	cpu_reg.bp = BNX2_RXP_CPU_HW_BREAKPOINT;
	cpu_reg.spad_base = BNX2_RXP_SCRATCH;
	cpu_reg.mips_view_base = 0x8000000;

	fw.ver_major = bnx2_RXP_b06FwReleaseMajor;
	fw.ver_minor = bnx2_RXP_b06FwReleaseMinor;
	fw.ver_fix = bnx2_RXP_b06FwReleaseFix;
	fw.start_addr = bnx2_RXP_b06FwStartAddr;

	fw.text_addr = bnx2_RXP_b06FwTextAddr;
	fw.text_len = bnx2_RXP_b06FwTextLen;
	fw.text_index = 0;

	/* Only the text section is compressed; the others are used as-is. */
	rc = bnx2_gunzip(bp, bnx2_RXP_b06FwText, sizeof(bnx2_RXP_b06FwText),
			 &text, &text_len);
	if (rc)
		goto init_cpu_err;

	fw.text = text;

	fw.data_addr = bnx2_RXP_b06FwDataAddr;
	fw.data_len = bnx2_RXP_b06FwDataLen;
	fw.data_index = 0;
	fw.data = bnx2_RXP_b06FwData;

	fw.sbss_addr = bnx2_RXP_b06FwSbssAddr;
	fw.sbss_len = bnx2_RXP_b06FwSbssLen;
	fw.sbss_index = 0;
	fw.sbss = bnx2_RXP_b06FwSbss;

	fw.bss_addr = bnx2_RXP_b06FwBssAddr;
	fw.bss_len = bnx2_RXP_b06FwBssLen;
	fw.bss_index = 0;
	fw.bss = bnx2_RXP_b06FwBss;

	fw.rodata_addr = bnx2_RXP_b06FwRodataAddr;
	fw.rodata_len = bnx2_RXP_b06FwRodataLen;
	fw.rodata_index = 0;
	fw.rodata = bnx2_RXP_b06FwRodata;

	load_cpu_fw(bp, &cpu_reg, &fw);

	/* Initialize the TX Processor. */
	cpu_reg.mode = BNX2_TXP_CPU_MODE;
	cpu_reg.mode_value_halt = BNX2_TXP_CPU_MODE_SOFT_HALT;
	cpu_reg.mode_value_sstep = BNX2_TXP_CPU_MODE_STEP_ENA;
	cpu_reg.state = BNX2_TXP_CPU_STATE;
	cpu_reg.state_value_clear = 0xffffff;
	cpu_reg.gpr0 = BNX2_TXP_CPU_REG_FILE;
	cpu_reg.evmask = BNX2_TXP_CPU_EVENT_MASK;
	cpu_reg.pc = BNX2_TXP_CPU_PROGRAM_COUNTER;
	cpu_reg.inst = BNX2_TXP_CPU_INSTRUCTION;
	cpu_reg.bp = BNX2_TXP_CPU_HW_BREAKPOINT;
	cpu_reg.spad_base = BNX2_TXP_SCRATCH;
	cpu_reg.mips_view_base = 0x8000000;

	fw.ver_major = bnx2_TXP_b06FwReleaseMajor;
	fw.ver_minor = bnx2_TXP_b06FwReleaseMinor;
	fw.ver_fix = bnx2_TXP_b06FwReleaseFix;
	fw.start_addr = bnx2_TXP_b06FwStartAddr;

	fw.text_addr = bnx2_TXP_b06FwTextAddr;
	fw.text_len = bnx2_TXP_b06FwTextLen;
	fw.text_index = 0;

	rc = bnx2_gunzip(bp, bnx2_TXP_b06FwText, sizeof(bnx2_TXP_b06FwText),
			 &text, &text_len);
	if (rc)
		goto init_cpu_err;

	fw.text = text;

	fw.data_addr = bnx2_TXP_b06FwDataAddr;
	fw.data_len = bnx2_TXP_b06FwDataLen;
	fw.data_index = 0;
	fw.data = bnx2_TXP_b06FwData;

	fw.sbss_addr = bnx2_TXP_b06FwSbssAddr;
	fw.sbss_len = bnx2_TXP_b06FwSbssLen;
	fw.sbss_index = 0;
	fw.sbss = bnx2_TXP_b06FwSbss;

	fw.bss_addr = bnx2_TXP_b06FwBssAddr;
	fw.bss_len = bnx2_TXP_b06FwBssLen;
	fw.bss_index = 0;
	fw.bss = bnx2_TXP_b06FwBss;

	fw.rodata_addr = bnx2_TXP_b06FwRodataAddr;
	fw.rodata_len = bnx2_TXP_b06FwRodataLen;
	fw.rodata_index = 0;
	fw.rodata = bnx2_TXP_b06FwRodata;

	load_cpu_fw(bp, &cpu_reg, &fw);

	/* Initialize the TX Patch-up Processor. */
	cpu_reg.mode = BNX2_TPAT_CPU_MODE;
	cpu_reg.mode_value_halt = BNX2_TPAT_CPU_MODE_SOFT_HALT;
	cpu_reg.mode_value_sstep = BNX2_TPAT_CPU_MODE_STEP_ENA;
	cpu_reg.state = BNX2_TPAT_CPU_STATE;
	cpu_reg.state_value_clear = 0xffffff;
	cpu_reg.gpr0 = BNX2_TPAT_CPU_REG_FILE;
	cpu_reg.evmask = BNX2_TPAT_CPU_EVENT_MASK;
	cpu_reg.pc = BNX2_TPAT_CPU_PROGRAM_COUNTER;
	cpu_reg.inst = BNX2_TPAT_CPU_INSTRUCTION;
	cpu_reg.bp = BNX2_TPAT_CPU_HW_BREAKPOINT;
	cpu_reg.spad_base = BNX2_TPAT_SCRATCH;
	cpu_reg.mips_view_base = 0x8000000;

	fw.ver_major = bnx2_TPAT_b06FwReleaseMajor;
	fw.ver_minor = bnx2_TPAT_b06FwReleaseMinor;
	fw.ver_fix = bnx2_TPAT_b06FwReleaseFix;
	fw.start_addr = bnx2_TPAT_b06FwStartAddr;

	fw.text_addr = bnx2_TPAT_b06FwTextAddr;
	fw.text_len = bnx2_TPAT_b06FwTextLen;
	fw.text_index = 0;

	rc = bnx2_gunzip(bp, bnx2_TPAT_b06FwText, sizeof(bnx2_TPAT_b06FwText),
			 &text, &text_len);
	if (rc)
		goto init_cpu_err;

	fw.text = text;

	fw.data_addr = bnx2_TPAT_b06FwDataAddr;
	fw.data_len = bnx2_TPAT_b06FwDataLen;
	fw.data_index = 0;
	fw.data = bnx2_TPAT_b06FwData;

	fw.sbss_addr = bnx2_TPAT_b06FwSbssAddr;
	fw.sbss_len = bnx2_TPAT_b06FwSbssLen;
	fw.sbss_index = 0;
	fw.sbss = bnx2_TPAT_b06FwSbss;

	fw.bss_addr = bnx2_TPAT_b06FwBssAddr;
	fw.bss_len = bnx2_TPAT_b06FwBssLen;
	fw.bss_index = 0;
	fw.bss = bnx2_TPAT_b06FwBss;

	fw.rodata_addr = bnx2_TPAT_b06FwRodataAddr;
	fw.rodata_len = bnx2_TPAT_b06FwRodataLen;
	fw.rodata_index = 0;
	fw.rodata = bnx2_TPAT_b06FwRodata;

	load_cpu_fw(bp, &cpu_reg, &fw);

	/* Initialize the Completion Processor. */
	cpu_reg.mode = BNX2_COM_CPU_MODE;
	cpu_reg.mode_value_halt = BNX2_COM_CPU_MODE_SOFT_HALT;
	cpu_reg.mode_value_sstep = BNX2_COM_CPU_MODE_STEP_ENA;
	cpu_reg.state = BNX2_COM_CPU_STATE;
	cpu_reg.state_value_clear = 0xffffff;
	cpu_reg.gpr0 = BNX2_COM_CPU_REG_FILE;
	cpu_reg.evmask = BNX2_COM_CPU_EVENT_MASK;
	cpu_reg.pc = BNX2_COM_CPU_PROGRAM_COUNTER;
	cpu_reg.inst = BNX2_COM_CPU_INSTRUCTION;
	cpu_reg.bp = BNX2_COM_CPU_HW_BREAKPOINT;
	cpu_reg.spad_base = BNX2_COM_SCRATCH;
	cpu_reg.mips_view_base = 0x8000000;

	fw.ver_major = bnx2_COM_b06FwReleaseMajor;
	fw.ver_minor = bnx2_COM_b06FwReleaseMinor;
	fw.ver_fix = bnx2_COM_b06FwReleaseFix;
	fw.start_addr = bnx2_COM_b06FwStartAddr;

	fw.text_addr = bnx2_COM_b06FwTextAddr;
	fw.text_len = bnx2_COM_b06FwTextLen;
	fw.text_index = 0;

	rc = bnx2_gunzip(bp, bnx2_COM_b06FwText, sizeof(bnx2_COM_b06FwText),
			 &text, &text_len);
	if (rc)
		goto init_cpu_err;

	fw.text = text;

	fw.data_addr = bnx2_COM_b06FwDataAddr;
	fw.data_len = bnx2_COM_b06FwDataLen;
	fw.data_index = 0;
	fw.data = bnx2_COM_b06FwData;

	fw.sbss_addr = bnx2_COM_b06FwSbssAddr;
	fw.sbss_len = bnx2_COM_b06FwSbssLen;
	fw.sbss_index = 0;
	fw.sbss = bnx2_COM_b06FwSbss;

	fw.bss_addr = bnx2_COM_b06FwBssAddr;
	fw.bss_len = bnx2_COM_b06FwBssLen;
	fw.bss_index = 0;
	fw.bss = bnx2_COM_b06FwBss;

	fw.rodata_addr = bnx2_COM_b06FwRodataAddr;
	fw.rodata_len = bnx2_COM_b06FwRodataLen;
	fw.rodata_index = 0;
	fw.rodata = bnx2_COM_b06FwRodata;

	load_cpu_fw(bp, &cpu_reg, &fw);

init_cpu_err:
	bnx2_gunzip_end(bp);
	return rc;
}
2566
/* Transition the chip between PCI power states.
 *
 * @bp:    driver private state
 * @state: target state; only PCI_D0 and PCI_D3hot are supported
 *
 * Returns 0 on success, -EINVAL for any unsupported power state.
 *
 * NOTE(review): the register write ordering below follows the hardware
 * wake/WOL programming sequence and must not be reordered.
 */
static int
bnx2_set_power_state(struct bnx2 *bp, pci_power_t state)
{
	u16 pmcsr;

	pci_read_config_word(bp->pdev, bp->pm_cap + PCI_PM_CTRL, &pmcsr);

	switch (state) {
	case PCI_D0: {
		u32 val;

		/* Clear the power state field (back to D0) and ack any
		 * pending PME status. */
		pci_write_config_word(bp->pdev, bp->pm_cap + PCI_PM_CTRL,
			(pmcsr & ~PCI_PM_CTRL_STATE_MASK) |
			PCI_PM_CTRL_PME_STATUS);

		if (pmcsr & PCI_PM_CTRL_STATE_MASK) {
			/* delay required during transition out of D3hot */
			current->state = TASK_UNINTERRUPTIBLE;
			schedule_timeout(HZ / 50);
		}

		/* Ack magic/ACPI packet indications and turn magic packet
		 * detection off now that we are awake. */
		val = REG_RD(bp, BNX2_EMAC_MODE);
		val |= BNX2_EMAC_MODE_MPKT_RCVD | BNX2_EMAC_MODE_ACPI_RCVD;
		val &= ~BNX2_EMAC_MODE_MPKT;
		REG_WR(bp, BNX2_EMAC_MODE, val);

		/* Disable ACPI pattern matching in the receive path. */
		val = REG_RD(bp, BNX2_RPM_CONFIG);
		val &= ~BNX2_RPM_CONFIG_ACPI_ENA;
		REG_WR(bp, BNX2_RPM_CONFIG, val);
		break;
	}
	case PCI_D3hot: {
		int i;
		u32 val, wol_msg;

		if (bp->wol) {
			u32 advertising;
			u8 autoneg;

			/* Temporarily force 10/100 autoneg so the link can
			 * be kept up at low power; the user's settings are
			 * restored afterwards. */
			autoneg = bp->autoneg;
			advertising = bp->advertising;

			bp->autoneg = AUTONEG_SPEED;
			bp->advertising = ADVERTISED_10baseT_Half |
				ADVERTISED_10baseT_Full |
				ADVERTISED_100baseT_Half |
				ADVERTISED_100baseT_Full |
				ADVERTISED_Autoneg;

			bnx2_setup_copper_phy(bp);

			bp->autoneg = autoneg;
			bp->advertising = advertising;

			bnx2_set_mac_addr(bp);

			val = REG_RD(bp, BNX2_EMAC_MODE);

			/* Enable port mode. */
			val &= ~BNX2_EMAC_MODE_PORT;
			val |= BNX2_EMAC_MODE_PORT_MII |
			       BNX2_EMAC_MODE_MPKT_RCVD |
			       BNX2_EMAC_MODE_ACPI_RCVD |
			       BNX2_EMAC_MODE_MPKT;

			REG_WR(bp, BNX2_EMAC_MODE, val);

			/* receive all multicast */
			for (i = 0; i < NUM_MC_HASH_REGISTERS; i++) {
				REG_WR(bp, BNX2_EMAC_MULTICAST_HASH0 + (i * 4),
				       0xffffffff);
			}
			REG_WR(bp, BNX2_EMAC_RX_MODE,
			       BNX2_EMAC_RX_MODE_SORT_MODE);

			/* Program sort user 0: clear first, then the value,
			 * then the value with the enable bit. */
			val = 1 | BNX2_RPM_SORT_USER0_BC_EN |
			      BNX2_RPM_SORT_USER0_MC_EN;
			REG_WR(bp, BNX2_RPM_SORT_USER0, 0x0);
			REG_WR(bp, BNX2_RPM_SORT_USER0, val);
			REG_WR(bp, BNX2_RPM_SORT_USER0, val |
			       BNX2_RPM_SORT_USER0_ENA);

			/* Need to enable EMAC and RPM for WOL. */
			REG_WR(bp, BNX2_MISC_ENABLE_SET_BITS,
			       BNX2_MISC_ENABLE_SET_BITS_RX_PARSER_MAC_ENABLE |
			       BNX2_MISC_ENABLE_SET_BITS_TX_HEADER_Q_ENABLE |
			       BNX2_MISC_ENABLE_SET_BITS_EMAC_ENABLE);

			val = REG_RD(bp, BNX2_RPM_CONFIG);
			val &= ~BNX2_RPM_CONFIG_ACPI_ENA;
			REG_WR(bp, BNX2_RPM_CONFIG, val);

			wol_msg = BNX2_DRV_MSG_CODE_SUSPEND_WOL;
		}
		else {
			wol_msg = BNX2_DRV_MSG_CODE_SUSPEND_NO_WOL;
		}

		/* Tell the firmware we are suspending, unless this board
		 * does not support WOL handshaking. */
		if (!(bp->flags & NO_WOL_FLAG))
			bnx2_fw_sync(bp, BNX2_DRV_MSG_DATA_WAIT3 | wol_msg, 0);

		pmcsr &= ~PCI_PM_CTRL_STATE_MASK;
		if ((CHIP_ID(bp) == CHIP_ID_5706_A0) ||
		    (CHIP_ID(bp) == CHIP_ID_5706_A1)) {

			/* 5706 A0/A1: only enter D3hot when WOL is enabled;
			 * otherwise leave the state bits at D0. */
			if (bp->wol)
				pmcsr |= 3;
		}
		else {
			pmcsr |= 3;	/* D3hot */
		}
		if (bp->wol) {
			pmcsr |= PCI_PM_CTRL_PME_ENABLE;
		}
		pci_write_config_word(bp->pdev, bp->pm_cap + PCI_PM_CTRL,
				      pmcsr);

		/* No more memory access after this point until
		 * device is brought back to D0.
		 */
		udelay(50);
		break;
	}
	default:
		return -EINVAL;
	}
	return 0;
}
2695
2696 static int
2697 bnx2_acquire_nvram_lock(struct bnx2 *bp)
2698 {
2699         u32 val;
2700         int j;
2701
2702         /* Request access to the flash interface. */
2703         REG_WR(bp, BNX2_NVM_SW_ARB, BNX2_NVM_SW_ARB_ARB_REQ_SET2);
2704         for (j = 0; j < NVRAM_TIMEOUT_COUNT; j++) {
2705                 val = REG_RD(bp, BNX2_NVM_SW_ARB);
2706                 if (val & BNX2_NVM_SW_ARB_ARB_ARB2)
2707                         break;
2708
2709                 udelay(5);
2710         }
2711
2712         if (j >= NVRAM_TIMEOUT_COUNT)
2713                 return -EBUSY;
2714
2715         return 0;
2716 }
2717
2718 static int
2719 bnx2_release_nvram_lock(struct bnx2 *bp)
2720 {
2721         int j;
2722         u32 val;
2723
2724         /* Relinquish nvram interface. */
2725         REG_WR(bp, BNX2_NVM_SW_ARB, BNX2_NVM_SW_ARB_ARB_REQ_CLR2);
2726
2727         for (j = 0; j < NVRAM_TIMEOUT_COUNT; j++) {
2728                 val = REG_RD(bp, BNX2_NVM_SW_ARB);
2729                 if (!(val & BNX2_NVM_SW_ARB_ARB_ARB2))
2730                         break;
2731
2732                 udelay(5);
2733         }
2734
2735         if (j >= NVRAM_TIMEOUT_COUNT)
2736                 return -EBUSY;
2737
2738         return 0;
2739 }
2740
2741
2742 static int
2743 bnx2_enable_nvram_write(struct bnx2 *bp)
2744 {
2745         u32 val;
2746
2747         val = REG_RD(bp, BNX2_MISC_CFG);
2748         REG_WR(bp, BNX2_MISC_CFG, val | BNX2_MISC_CFG_NVM_WR_EN_PCI);
2749
2750         if (!bp->flash_info->buffered) {
2751                 int j;
2752
2753                 REG_WR(bp, BNX2_NVM_COMMAND, BNX2_NVM_COMMAND_DONE);
2754                 REG_WR(bp, BNX2_NVM_COMMAND,
2755                        BNX2_NVM_COMMAND_WREN | BNX2_NVM_COMMAND_DOIT);
2756
2757                 for (j = 0; j < NVRAM_TIMEOUT_COUNT; j++) {
2758                         udelay(5);
2759
2760                         val = REG_RD(bp, BNX2_NVM_COMMAND);
2761                         if (val & BNX2_NVM_COMMAND_DONE)
2762                                 break;
2763                 }
2764
2765                 if (j >= NVRAM_TIMEOUT_COUNT)
2766                         return -EBUSY;
2767         }
2768         return 0;
2769 }
2770
2771 static void
2772 bnx2_disable_nvram_write(struct bnx2 *bp)
2773 {
2774         u32 val;
2775
2776         val = REG_RD(bp, BNX2_MISC_CFG);
2777         REG_WR(bp, BNX2_MISC_CFG, val & ~BNX2_MISC_CFG_NVM_WR_EN);
2778 }
2779
2780
2781 static void
2782 bnx2_enable_nvram_access(struct bnx2 *bp)
2783 {
2784         u32 val;
2785
2786         val = REG_RD(bp, BNX2_NVM_ACCESS_ENABLE);
2787         /* Enable both bits, even on read. */
2788         REG_WR(bp, BNX2_NVM_ACCESS_ENABLE, 
2789                val | BNX2_NVM_ACCESS_ENABLE_EN | BNX2_NVM_ACCESS_ENABLE_WR_EN);
2790 }
2791
2792 static void
2793 bnx2_disable_nvram_access(struct bnx2 *bp)
2794 {
2795         u32 val;
2796
2797         val = REG_RD(bp, BNX2_NVM_ACCESS_ENABLE);
2798         /* Disable both bits, even after read. */
2799         REG_WR(bp, BNX2_NVM_ACCESS_ENABLE, 
2800                 val & ~(BNX2_NVM_ACCESS_ENABLE_EN |
2801                         BNX2_NVM_ACCESS_ENABLE_WR_EN));
2802 }
2803
2804 static int
2805 bnx2_nvram_erase_page(struct bnx2 *bp, u32 offset)
2806 {
2807         u32 cmd;
2808         int j;
2809
2810         if (bp->flash_info->buffered)
2811                 /* Buffered flash, no erase needed */
2812                 return 0;
2813
2814         /* Build an erase command */
2815         cmd = BNX2_NVM_COMMAND_ERASE | BNX2_NVM_COMMAND_WR |
2816               BNX2_NVM_COMMAND_DOIT;
2817
2818         /* Need to clear DONE bit separately. */
2819         REG_WR(bp, BNX2_NVM_COMMAND, BNX2_NVM_COMMAND_DONE);
2820
2821         /* Address of the NVRAM to read from. */
2822         REG_WR(bp, BNX2_NVM_ADDR, offset & BNX2_NVM_ADDR_NVM_ADDR_VALUE);
2823
2824         /* Issue an erase command. */
2825         REG_WR(bp, BNX2_NVM_COMMAND, cmd);
2826
2827         /* Wait for completion. */
2828         for (j = 0; j < NVRAM_TIMEOUT_COUNT; j++) {
2829                 u32 val;
2830
2831                 udelay(5);
2832
2833                 val = REG_RD(bp, BNX2_NVM_COMMAND);
2834                 if (val & BNX2_NVM_COMMAND_DONE)
2835                         break;
2836         }
2837
2838         if (j >= NVRAM_TIMEOUT_COUNT)
2839                 return -EBUSY;
2840
2841         return 0;
2842 }
2843
2844 static int
2845 bnx2_nvram_read_dword(struct bnx2 *bp, u32 offset, u8 *ret_val, u32 cmd_flags)
2846 {
2847         u32 cmd;
2848         int j;
2849
2850         /* Build the command word. */
2851         cmd = BNX2_NVM_COMMAND_DOIT | cmd_flags;
2852
2853         /* Calculate an offset of a buffered flash. */
2854         if (bp->flash_info->buffered) {
2855                 offset = ((offset / bp->flash_info->page_size) <<
2856                            bp->flash_info->page_bits) +
2857                           (offset % bp->flash_info->page_size);
2858         }
2859
2860         /* Need to clear DONE bit separately. */
2861         REG_WR(bp, BNX2_NVM_COMMAND, BNX2_NVM_COMMAND_DONE);
2862
2863         /* Address of the NVRAM to read from. */
2864         REG_WR(bp, BNX2_NVM_ADDR, offset & BNX2_NVM_ADDR_NVM_ADDR_VALUE);
2865
2866         /* Issue a read command. */
2867         REG_WR(bp, BNX2_NVM_COMMAND, cmd);
2868
2869         /* Wait for completion. */
2870         for (j = 0; j < NVRAM_TIMEOUT_COUNT; j++) {
2871                 u32 val;
2872
2873                 udelay(5);
2874
2875                 val = REG_RD(bp, BNX2_NVM_COMMAND);
2876                 if (val & BNX2_NVM_COMMAND_DONE) {
2877                         val = REG_RD(bp, BNX2_NVM_READ);
2878
2879                         val = be32_to_cpu(val);
2880                         memcpy(ret_val, &val, 4);
2881                         break;
2882                 }
2883         }
2884         if (j >= NVRAM_TIMEOUT_COUNT)
2885                 return -EBUSY;
2886
2887         return 0;
2888 }
2889
2890
2891 static int
2892 bnx2_nvram_write_dword(struct bnx2 *bp, u32 offset, u8 *val, u32 cmd_flags)
2893 {
2894         u32 cmd, val32;
2895         int j;
2896
2897         /* Build the command word. */
2898         cmd = BNX2_NVM_COMMAND_DOIT | BNX2_NVM_COMMAND_WR | cmd_flags;
2899
2900         /* Calculate an offset of a buffered flash. */
2901         if (bp->flash_info->buffered) {
2902                 offset = ((offset / bp->flash_info->page_size) <<
2903                           bp->flash_info->page_bits) +
2904                          (offset % bp->flash_info->page_size);
2905         }
2906
2907         /* Need to clear DONE bit separately. */
2908         REG_WR(bp, BNX2_NVM_COMMAND, BNX2_NVM_COMMAND_DONE);
2909
2910         memcpy(&val32, val, 4);
2911         val32 = cpu_to_be32(val32);
2912
2913         /* Write the data. */
2914         REG_WR(bp, BNX2_NVM_WRITE, val32);
2915
2916         /* Address of the NVRAM to write to. */
2917         REG_WR(bp, BNX2_NVM_ADDR, offset & BNX2_NVM_ADDR_NVM_ADDR_VALUE);
2918
2919         /* Issue the write command. */
2920         REG_WR(bp, BNX2_NVM_COMMAND, cmd);
2921
2922         /* Wait for completion. */
2923         for (j = 0; j < NVRAM_TIMEOUT_COUNT; j++) {
2924                 udelay(5);
2925
2926                 if (REG_RD(bp, BNX2_NVM_COMMAND) & BNX2_NVM_COMMAND_DONE)
2927                         break;
2928         }
2929         if (j >= NVRAM_TIMEOUT_COUNT)
2930                 return -EBUSY;
2931
2932         return 0;
2933 }
2934
/* Identify the flash/EEPROM device on the NVRAM interface and record
 * its parameters in bp->flash_info and its size in bp->flash_size.
 *
 * Returns 0 on success, -ENODEV if no flash_table entry matches the
 * strapping, or an error from bnx2_acquire_nvram_lock().
 */
static int
bnx2_init_nvram(struct bnx2 *bp)
{
	u32 val;
	int j, entry_count, rc;
	struct flash_spec *flash;

	/* Determine the selected interface. */
	val = REG_RD(bp, BNX2_NVM_CFG1);

	entry_count = sizeof(flash_table) / sizeof(struct flash_spec);

	rc = 0;
	if (val & 0x40000000) {
		/* Flash interface has been reconfigured */
		for (j = 0, flash = &flash_table[0]; j < entry_count;
		     j++, flash++) {
			/* Match on the backup-strap bits of the already
			 * programmed config1 value. */
			if ((val & FLASH_BACKUP_STRAP_MASK) ==
			    (flash->config1 & FLASH_BACKUP_STRAP_MASK)) {
				bp->flash_info = flash;
				break;
			}
		}
	}
	else {
		u32 mask;
		/* Not yet been reconfigured */

		/* Bit 23 selects which strap field to decode.
		 * NOTE(review): presumably indicates the backup straps are
		 * in use — confirm against the 5706/5708 programming guide. */
		if (val & (1 << 23))
			mask = FLASH_BACKUP_STRAP_MASK;
		else
			mask = FLASH_STRAP_MASK;

		for (j = 0, flash = &flash_table[0]; j < entry_count;
			j++, flash++) {

			if ((val & mask) == (flash->strapping & mask)) {
				bp->flash_info = flash;

				/* Request access to the flash interface. */
				if ((rc = bnx2_acquire_nvram_lock(bp)) != 0)
					return rc;

				/* Enable access to flash interface */
				bnx2_enable_nvram_access(bp);

				/* Reconfigure the flash interface */
				REG_WR(bp, BNX2_NVM_CFG1, flash->config1);
				REG_WR(bp, BNX2_NVM_CFG2, flash->config2);
				REG_WR(bp, BNX2_NVM_CFG3, flash->config3);
				REG_WR(bp, BNX2_NVM_WRITE1, flash->write1);

				/* Disable access to flash interface */
				bnx2_disable_nvram_access(bp);
				bnx2_release_nvram_lock(bp);

				break;
			}
		}
	} /* if (val & 0x40000000) */

	if (j == entry_count) {
		/* Neither loop matched a known device. */
		bp->flash_info = NULL;
		printk(KERN_ALERT PFX "Unknown flash/EEPROM type.\n");
		return -ENODEV;
	}

	/* Prefer the NVRAM size reported by firmware in shared memory;
	 * fall back to the flash table's total_size. */
	val = REG_RD_IND(bp, bp->shmem_base + BNX2_SHARED_HW_CFG_CONFIG2);
	val &= BNX2_SHARED_HW_CFG2_NVM_SIZE_MASK;
	if (val)
		bp->flash_size = val;
	else
		bp->flash_size = bp->flash_info->total_size;

	return rc;
}
3011
3012 static int
3013 bnx2_nvram_read(struct bnx2 *bp, u32 offset, u8 *ret_buf,
3014                 int buf_size)
3015 {
3016         int rc = 0;
3017         u32 cmd_flags, offset32, len32, extra;
3018
3019         if (buf_size == 0)
3020                 return 0;
3021
3022         /* Request access to the flash interface. */
3023         if ((rc = bnx2_acquire_nvram_lock(bp)) != 0)
3024                 return rc;
3025
3026         /* Enable access to flash interface */
3027         bnx2_enable_nvram_access(bp);
3028
3029         len32 = buf_size;
3030         offset32 = offset;
3031         extra = 0;
3032
3033         cmd_flags = 0;
3034
3035         if (offset32 & 3) {
3036                 u8 buf[4];
3037                 u32 pre_len;
3038
3039                 offset32 &= ~3;
3040                 pre_len = 4 - (offset & 3);
3041
3042                 if (pre_len >= len32) {
3043                         pre_len = len32;
3044                         cmd_flags = BNX2_NVM_COMMAND_FIRST |
3045                                     BNX2_NVM_COMMAND_LAST;
3046                 }
3047                 else {
3048                         cmd_flags = BNX2_NVM_COMMAND_FIRST;
3049                 }
3050
3051                 rc = bnx2_nvram_read_dword(bp, offset32, buf, cmd_flags);
3052
3053                 if (rc)
3054                         return rc;
3055
3056                 memcpy(ret_buf, buf + (offset & 3), pre_len);
3057
3058                 offset32 += 4;
3059                 ret_buf += pre_len;
3060                 len32 -= pre_len;
3061         }
3062         if (len32 & 3) {
3063                 extra = 4 - (len32 & 3);
3064                 len32 = (len32 + 4) & ~3;
3065         }
3066
3067         if (len32 == 4) {
3068                 u8 buf[4];
3069
3070                 if (cmd_flags)
3071                         cmd_flags = BNX2_NVM_COMMAND_LAST;
3072                 else
3073                         cmd_flags = BNX2_NVM_COMMAND_FIRST |
3074                                     BNX2_NVM_COMMAND_LAST;
3075
3076                 rc = bnx2_nvram_read_dword(bp, offset32, buf, cmd_flags);
3077
3078                 memcpy(ret_buf, buf, 4 - extra);
3079         }
3080         else if (len32 > 0) {
3081                 u8 buf[4];
3082
3083                 /* Read the first word. */
3084                 if (cmd_flags)
3085                         cmd_flags = 0;
3086                 else
3087                         cmd_flags = BNX2_NVM_COMMAND_FIRST;
3088
3089                 rc = bnx2_nvram_read_dword(bp, offset32, ret_buf, cmd_flags);
3090
3091                 /* Advance to the next dword. */
3092                 offset32 += 4;
3093                 ret_buf += 4;
3094                 len32 -= 4;
3095
3096                 while (len32 > 4 && rc == 0) {
3097                         rc = bnx2_nvram_read_dword(bp, offset32, ret_buf, 0);
3098
3099                         /* Advance to the next dword. */
3100                         offset32 += 4;
3101                         ret_buf += 4;
3102                         len32 -= 4;
3103                 }
3104
3105                 if (rc)
3106                         return rc;
3107
3108                 cmd_flags = BNX2_NVM_COMMAND_LAST;
3109                 rc = bnx2_nvram_read_dword(bp, offset32, buf, cmd_flags);
3110
3111                 memcpy(ret_buf, buf, 4 - extra);
3112         }
3113
3114         /* Disable access to flash interface */
3115         bnx2_disable_nvram_access(bp);
3116
3117         bnx2_release_nvram_lock(bp);
3118
3119         return rc;
3120 }
3121
3122 static int
3123 bnx2_nvram_write(struct bnx2 *bp, u32 offset, u8 *data_buf,
3124                 int buf_size)
3125 {
3126         u32 written, offset32, len32;
3127         u8 *buf, start[4], end[4], *flash_buffer = NULL;
3128         int rc = 0;
3129         int align_start, align_end;
3130
3131         buf = data_buf;
3132         offset32 = offset;
3133         len32 = buf_size;
3134         align_start = align_end = 0;
3135
3136         if ((align_start = (offset32 & 3))) {
3137                 offset32 &= ~3;
3138                 len32 += align_start;
3139                 if ((rc = bnx2_nvram_read(bp, offset32, start, 4)))
3140                         return rc;
3141         }
3142
3143         if (len32 & 3) {
3144                 if ((len32 > 4) || !align_start) {
3145                         align_end = 4 - (len32 & 3);
3146                         len32 += align_end;
3147                         if ((rc = bnx2_nvram_read(bp, offset32 + len32 - 4,
3148                                 end, 4))) {
3149                                 return rc;
3150                         }
3151                 }
3152         }
3153
3154         if (align_start || align_end) {
3155                 buf = kmalloc(len32, GFP_KERNEL);
3156                 if (buf == 0)
3157                         return -ENOMEM;
3158                 if (align_start) {
3159                         memcpy(buf, start, 4);
3160                 }
3161                 if (align_end) {
3162                         memcpy(buf + len32 - 4, end, 4);
3163                 }
3164                 memcpy(buf + align_start, data_buf, buf_size);
3165         }
3166
3167         if (bp->flash_info->buffered == 0) {
3168                 flash_buffer = kmalloc(264, GFP_KERNEL);
3169                 if (flash_buffer == NULL) {
3170                         rc = -ENOMEM;
3171                         goto nvram_write_end;
3172                 }
3173         }
3174
3175         written = 0;
3176         while ((written < len32) && (rc == 0)) {
3177                 u32 page_start, page_end, data_start, data_end;
3178                 u32 addr, cmd_flags;
3179                 int i;
3180
3181                 /* Find the page_start addr */
3182                 page_start = offset32 + written;
3183                 page_start -= (page_start % bp->flash_info->page_size);
3184                 /* Find the page_end addr */
3185                 page_end = page_start + bp->flash_info->page_size;
3186                 /* Find the data_start addr */
3187                 data_start = (written == 0) ? offset32 : page_start;
3188                 /* Find the data_end addr */
3189                 data_end = (page_end > offset32 + len32) ? 
3190                         (offset32 + len32) : page_end;
3191
3192                 /* Request access to the flash interface. */
3193                 if ((rc = bnx2_acquire_nvram_lock(bp)) != 0)
3194                         goto nvram_write_end;
3195
3196                 /* Enable access to flash interface */
3197                 bnx2_enable_nvram_access(bp);
3198
3199                 cmd_flags = BNX2_NVM_COMMAND_FIRST;
3200                 if (bp->flash_info->buffered == 0) {
3201                         int j;
3202
3203                         /* Read the whole page into the buffer
3204                          * (non-buffer flash only) */
3205                         for (j = 0; j < bp->flash_info->page_size; j += 4) {
3206                                 if (j == (bp->flash_info->page_size - 4)) {
3207                                         cmd_flags |= BNX2_NVM_COMMAND_LAST;
3208                                 }
3209                                 rc = bnx2_nvram_read_dword(bp,
3210                                         page_start + j, 
3211                                         &flash_buffer[j], 
3212                                         cmd_flags);
3213
3214                                 if (rc)
3215                                         goto nvram_write_end;
3216
3217                                 cmd_flags = 0;
3218                         }
3219                 }
3220
3221                 /* Enable writes to flash interface (unlock write-protect) */
3222                 if ((rc = bnx2_enable_nvram_write(bp)) != 0)
3223                         goto nvram_write_end;
3224
3225                 /* Erase the page */
3226                 if ((rc = bnx2_nvram_erase_page(bp, page_start)) != 0)
3227                         goto nvram_write_end;
3228
3229                 /* Re-enable the write again for the actual write */
3230                 bnx2_enable_nvram_write(bp);
3231
3232                 /* Loop to write back the buffer data from page_start to
3233                  * data_start */
3234                 i = 0;
3235                 if (bp->flash_info->buffered == 0) {
3236                         for (addr = page_start; addr < data_start;
3237                                 addr += 4, i += 4) {
3238                                 
3239                                 rc = bnx2_nvram_write_dword(bp, addr,
3240                                         &flash_buffer[i], cmd_flags);
3241
3242                                 if (rc != 0)
3243                                         goto nvram_write_end;
3244
3245                                 cmd_flags = 0;
3246                         }
3247                 }
3248
3249                 /* Loop to write the new data from data_start to data_end */
3250                 for (addr = data_start; addr < data_end; addr += 4, i += 4) {
3251                         if ((addr == page_end - 4) ||
3252                                 ((bp->flash_info->buffered) &&
3253                                  (addr == data_end - 4))) {
3254
3255                                 cmd_flags |= BNX2_NVM_COMMAND_LAST;
3256                         }
3257                         rc = bnx2_nvram_write_dword(bp, addr, buf,
3258                                 cmd_flags);
3259
3260                         if (rc != 0)
3261                                 goto nvram_write_end;
3262
3263                         cmd_flags = 0;
3264                         buf += 4;
3265                 }
3266
3267                 /* Loop to write back the buffer data from data_end
3268                  * to page_end */
3269                 if (bp->flash_info->buffered == 0) {
3270                         for (addr = data_end; addr < page_end;
3271                                 addr += 4, i += 4) {
3272                         
3273                                 if (addr == page_end-4) {
3274                                         cmd_flags = BNX2_NVM_COMMAND_LAST;
3275                                 }
3276                                 rc = bnx2_nvram_write_dword(bp, addr,
3277                                         &flash_buffer[i], cmd_flags);
3278
3279                                 if (rc != 0)
3280                                         goto nvram_write_end;
3281
3282                                 cmd_flags = 0;
3283                         }
3284                 }
3285
3286                 /* Disable writes to flash interface (lock write-protect) */
3287                 bnx2_disable_nvram_write(bp);
3288
3289                 /* Disable access to flash interface */
3290                 bnx2_disable_nvram_access(bp);
3291                 bnx2_release_nvram_lock(bp);
3292
3293                 /* Increment written */
3294                 written += data_end - data_start;
3295         }
3296
3297 nvram_write_end:
3298         if (bp->flash_info->buffered == 0)
3299                 kfree(flash_buffer);
3300
3301         if (align_start || align_end)
3302                 kfree(buf);
3303         return rc;
3304 }
3305
/* Reset the chip core and wait for the on-chip firmware to finish
 * reinitializing.
 *
 * @reset_code: BNX2_DRV_MSG_CODE_* value passed to the firmware so it
 *              knows why the driver is resetting.
 *
 * Returns 0 on success, -EBUSY if the reset never completes, -ENODEV
 * if the byte-swap diagnostic fails, or an error from bnx2_fw_sync()
 * or bnx2_alloc_bad_rbuf().
 */
static int
bnx2_reset_chip(struct bnx2 *bp, u32 reset_code)
{
	u32 val;
	int i, rc = 0;

	/* Wait for the current PCI transaction to complete before
	 * issuing a reset. */
	REG_WR(bp, BNX2_MISC_ENABLE_CLR_BITS,
	       BNX2_MISC_ENABLE_CLR_BITS_TX_DMA_ENABLE |
	       BNX2_MISC_ENABLE_CLR_BITS_DMA_ENGINE_ENABLE |
	       BNX2_MISC_ENABLE_CLR_BITS_RX_DMA_ENABLE |
	       BNX2_MISC_ENABLE_CLR_BITS_HOST_COALESCE_ENABLE);
	val = REG_RD(bp, BNX2_MISC_ENABLE_CLR_BITS);	/* flush the write */
	udelay(5);

	/* Wait for the firmware to tell us it is ok to issue a reset. */
	bnx2_fw_sync(bp, BNX2_DRV_MSG_DATA_WAIT0 | reset_code, 1);

	/* Deposit a driver reset signature so the firmware knows that
	 * this is a soft reset. */
	REG_WR_IND(bp, bp->shmem_base + BNX2_DRV_RESET_SIGNATURE,
		   BNX2_DRV_RESET_SIGNATURE_MAGIC);

	/* Do a dummy read to force the chip to complete all current transaction
	 * before we issue a reset. */
	val = REG_RD(bp, BNX2_MISC_ID);

	val = BNX2_PCICFG_MISC_CONFIG_CORE_RST_REQ |
	      BNX2_PCICFG_MISC_CONFIG_REG_WINDOW_ENA |
	      BNX2_PCICFG_MISC_CONFIG_TARGET_MB_WORD_SWAP;

	/* Chip reset. */
	REG_WR(bp, BNX2_PCICFG_MISC_CONFIG, val);

	/* 5706 A0/A1 need a 20 ms settle after reset before the status
	 * bits below can be polled. */
	if ((CHIP_ID(bp) == CHIP_ID_5706_A0) ||
	    (CHIP_ID(bp) == CHIP_ID_5706_A1)) {
		current->state = TASK_UNINTERRUPTIBLE;
		schedule_timeout(HZ / 50);
	}

	/* Reset takes approximate 30 usec */
	for (i = 0; i < 10; i++) {
		val = REG_RD(bp, BNX2_PCICFG_MISC_CONFIG);
		if ((val & (BNX2_PCICFG_MISC_CONFIG_CORE_RST_REQ |
			    BNX2_PCICFG_MISC_CONFIG_CORE_RST_BSY)) == 0) {
			break;
		}
		udelay(10);
	}

	/* Both the request and busy bits must clear for success. */
	if (val & (BNX2_PCICFG_MISC_CONFIG_CORE_RST_REQ |
		   BNX2_PCICFG_MISC_CONFIG_CORE_RST_BSY)) {
		printk(KERN_ERR PFX "Chip reset did not complete\n");
		return -EBUSY;
	}

	/* Make sure byte swapping is properly configured. */
	val = REG_RD(bp, BNX2_PCI_SWAP_DIAG0);
	if (val != 0x01020304) {
		printk(KERN_ERR PFX "Chip not in correct endian mode\n");
		return -ENODEV;
	}

	/* Wait for the firmware to finish its initialization. */
	rc = bnx2_fw_sync(bp, BNX2_DRV_MSG_DATA_WAIT1 | reset_code, 0);
	if (rc)
		return rc;

	if (CHIP_ID(bp) == CHIP_ID_5706_A0) {
		/* Adjust the voltage regular to two steps lower.  The default
		 * of this register is 0x0000000e. */
		REG_WR(bp, BNX2_MISC_VREG_CONTROL, 0x000000fa);

		/* Remove bad rbuf memory from the free pool. */
		rc = bnx2_alloc_bad_rbuf(bp);
	}

	return rc;
}
3386
static int
bnx2_init_chip(struct bnx2 *bp)
{
        u32 val;
        int rc;

        /* One-time chip setup after reset: DMA configuration, context
         * memory, on-chip processors, MAC address, MTU, host coalescing
         * parameters and the status/statistics block DMA addresses.
         * Returns 0, or a negative error from CPU init / firmware sync.
         */

        /* Make sure the interrupt is not active. */
        REG_WR(bp, BNX2_PCICFG_INT_ACK_CMD, BNX2_PCICFG_INT_ACK_CMD_MASK_INT);

        /* DMA byte/word swapping plus the read (bits 12-15) and write
         * (bits 16-19) channel counts; big-endian hosts also swap the
         * control words. */
        val = BNX2_DMA_CONFIG_DATA_BYTE_SWAP |
              BNX2_DMA_CONFIG_DATA_WORD_SWAP |
#ifdef __BIG_ENDIAN
              BNX2_DMA_CONFIG_CNTL_BYTE_SWAP | 
#endif
              BNX2_DMA_CONFIG_CNTL_WORD_SWAP | 
              DMA_READ_CHANS << 12 |
              DMA_WRITE_CHANS << 16;

        /* NOTE(review): bits 20-21, 11 and 23 below are undocumented
         * tuning values — confirm against the Broadcom 5706 programming
         * guide. */
        val |= (0x2 << 20) | (1 << 11);

        if ((bp->flags & PCIX_FLAG) && (bp->bus_speed_mhz == 133))
                val |= (1 << 23);

        /* 5706 (other than A0) on plain PCI enables ping-pong DMA. */
        if ((CHIP_NUM(bp) == CHIP_NUM_5706) &&
            (CHIP_ID(bp) != CHIP_ID_5706_A0) && !(bp->flags & PCIX_FLAG))
                val |= BNX2_DMA_CONFIG_CNTL_PING_PONG_DMA;

        REG_WR(bp, BNX2_DMA_CONFIG, val);

        /* 5706 A0: restrict TDMA to a single outstanding operation. */
        if (CHIP_ID(bp) == CHIP_ID_5706_A0) {
                val = REG_RD(bp, BNX2_TDMA_CONFIG);
                val |= BNX2_TDMA_CONFIG_ONE_DMA;
                REG_WR(bp, BNX2_TDMA_CONFIG, val);
        }

        /* On PCI-X, clear the Enable Relaxed Ordering bit in the PCI-X
         * command register. */
        if (bp->flags & PCIX_FLAG) {
                u16 val16;

                pci_read_config_word(bp->pdev, bp->pcix_cap + PCI_X_CMD,
                                     &val16);
                pci_write_config_word(bp->pdev, bp->pcix_cap + PCI_X_CMD,
                                      val16 & ~PCI_X_CMD_ERO);
        }

        /* Enable the blocks required before context init can run. */
        REG_WR(bp, BNX2_MISC_ENABLE_SET_BITS,
               BNX2_MISC_ENABLE_SET_BITS_HOST_COALESCE_ENABLE |
               BNX2_MISC_ENABLE_STATUS_BITS_RX_V2P_ENABLE |
               BNX2_MISC_ENABLE_STATUS_BITS_CONTEXT_ENABLE);

        /* Initialize context mapping and zero out the quick contexts.  The
         * context block must have already been enabled. */
        bnx2_init_context(bp);

        /* Initialize the on-chip processors. */
        if ((rc = bnx2_init_cpus(bp)) != 0)
                return rc;

        bnx2_init_nvram(bp);

        bnx2_set_mac_addr(bp);

        /* Set the mailbox-queue kernel bypass block size to 256 bytes. */
        val = REG_RD(bp, BNX2_MQ_CONFIG);
        val &= ~BNX2_MQ_CONFIG_KNL_BYP_BLK_SIZE;
        val |= BNX2_MQ_CONFIG_KNL_BYP_BLK_SIZE_256;
        REG_WR(bp, BNX2_MQ_CONFIG, val);

        val = 0x10000 + (MAX_CID_CNT * MB_KERNEL_CTX_SIZE);
        REG_WR(bp, BNX2_MQ_KNL_BYP_WIND_START, val);
        REG_WR(bp, BNX2_MQ_KNL_WIND_END, val);

        /* Tell the RV2P block the host page size. */
        val = (BCM_PAGE_BITS - 8) << 24;
        REG_WR(bp, BNX2_RV2P_CONFIG, val);

        /* Configure page size. */
        val = REG_RD(bp, BNX2_TBDR_CONFIG);
        val &= ~BNX2_TBDR_CONFIG_PAGE_SIZE;
        val |= (BCM_PAGE_BITS - 8) << 24 | 0x40;
        REG_WR(bp, BNX2_TBDR_CONFIG, val);

        /* Seed the transmit backoff generator from the MAC address. */
        val = bp->mac_addr[0] +
              (bp->mac_addr[1] << 8) +
              (bp->mac_addr[2] << 16) +
              bp->mac_addr[3] +
              (bp->mac_addr[4] << 8) +
              (bp->mac_addr[5] << 16);
        REG_WR(bp, BNX2_EMAC_BACKOFF_SEED, val);

        /* Program the MTU.  Also include 4 bytes for CRC32. */
        val = bp->dev->mtu + ETH_HLEN + 4;
        if (val > (MAX_ETHERNET_PACKET_SIZE + 4))
                val |= BNX2_EMAC_RX_MTU_SIZE_JUMBO_ENA;
        REG_WR(bp, BNX2_EMAC_RX_MTU_SIZE, val);

        bp->last_status_idx = 0;
        bp->rx_mode = BNX2_EMAC_RX_MODE_SORT_MODE;

        /* Set up how to generate a link change interrupt. */
        REG_WR(bp, BNX2_EMAC_ATTENTION_ENA, BNX2_EMAC_ATTENTION_ENA_LINK);

        /* Give the chip the 64-bit DMA addresses of the status and
         * statistics blocks. */
        REG_WR(bp, BNX2_HC_STATUS_ADDR_L,
               (u64) bp->status_blk_mapping & 0xffffffff);
        REG_WR(bp, BNX2_HC_STATUS_ADDR_H, (u64) bp->status_blk_mapping >> 32);

        REG_WR(bp, BNX2_HC_STATISTICS_ADDR_L,
               (u64) bp->stats_blk_mapping & 0xffffffff);
        REG_WR(bp, BNX2_HC_STATISTICS_ADDR_H,
               (u64) bp->stats_blk_mapping >> 32);

        /* Host coalescing parameters: each register packs the
         * during-interrupt value in the high 16 bits and the normal
         * value in the low 16 bits. */
        REG_WR(bp, BNX2_HC_TX_QUICK_CONS_TRIP, 
               (bp->tx_quick_cons_trip_int << 16) | bp->tx_quick_cons_trip);

        REG_WR(bp, BNX2_HC_RX_QUICK_CONS_TRIP,
               (bp->rx_quick_cons_trip_int << 16) | bp->rx_quick_cons_trip);

        REG_WR(bp, BNX2_HC_COMP_PROD_TRIP,
               (bp->comp_prod_trip_int << 16) | bp->comp_prod_trip);

        REG_WR(bp, BNX2_HC_TX_TICKS, (bp->tx_ticks_int << 16) | bp->tx_ticks);

        REG_WR(bp, BNX2_HC_RX_TICKS, (bp->rx_ticks_int << 16) | bp->rx_ticks);

        REG_WR(bp, BNX2_HC_COM_TICKS,
               (bp->com_ticks_int << 16) | bp->com_ticks);

        REG_WR(bp, BNX2_HC_CMD_TICKS,
               (bp->cmd_ticks_int << 16) | bp->cmd_ticks);

        REG_WR(bp, BNX2_HC_STATS_TICKS, bp->stats_ticks & 0xffff00);
        REG_WR(bp, BNX2_HC_STAT_COLLECT_TICKS, 0xbb8);  /* 3ms */

        /* 5706 A1 gets stats collection only; later chips also enable
         * the RX/TX timer modes. */
        if (CHIP_ID(bp) == CHIP_ID_5706_A1)
                REG_WR(bp, BNX2_HC_CONFIG, BNX2_HC_CONFIG_COLLECT_STATS);
        else {
                REG_WR(bp, BNX2_HC_CONFIG, BNX2_HC_CONFIG_RX_TMR_MODE |
                       BNX2_HC_CONFIG_TX_TMR_MODE |
                       BNX2_HC_CONFIG_COLLECT_STATS);
        }

        /* Clear internal stats counters. */
        REG_WR(bp, BNX2_HC_COMMAND, BNX2_HC_COMMAND_CLR_STAT_NOW);

        REG_WR(bp, BNX2_HC_ATTN_BITS_ENABLE, STATUS_ATTN_BITS_LINK_STATE);

        /* Record whether ASF management firmware is enabled, as reported
         * in shared memory. */
        if (REG_RD_IND(bp, bp->shmem_base + BNX2_PORT_FEATURE) &
            BNX2_PORT_FEATURE_ASF_ENABLED)
                bp->flags |= ASF_ENABLE_FLAG;

        /* Initialize the receive filter. */
        bnx2_set_rx_mode(bp->dev);

        /* Tell the firmware that initialization is complete. */
        rc = bnx2_fw_sync(bp, BNX2_DRV_MSG_DATA_WAIT2 | BNX2_DRV_MSG_CODE_RESET,
                          0);

        /* Enable the remaining blocks; the read flushes the posted
         * write before the delay. */
        REG_WR(bp, BNX2_MISC_ENABLE_SET_BITS, 0x5ffffff);
        REG_RD(bp, BNX2_MISC_ENABLE_SET_BITS);

        udelay(20);

        /* Cache the host coalescing command word for later COAL_NOW use. */
        bp->hc_cmd = REG_RD(bp, BNX2_HC_COMMAND);

        return rc;
}
3548
3549
static void
bnx2_init_tx_ring(struct bnx2 *bp)
{
        struct tx_bd *txbd;
        u32 val;

        /* Set up the TX BD ring and program the TX connection context.
         * The last BD of the (single-page) ring is a chain BD that
         * points back to the base of the ring itself. */
        txbd = &bp->tx_desc_ring[MAX_TX_DESC_CNT];
                
        txbd->tx_bd_haddr_hi = (u64) bp->tx_desc_mapping >> 32;
        txbd->tx_bd_haddr_lo = (u64) bp->tx_desc_mapping & 0xffffffff;

        /* Reset software producer/consumer state and the byte sequence. */
        bp->tx_prod = 0;
        bp->tx_cons = 0;
        bp->hw_tx_cons = 0;
        bp->tx_prod_bseq = 0;
        
        /* Context type: L2 connection. */
        val = BNX2_L2CTX_TYPE_TYPE_L2;
        val |= BNX2_L2CTX_TYPE_SIZE_L2;
        CTX_WR(bp, GET_CID_ADDR(TX_CID), BNX2_L2CTX_TYPE, val);

        /* NOTE(review): the 8 << 16 field below is undocumented here —
         * confirm its meaning against the chip documentation. */
        val = BNX2_L2CTX_CMD_TYPE_TYPE_L2;
        val |= 8 << 16;
        CTX_WR(bp, GET_CID_ADDR(TX_CID), BNX2_L2CTX_CMD_TYPE, val);

        /* Point the context at the TX BD ring (64-bit DMA address). */
        val = (u64) bp->tx_desc_mapping >> 32;
        CTX_WR(bp, GET_CID_ADDR(TX_CID), BNX2_L2CTX_TBDR_BHADDR_HI, val);

        val = (u64) bp->tx_desc_mapping & 0xffffffff;
        CTX_WR(bp, GET_CID_ADDR(TX_CID), BNX2_L2CTX_TBDR_BHADDR_LO, val);
}
3580
static void
bnx2_init_rx_ring(struct bnx2 *bp)
{
        struct rx_bd *rxbd;
        int i;
        u16 prod, ring_prod; 
        u32 val;

        /* Set up the (possibly multi-page) RX BD chain, program the RX
         * connection context, and pre-fill the ring with receive
         * buffers. */

        /* 8 for CRC and VLAN */
        bp->rx_buf_use_size = bp->dev->mtu + ETH_HLEN + bp->rx_offset + 8;
        /* 8 for alignment */
        bp->rx_buf_size = bp->rx_buf_use_size + 8;

        /* Reset software and hardware ring state. */
        ring_prod = prod = bp->rx_prod = 0;
        bp->rx_cons = 0;
        bp->hw_rx_cons = 0;
        bp->rx_prod_bseq = 0;
                
        /* Initialize every BD on every page; after the inner loop rxbd
         * is the last BD of the page, which becomes a chain BD pointing
         * to the next page (the final page chains back to page 0). */
        for (i = 0; i < bp->rx_max_ring; i++) {
                int j;

                rxbd = &bp->rx_desc_ring[i][0];
                for (j = 0; j < MAX_RX_DESC_CNT; j++, rxbd++) {
                        rxbd->rx_bd_len = bp->rx_buf_use_size;
                        rxbd->rx_bd_flags = RX_BD_FLAGS_START | RX_BD_FLAGS_END;
                }
                if (i == (bp->rx_max_ring - 1))
                        j = 0;
                else
                        j = i + 1;
                rxbd->rx_bd_haddr_hi = (u64) bp->rx_desc_mapping[j] >> 32;
                rxbd->rx_bd_haddr_lo = (u64) bp->rx_desc_mapping[j] &
                                       0xffffffff;
        }

        /* Program the RX context type and the chain base address.
         * NOTE(review): 0x02 << 8 is an undocumented field — confirm. */
        val = BNX2_L2CTX_CTX_TYPE_CTX_BD_CHN_TYPE_VALUE;
        val |= BNX2_L2CTX_CTX_TYPE_SIZE_L2;
        val |= 0x02 << 8;
        CTX_WR(bp, GET_CID_ADDR(RX_CID), BNX2_L2CTX_CTX_TYPE, val);

        val = (u64) bp->rx_desc_mapping[0] >> 32;
        CTX_WR(bp, GET_CID_ADDR(RX_CID), BNX2_L2CTX_NX_BDHADDR_HI, val);

        val = (u64) bp->rx_desc_mapping[0] & 0xffffffff;
        CTX_WR(bp, GET_CID_ADDR(RX_CID), BNX2_L2CTX_NX_BDHADDR_LO, val);

        /* Pre-fill the ring with skbs; stop early on allocation failure
         * (the ring simply starts partially filled). */
        for (i = 0; i < bp->rx_ring_size; i++) {
                if (bnx2_alloc_rx_skb(bp, ring_prod) < 0) {
                        break;
                }
                prod = NEXT_RX_BD(prod);
                ring_prod = RX_RING_IDX(prod);
        }
        bp->rx_prod = prod;

        /* Publish the producer index and byte sequence to the chip. */
        REG_WR16(bp, MB_RX_CID_ADDR + BNX2_L2CTX_HOST_BDIDX, prod);

        REG_WR(bp, MB_RX_CID_ADDR + BNX2_L2CTX_HOST_BSEQ, bp->rx_prod_bseq);
}
3640
3641 static void
3642 bnx2_set_rx_ring_size(struct bnx2 *bp, u32 size)
3643 {
3644         u32 num_rings, max;
3645
3646         bp->rx_ring_size = size;
3647         num_rings = 1;
3648         while (size > MAX_RX_DESC_CNT) {
3649                 size -= MAX_RX_DESC_CNT;
3650                 num_rings++;
3651         }
3652         /* round to next power of 2 */
3653         max = MAX_RX_RINGS;
3654         while ((max & num_rings) == 0)
3655                 max >>= 1;
3656
3657         if (num_rings != max)
3658                 max <<= 1;
3659
3660         bp->rx_max_ring = max;
3661         bp->rx_max_ring_idx = (bp->rx_max_ring * RX_DESC_CNT) - 1;
3662 }
3663
3664 static void
3665 bnx2_free_tx_skbs(struct bnx2 *bp)
3666 {
3667         int i;
3668
3669         if (bp->tx_buf_ring == NULL)
3670                 return;
3671
3672         for (i = 0; i < TX_DESC_CNT; ) {
3673                 struct sw_bd *tx_buf = &bp->tx_buf_ring[i];
3674                 struct sk_buff *skb = tx_buf->skb;
3675                 int j, last;
3676
3677                 if (skb == NULL) {
3678                         i++;
3679                         continue;
3680                 }
3681
3682                 pci_unmap_single(bp->pdev, pci_unmap_addr(tx_buf, mapping),
3683                         skb_headlen(skb), PCI_DMA_TODEVICE);
3684
3685                 tx_buf->skb = NULL;
3686
3687                 last = skb_shinfo(skb)->nr_frags;
3688                 for (j = 0; j < last; j++) {
3689                         tx_buf = &bp->tx_buf_ring[i + j + 1];
3690                         pci_unmap_page(bp->pdev,
3691                                 pci_unmap_addr(tx_buf, mapping),
3692                                 skb_shinfo(skb)->frags[j].size,
3693                                 PCI_DMA_TODEVICE);
3694                 }
3695                 dev_kfree_skb_any(skb);
3696                 i += j + 1;
3697         }
3698
3699 }
3700
3701 static void
3702 bnx2_free_rx_skbs(struct bnx2 *bp)
3703 {
3704         int i;
3705
3706         if (bp->rx_buf_ring == NULL)
3707                 return;
3708
3709         for (i = 0; i < bp->rx_max_ring_idx; i++) {
3710                 struct sw_bd *rx_buf = &bp->rx_buf_ring[i];
3711                 struct sk_buff *skb = rx_buf->skb;
3712
3713                 if (skb == NULL)
3714                         continue;
3715
3716                 pci_unmap_single(bp->pdev, pci_unmap_addr(rx_buf, mapping),
3717                         bp->rx_buf_use_size, PCI_DMA_FROMDEVICE);
3718
3719                 rx_buf->skb = NULL;
3720
3721                 dev_kfree_skb_any(skb);
3722         }
3723 }
3724
static void
bnx2_free_skbs(struct bnx2 *bp)
{
        /* Release every skb still attached to the TX and RX rings;
         * called from the NIC reset path. */
        bnx2_free_tx_skbs(bp);
        bnx2_free_rx_skbs(bp);
}
3731
3732 static int
3733 bnx2_reset_nic(struct bnx2 *bp, u32 reset_code)
3734 {
3735         int rc;
3736
3737         rc = bnx2_reset_chip(bp, reset_code);
3738         bnx2_free_skbs(bp);
3739         if (rc)
3740                 return rc;
3741
3742         if ((rc = bnx2_init_chip(bp)) != 0)
3743                 return rc;
3744
3745         bnx2_init_tx_ring(bp);
3746         bnx2_init_rx_ring(bp);
3747         return 0;
3748 }
3749
3750 static int
3751 bnx2_init_nic(struct bnx2 *bp)
3752 {
3753         int rc;
3754
3755         if ((rc = bnx2_reset_nic(bp, BNX2_DRV_MSG_CODE_RESET)) != 0)
3756                 return rc;
3757
3758         bnx2_init_phy(bp);
3759         bnx2_set_link(bp);
3760         return 0;
3761 }
3762
static int
bnx2_test_registers(struct bnx2 *bp)
{
        int ret;
        int i;
        /* Offline register self-test table.  Each entry gives a register
         * offset, a (currently unused, always 0) flags word, a mask of
         * read/write bits and a mask of read-only bits.  The list is
         * terminated by offset 0xffff. */
        static const struct {
                u16   offset;
                u16   flags;
                u32   rw_mask;
                u32   ro_mask;
        } reg_tbl[] = {
                { 0x006c, 0, 0x00000000, 0x0000003f },
                { 0x0090, 0, 0xffffffff, 0x00000000 },
                { 0x0094, 0, 0x00000000, 0x00000000 },

                { 0x0404, 0, 0x00003f00, 0x00000000 },
                { 0x0418, 0, 0x00000000, 0xffffffff },
                { 0x041c, 0, 0x00000000, 0xffffffff },
                { 0x0420, 0, 0x00000000, 0x80ffffff },
                { 0x0424, 0, 0x00000000, 0x00000000 },
                { 0x0428, 0, 0x00000000, 0x00000001 },
                { 0x0450, 0, 0x00000000, 0x0000ffff },
                { 0x0454, 0, 0x00000000, 0xffffffff },
                { 0x0458, 0, 0x00000000, 0xffffffff },

                { 0x0808, 0, 0x00000000, 0xffffffff },
                { 0x0854, 0, 0x00000000, 0xffffffff },
                { 0x0868, 0, 0x00000000, 0x77777777 },
                { 0x086c, 0, 0x00000000, 0x77777777 },
                { 0x0870, 0, 0x00000000, 0x77777777 },
                { 0x0874, 0, 0x00000000, 0x77777777 },

                { 0x0c00, 0, 0x00000000, 0x00000001 },
                { 0x0c04, 0, 0x00000000, 0x03ff0001 },
                { 0x0c08, 0, 0x0f0ff073, 0x00000000 },

                { 0x1000, 0, 0x00000000, 0x00000001 },
                { 0x1004, 0, 0x00000000, 0x000f0001 },

                { 0x1408, 0, 0x01c00800, 0x00000000 },
                { 0x149c, 0, 0x8000ffff, 0x00000000 },
                { 0x14a8, 0, 0x00000000, 0x000001ff },
                { 0x14ac, 0, 0x0fffffff, 0x10000000 },
                { 0x14b0, 0, 0x00000002, 0x00000001 },
                { 0x14b8, 0, 0x00000000, 0x00000000 },
                { 0x14c0, 0, 0x00000000, 0x00000009 },
                { 0x14c4, 0, 0x00003fff, 0x00000000 },
                { 0x14cc, 0, 0x00000000, 0x00000001 },
                { 0x14d0, 0, 0xffffffff, 0x00000000 },

                { 0x1800, 0, 0x00000000, 0x00000001 },
                { 0x1804, 0, 0x00000000, 0x00000003 },

                { 0x2800, 0, 0x00000000, 0x00000001 },
                { 0x2804, 0, 0x00000000, 0x00003f01 },
                { 0x2808, 0, 0x0f3f3f03, 0x00000000 },
                { 0x2810, 0, 0xffff0000, 0x00000000 },
                { 0x2814, 0, 0xffff0000, 0x00000000 },
                { 0x2818, 0, 0xffff0000, 0x00000000 },
                { 0x281c, 0, 0xffff0000, 0x00000000 },
                { 0x2834, 0, 0xffffffff, 0x00000000 },
                { 0x2840, 0, 0x00000000, 0xffffffff },
                { 0x2844, 0, 0x00000000, 0xffffffff },
                { 0x2848, 0, 0xffffffff, 0x00000000 },
                { 0x284c, 0, 0xf800f800, 0x07ff07ff },

                { 0x2c00, 0, 0x00000000, 0x00000011 },
                { 0x2c04, 0, 0x00000000, 0x00030007 },

                { 0x3c00, 0, 0x00000000, 0x00000001 },
                { 0x3c04, 0, 0x00000000, 0x00070000 },
                { 0x3c08, 0, 0x00007f71, 0x07f00000 },
                { 0x3c0c, 0, 0x1f3ffffc, 0x00000000 },
                { 0x3c10, 0, 0xffffffff, 0x00000000 },
                { 0x3c14, 0, 0x00000000, 0xffffffff },
                { 0x3c18, 0, 0x00000000, 0xffffffff },
                { 0x3c1c, 0, 0xfffff000, 0x00000000 },
                { 0x3c20, 0, 0xffffff00, 0x00000000 },

                { 0x5004, 0, 0x00000000, 0x0000007f },
                { 0x5008, 0, 0x0f0007ff, 0x00000000 },
                { 0x500c, 0, 0xf800f800, 0x07ff07ff },

                { 0x5c00, 0, 0x00000000, 0x00000001 },
                { 0x5c04, 0, 0x00000000, 0x0003000f },
                { 0x5c08, 0, 0x00000003, 0x00000000 },
                { 0x5c0c, 0, 0x0000fff8, 0x00000000 },
                { 0x5c10, 0, 0x00000000, 0xffffffff },
                { 0x5c80, 0, 0x00000000, 0x0f7113f1 },
                { 0x5c84, 0, 0x00000000, 0x0000f333 },
                { 0x5c88, 0, 0x00000000, 0x00077373 },
                { 0x5c8c, 0, 0x00000000, 0x0007f737 },

                { 0x6808, 0, 0x0000ff7f, 0x00000000 },
                { 0x680c, 0, 0xffffffff, 0x00000000 },
                { 0x6810, 0, 0xffffffff, 0x00000000 },
                { 0x6814, 0, 0xffffffff, 0x00000000 },
                { 0x6818, 0, 0xffffffff, 0x00000000 },
                { 0x681c, 0, 0xffffffff, 0x00000000 },
                { 0x6820, 0, 0x00ff00ff, 0x00000000 },
                { 0x6824, 0, 0x00ff00ff, 0x00000000 },
                { 0x6828, 0, 0x00ff00ff, 0x00000000 },
                { 0x682c, 0, 0x03ff03ff, 0x00000000 },
                { 0x6830, 0, 0x03ff03ff, 0x00000000 },
                { 0x6834, 0, 0x03ff03ff, 0x00000000 },
                { 0x6838, 0, 0x03ff03ff, 0x00000000 },
                { 0x683c, 0, 0x0000ffff, 0x00000000 },
                { 0x6840, 0, 0x00000ff0, 0x00000000 },
                { 0x6844, 0, 0x00ffff00, 0x00000000 },
                { 0x684c, 0, 0xffffffff, 0x00000000 },
                { 0x6850, 0, 0x7f7f7f7f, 0x00000000 },
                { 0x6854, 0, 0x7f7f7f7f, 0x00000000 },
                { 0x6858, 0, 0x7f7f7f7f, 0x00000000 },
                { 0x685c, 0, 0x7f7f7f7f, 0x00000000 },
                { 0x6908, 0, 0x00000000, 0x0001ff0f },
                { 0x690c, 0, 0x00000000, 0x0ffe00f0 },

                { 0xffff, 0, 0x00000000, 0x00000000 },
        };

        /* Probe every register with 0 and all-ones: writable bits must
         * take the written value, read-only bits must keep their saved
         * value.  The original contents are restored in both the pass
         * and the fail path.  Returns 0 or -ENODEV on first mismatch. */
        ret = 0;
        for (i = 0; reg_tbl[i].offset != 0xffff; i++) {
                u32 offset, rw_mask, ro_mask, save_val, val;

                offset = (u32) reg_tbl[i].offset;
                rw_mask = reg_tbl[i].rw_mask;
                ro_mask = reg_tbl[i].ro_mask;

                save_val = readl(bp->regview + offset);

                /* Write all zeros: writable bits must read back 0. */
                writel(0, bp->regview + offset);

                val = readl(bp->regview + offset);
                if ((val & rw_mask) != 0) {
                        goto reg_test_err;
                }

                if ((val & ro_mask) != (save_val & ro_mask)) {
                        goto reg_test_err;
                }

                /* Write all ones: writable bits must read back 1. */
                writel(0xffffffff, bp->regview + offset);

                val = readl(bp->regview + offset);
                if ((val & rw_mask) != rw_mask) {
                        goto reg_test_err;
                }

                if ((val & ro_mask) != (save_val & ro_mask)) {
                        goto reg_test_err;
                }

                writel(save_val, bp->regview + offset);
                continue;

reg_test_err:
                writel(save_val, bp->regview + offset);
                ret = -ENODEV;
                break;
        }
        return ret;
}
3925
3926 static int
3927 bnx2_do_mem_test(struct bnx2 *bp, u32 start, u32 size)
3928 {
3929         static const u32 test_pattern[] = { 0x00000000, 0xffffffff, 0x55555555,
3930                 0xaaaaaaaa , 0xaa55aa55, 0x55aa55aa };
3931         int i;
3932
3933         for (i = 0; i < sizeof(test_pattern) / 4; i++) {
3934                 u32 offset;
3935
3936                 for (offset = 0; offset < size; offset += 4) {
3937
3938                         REG_WR_IND(bp, start + offset, test_pattern[i]);
3939
3940                         if (REG_RD_IND(bp, start + offset) !=
3941                                 test_pattern[i]) {
3942                                 return -ENODEV;
3943                         }
3944                 }
3945         }
3946         return 0;
3947 }
3948
3949 static int
3950 bnx2_test_memory(struct bnx2 *bp)
3951 {
3952         int ret = 0;
3953         int i;
3954         static const struct {
3955                 u32   offset;
3956                 u32   len;
3957         } mem_tbl[] = {
3958                 { 0x60000,  0x4000 },
3959                 { 0xa0000,  0x3000 },
3960                 { 0xe0000,  0x4000 },
3961                 { 0x120000, 0x4000 },
3962                 { 0x1a0000, 0x4000 },
3963                 { 0x160000, 0x4000 },
3964                 { 0xffffffff, 0    },
3965         };
3966
3967         for (i = 0; mem_tbl[i].offset != 0xffffffff; i++) {
3968                 if ((ret = bnx2_do_mem_test(bp, mem_tbl[i].offset,
3969                         mem_tbl[i].len)) != 0) {
3970                         return ret;
3971                 }
3972         }
3973         
3974         return ret;
3975 }
3976
3977 #define BNX2_MAC_LOOPBACK       0
3978 #define BNX2_PHY_LOOPBACK       1
3979
static int
bnx2_run_loopback(struct bnx2 *bp, int loopback_mode)
{
        unsigned int pkt_size, num_pkts, i;
        struct sk_buff *skb, *rx_skb;
        unsigned char *packet;
        u16 rx_start_idx, rx_idx;
        dma_addr_t map;
        struct tx_bd *txbd;
        struct sw_bd *rx_buf;
        struct l2_fhdr *rx_hdr;
        int ret = -ENODEV;

        /* Send one full-sized frame through the requested loopback path
         * (MAC-internal or PHY) and verify it is received back intact.
         * Returns 0 on success, -EINVAL for an unknown mode, -ENOMEM on
         * skb allocation failure, -ENODEV on any mismatch. */
        if (loopback_mode == BNX2_MAC_LOOPBACK) {
                bp->loopback = MAC_LOOPBACK;
                bnx2_set_mac_loopback(bp);
        }
        else if (loopback_mode == BNX2_PHY_LOOPBACK) {
                bp->loopback = 0;
                bnx2_set_phy_loopback(bp);
        }
        else
                return -EINVAL;

        /* Build the test frame: our MAC as destination, zeroed source
         * and type fields, then an incrementing byte pattern. */
        pkt_size = 1514;
        skb = dev_alloc_skb(pkt_size);
        if (!skb)
                return -ENOMEM;
        packet = skb_put(skb, pkt_size);
        memcpy(packet, bp->mac_addr, 6);
        memset(packet + 6, 0x0, 8);
        for (i = 14; i < pkt_size; i++)
                packet[i] = (unsigned char) (i & 0xff);

        map = pci_map_single(bp->pdev, skb->data, pkt_size,
                PCI_DMA_TODEVICE);

        /* Force a coalescing pass (without raising an interrupt) so the
         * status block is current before sampling the RX consumer. */
        REG_WR(bp, BNX2_HC_COMMAND,
               bp->hc_cmd | BNX2_HC_COMMAND_COAL_NOW_WO_INT);

        REG_RD(bp, BNX2_HC_COMMAND);

        udelay(5);
        rx_start_idx = bp->status_blk->status_rx_quick_consumer_index0;

        num_pkts = 0;

        /* Hand the frame to the chip as a single TX BD. */
        txbd = &bp->tx_desc_ring[TX_RING_IDX(bp->tx_prod)];

        txbd->tx_bd_haddr_hi = (u64) map >> 32;
        txbd->tx_bd_haddr_lo = (u64) map & 0xffffffff;
        txbd->tx_bd_mss_nbytes = pkt_size;
        txbd->tx_bd_vlan_tag_flags = TX_BD_FLAGS_START | TX_BD_FLAGS_END;

        num_pkts++;
        bp->tx_prod = NEXT_TX_BD(bp->tx_prod);
        bp->tx_prod_bseq += pkt_size;

        /* Ring the TX doorbell. */
        REG_WR16(bp, MB_TX_CID_ADDR + BNX2_L2CTX_TX_HOST_BIDX, bp->tx_prod);
        REG_WR(bp, MB_TX_CID_ADDR + BNX2_L2CTX_TX_HOST_BSEQ, bp->tx_prod_bseq);

        udelay(100);

        /* Second coalescing pass: refresh the status block after the
         * frame has had time to loop back. */
        REG_WR(bp, BNX2_HC_COMMAND,
               bp->hc_cmd | BNX2_HC_COMMAND_COAL_NOW_WO_INT);

        REG_RD(bp, BNX2_HC_COMMAND);

        udelay(5);

        pci_unmap_single(bp->pdev, map, pkt_size, PCI_DMA_TODEVICE);
        dev_kfree_skb(skb);

        /* The frame must have been consumed by the TX engine... */
        if (bp->status_blk->status_tx_quick_consumer_index0 != bp->tx_prod) {
                goto loopback_test_done;
        }

        /* ...and exactly num_pkts frames received. */
        rx_idx = bp->status_blk->status_rx_quick_consumer_index0;
        if (rx_idx != rx_start_idx + num_pkts) {
                goto loopback_test_done;
        }

        /* Inspect the received copy: frame header status, length and
         * payload must all match what was sent. */
        rx_buf = &bp->rx_buf_ring[rx_start_idx];
        rx_skb = rx_buf->skb;

        rx_hdr = (struct l2_fhdr *) rx_skb->data;
        skb_reserve(rx_skb, bp->rx_offset);

        pci_dma_sync_single_for_cpu(bp->pdev,
                pci_unmap_addr(rx_buf, mapping),
                bp->rx_buf_size, PCI_DMA_FROMDEVICE);

        if (rx_hdr->l2_fhdr_status &
                (L2_FHDR_ERRORS_BAD_CRC |
                L2_FHDR_ERRORS_PHY_DECODE |
                L2_FHDR_ERRORS_ALIGNMENT |
                L2_FHDR_ERRORS_TOO_SHORT |
                L2_FHDR_ERRORS_GIANT_FRAME)) {

                goto loopback_test_done;
        }

        /* The reported length includes the 4-byte CRC. */
        if ((rx_hdr->l2_fhdr_pkt_len - 4) != pkt_size) {
                goto loopback_test_done;
        }

        /* Verify the payload pattern byte by byte. */
        for (i = 14; i < pkt_size; i++) {
                if (*(rx_skb->data + i) != (unsigned char) (i & 0xff)) {
                        goto loopback_test_done;
                }
        }

        ret = 0;

loopback_test_done:
        bp->loopback = 0;
        return ret;
}
4098
4099 #define BNX2_MAC_LOOPBACK_FAILED        1
4100 #define BNX2_PHY_LOOPBACK_FAILED        2
4101 #define BNX2_LOOPBACK_FAILED            (BNX2_MAC_LOOPBACK_FAILED |     \
4102                                          BNX2_PHY_LOOPBACK_FAILED)
4103
4104 static int
4105 bnx2_test_loopback(struct bnx2 *bp)
4106 {
4107         int rc = 0;
4108
4109         if (!netif_running(bp->dev))
4110                 return BNX2_LOOPBACK_FAILED;
4111
4112         bnx2_reset_nic(bp, BNX2_DRV_MSG_CODE_RESET);
4113         spin_lock_bh(&bp->phy_lock);
4114         bnx2_init_phy(bp);
4115         spin_unlock_bh(&bp->phy_lock);
4116         if (bnx2_run_loopback(bp, BNX2_MAC_LOOPBACK))
4117                 rc |= BNX2_MAC_LOOPBACK_FAILED;
4118         if (bnx2_run_loopback(bp, BNX2_PHY_LOOPBACK))
4119                 rc |= BNX2_PHY_LOOPBACK_FAILED;
4120         return rc;
4121 }
4122
4123 #define NVRAM_SIZE 0x200
4124 #define CRC32_RESIDUAL 0xdebb20e3
4125
4126 static int
4127 bnx2_test_nvram(struct bnx2 *bp)
4128 {
4129         u32 buf[NVRAM_SIZE / 4];
4130         u8 *data = (u8 *) buf;
4131         int rc = 0;
4132         u32 magic, csum;
4133
4134         if ((rc = bnx2_nvram_read(bp, 0, data, 4)) != 0)
4135                 goto test_nvram_done;
4136
4137         magic = be32_to_cpu(buf[0]);
4138         if (magic != 0x669955aa) {
4139                 rc = -ENODEV;
4140                 goto test_nvram_done;
4141         }
4142
4143         if ((rc = bnx2_nvram_read(bp, 0x100, data, NVRAM_SIZE)) != 0)
4144                 goto test_nvram_done;
4145
4146         csum = ether_crc_le(0x100, data);
4147         if (csum != CRC32_RESIDUAL) {
4148                 rc = -ENODEV;
4149                 goto test_nvram_done;
4150         }
4151
4152         csum = ether_crc_le(0x100, data + 0x100);
4153         if (csum != CRC32_RESIDUAL) {
4154                 rc = -ENODEV;
4155         }
4156
4157 test_nvram_done:
4158         return rc;
4159 }
4160
4161 static int
4162 bnx2_test_link(struct bnx2 *bp)
4163 {
4164         u32 bmsr;
4165
4166         spin_lock_bh(&bp->phy_lock);
4167         bnx2_read_phy(bp, MII_BMSR, &bmsr);
4168         bnx2_read_phy(bp, MII_BMSR, &bmsr);
4169         spin_unlock_bh(&bp->phy_lock);
4170                 
4171         if (bmsr & BMSR_LSTATUS) {
4172                 return 0;
4173         }
4174         return -ENODEV;
4175 }
4176
4177 static int
4178 bnx2_test_intr(struct bnx2 *bp)
4179 {
4180         int i;
4181         u16 status_idx;
4182
4183         if (!netif_running(bp->dev))
4184                 return -ENODEV;
4185
4186         status_idx = REG_RD(bp, BNX2_PCICFG_INT_ACK_CMD) & 0xffff;
4187
4188         /* This register is not touched during run-time. */
4189         REG_WR(bp, BNX2_HC_COMMAND, bp->hc_cmd | BNX2_HC_COMMAND_COAL_NOW);
4190         REG_RD(bp, BNX2_HC_COMMAND);
4191
4192         for (i = 0; i < 10; i++) {
4193                 if ((REG_RD(bp, BNX2_PCICFG_INT_ACK_CMD) & 0xffff) !=
4194                         status_idx) {
4195
4196                         break;
4197                 }
4198
4199                 current->state = TASK_INTERRUPTIBLE;
4200                 schedule_timeout(HZ / 100);
4201         }
4202         if (i < 10)
4203                 return 0;
4204
4205         return -ENODEV;
4206 }
4207
static void
bnx2_timer(unsigned long data)
{
        struct bnx2 *bp = (struct bnx2 *) data;
        u32 msg;

        /* Periodic driver timer: sends the firmware heartbeat, mirrors
         * the firmware RX drop counter, and runs the 5706 SerDes
         * parallel-detect link workaround.  Rearms itself with
         * bp->current_interval. */
        if (!netif_running(bp->dev))
                return;

        /* Skip the work while interrupts are held off (intr_sem raised,
         * presumably by a reset in progress) — just reschedule. */
        if (atomic_read(&bp->intr_sem) != 0)
                goto bnx2_restart_timer;

        /* Driver heartbeat: bump the pulse sequence in shared memory so
         * the firmware knows the driver is alive. */
        msg = (u32) ++bp->fw_drv_pulse_wr_seq;
        REG_WR_IND(bp, bp->shmem_base + BNX2_DRV_PULSE_MB, msg);

        /* Mirror the firmware's RX drop counter into the stats block. */
        bp->stats_blk->stat_FwRxDrop = REG_RD_IND(bp, BNX2_FW_RX_DROP_COUNT);

        /* 5706 SerDes workaround: if autoneg is on but the link never
         * comes up, fall back to forced 1000/full (parallel detect);
         * once the partner starts sending CONFIG words, re-enable
         * autoneg. */
        if ((bp->phy_flags & PHY_SERDES_FLAG) &&
            (CHIP_NUM(bp) == CHIP_NUM_5706)) {

                spin_lock(&bp->phy_lock);
                if (bp->serdes_an_pending) {
                        /* An autoneg attempt is still pending; count it
                         * down before intervening. */
                        bp->serdes_an_pending--;
                }
                else if ((bp->link_up == 0) && (bp->autoneg & AUTONEG_SPEED)) {
                        u32 bmcr;

                        bp->current_interval = bp->timer_interval;

                        bnx2_read_phy(bp, MII_BMCR, &bmcr);

                        if (bmcr & BMCR_ANENABLE) {
                                u32 phy1, phy2;

                                /* Vendor-specific shadow PHY registers;
                                 * bit meanings per the inline comments
                                 * below (values from Broadcom). */
                                bnx2_write_phy(bp, 0x1c, 0x7c00);
                                bnx2_read_phy(bp, 0x1c, &phy1);

                                bnx2_write_phy(bp, 0x17, 0x0f01);
                                bnx2_read_phy(bp, 0x15, &phy2);
                                bnx2_write_phy(bp, 0x17, 0x0f01);
                                bnx2_read_phy(bp, 0x15, &phy2);

                                if ((phy1 & 0x10) &&    /* SIGNAL DETECT */
                                        !(phy2 & 0x20)) {       /* no CONFIG */

                                        /* Signal present but no autoneg
                                         * config words: force the link. */
                                        bmcr &= ~BMCR_ANENABLE;
                                        bmcr |= BMCR_SPEED1000 |
                                                BMCR_FULLDPLX;
                                        bnx2_write_phy(bp, MII_BMCR, bmcr);
                                        bp->phy_flags |=
                                                PHY_PARALLEL_DETECT_FLAG;
                                }
                        }
                }
                else if ((bp->link_up) && (bp->autoneg & AUTONEG_SPEED) &&
                        (bp->phy_flags & PHY_PARALLEL_DETECT_FLAG)) {
                        u32 phy2;

                        /* Link is up in forced mode: if the partner now
                         * sends CONFIG words, return to autoneg. */
                        bnx2_write_phy(bp, 0x17, 0x0f01);
                        bnx2_read_phy(bp, 0x15, &phy2);
                        if (phy2 & 0x20) {
                                u32 bmcr;

                                bnx2_read_phy(bp, MII_BMCR, &bmcr);
                                bmcr |= BMCR_ANENABLE;
                                bnx2_write_phy(bp, MII_BMCR, bmcr);

                                bp->phy_flags &= ~PHY_PARALLEL_DETECT_FLAG;

                        }
                }
                else
                        bp->current_interval = bp->timer_interval;

                spin_unlock(&bp->phy_lock);
        }

bnx2_restart_timer:
        mod_timer(&bp->timer, jiffies + bp->current_interval);
}
4288
/* Bring the interface up: allocate DMA rings, hook the IRQ (MSI when
 * the chip supports it), initialize the NIC and start the TX queue.
 * Called with rtnl_lock held.
 */
static int
bnx2_open(struct net_device *dev)
{
	struct bnx2 *bp = netdev_priv(dev);
	int rc;

	/* Wake the chip and quiesce interrupts before touching the rings. */
	bnx2_set_power_state(bp, PCI_D0);
	bnx2_disable_int(bp);

	rc = bnx2_alloc_mem(bp);
	if (rc)
		return rc;

#ifdef CONFIG_PCI_MSI
	/* NOTE(review): MSI is skipped on 5706 A0/A1 — presumably a chip
	 * erratum — and when the user set the disable_msi module parameter.
	 */
	if ((CHIP_ID(bp) != CHIP_ID_5706_A0) &&
		(CHIP_ID(bp) != CHIP_ID_5706_A1) &&
		!disable_msi) {

		if (pci_enable_msi(bp->pdev) == 0) {
			bp->flags |= USING_MSI_FLAG;
			rc = request_irq(bp->pdev->irq, bnx2_msi, 0, dev->name,
					dev);
		}
		else {
			/* MSI could not be enabled; fall back to shared INTx. */
			rc = request_irq(bp->pdev->irq, bnx2_interrupt,
					SA_SHIRQ, dev->name, dev);
		}
	}
	else
#endif
	{
		rc = request_irq(bp->pdev->irq, bnx2_interrupt, SA_SHIRQ,
				dev->name, dev);
	}
	if (rc) {
		bnx2_free_mem(bp);
		return rc;
	}

	rc = bnx2_init_nic(bp);

	if (rc) {
		/* Unwind in reverse order of acquisition. */
		free_irq(bp->pdev->irq, dev);
#ifdef CONFIG_PCI_MSI
		if (bp->flags & USING_MSI_FLAG) {
			pci_disable_msi(bp->pdev);
			bp->flags &= ~USING_MSI_FLAG;
		}
#endif
		bnx2_free_skbs(bp);
		bnx2_free_mem(bp);
		return rc;
	}
	
	mod_timer(&bp->timer, jiffies + bp->current_interval);

	atomic_set(&bp->intr_sem, 0);

	bnx2_enable_int(bp);

#ifdef CONFIG_PCI_MSI
	if (bp->flags & USING_MSI_FLAG) {
		/* Test MSI to make sure it is working
		 * If MSI test fails, go back to INTx mode
		 */
		if (bnx2_test_intr(bp) != 0) {
			printk(KERN_WARNING PFX "%s: No interrupt was generated"
			       " using MSI, switching to INTx mode. Please"
			       " report this failure to the PCI maintainer"
			       " and include system chipset information.\n",
			       bp->dev->name);

			/* Tear down the MSI vector and re-init the chip so
			 * it is quiesced before requesting the INTx line.
			 */
			bnx2_disable_int(bp);
			free_irq(bp->pdev->irq, dev);
			pci_disable_msi(bp->pdev);
			bp->flags &= ~USING_MSI_FLAG;

			rc = bnx2_init_nic(bp);

			if (!rc) {
				rc = request_irq(bp->pdev->irq, bnx2_interrupt,
					SA_SHIRQ, dev->name, dev);
			}
			if (rc) {
				/* Full teardown: rings, skbs and timer. */
				bnx2_free_skbs(bp);
				bnx2_free_mem(bp);
				del_timer_sync(&bp->timer);
				return rc;
			}
			bnx2_enable_int(bp);
		}
	}
	if (bp->flags & USING_MSI_FLAG) {
		printk(KERN_INFO PFX "%s: using MSI\n", dev->name);
	}
#endif

	netif_start_queue(dev);

	return 0;
}
4391
/* Workqueue handler that re-initializes the NIC (scheduled from
 * bnx2_tx_timeout).  bnx2_close() polls bp->in_reset_task so the
 * device cannot be torn down while this is in flight.
 */
static void
bnx2_reset_task(void *data)
{
	struct bnx2 *bp = data;

	/* Nothing to do if the interface went down in the meantime. */
	if (!netif_running(bp->dev))
		return;

	bp->in_reset_task = 1;
	bnx2_netif_stop(bp);

	bnx2_init_nic(bp);

	/* NOTE(review): intr_sem is set to 1 before restart — presumably
	 * bnx2_netif_start() decrements it and re-enables interrupts;
	 * confirm against that helper.
	 */
	atomic_set(&bp->intr_sem, 1);
	bnx2_netif_start(bp);
	bp->in_reset_task = 0;
}
4409
/* net_device watchdog hook: defer the chip reset to process context
 * via the reset task rather than resetting here.
 */
static void
bnx2_tx_timeout(struct net_device *dev)
{
	struct bnx2 *bp = netdev_priv(dev);

	/* This allows the netif to be shutdown gracefully before resetting */
#if (LINUX_VERSION_CODE >= 0x20600)
	schedule_work(&bp->reset_task);
#else
	/* Pre-2.6 kernels use the old keventd task queue API. */
	schedule_task(&bp->reset_task);
#endif
}
4422
4423 #ifdef BCM_VLAN
/* Called with rtnl_lock.  Install (or clear, when vlgrp is NULL) the
 * VLAN group and reprogram the RX filters while the NIC is quiesced.
 */
static void
bnx2_vlan_rx_register(struct net_device *dev, struct vlan_group *vlgrp)
{
	struct bnx2 *bp = netdev_priv(dev);

	bnx2_netif_stop(bp);

	bp->vlgrp = vlgrp;
	bnx2_set_rx_mode(dev);

	bnx2_netif_start(bp);
}
4437
/* Called with rtnl_lock.  Remove one VLAN id from the group and
 * refresh the RX filters while the NIC is quiesced.
 */
static void
bnx2_vlan_rx_kill_vid(struct net_device *dev, uint16_t vid)
{
	struct bnx2 *bp = netdev_priv(dev);

	bnx2_netif_stop(bp);

	if (bp->vlgrp)
		bp->vlgrp->vlan_devices[vid] = NULL;
	bnx2_set_rx_mode(dev);

	bnx2_netif_start(bp);
}
4452 #endif
4453
4454 /* Test for DMA addresses > 40-bit.
4455  * Only 64-bit systems without IOMMU requires DMA address checking.
4456  */
4457 static inline int bnx2_40bit_overflow_test(struct bnx2 *bp, dma_addr_t mapping,
4458                                            int len)
4459 {
4460 #if defined(CONFIG_HIGHMEM) && (BITS_PER_LONG == 64)
4461         if (CHIP_NUM(bp) == CHIP_NUM_5708)
4462                 return (((u64) mapping + len) > DMA_40BIT_MASK);
4463         return 0;
4464 #else
4465         return 0;
4466 #endif
4467 }
4468
4469 #if defined(CONFIG_HIGHMEM) && (BITS_PER_LONG == 64)
4470 /* Workaround 40-bit hardware DMA bugs. */
4471 static int bnx2_dma_hwbug_workaround(struct bnx2 *bp, struct sk_buff **skb,
4472                                      u16 *last_plus_one, u32 base_flags,
4473                                      u32 mss)
4474 {
4475         struct sk_buff *new_skb = skb_copy(*skb, GFP_ATOMIC);
4476         dma_addr_t new_addr = 0;
4477         int i, ret = 0;
4478         u16 hw_prod = *last_plus_one;
4479         u16 start, hw_start, prod;
4480
4481         prod = TX_RING_IDX(hw_prod);
4482         start = prod - 1 - skb_shinfo(*skb)->nr_frags;
4483         hw_start = hw_prod - 1 - skb_shinfo(*skb)->nr_frags;
4484         start &= MAX_TX_DESC_CNT;
4485         if (start > prod) {
4486                 start--;
4487                 hw_start--;
4488         }
4489
4490         if (!new_skb) {
4491                 ret = -1;
4492         } else {
4493                 struct tx_bd *txbd;
4494
4495                 /* New SKB is guaranteed to be linear. */
4496                 new_addr = pci_map_single(bp->pdev, new_skb->data, new_skb->len,
4497                                           PCI_DMA_TODEVICE);
4498                 txbd = &bp->tx_desc_ring[start];
4499
4500                 txbd->tx_bd_haddr_hi = (u64) new_addr >> 32;
4501                 txbd->tx_bd_haddr_lo = (u64) new_addr & 0xffffffff;
4502                 txbd->tx_bd_mss_nbytes = new_skb->len | (mss << 16);
4503                 txbd->tx_bd_vlan_tag_flags = base_flags | TX_BD_FLAGS_START |
4504                                              TX_BD_FLAGS_END;
4505
4506                 *last_plus_one = NEXT_TX_BD(hw_start);
4507         }
4508
4509         /* Now clean up the sw ring entries. */
4510         i = 0;
4511         while (start != prod) {
4512                 int len;
4513
4514                 if (i == 0)
4515                         len = skb_headlen(*skb);
4516                 else
4517                         len = skb_shinfo(*skb)->frags[i-1].size;
4518
4519                 pci_unmap_single(bp->pdev,
4520                                  pci_unmap_addr(&tp->tx_buf_ring[start],
4521                                                 mapping),
4522                                  len, PCI_DMA_TODEVICE);
4523                 if (i == 0) {
4524                         bp->tx_buf_ring[start].skb = new_skb;
4525                         pci_unmap_addr_set(&bp->tx_buf_ring[start], mapping,
4526                                            new_addr);
4527                 }
4528                 hw_start = NEXT_TX_BD(hw_start);
4529                 start = TX_RING_IDX(hw_start);
4530                 i++;
4531         }
4532
4533         dev_kfree_skb(*skb);
4534
4535         *skb = new_skb;
4536
4537         return ret;
4538 }
4539 #endif
4540
4541 /* Called with netif_tx_lock.
4542  * hard_start_xmit is pseudo-lockless - a lock is only required when
4543  * the tx queue is full. This way, we get the benefit of lockless
4544  * operations most of the time without the complexities to handle
4545  * netif_stop_queue/wake_queue race conditions.
4546  */
4547 static int
4548 bnx2_start_xmit(struct sk_buff *skb, struct net_device *dev)
4549 {
4550         struct bnx2 *bp = netdev_priv(dev);
4551         dma_addr_t mapping;
4552         struct tx_bd *txbd;
4553         struct sw_bd *tx_buf;
4554         u32 len, vlan_tag_flags, last_frag, mss;
4555         u16 prod, ring_prod;
4556         int i, would_hit_hwbug = 0;
4557
4558         if (unlikely(bnx2_tx_avail(bp) < (skb_shinfo(skb)->nr_frags + 1))) {
4559                 netif_stop_queue(dev);
4560                 printk(KERN_ERR PFX "%s: BUG! Tx ring full when queue awake!\n",
4561                         dev->name);
4562
4563                 return NETDEV_TX_BUSY;
4564         }
4565         len = skb_headlen(skb);
4566         prod = bp->tx_prod;
4567         ring_prod = TX_RING_IDX(prod);
4568
4569         vlan_tag_flags = 0;
4570         if (skb->ip_summed == CHECKSUM_HW) {
4571                 vlan_tag_flags |= TX_BD_FLAGS_TCP_UDP_CKSUM;
4572         }
4573
4574 #ifdef BCM_VLAN
4575         if (bp->vlgrp != 0 && vlan_tx_tag_present(skb)) {
4576                 vlan_tag_flags |=
4577                         (TX_BD_FLAGS_VLAN_TAG | (vlan_tx_tag_get(skb) << 16));
4578         }
4579 #endif
4580 #ifdef BCM_TSO 
4581         if ((mss = skb_shinfo(skb)->gso_size) &&
4582                 (skb->len > (bp->dev->mtu + ETH_HLEN))) {
4583                 u32 tcp_opt_len, ip_tcp_len;
4584
4585 #if (LINUX_VERSION_CODE > 0x2060b)
4586                 if (skb_header_cloned(skb) &&
4587                     pskb_expand_head(skb, 0, 0, GFP_ATOMIC)) {
4588                         dev_kfree_skb(skb);
4589                         return NETDEV_TX_OK;
4590                 }
4591 #endif
4592
4593                 tcp_opt_len = ((skb->h.th->doff - 5) * 4);
4594                 vlan_tag_flags |= TX_BD_FLAGS_SW_LSO;
4595
4596                 tcp_opt_len = 0;
4597                 if (skb->h.th->doff > 5) {
4598                         tcp_opt_len = (skb->h.th->doff - 5) << 2;
4599                 }
4600                 ip_tcp_len = (skb->nh.iph->ihl << 2) + sizeof(struct tcphdr);
4601
4602                 skb->nh.iph->check = 0;
4603                 skb->nh.iph->tot_len = htons(mss + ip_tcp_len + tcp_opt_len);
4604                 skb->h.th->check =
4605                         ~csum_tcpudp_magic(skb->nh.iph->saddr,
4606                                             skb->nh.iph->daddr,
4607                                             0, IPPROTO_TCP, 0);
4608
4609                 if (tcp_opt_len || (skb->nh.iph->ihl > 5)) {
4610                         vlan_tag_flags |= ((skb->nh.iph->ihl - 5) +
4611                                 (tcp_opt_len >> 2)) << 8;
4612                 }
4613         }
4614         else
4615 #endif
4616         {
4617                 mss = 0;
4618         }
4619
4620         mapping = pci_map_single(bp->pdev, skb->data, len, PCI_DMA_TODEVICE);
4621         
4622         tx_buf = &bp->tx_buf_ring[ring_prod];
4623         tx_buf->skb = skb;
4624         pci_unmap_addr_set(tx_buf, mapping, mapping);
4625
4626         txbd = &bp->tx_desc_ring[ring_prod];
4627
4628         txbd->tx_bd_haddr_hi = (u64) mapping >> 32;
4629         txbd->tx_bd_haddr_lo = (u64) mapping & 0xffffffff;
4630         txbd->tx_bd_mss_nbytes = len | (mss << 16);
4631         txbd->tx_bd_vlan_tag_flags = vlan_tag_flags | TX_BD_FLAGS_START;
4632
4633         last_frag = skb_shinfo(skb)->nr_frags;
4634
4635         for (i = 0; i < last_frag; i++) {
4636                 skb_frag_t *frag = &skb_shinfo(skb)->frags[i];
4637
4638                 prod = NEXT_TX_BD(prod);
4639                 ring_prod = TX_RING_IDX(prod);
4640                 txbd = &bp->tx_desc_ring[ring_prod];
4641
4642                 len = frag->size;
4643                 mapping = pci_map_page(bp->pdev, frag->page, frag->page_offset,
4644                         len, PCI_DMA_TODEVICE);
4645                 pci_unmap_addr_set(&bp->tx_buf_ring[ring_prod],
4646                                 mapping, mapping);
4647
4648                 txbd->tx_bd_haddr_hi = (u64) mapping >> 32;
4649                 txbd->tx_bd_haddr_lo = (u64) mapping & 0xffffffff;
4650                 txbd->tx_bd_mss_nbytes = len | (mss << 16);
4651                 txbd->tx_bd_vlan_tag_flags = vlan_tag_flags;
4652
4653                 if (bnx2_40bit_overflow_test(bp, mapping, len))
4654                         would_hit_hwbug = 1;
4655         }
4656
4657         txbd->tx_bd_vlan_tag_flags |= TX_BD_FLAGS_END;
4658
4659         prod = NEXT_TX_BD(prod);
4660
4661 #if defined(CONFIG_HIGHMEM) && (BITS_PER_LONG == 64)
4662         if (unlikely(would_hit_hwbug)) {
4663                 /* If the workaround fails due to memory/mapping
4664                  * failure, silently drop this packet.
4665                  */
4666                 if (bnx2_dma_hwbug_workaround(bp, &skb, &prod,
4667                                               vlan_tag_flags, mss))
4668                         return NETDEV_TX_OK;
4669
4670         }
4671 #endif
4672
4673         bp->tx_prod_bseq += skb->len;
4674
4675         REG_WR16(bp, MB_TX_CID_ADDR + BNX2_L2CTX_TX_HOST_BIDX, prod);
4676         REG_WR(bp, MB_TX_CID_ADDR + BNX2_L2CTX_TX_HOST_BSEQ, bp->tx_prod_bseq);
4677
4678         mmiowb();
4679
4680         bp->tx_prod = prod;
4681         dev->trans_start = jiffies;
4682
4683         if (unlikely(bnx2_tx_avail(bp) <= MAX_SKB_FRAGS)) {
4684                 spin_lock(&bp->tx_lock);
4685                 netif_stop_queue(dev);
4686                 
4687                 if (bnx2_tx_avail(bp) > MAX_SKB_FRAGS)
4688                         netif_wake_queue(dev);
4689                 spin_unlock(&bp->tx_lock);
4690         }
4691
4692         return NETDEV_TX_OK;
4693 }
4694
/* Called with rtnl_lock.  Bring the interface down: wait out any
 * in-flight reset task, quiesce and reset the chip with the proper
 * WoL reset code, then release IRQ and memory resources.
 */
static int
bnx2_close(struct net_device *dev)
{
	struct bnx2 *bp = netdev_priv(dev);
	u32 reset_code;

	/* Calling flush_scheduled_work() may deadlock because
	 * linkwatch_event() may be on the workqueue and it will try to get
	 * the rtnl_lock which we are holding.
	 */
	while (bp->in_reset_task) {
		current->state = TASK_UNINTERRUPTIBLE;
		schedule_timeout(1);
	}

	bnx2_netif_stop(bp);
	del_timer_sync(&bp->timer);
	/* Pick the firmware reset code based on Wake-on-LAN settings. */
	if (bp->flags & NO_WOL_FLAG)
		reset_code = BNX2_DRV_MSG_CODE_UNLOAD_LNK_DN;
	else if (bp->wol)
		reset_code = BNX2_DRV_MSG_CODE_SUSPEND_WOL;
	else
		reset_code = BNX2_DRV_MSG_CODE_SUSPEND_NO_WOL;
	bnx2_reset_chip(bp, reset_code);
	free_irq(bp->pdev->irq, dev);
#ifdef CONFIG_PCI_MSI
	if (bp->flags & USING_MSI_FLAG) {
		pci_disable_msi(bp->pdev);
		bp->flags &= ~USING_MSI_FLAG;
	}
#endif
	bnx2_free_skbs(bp);
	bnx2_free_mem(bp);
	bp->link_up = 0;
	netif_carrier_off(bp->dev);
	/* Drop to low-power state until the next open. */
	bnx2_set_power_state(bp, PCI_D3hot);
	return 0;
}
4734
/* Combine a split 64-bit hardware counter (ctr_hi/ctr_lo) into an
 * unsigned long.  The expansions are fully parenthesized so the macros
 * are safe inside larger expressions (the originals expanded to a bare
 * `a + b`, a precedence hazard).
 */
#define GET_NET_STATS64(ctr)                                    \
	((unsigned long) ((unsigned long) (ctr##_hi) << 32) +   \
	 (unsigned long) (ctr##_lo))

/* On 32-bit hosts only the low half fits in an unsigned long. */
#define GET_NET_STATS32(ctr)            \
	(ctr##_lo)

#if (BITS_PER_LONG == 64)
#define GET_NET_STATS   GET_NET_STATS64
#else
#define GET_NET_STATS   GET_NET_STATS32
#endif
4747
/* net_device get_stats hook: fold the chip's DMA'd statistics block
 * into the cached struct net_device_stats.  64-bit hardware counters
 * are narrowed via GET_NET_STATS (truncated on 32-bit hosts).
 */
static struct net_device_stats *
bnx2_get_stats(struct net_device *dev)
{
	struct bnx2 *bp = netdev_priv(dev);
	struct statistics_block *stats_blk = bp->stats_blk;
	struct net_device_stats *net_stats = &bp->net_stats;

	/* Stats block is only allocated while the device is open; hand
	 * back the cached copy otherwise.
	 */
	if (bp->stats_blk == NULL) {
		return net_stats;
	}
	net_stats->rx_packets =
		GET_NET_STATS(stats_blk->stat_IfHCInUcastPkts) +
		GET_NET_STATS(stats_blk->stat_IfHCInMulticastPkts) +
		GET_NET_STATS(stats_blk->stat_IfHCInBroadcastPkts);

	net_stats->tx_packets =
		GET_NET_STATS(stats_blk->stat_IfHCOutUcastPkts) +
		GET_NET_STATS(stats_blk->stat_IfHCOutMulticastPkts) +
		GET_NET_STATS(stats_blk->stat_IfHCOutBroadcastPkts);

	net_stats->rx_bytes =
		GET_NET_STATS(stats_blk->stat_IfHCInOctets);

	net_stats->tx_bytes =
		GET_NET_STATS(stats_blk->stat_IfHCOutOctets);

	net_stats->multicast = 
		GET_NET_STATS(stats_blk->stat_IfHCOutMulticastPkts);

	net_stats->collisions = 
		(unsigned long) stats_blk->stat_EtherStatsCollisions;

	/* "Overrsize" spelling matches the field name in the chip's
	 * statistics block definition.
	 */
	net_stats->rx_length_errors = 
		(unsigned long) (stats_blk->stat_EtherStatsUndersizePkts +
		stats_blk->stat_EtherStatsOverrsizePkts);

	net_stats->rx_over_errors = 
		(unsigned long) stats_blk->stat_IfInMBUFDiscards;

	net_stats->rx_frame_errors = 
		(unsigned long) stats_blk->stat_Dot3StatsAlignmentErrors;

	net_stats->rx_crc_errors = 
		(unsigned long) stats_blk->stat_Dot3StatsFCSErrors;

	net_stats->rx_errors = net_stats->rx_length_errors +
		net_stats->rx_over_errors + net_stats->rx_frame_errors +
		net_stats->rx_crc_errors;

	net_stats->tx_aborted_errors =
		(unsigned long) (stats_blk->stat_Dot3StatsExcessiveCollisions +
		stats_blk->stat_Dot3StatsLateCollisions);

	/* NOTE(review): carrier sense errors are forced to zero on 5706
	 * and 5708 A0 — presumably a chip erratum; confirm against the
	 * errata documentation.
	 */
	if ((CHIP_NUM(bp) == CHIP_NUM_5706) ||
	    (CHIP_ID(bp) == CHIP_ID_5708_A0))
		net_stats->tx_carrier_errors = 0;
	else {
		net_stats->tx_carrier_errors =
			(unsigned long)
			stats_blk->stat_Dot3StatsCarrierSenseErrors;
	}

	net_stats->tx_errors =
		(unsigned long) 
		stats_blk->stat_emac_tx_stat_dot3statsinternalmactransmiterrors
		+
		net_stats->tx_aborted_errors +
		net_stats->tx_carrier_errors;

	net_stats->rx_missed_errors =
		(unsigned long) (stats_blk->stat_IfInMBUFDiscards +
		stats_blk->stat_FwRxDrop);

	return net_stats;
}
4823
4824 /* All ethtool functions called with rtnl_lock */
4825
4826 static int
4827 bnx2_get_settings(struct net_device *dev, struct ethtool_cmd *cmd)
4828 {
4829         struct bnx2 *bp = netdev_priv(dev);
4830
4831         cmd->supported = SUPPORTED_Autoneg;
4832         if (bp->phy_flags & PHY_SERDES_FLAG) {
4833                 cmd->supported |= SUPPORTED_1000baseT_Full |
4834                         SUPPORTED_FIBRE;
4835
4836                 cmd->port = PORT_FIBRE;
4837         }
4838         else {
4839                 cmd->supported |= SUPPORTED_10baseT_Half |
4840                         SUPPORTED_10baseT_Full |
4841                         SUPPORTED_100baseT_Half |
4842                         SUPPORTED_100baseT_Full |
4843                         SUPPORTED_1000baseT_Full |
4844                         SUPPORTED_TP;
4845
4846                 cmd->port = PORT_TP;
4847         }
4848
4849         cmd->advertising = bp->advertising;
4850
4851         if (bp->autoneg & AUTONEG_SPEED) {
4852                 cmd->autoneg = AUTONEG_ENABLE;
4853         }
4854         else {
4855                 cmd->autoneg = AUTONEG_DISABLE;
4856         }
4857
4858         if (netif_carrier_ok(dev)) {
4859                 cmd->speed = bp->line_speed;
4860                 cmd->duplex = bp->duplex;
4861         }
4862         else {
4863                 cmd->speed = -1;
4864                 cmd->duplex = -1;
4865         }
4866
4867         cmd->transceiver = XCVR_INTERNAL;
4868         cmd->phy_address = bp->phy_addr;
4869
4870         return 0;
4871 }
4872   
/* ethtool set_settings: validate and apply speed/duplex/autoneg
 * configuration, then restart the PHY.  Called with rtnl_lock.
 * Returns 0 on success or -EINVAL for combinations the hardware
 * cannot do.
 */
static int
bnx2_set_settings(struct net_device *dev, struct ethtool_cmd *cmd)
{
	struct bnx2 *bp = netdev_priv(dev);
	/* Work on local copies so nothing is committed until all
	 * validation has passed.
	 */
	u8 autoneg = bp->autoneg;
	u8 req_duplex = bp->req_duplex;
	u16 req_line_speed = bp->req_line_speed;
	u32 advertising = bp->advertising;

	if (cmd->autoneg == AUTONEG_ENABLE) {
		autoneg |= AUTONEG_SPEED;

		cmd->advertising &= ETHTOOL_ALL_COPPER_SPEED; 

		/* allow advertising 1 speed */
		if ((cmd->advertising == ADVERTISED_10baseT_Half) ||
			(cmd->advertising == ADVERTISED_10baseT_Full) ||
			(cmd->advertising == ADVERTISED_100baseT_Half) ||
			(cmd->advertising == ADVERTISED_100baseT_Full)) {

			/* 10/100 modes are copper-only. */
			if (bp->phy_flags & PHY_SERDES_FLAG)
				return -EINVAL;

			advertising = cmd->advertising;

		}
		else if (cmd->advertising == ADVERTISED_1000baseT_Full) {
			advertising = cmd->advertising;
		}
		else if (cmd->advertising == ADVERTISED_1000baseT_Half) {
			/* Gigabit half duplex is not supported. */
			return -EINVAL;
		}
		else {
			/* Anything else: fall back to advertising every
			 * speed the media type supports.
			 */
			if (bp->phy_flags & PHY_SERDES_FLAG) {
				advertising = ETHTOOL_ALL_FIBRE_SPEED;
			}
			else {
				advertising = ETHTOOL_ALL_COPPER_SPEED;
			}
		}
		advertising |= ADVERTISED_Autoneg;
	}
	else {
		/* Forced mode: SerDes only does 1000/full; copper cannot
		 * force gigabit.
		 */
		if (bp->phy_flags & PHY_SERDES_FLAG) {
			if ((cmd->speed != SPEED_1000) ||
				(cmd->duplex != DUPLEX_FULL)) {
				return -EINVAL;
			}
		}
		else if (cmd->speed == SPEED_1000) {
			return -EINVAL;
		}
		autoneg &= ~AUTONEG_SPEED;
		req_line_speed = cmd->speed;
		req_duplex = cmd->duplex;
		advertising = 0;
	}

	/* Validation passed: commit and reprogram the PHY. */
	bp->autoneg = autoneg;
	bp->advertising = advertising;
	bp->req_line_speed = req_line_speed;
	bp->req_duplex = req_duplex;

	spin_lock_bh(&bp->phy_lock);

	bnx2_setup_phy(bp);

	spin_unlock_bh(&bp->phy_lock);

	return 0;
}
4944
4945 static void
4946 bnx2_get_drvinfo(struct net_device *dev, struct ethtool_drvinfo *info)
4947 {
4948         struct bnx2 *bp = netdev_priv(dev);
4949
4950         strcpy(info->driver, DRV_MODULE_NAME);
4951         strcpy(info->version, DRV_MODULE_VERSION);
4952         strcpy(info->bus_info, pci_name(bp->pdev));
4953         info->fw_version[0] = ((bp->fw_ver & 0xff000000) >> 24) + '0';
4954         info->fw_version[2] = ((bp->fw_ver & 0xff0000) >> 16) + '0';
4955         info->fw_version[4] = ((bp->fw_ver & 0xff00) >> 8) + '0';
4956         info->fw_version[1] = info->fw_version[3] = '.';
4957         info->fw_version[5] = 0;
4958 }
4959
4960 #define BNX2_REGDUMP_LEN                (32 * 1024)
4961
/* ethtool get_regs_len: fixed-size register dump (see bnx2_get_regs). */
static int
bnx2_get_regs_len(struct net_device *dev)
{
	return BNX2_REGDUMP_LEN;
}
4967
4968 static void
4969 bnx2_get_regs(struct net_device *dev, struct ethtool_regs *regs, void *_p)
4970 {
4971         u32 *p = _p, i, offset;
4972         u8 *orig_p = _p;
4973         struct bnx2 *bp = netdev_priv(dev);
4974         u32 reg_boundaries[] = { 0x0000, 0x0098, 0x0400, 0x045c,
4975                                  0x0800, 0x0880, 0x0c00, 0x0c10,
4976                                  0x0c30, 0x0d08, 0x1000, 0x101c,
4977                                  0x1040, 0x1048, 0x1080, 0x10a4,
4978                                  0x1400, 0x1490, 0x1498, 0x14f0,
4979                                  0x1500, 0x155c, 0x1580, 0x15dc,
4980                                  0x1600, 0x1658, 0x1680, 0x16d8,
4981                                  0x1800, 0x1820, 0x1840, 0x1854,
4982                                  0x1880, 0x1894, 0x1900, 0x1984,
4983                                  0x1c00, 0x1c0c, 0x1c40, 0x1c54,
4984                                  0x1c80, 0x1c94, 0x1d00, 0x1d84,
4985                                  0x2000, 0x2030, 0x23c0, 0x2400,
4986                                  0x2800, 0x2820, 0x2830, 0x2850,
4987                                  0x2b40, 0x2c10, 0x2fc0, 0x3058,
4988                                  0x3c00, 0x3c94, 0x4000, 0x4010,
4989                                  0x4080, 0x4090, 0x43c0, 0x4458,
4990                                  0x4c00, 0x4c18, 0x4c40, 0x4c54,
4991                                  0x4fc0, 0x5010, 0x53c0, 0x5444,
4992                                  0x5c00, 0x5c18, 0x5c80, 0x5c90,
4993                                  0x5fc0, 0x6000, 0x6400, 0x6428,
4994                                  0x6800, 0x6848, 0x684c, 0x6860,
4995                                  0x6888, 0x6910, 0x8000 };
4996
4997         regs->version = 0;
4998
4999         memset(p, 0, BNX2_REGDUMP_LEN);
5000
5001         if (!netif_running(bp->dev))
5002                 return;
5003
5004         i = 0;
5005         offset = reg_boundaries[0];
5006         p += offset;
5007         while (offset < BNX2_REGDUMP_LEN) {
5008                 *p++ = REG_RD(bp, offset);
5009                 offset += 4;
5010                 if (offset == reg_boundaries[i + 1]) {
5011                         offset = reg_boundaries[i + 2];
5012                         p = (u32 *) (orig_p + offset);
5013                         i += 2;
5014                 }
5015         }
5016 }
5017
5018 static void
5019 bnx2_get_wol(struct net_device *dev, struct ethtool_wolinfo *wol)
5020 {
5021         struct bnx2 *bp = netdev_priv(dev);
5022
5023         if (bp->flags & NO_WOL_FLAG) {
5024                 wol->supported = 0;
5025                 wol->wolopts = 0;
5026         }
5027         else {
5028                 wol->supported = WAKE_MAGIC;
5029                 if (bp->wol)
5030                         wol->wolopts = WAKE_MAGIC;
5031                 else
5032                         wol->wolopts = 0;
5033         }
5034         memset(&wol->sopass, 0, sizeof(wol->sopass));
5035 }
5036
5037 static int
5038 bnx2_set_wol(struct net_device *dev, struct ethtool_wolinfo *wol)
5039 {
5040         struct bnx2 *bp = netdev_priv(dev);
5041
5042         if (wol->wolopts & ~WAKE_MAGIC)
5043                 return -EINVAL;
5044
5045         if (wol->wolopts & WAKE_MAGIC) {
5046                 if (bp->flags & NO_WOL_FLAG)
5047                         return -EINVAL;
5048
5049                 bp->wol = 1;
5050         }
5051         else {
5052                 bp->wol = 0;
5053         }
5054         return 0;
5055 }
5056
/* ethtool nway_reset: restart autonegotiation.  Only valid when
 * autoneg is enabled.  Called with rtnl_lock.
 */
static int
bnx2_nway_reset(struct net_device *dev)
{
	struct bnx2 *bp = netdev_priv(dev);
	u32 bmcr;

	if (!(bp->autoneg & AUTONEG_SPEED)) {
		return -EINVAL;
	}

	spin_lock_bh(&bp->phy_lock);

	/* Force a link down visible on the other side */
	if (bp->phy_flags & PHY_SERDES_FLAG) {
		bnx2_write_phy(bp, MII_BMCR, BMCR_LOOPBACK);
		/* Drop the lock while sleeping ~20ms so the peer sees
		 * the link transition.
		 */
		spin_unlock_bh(&bp->phy_lock);

		current->state = TASK_UNINTERRUPTIBLE;
		schedule_timeout(HZ / 50);

		spin_lock_bh(&bp->phy_lock);
		if (CHIP_NUM(bp) == CHIP_NUM_5706) {
			/* NOTE(review): arm the SerDes autoneg timeout so
			 * the timer can fall back if AN does not finish —
			 * confirm against the timer handler.
			 */
			bp->current_interval = SERDES_AN_TIMEOUT;
			bp->serdes_an_pending = 1;
			mod_timer(&bp->timer, jiffies + bp->current_interval);
		}
	}

	/* Clear loopback and kick off a fresh autonegotiation. */
	bnx2_read_phy(bp, MII_BMCR, &bmcr);
	bmcr &= ~BMCR_LOOPBACK;
	bnx2_write_phy(bp, MII_BMCR, bmcr | BMCR_ANRESTART | BMCR_ANENABLE);

	spin_unlock_bh(&bp->phy_lock);

	return 0;
}
5093
5094 #if (LINUX_VERSION_CODE >= 0x20418)
5095 static int
5096 bnx2_get_eeprom_len(struct net_device *dev)
5097 {
5098         struct bnx2 *bp = netdev_priv(dev);
5099
5100         if (bp->flash_info == NULL)
5101                 return 0;
5102
5103         return (int) bp->flash_size;
5104 }
5105 #endif
5106
5107 #ifdef ETHTOOL_GEEPROM
5108 static int
5109 bnx2_get_eeprom(struct net_device *dev, struct ethtool_eeprom *eeprom,
5110                 u8 *eebuf)
5111 {
5112         struct bnx2 *bp = netdev_priv(dev);
5113         int rc;
5114
5115         /* parameters already validated in ethtool_get_eeprom */
5116
5117         rc = bnx2_nvram_read(bp, eeprom->offset, eebuf, eeprom->len);
5118
5119         return rc;
5120 }
5121 #endif
5122
5123 #ifdef ETHTOOL_SEEPROM
5124 static int
5125 bnx2_set_eeprom(struct net_device *dev, struct ethtool_eeprom *eeprom,
5126                 u8 *eebuf)
5127 {
5128         struct bnx2 *bp = netdev_priv(dev);
5129         int rc;
5130
5131         /* parameters already validated in ethtool_set_eeprom */
5132
5133         rc = bnx2_nvram_write(bp, eeprom->offset, eebuf, eeprom->len);
5134
5135         return rc;
5136 }
5137 #endif
5138
/* ethtool get_coalesce: report the current interrupt coalescing
 * parameters from the driver's cached copies.
 */
static int
bnx2_get_coalesce(struct net_device *dev, struct ethtool_coalesce *coal)
{
	struct bnx2 *bp = netdev_priv(dev);

	/* Zero the whole struct so unsupported fields read as 0. */
	memset(coal, 0, sizeof(struct ethtool_coalesce));

	coal->rx_coalesce_usecs = bp->rx_ticks;
	coal->rx_max_coalesced_frames = bp->rx_quick_cons_trip;
	coal->rx_coalesce_usecs_irq = bp->rx_ticks_int;
	coal->rx_max_coalesced_frames_irq = bp->rx_quick_cons_trip_int;

	coal->tx_coalesce_usecs = bp->tx_ticks;
	coal->tx_max_coalesced_frames = bp->tx_quick_cons_trip;
	coal->tx_coalesce_usecs_irq = bp->tx_ticks_int;
	coal->tx_max_coalesced_frames_irq = bp->tx_quick_cons_trip_int;

	coal->stats_block_coalesce_usecs = bp->stats_ticks;

	return 0;
}
5160
5161 static int
5162 bnx2_set_coalesce(struct net_device *dev, struct ethtool_coalesce *coal)
5163 {
5164         struct bnx2 *bp = netdev_priv(dev);
5165
5166         bp->rx_ticks = (u16) coal->rx_coalesce_usecs;
5167         if (bp->rx_ticks > 0x3ff) bp->rx_ticks = 0x3ff;
5168
5169         bp->rx_quick_cons_trip = (u16) coal->rx_max_coalesced_frames; 
5170         if (bp->rx_quick_cons_trip > 0xff) bp->rx_quick_cons_trip = 0xff;
5171
5172         bp->rx_ticks_int = (u16) coal->rx_coalesce_usecs_irq;
5173         if (bp->rx_ticks_int > 0x3ff) bp->rx_ticks_int = 0x3ff;
5174
5175         bp->rx_quick_cons_trip_int = (u16) coal->rx_max_coalesced_frames_irq;
5176         if (bp->rx_quick_cons_trip_int > 0xff)
5177                 bp->rx_quick_cons_trip_int = 0xff;
5178
5179         bp->tx_ticks = (u16) coal->tx_coalesce_usecs;
5180         if (bp->tx_ticks > 0x3ff) bp->tx_ticks = 0x3ff;
5181
5182         bp->tx_quick_cons_trip = (u16) coal->tx_max_coalesced_frames;
5183         if (bp->tx_quick_cons_trip > 0xff) bp->tx_quick_cons_trip = 0xff;
5184
5185         bp->tx_ticks_int = (u16) coal->tx_coalesce_usecs_irq;
5186         if (bp->tx_ticks_int > 0x3ff) bp->tx_ticks_int = 0x3ff;
5187
5188         bp->tx_quick_cons_trip_int = (u16) coal->tx_max_coalesced_frames_irq;
5189         if (bp->tx_quick_cons_trip_int > 0xff) bp->tx_quick_cons_trip_int =
5190                 0xff;
5191
5192         bp->stats_ticks = coal->stats_block_coalesce_usecs;
5193         if (bp->stats_ticks > 0xffff00) bp->stats_ticks = 0xffff00;
5194         bp->stats_ticks &= 0xffff00;
5195
5196         if (netif_running(bp->dev)) {
5197                 bnx2_netif_stop(bp);
5198                 bnx2_init_nic(bp);
5199                 bnx2_netif_start(bp);
5200         }
5201
5202         return 0;
5203 }
5204
5205 static void
5206 bnx2_get_ringparam(struct net_device *dev, struct ethtool_ringparam *ering)
5207 {
5208         struct bnx2 *bp = netdev_priv(dev);
5209
5210         ering->rx_max_pending = MAX_TOTAL_RX_DESC_CNT;
5211         ering->rx_mini_max_pending = 0;
5212         ering->rx_jumbo_max_pending = 0;
5213
5214         ering->rx_pending = bp->rx_ring_size;
5215         ering->rx_mini_pending = 0;
5216         ering->rx_jumbo_pending = 0;
5217
5218         ering->tx_max_pending = MAX_TX_DESC_CNT;
5219         ering->tx_pending = bp->tx_ring_size;
5220 }
5221
5222 static int
5223 bnx2_set_ringparam(struct net_device *dev, struct ethtool_ringparam *ering)
5224 {
5225         struct bnx2 *bp = netdev_priv(dev);
5226
5227         if ((ering->rx_pending > MAX_TOTAL_RX_DESC_CNT) ||
5228                 (ering->tx_pending > MAX_TX_DESC_CNT) ||
5229                 (ering->tx_pending <= MAX_SKB_FRAGS)) {
5230
5231                 return -EINVAL;
5232         }
5233         if (netif_running(bp->dev)) {
5234                 bnx2_netif_stop(bp);
5235                 bnx2_reset_chip(bp, BNX2_DRV_MSG_CODE_RESET);
5236                 bnx2_free_skbs(bp);
5237                 bnx2_free_mem(bp);
5238         }
5239
5240         bnx2_set_rx_ring_size(bp, ering->rx_pending);
5241         bp->tx_ring_size = ering->tx_pending;
5242
5243         if (netif_running(bp->dev)) {
5244                 int rc;
5245
5246                 rc = bnx2_alloc_mem(bp);
5247                 if (rc)
5248                         return rc;
5249                 bnx2_init_nic(bp);
5250                 bnx2_netif_start(bp);
5251         }
5252
5253         return 0;
5254 }
5255
5256 static void
5257 bnx2_get_pauseparam(struct net_device *dev, struct ethtool_pauseparam *epause)
5258 {
5259         struct bnx2 *bp = netdev_priv(dev);
5260
5261         epause->autoneg = ((bp->autoneg & AUTONEG_FLOW_CTRL) != 0);
5262         epause->rx_pause = ((bp->flow_ctrl & FLOW_CTRL_RX) != 0);
5263         epause->tx_pause = ((bp->flow_ctrl & FLOW_CTRL_TX) != 0);
5264 }
5265
5266 static int
5267 bnx2_set_pauseparam(struct net_device *dev, struct ethtool_pauseparam *epause)
5268 {
5269         struct bnx2 *bp = netdev_priv(dev);
5270
5271         bp->req_flow_ctrl = 0;
5272         if (epause->rx_pause)
5273                 bp->req_flow_ctrl |= FLOW_CTRL_RX;
5274         if (epause->tx_pause)
5275                 bp->req_flow_ctrl |= FLOW_CTRL_TX;
5276
5277         if (epause->autoneg) {
5278                 bp->autoneg |= AUTONEG_FLOW_CTRL;
5279         }
5280         else {
5281                 bp->autoneg &= ~AUTONEG_FLOW_CTRL;
5282         }
5283
5284         spin_lock_bh(&bp->phy_lock);
5285
5286         bnx2_setup_phy(bp);
5287
5288         spin_unlock_bh(&bp->phy_lock);
5289
5290         return 0;
5291 }
5292
5293 static u32
5294 bnx2_get_rx_csum(struct net_device *dev)
5295 {
5296         struct bnx2 *bp = netdev_priv(dev);
5297
5298         return bp->rx_csum;
5299 }
5300
5301 static int
5302 bnx2_set_rx_csum(struct net_device *dev, u32 data)
5303 {
5304         struct bnx2 *bp = netdev_priv(dev);
5305
5306         bp->rx_csum = data;
5307         return 0;
5308 }
5309
#define BNX2_NUM_STATS 46

/* Names reported for the ETH_SS_STATS string set.  Order must match
 * bnx2_stats_offset_arr and the per-chip stats length arrays below.
 */
static struct {
	char string[ETH_GSTRING_LEN];
} bnx2_stats_str_arr[BNX2_NUM_STATS] = {
	{ "rx_bytes" },
	{ "rx_error_bytes" },
	{ "tx_bytes" },
	{ "tx_error_bytes" },
	{ "rx_ucast_packets" },
	{ "rx_mcast_packets" },
	{ "rx_bcast_packets" },
	{ "tx_ucast_packets" },
	{ "tx_mcast_packets" },
	{ "tx_bcast_packets" },
	{ "tx_mac_errors" },
	{ "tx_carrier_errors" },
	{ "rx_crc_errors" },
	{ "rx_align_errors" },
	{ "tx_single_collisions" },
	{ "tx_multi_collisions" },
	{ "tx_deferred" },
	{ "tx_excess_collisions" },
	{ "tx_late_collisions" },
	{ "tx_total_collisions" },
	{ "rx_fragments" },
	{ "rx_jabbers" },
	{ "rx_undersize_packets" },
	{ "rx_oversize_packets" },
	{ "rx_64_byte_packets" },
	{ "rx_65_to_127_byte_packets" },
	{ "rx_128_to_255_byte_packets" },
	{ "rx_256_to_511_byte_packets" },
	{ "rx_512_to_1023_byte_packets" },
	{ "rx_1024_to_1522_byte_packets" },
	{ "rx_1523_to_9022_byte_packets" },
	{ "tx_64_byte_packets" },
	{ "tx_65_to_127_byte_packets" },
	{ "tx_128_to_255_byte_packets" },
	{ "tx_256_to_511_byte_packets" },
	{ "tx_512_to_1023_byte_packets" },
	{ "tx_1024_to_1522_byte_packets" },
	{ "tx_1523_to_9022_byte_packets" },
	{ "rx_xon_frames" },
	{ "rx_xoff_frames" },
	{ "tx_xon_frames" },
	{ "tx_xoff_frames" },
	{ "rx_mac_ctrl_frames" },
	{ "rx_filtered_packets" },
	{ "rx_discards" },
	{ "rx_fw_discards" },
};
5362
#define STATS_OFFSET32(offset_name) (offsetof(struct statistics_block, offset_name) / 4)

/* 32-bit word offset into the hardware statistics block for each entry
 * of bnx2_stats_str_arr.  For 64-bit counters the offset points at the
 * _hi word; the _lo word follows immediately.
 */
static const unsigned long bnx2_stats_offset_arr[BNX2_NUM_STATS] = {
    STATS_OFFSET32(stat_IfHCInOctets_hi),
    STATS_OFFSET32(stat_IfHCInBadOctets_hi),
    STATS_OFFSET32(stat_IfHCOutOctets_hi),
    STATS_OFFSET32(stat_IfHCOutBadOctets_hi),
    STATS_OFFSET32(stat_IfHCInUcastPkts_hi),
    STATS_OFFSET32(stat_IfHCInMulticastPkts_hi),
    STATS_OFFSET32(stat_IfHCInBroadcastPkts_hi),
    STATS_OFFSET32(stat_IfHCOutUcastPkts_hi),
    STATS_OFFSET32(stat_IfHCOutMulticastPkts_hi),
    STATS_OFFSET32(stat_IfHCOutBroadcastPkts_hi),
    STATS_OFFSET32(stat_emac_tx_stat_dot3statsinternalmactransmiterrors),
    STATS_OFFSET32(stat_Dot3StatsCarrierSenseErrors),
    STATS_OFFSET32(stat_Dot3StatsFCSErrors),
    STATS_OFFSET32(stat_Dot3StatsAlignmentErrors),
    STATS_OFFSET32(stat_Dot3StatsSingleCollisionFrames),
    STATS_OFFSET32(stat_Dot3StatsMultipleCollisionFrames),
    STATS_OFFSET32(stat_Dot3StatsDeferredTransmissions),
    STATS_OFFSET32(stat_Dot3StatsExcessiveCollisions),
    STATS_OFFSET32(stat_Dot3StatsLateCollisions),
    STATS_OFFSET32(stat_EtherStatsCollisions),
    STATS_OFFSET32(stat_EtherStatsFragments),
    STATS_OFFSET32(stat_EtherStatsJabbers),
    STATS_OFFSET32(stat_EtherStatsUndersizePkts),
    STATS_OFFSET32(stat_EtherStatsOverrsizePkts),
    STATS_OFFSET32(stat_EtherStatsPktsRx64Octets),
    STATS_OFFSET32(stat_EtherStatsPktsRx65Octetsto127Octets),
    STATS_OFFSET32(stat_EtherStatsPktsRx128Octetsto255Octets),
    STATS_OFFSET32(stat_EtherStatsPktsRx256Octetsto511Octets),
    STATS_OFFSET32(stat_EtherStatsPktsRx512Octetsto1023Octets),
    STATS_OFFSET32(stat_EtherStatsPktsRx1024Octetsto1522Octets),
    STATS_OFFSET32(stat_EtherStatsPktsRx1523Octetsto9022Octets),
    STATS_OFFSET32(stat_EtherStatsPktsTx64Octets),
    STATS_OFFSET32(stat_EtherStatsPktsTx65Octetsto127Octets),
    STATS_OFFSET32(stat_EtherStatsPktsTx128Octetsto255Octets),
    STATS_OFFSET32(stat_EtherStatsPktsTx256Octetsto511Octets),
    STATS_OFFSET32(stat_EtherStatsPktsTx512Octetsto1023Octets),
    STATS_OFFSET32(stat_EtherStatsPktsTx1024Octetsto1522Octets),
    STATS_OFFSET32(stat_EtherStatsPktsTx1523Octetsto9022Octets),
    STATS_OFFSET32(stat_XonPauseFramesReceived),
    STATS_OFFSET32(stat_XoffPauseFramesReceived),
    STATS_OFFSET32(stat_OutXonSent),
    STATS_OFFSET32(stat_OutXoffSent),
    STATS_OFFSET32(stat_MacControlFramesReceived),
    STATS_OFFSET32(stat_IfInFramesL2FilterDiscards),
    STATS_OFFSET32(stat_IfInMBUFDiscards),
    STATS_OFFSET32(stat_FwRxDrop),
};
5413
/* stat_IfHCInBadOctets and stat_Dot3StatsCarrierSenseErrors are
 * skipped because of errata.
 */
/* Width in bytes of each counter (0 = skip) for 5706 A0-A2 and 5708 A0;
 * indexed in the same order as bnx2_stats_str_arr.
 */
static u8 bnx2_5706_stats_len_arr[BNX2_NUM_STATS] = {
	8,0,8,8,8,8,8,8,8,8,
	4,0,4,4,4,4,4,4,4,4,
	4,4,4,4,4,4,4,4,4,4,
	4,4,4,4,4,4,4,4,4,4,
	4,4,4,4,4,4,
};
5424
/* Counter widths for the remaining chip revisions; here only
 * stat_IfHCInBadOctets (index 1) is skipped.
 */
static u8 bnx2_5708_stats_len_arr[BNX2_NUM_STATS] = {
	8,0,8,8,8,8,8,8,8,8,
	4,4,4,4,4,4,4,4,4,4,
	4,4,4,4,4,4,4,4,4,4,
	4,4,4,4,4,4,4,4,4,4,
	4,4,4,4,4,4,
};
5432
#define BNX2_NUM_TESTS 6

/* Names reported for the ETH_SS_TEST string set; order matches the
 * buf[] result slots filled in by bnx2_self_test().
 */
static struct {
	char string[ETH_GSTRING_LEN];
} bnx2_tests_str_arr[BNX2_NUM_TESTS] = {
	{ "register_test (offline)" },
	{ "memory_test (offline)" },
	{ "loopback_test (offline)" },
	{ "nvram_test (online)" },
	{ "interrupt_test (online)" },
	{ "link_test (online)" },
};
5445
/* ethtool ->self_test_count: number of self-test result slots. */
static int
bnx2_self_test_count(struct net_device *dev)
{
	return BNX2_NUM_TESTS;
}
5451
/* ethtool ->self_test: run the diagnostic tests listed in
 * bnx2_tests_str_arr.  Each buf[] slot is 0 on pass, non-zero on
 * failure, and ETH_TEST_FL_FAILED is set in etest->flags on any
 * failure.  Offline tests are only run when the caller requested them.
 */
static void
bnx2_self_test(struct net_device *dev, struct ethtool_test *etest, u64 *buf)
{
	struct bnx2 *bp = netdev_priv(dev);

	memset(buf, 0, sizeof(u64) * BNX2_NUM_TESTS);
	if (etest->flags & ETH_TEST_FL_OFFLINE) {
		/* Offline tests need exclusive use of the hardware: stop
		 * traffic and put the chip into diagnostic mode first.
		 */
		bnx2_netif_stop(bp);
		bnx2_reset_chip(bp, BNX2_DRV_MSG_CODE_DIAG);
		bnx2_free_skbs(bp);

		if (bnx2_test_registers(bp) != 0) {
			buf[0] = 1;
			etest->flags |= ETH_TEST_FL_FAILED;
		}
		if (bnx2_test_memory(bp) != 0) {
			buf[1] = 1;
			etest->flags |= ETH_TEST_FL_FAILED;
		}
		/* Loopback result is stored directly; non-zero = fail. */
		if ((buf[2] = bnx2_test_loopback(bp)) != 0)
			etest->flags |= ETH_TEST_FL_FAILED;

		if (!netif_running(bp->dev)) {
			bnx2_reset_chip(bp, BNX2_DRV_MSG_CODE_RESET);
		}
		else {
			/* Restore normal operation after diag mode. */
			bnx2_init_nic(bp);
			bnx2_netif_start(bp);
		}

		/* wait for link up */
		current->state = TASK_INTERRUPTIBLE;
		schedule_timeout(3 * HZ);
		/* Give copper (non-SERDES) links extra time to come back
		 * so the link test below does not fail spuriously.
		 */
		if ((!bp->link_up) && !(bp->phy_flags & PHY_SERDES_FLAG)) {
			current->state = TASK_INTERRUPTIBLE;
			schedule_timeout(4 * HZ);
		}
	}

	/* Online tests: safe while the interface carries traffic. */
	if (bnx2_test_nvram(bp) != 0) {
		buf[3] = 1;
		etest->flags |= ETH_TEST_FL_FAILED;
	}
	if (bnx2_test_intr(bp) != 0) {
		buf[4] = 1;
		etest->flags |= ETH_TEST_FL_FAILED;
	}

	if (bnx2_test_link(bp) != 0) {
		buf[5] = 1;
		etest->flags |= ETH_TEST_FL_FAILED;

	}
}
5506
5507 static void
5508 bnx2_get_strings(struct net_device *dev, u32 stringset, u8 *buf)
5509 {
5510         switch (stringset) {
5511         case ETH_SS_STATS:
5512                 memcpy(buf, bnx2_stats_str_arr,
5513                         sizeof(bnx2_stats_str_arr));
5514                 break;
5515         case ETH_SS_TEST:
5516                 memcpy(buf, bnx2_tests_str_arr,
5517                         sizeof(bnx2_tests_str_arr));
5518                 break;
5519         }
5520 }
5521
/* ethtool ->get_stats_count: number of statistics counters reported. */
static int
bnx2_get_stats_count(struct net_device *dev)
{
	return BNX2_NUM_STATS;
}
5527
5528 static void
5529 bnx2_get_ethtool_stats(struct net_device *dev,
5530                 struct ethtool_stats *stats, u64 *buf)
5531 {
5532         struct bnx2 *bp = netdev_priv(dev);
5533         int i;
5534         u32 *hw_stats = (u32 *) bp->stats_blk;
5535         u8 *stats_len_arr = NULL;
5536
5537         if (hw_stats == NULL) {
5538                 memset(buf, 0, sizeof(u64) * BNX2_NUM_STATS);
5539                 return;
5540         }
5541
5542         if ((CHIP_ID(bp) == CHIP_ID_5706_A0) ||
5543             (CHIP_ID(bp) == CHIP_ID_5706_A1) ||
5544             (CHIP_ID(bp) == CHIP_ID_5706_A2) ||
5545             (CHIP_ID(bp) == CHIP_ID_5708_A0))
5546                 stats_len_arr = bnx2_5706_stats_len_arr;
5547         else
5548                 stats_len_arr = bnx2_5708_stats_len_arr;
5549
5550         for (i = 0; i < BNX2_NUM_STATS; i++) {
5551                 if (stats_len_arr[i] == 0) {
5552                         /* skip this counter */
5553                         buf[i] = 0;
5554                         continue;
5555                 }
5556                 if (stats_len_arr[i] == 4) {
5557                         /* 4-byte counter */
5558                         buf[i] = (u64)
5559                                 *(hw_stats + bnx2_stats_offset_arr[i]);
5560                         continue;
5561                 }
5562                 /* 8-byte counter */
5563                 buf[i] = (((u64) *(hw_stats +
5564                                         bnx2_stats_offset_arr[i])) << 32) +
5565                                 *(hw_stats + bnx2_stats_offset_arr[i] + 1);
5566         }
5567 }
5568
/* ethtool ->phys_id: blink the port LED so the adapter can be located.
 * @data is the blink duration in seconds (0 means 2 seconds).  The loop
 * is interruptible by signals.
 */
static int
bnx2_phys_id(struct net_device *dev, u32 data)
{
	struct bnx2 *bp = netdev_priv(dev);
	int i;
	u32 save;

	if (data == 0)
		data = 2;

	/* Save the LED mode and switch to MAC-controlled LEDs. */
	save = REG_RD(bp, BNX2_MISC_CFG);
	REG_WR(bp, BNX2_MISC_CFG, BNX2_MISC_CFG_LEDMODE_MAC);

	/* Alternate the EMAC LED override bits every half second,
	 * giving data toggles per second of requested duration.
	 */
	for (i = 0; i < (data * 2); i++) {
		if ((i % 2) == 0) {
			REG_WR(bp, BNX2_EMAC_LED, BNX2_EMAC_LED_OVERRIDE);
		}
		else {
			REG_WR(bp, BNX2_EMAC_LED, BNX2_EMAC_LED_OVERRIDE |
				BNX2_EMAC_LED_1000MB_OVERRIDE |
				BNX2_EMAC_LED_100MB_OVERRIDE |
				BNX2_EMAC_LED_10MB_OVERRIDE |
				BNX2_EMAC_LED_TRAFFIC_OVERRIDE |
				BNX2_EMAC_LED_TRAFFIC);
		}
		current->state = TASK_INTERRUPTIBLE;
		schedule_timeout(HZ / 2);
		if (signal_pending(current))
			break;
	}
	/* Clear the overrides and restore the saved LED mode. */
	REG_WR(bp, BNX2_EMAC_LED, 0);
	REG_WR(bp, BNX2_MISC_CFG, save);
	return 0;
}
5603
/* ethtool entry points.  Optional ops are compiled in only when the
 * running kernel's headers provide them (this is a backported driver).
 */
static struct ethtool_ops bnx2_ethtool_ops = {
	.get_settings		= bnx2_get_settings,
	.set_settings		= bnx2_set_settings,
	.get_drvinfo		= bnx2_get_drvinfo,
	.get_regs_len		= bnx2_get_regs_len,
	.get_regs		= bnx2_get_regs,
	.get_wol		= bnx2_get_wol,
	.set_wol		= bnx2_set_wol,
	.nway_reset		= bnx2_nway_reset,
	.get_link		= ethtool_op_get_link,
#if (LINUX_VERSION_CODE >= 0x20418)
	.get_eeprom_len		= bnx2_get_eeprom_len,
#endif
#ifdef ETHTOOL_GEEPROM
	.get_eeprom		= bnx2_get_eeprom,
#endif
#ifdef ETHTOOL_SEEPROM
	.set_eeprom		= bnx2_set_eeprom,
#endif
	.get_coalesce		= bnx2_get_coalesce,
	.set_coalesce		= bnx2_set_coalesce,
	.get_ringparam		= bnx2_get_ringparam,
	.set_ringparam		= bnx2_set_ringparam,
	.get_pauseparam		= bnx2_get_pauseparam,
	.set_pauseparam		= bnx2_set_pauseparam,
	.get_rx_csum		= bnx2_get_rx_csum,
	.set_rx_csum		= bnx2_set_rx_csum,
	.get_tx_csum		= ethtool_op_get_tx_csum,
#if (LINUX_VERSION_CODE >= 0x20418)
	.set_tx_csum		= ethtool_op_set_tx_csum,
#endif
	.get_sg			= ethtool_op_get_sg,
	.set_sg			= ethtool_op_set_sg,
#ifdef BCM_TSO
	.get_tso		= ethtool_op_get_tso,
	.set_tso		= ethtool_op_set_tso,
#endif
	.self_test_count	= bnx2_self_test_count,
	.self_test		= bnx2_self_test,
	.get_strings		= bnx2_get_strings,
	.phys_id		= bnx2_phys_id,
	.get_stats_count	= bnx2_get_stats_count,
	.get_ethtool_stats	= bnx2_get_ethtool_stats,
#ifdef ETHTOOL_GPERMADDR
	.get_perm_addr		= ethtool_op_get_perm_addr,
#endif
};
5651
/* Called with rtnl_lock */
/* Net device ioctl handler: MII register access via SIOCGMIIPHY /
 * SIOCGMIIREG / SIOCSMIIREG; everything else returns -EOPNOTSUPP.
 */
static int
bnx2_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd)
{
#if (LINUX_VERSION_CODE >= 0x020607)
	struct mii_ioctl_data *data = if_mii(ifr);
#else
	struct mii_ioctl_data *data = (struct mii_ioctl_data *) &ifr->ifr_ifru;
#endif
	struct bnx2 *bp = netdev_priv(dev);
	int err;

	switch(cmd) {
	case SIOCGMIIPHY:
		data->phy_id = bp->phy_addr;

		/* fallthru */
	case SIOCGMIIREG: {
		u32 mii_regval;

		/* PHY accesses are serialized by phy_lock. */
		spin_lock_bh(&bp->phy_lock);
		err = bnx2_read_phy(bp, data->reg_num & 0x1f, &mii_regval);
		spin_unlock_bh(&bp->phy_lock);

		data->val_out = mii_regval;

		return err;
	}

	case SIOCSMIIREG:
		/* Writing PHY registers needs admin privileges. */
		if (!capable(CAP_NET_ADMIN))
			return -EPERM;

		spin_lock_bh(&bp->phy_lock);
		err = bnx2_write_phy(bp, data->reg_num & 0x1f, data->val_in);
		spin_unlock_bh(&bp->phy_lock);

		return err;

	default:
		/* do nothing */
		break;
	}
	return -EOPNOTSUPP;
}
5697
5698 /* Called with rtnl_lock */
5699 static int
5700 bnx2_change_mac_addr(struct net_device *dev, void *p)
5701 {
5702         struct sockaddr *addr = p;
5703         struct bnx2 *bp = netdev_priv(dev);
5704
5705         if (!is_valid_ether_addr(addr->sa_data))
5706                 return -EINVAL;
5707
5708         memcpy(dev->dev_addr, addr->sa_data, dev->addr_len);
5709         if (netif_running(dev))
5710                 bnx2_set_mac_addr(bp);
5711
5712         return 0;
5713 }
5714
5715 /* Called with rtnl_lock */
5716 static int
5717 bnx2_change_mtu(struct net_device *dev, int new_mtu)
5718 {
5719         struct bnx2 *bp = netdev_priv(dev);
5720
5721         if (((new_mtu + ETH_HLEN) > MAX_ETHERNET_JUMBO_PACKET_SIZE) ||
5722                 ((new_mtu + ETH_HLEN) < MIN_ETHERNET_PACKET_SIZE))
5723                 return -EINVAL;
5724
5725         dev->mtu = new_mtu;
5726         if (netif_running(dev)) {
5727                 bnx2_netif_stop(bp);
5728
5729                 bnx2_init_nic(bp);
5730
5731                 bnx2_netif_start(bp);
5732         }
5733         return 0;
5734 }
5735
5736 #if defined(HAVE_POLL_CONTROLLER) || defined(CONFIG_NET_POLL_CONTROLLER)
/* Netpoll/netdump hook: service the device synchronously with its
 * interrupt handler, used when normal interrupt delivery is unavailable.
 */
static void
poll_bnx2(struct net_device *dev)
{
	struct bnx2 *bp = netdev_priv(dev);

#if defined(RED_HAT_LINUX_KERNEL) && (LINUX_VERSION_CODE < 0x020600)
	if (netdump_mode) {
		bnx2_interrupt(bp->pdev->irq, dev, NULL);
		/* If the device is queued for NAPI polling, drain some
		 * rx work with a small budget.
		 */
		if (dev->poll_list.prev) {
			int budget = 64;

			bnx2_poll(dev, &budget);
		}
	}
	else
#endif
	{
		/* Mask the IRQ line while invoking the handler directly. */
		disable_irq(bp->pdev->irq);
		bnx2_interrupt(bp->pdev->irq, dev, NULL);
		enable_irq(bp->pdev->irq);
	}
}
5759 #endif
5760
5761 static int __devinit
5762 bnx2_init_board(struct pci_dev *pdev, struct net_device *dev)
5763 {
5764         struct bnx2 *bp;
5765         unsigned long mem_len;
5766         int rc;
5767         u32 reg;
5768         u64 dma_mask, persist_dma_mask;
5769
5770         SET_MODULE_OWNER(dev);
5771 #if (LINUX_VERSION_CODE >= 0x20419)
5772         SET_NETDEV_DEV(dev, &pdev->dev);
5773 #endif
5774         bp = netdev_priv(dev);
5775
5776         bp->flags = 0;
5777         bp->phy_flags = 0;
5778
5779         /* enable device (incl. PCI PM wakeup), and bus-mastering */
5780         rc = pci_enable_device(pdev);
5781         if (rc) {
5782                 printk(KERN_ERR PFX "Cannot enable PCI device, aborting.");
5783                 goto err_out;
5784         }
5785
5786         if (!(pci_resource_flags(pdev, 0) & IORESOURCE_MEM)) {
5787                 printk(KERN_ERR PFX "Cannot find PCI device base address, "
5788                        "aborting.\n");
5789                 rc = -ENODEV;
5790                 goto err_out_disable;
5791         }
5792
5793         rc = pci_request_regions(pdev, DRV_MODULE_NAME);
5794         if (rc) {
5795                 printk(KERN_ERR PFX "Cannot obtain PCI resources, aborting.\n");
5796                 goto err_out_disable;
5797         }
5798
5799         pci_set_master(pdev);
5800
5801         bp->pm_cap = pci_find_capability(pdev, PCI_CAP_ID_PM);
5802         if (bp->pm_cap == 0) {
5803                 printk(KERN_ERR PFX "Cannot find power management capability, "
5804                                "aborting.\n");
5805                 rc = -EIO;
5806                 goto err_out_release;
5807         }
5808
5809         bp->pcix_cap = pci_find_capability(pdev, PCI_CAP_ID_PCIX);
5810         if (bp->pcix_cap == 0) {
5811                 printk(KERN_ERR PFX "Cannot find PCIX capability, aborting.\n");
5812                 rc = -EIO;
5813                 goto err_out_release;
5814         }
5815
5816         bp->dev = dev;
5817         bp->pdev = pdev;
5818
5819         spin_lock_init(&bp->phy_lock);
5820         spin_lock_init(&bp->tx_lock);
5821 #if (LINUX_VERSION_CODE >= 0x20600)
5822         INIT_WORK(&bp->reset_task, bnx2_reset_task, bp);
5823 #else
5824         INIT_TQUEUE(&bp->reset_task, bnx2_reset_task, bp);
5825 #endif
5826
5827         dev->base_addr = dev->mem_start = pci_resource_start(pdev, 0);
5828         mem_len = MB_GET_CID_ADDR(17);
5829         dev->mem_end = dev->mem_start + mem_len;
5830         dev->irq = pdev->irq;
5831
5832         bp->regview = ioremap_nocache(dev->base_addr, mem_len);
5833
5834         if (!bp->regview) {
5835                 printk(KERN_ERR PFX "Cannot map register space, aborting.\n");
5836                 rc = -ENOMEM;
5837                 goto err_out_release;
5838         }
5839
5840         /* Configure byte swap and enable write to the reg_window registers.
5841          * Rely on CPU to do target byte swapping on big endian systems
5842          * The chip's target access swapping will not swap all accesses
5843          */
5844         pci_write_config_dword(bp->pdev, BNX2_PCICFG_MISC_CONFIG,
5845                                BNX2_PCICFG_MISC_CONFIG_REG_WINDOW_ENA |
5846                                BNX2_PCICFG_MISC_CONFIG_TARGET_MB_WORD_SWAP);
5847
5848         bnx2_set_power_state(bp, PCI_D0);
5849
5850         bp->chip_id = REG_RD(bp, BNX2_MISC_ID);
5851
5852         /* 5708 cannot support DMA addresses > 40-bit.
5853          * On 64-bit systems with IOMMU, use 40-bit dma_mask.
5854          * On 64-bit systems without IOMMU, use 64-bit dma_mask and
5855          * do DMA address check in bnx2_start_xmit().
5856          */
5857         if (CHIP_NUM(bp) == CHIP_NUM_5708) {
5858                 persist_dma_mask = dma_mask = DMA_40BIT_MASK;
5859 #ifdef CONFIG_HIGHMEM
5860                 dma_mask = DMA_64BIT_MASK;
5861 #endif
5862         } else
5863                 persist_dma_mask = dma_mask = DMA_64BIT_MASK;
5864         
5865         /* Configure DMA attributes. */
5866         if (pci_set_dma_mask(pdev, dma_mask) == 0) {
5867                 dev->features |= NETIF_F_HIGHDMA;
5868                 rc = pci_set_consistent_dma_mask(pdev, persist_dma_mask);
5869                 if (rc) {
5870                         printk(KERN_ERR PFX "pci_set_consistent_dma_mask "
5871                                "failed, aborting.\n");
5872                         goto err_out_unmap;
5873                 }
5874         } else if ((rc = pci_set_dma_mask(pdev, DMA_32BIT_MASK)) != 0) {
5875                 printk(KERN_ERR PFX "System does not support DMA, aborting.\n");
5876                 goto err_out_unmap;
5877         }
5878
5879         /* Get bus information. */
5880         reg = REG_RD(bp, BNX2_PCICFG_MISC_STATUS);
5881         if (reg & BNX2_PCICFG_MISC_STATUS_PCIX_DET) {
5882                 u32 clkreg;
5883
5884                 bp->flags |= PCIX_FLAG;
5885
5886                 clkreg = REG_RD(bp, BNX2_PCICFG_PCI_CLOCK_CONTROL_BITS);
5887                 
5888                 clkreg &= BNX2_PCICFG_PCI_CLOCK_CONTROL_BITS_PCI_CLK_SPD_DET;
5889                 switch (clkreg) {
5890                 case BNX2_PCICFG_PCI_CLOCK_CONTROL_BITS_PCI_CLK_SPD_DET_133MHZ:
5891                         bp->bus_speed_mhz = 133;
5892                         break;
5893
5894                 case BNX2_PCICFG_PCI_CLOCK_CONTROL_BITS_PCI_CLK_SPD_DET_95MHZ:
5895                         bp->bus_speed_mhz = 100;
5896                         break;
5897
5898                 case BNX2_PCICFG_PCI_CLOCK_CONTROL_BITS_PCI_CLK_SPD_DET_66MHZ:
5899                 case BNX2_PCICFG_PCI_CLOCK_CONTROL_BITS_PCI_CLK_SPD_DET_80MHZ:
5900                         bp->bus_speed_mhz = 66;
5901                         break;
5902
5903                 case BNX2_PCICFG_PCI_CLOCK_CONTROL_BITS_PCI_CLK_SPD_DET_48MHZ:
5904                 case BNX2_PCICFG_PCI_CLOCK_CONTROL_BITS_PCI_CLK_SPD_DET_55MHZ:
5905                         bp->bus_speed_mhz = 50;
5906                         break;
5907
5908                 case BNX2_PCICFG_PCI_CLOCK_CONTROL_BITS_PCI_CLK_SPD_DET_LOW:
5909                 case BNX2_PCICFG_PCI_CLOCK_CONTROL_BITS_PCI_CLK_SPD_DET_32MHZ:
5910                 case BNX2_PCICFG_PCI_CLOCK_CONTROL_BITS_PCI_CLK_SPD_DET_38MHZ:
5911                         bp->bus_speed_mhz = 33;
5912                         break;
5913                 }
5914         }
5915         else {
5916                 if (reg & BNX2_PCICFG_MISC_STATUS_M66EN)
5917                         bp->bus_speed_mhz = 66;
5918                 else
5919                         bp->bus_speed_mhz = 33;
5920         }
5921
5922         if (reg & BNX2_PCICFG_MISC_STATUS_32BIT_DET)
5923                 bp->flags |= PCI_32BIT_FLAG;
5924
5925         /* 5706A0 may falsely detect SERR and PERR. */
5926         if (CHIP_ID(bp) == CHIP_ID_5706_A0) {
5927                 reg = REG_RD(bp, PCI_COMMAND);
5928                 reg &= ~(PCI_COMMAND_SERR | PCI_COMMAND_PARITY);
5929                 REG_WR(bp, PCI_COMMAND, reg);
5930         }
5931         else if ((CHIP_ID(bp) == CHIP_ID_5706_A1) &&
5932                 !(bp->flags & PCIX_FLAG)) {
5933
5934                 printk(KERN_ERR PFX "5706 A1 can only be used in a PCIX bus, "
5935                        "aborting.\n");
5936                 goto err_out_unmap;
5937         }
5938
5939         bnx2_init_nvram(bp);
5940
5941         reg = REG_RD_IND(bp, BNX2_SHM_HDR_SIGNATURE);
5942
5943         if ((reg & BNX2_SHM_HDR_SIGNATURE_SIG_MASK) ==
5944             BNX2_SHM_HDR_SIGNATURE_SIG)
5945                 bp->shmem_base = REG_RD_IND(bp, BNX2_SHM_HDR_ADDR_0);
5946         else
5947                 bp->shmem_base = HOST_VIEW_SHMEM_BASE;
5948
5949         /* Get the permanent MAC address.  First we need to make sure the
5950          * firmware is actually running.
5951          */
5952         reg = REG_RD_IND(bp, bp->shmem_base + BNX2_DEV_INFO_SIGNATURE);
5953
5954         if ((reg & BNX2_DEV_INFO_SIGNATURE_MAGIC_MASK) !=
5955             BNX2_DEV_INFO_SIGNATURE_MAGIC) {
5956                 printk(KERN_ERR PFX "Firmware not running, aborting.\n");
5957                 rc = -ENODEV;
5958                 goto err_out_unmap;
5959         }
5960
5961         bp->fw_ver = REG_RD_IND(bp, bp->shmem_base + BNX2_DEV_INFO_BC_REV);
5962
5963         reg = REG_RD_IND(bp, bp->shmem_base + BNX2_PORT_HW_CFG_MAC_UPPER);
5964         bp->mac_addr[0] = (u8) (reg >> 8);
5965         bp->mac_addr[1] = (u8) reg;
5966
5967         reg = REG_RD_IND(bp, bp->shmem_base + BNX2_PORT_HW_CFG_MAC_LOWER);
5968         bp->mac_addr[2] = (u8) (reg >> 24);
5969         bp->mac_addr[3] = (u8) (reg >> 16);
5970         bp->mac_addr[4] = (u8) (reg >> 8);
5971         bp->mac_addr[5] = (u8) reg;
5972
5973         bp->tx_ring_size = MAX_TX_DESC_CNT;
5974         bnx2_set_rx_ring_size(bp, 100);
5975
5976         bp->rx_csum = 1;
5977
5978         bp->rx_offset = sizeof(struct l2_fhdr) + 2;
5979
5980         bp->tx_quick_cons_trip_int = 20;
5981         bp->tx_quick_cons_trip = 20;
5982         bp->tx_ticks_int = 80;
5983         bp->tx_ticks = 80;
5984                 
5985         bp->rx_quick_cons_trip_int = 6;
5986         bp->rx_quick_cons_trip = 6;
5987         bp->rx_ticks_int = 18;
5988         bp->rx_ticks = 18;
5989
5990         bp->stats_ticks = 1000000 & 0xffff00;
5991
5992         bp->timer_interval =  HZ;
5993         bp->current_interval =  HZ;
5994
5995         bp->phy_addr = 1;
5996
5997         /* Disable WOL support if we are running on a SERDES chip. */
5998         if (CHIP_BOND_ID(bp) & CHIP_BOND_ID_SERDES_BIT) {
5999                 bp->phy_flags |= PHY_SERDES_FLAG;
6000                 bp->flags |= NO_WOL_FLAG;
6001                 if (CHIP_NUM(bp) == CHIP_NUM_5708) {
6002                         bp->phy_addr = 2;
6003                         reg = REG_RD_IND(bp, bp->shmem_base +
6004                                          BNX2_SHARED_HW_CFG_CONFIG);
6005                         if (reg & BNX2_SHARED_HW_CFG_PHY_2_5G)
6006                                 bp->phy_flags |= PHY_2_5G_CAPABLE_FLAG;
6007                 }
6008         }
6009
6010         if ((CHIP_ID(bp) == CHIP_ID_5708_A0) ||
6011             (CHIP_ID(bp) == CHIP_ID_5708_B0) ||
6012             (CHIP_ID(bp) == CHIP_ID_5708_B1))
6013                 bp->flags |= NO_WOL_FLAG;
6014
6015         if (CHIP_ID(bp) == CHIP_ID_5706_A0) {
6016                 bp->tx_quick_cons_trip_int =
6017                         bp->tx_quick_cons_trip;
6018                 bp->tx_ticks_int = bp->tx_ticks;
6019                 bp->rx_quick_cons_trip_int =
6020                         bp->rx_quick_cons_trip;
6021                 bp->rx_ticks_int = bp->rx_ticks;
6022                 bp->comp_prod_trip_int = bp->comp_prod_trip;
6023                 bp->com_ticks_int = bp->com_ticks;
6024                 bp->cmd_ticks_int = bp->cmd_ticks;
6025         }
6026
6027         bp->autoneg = AUTONEG_SPEED | AUTONEG_FLOW_CTRL;
6028         bp->req_line_speed = 0;
6029         if (bp->phy_flags & PHY_SERDES_FLAG) {
6030                 bp->advertising = ETHTOOL_ALL_FIBRE_SPEED | ADVERTISED_Autoneg;
6031
6032                 reg = REG_RD_IND(bp, bp->shmem_base + BNX2_PORT_HW_CFG_CONFIG);
6033                 reg &= BNX2_PORT_HW_CFG_CFG_DFLT_LINK_MASK;
6034                 if (reg == BNX2_PORT_HW_CFG_CFG_DFLT_LINK_1G) {
6035                         bp->autoneg = 0;
6036                         bp->req_line_speed = bp->line_speed = SPEED_1000;
6037                         bp->req_duplex = DUPLEX_FULL;
6038                 }
6039         }
6040         else {
6041                 bp->advertising = ETHTOOL_ALL_COPPER_SPEED | ADVERTISED_Autoneg;
6042         }
6043
6044         bp->req_flow_ctrl = FLOW_CTRL_RX | FLOW_CTRL_TX;
6045
6046         init_timer(&bp->timer);
6047         bp->timer.expires = RUN_AT(bp->timer_interval);
6048         bp->timer.data = (unsigned long) bp;
6049         bp->timer.function = bnx2_timer;
6050
6051         return 0;
6052
6053 err_out_unmap:
6054         if (bp->regview) {
6055                 iounmap(bp->regview);
6056                 bp->regview = NULL;
6057         }
6058
6059 err_out_release:
6060         pci_release_regions(pdev);
6061
6062 err_out_disable:
6063         pci_disable_device(pdev);
6064         pci_set_drvdata(pdev, NULL);
6065
6066 err_out:
6067         return rc;
6068 }
6069
6070 static int __devinit
6071 bnx2_init_one(struct pci_dev *pdev, const struct pci_device_id *ent)
6072 {
6073         static int version_printed = 0;
6074         struct net_device *dev = NULL;
6075         struct bnx2 *bp;
6076         int rc, i;
6077
6078         if (version_printed++ == 0)
6079                 printk(KERN_INFO "%s", version);
6080
6081         /* dev zeroed in init_etherdev */
6082 #if (LINUX_VERSION_CODE >= 0x20418)
6083         dev = alloc_etherdev(sizeof(*bp));
6084 #else
6085         dev = init_etherdev(NULL, sizeof(*bp));
6086 #endif
6087
6088         if (!dev)
6089                 return -ENOMEM;
6090
6091         rc = bnx2_init_board(pdev, dev);
6092         if (rc < 0) {
6093 #if (LINUX_VERSION_CODE >= 0x20418)
6094                 free_netdev(dev);
6095 #else
6096                 unregister_netdev(dev);
6097                 kfree(dev);
6098 #endif
6099                 return rc;
6100         }
6101
6102         dev->open = bnx2_open;
6103         dev->hard_start_xmit = bnx2_start_xmit;
6104         dev->stop = bnx2_close;
6105         dev->get_stats = bnx2_get_stats;
6106         dev->set_multicast_list = bnx2_set_rx_mode;
6107         dev->do_ioctl = bnx2_ioctl;
6108         dev->set_mac_address = bnx2_change_mac_addr;
6109         dev->change_mtu = bnx2_change_mtu;
6110         dev->tx_timeout = bnx2_tx_timeout;
6111         dev->watchdog_timeo = TX_TIMEOUT;
6112 #ifdef BCM_VLAN
6113         dev->vlan_rx_register = bnx2_vlan_rx_register;
6114         dev->vlan_rx_kill_vid = bnx2_vlan_rx_kill_vid;
6115 #endif
6116         dev->poll = bnx2_poll;
6117         dev->ethtool_ops = &bnx2_ethtool_ops;
6118         dev->weight = 64;
6119
6120         bp = netdev_priv(dev);
6121
6122 #if defined(HAVE_POLL_CONTROLLER) || defined(CONFIG_NET_POLL_CONTROLLER)
6123         dev->poll_controller = poll_bnx2;
6124 #endif
6125
6126 #if (LINUX_VERSION_CODE >= 0x20418)
6127         if ((rc = register_netdev(dev))) {
6128                 printk(KERN_ERR PFX "Cannot register net device\n");
6129                 if (bp->regview)
6130                         iounmap(bp->regview);
6131                 pci_release_regions(pdev);
6132                 pci_disable_device(pdev);
6133                 pci_set_drvdata(pdev, NULL);
6134                 free_netdev(dev);
6135                 return rc;
6136         }
6137 #endif
6138
6139         pci_set_drvdata(pdev, dev);
6140
6141         memcpy(dev->dev_addr, bp->mac_addr, 6);
6142 #ifdef ETHTOOL_GPERMADDR
6143         memcpy(dev->perm_addr, bp->mac_addr, 6);
6144 #endif
6145         bp->name = board_info[ent->driver_data].name,
6146         printk(KERN_INFO "%s: %s (%c%d) PCI%s %s %dMHz found at mem %lx, "
6147                 "IRQ %d, ",
6148                 dev->name,
6149                 bp->name,
6150                 ((CHIP_ID(bp) & 0xf000) >> 12) + 'A',
6151                 ((CHIP_ID(bp) & 0x0ff0) >> 4),
6152                 ((bp->flags & PCIX_FLAG) ? "-X" : ""),
6153                 ((bp->flags & PCI_32BIT_FLAG) ? "32-bit" : "64-bit"),
6154                 bp->bus_speed_mhz,
6155                 dev->base_addr,
6156                 bp->pdev->irq);
6157
6158         printk("node addr ");
6159         for (i = 0; i < 6; i++)
6160                 printk("%2.2x", dev->dev_addr[i]);
6161         printk("\n");
6162
6163         dev->features |= NETIF_F_SG | NETIF_F_IP_CSUM;
6164 #ifdef BCM_VLAN
6165         dev->features |= NETIF_F_HW_VLAN_TX | NETIF_F_HW_VLAN_RX;
6166 #endif
6167 #ifdef BCM_TSO
6168         dev->features |= NETIF_F_TSO;
6169 #endif
6170
6171         netif_carrier_off(bp->dev);
6172
6173         return 0;
6174 }
6175
/* Device removal: tear down in the reverse order of bnx2_init_one().
 * The sequence matters: deferred work is flushed and the netdev is
 * unregistered before any mappings or PCI resources are released.
 */
static void __devexit
bnx2_remove_one(struct pci_dev *pdev)
{
	struct net_device *dev = pci_get_drvdata(pdev);
	struct bnx2 *bp = netdev_priv(dev);

	/* Workqueues exist only on 2.6+ kernels in this backport. */
#if (LINUX_VERSION_CODE >= 0x20600)
	flush_scheduled_work();
#endif

	/* Detach from the stack first so no new traffic/ioctls arrive. */
	unregister_netdev(dev);

	if (bp->regview)
		iounmap(bp->regview);

#if (LINUX_VERSION_CODE >= 0x20418)
	free_netdev(dev);
#else
	kfree(dev);
#endif
	pci_release_regions(pdev);
	pci_disable_device(pdev);
	pci_set_drvdata(pdev, NULL);
}
6200
/* PM suspend handler: quiesce the interface, pick a firmware reset code
 * based on Wake-on-LAN capability/configuration, reset the chip, free
 * the rings, and drop the device into the requested power state.
 * A device that is not up needs no work.
 */
static int
bnx2_suspend(struct pci_dev *pdev, pm_message_t state)
{
	struct net_device *dev = pci_get_drvdata(pdev);
	struct bnx2 *bp = netdev_priv(dev);
	u32 reset_code;

	if (!netif_running(dev))
		return 0;

	/* Stop traffic and the periodic timer before touching the chip. */
	bnx2_netif_stop(bp);
	netif_device_detach(dev);
	del_timer_sync(&bp->timer);
	/* Tell the firmware how we are going down: no WOL support at all,
	 * WOL armed, or WOL supported but disabled.
	 */
	if (bp->flags & NO_WOL_FLAG)
		reset_code = BNX2_DRV_MSG_CODE_UNLOAD;
	else if (bp->wol)
		reset_code = BNX2_DRV_MSG_CODE_SUSPEND_WOL;
	else
		reset_code = BNX2_DRV_MSG_CODE_SUSPEND_NO_WOL;
	bnx2_reset_chip(bp, reset_code);
	bnx2_free_skbs(bp);
	/* pci_choose_state() only exists on >= 2.6.11; older kernels pass
	 * the pm_message_t through directly.
	 */
#if (LINUX_VERSION_CODE < 0x2060b)
	bnx2_set_power_state(bp, state);
#else
	bnx2_set_power_state(bp, pci_choose_state(pdev, state));
#endif
	return 0;
}
6229
/* PM resume handler: restore full power, then fully re-initialize the
 * NIC (suspend reset the chip and freed the rings) before restarting
 * traffic.  A device that was not up needs no work.
 */
static int
bnx2_resume(struct pci_dev *pdev)
{
	struct net_device *dev = pci_get_drvdata(pdev);
	struct bnx2 *bp = netdev_priv(dev);

	if (!netif_running(dev))
		return 0;

	/* Power must be restored before any register access in init. */
	bnx2_set_power_state(bp, PCI_D0);
	netif_device_attach(dev);
	bnx2_init_nic(bp);
	bnx2_netif_start(bp);
	return 0;
}
6245
/* PCI driver glue: binds the probe/remove/suspend/resume callbacks
 * above to the device IDs in bnx2_pci_tbl.
 */
static struct pci_driver bnx2_pci_driver = {
	.name		= DRV_MODULE_NAME,
	.id_table	= bnx2_pci_tbl,
	.probe		= bnx2_init_one,
	.remove		= __devexit_p(bnx2_remove_one),
	.suspend	= bnx2_suspend,
	.resume		= bnx2_resume,
};
6254
/* Module entry point: register the PCI driver (pci_module_init is the
 * pre-2.6.x-era name for pci_register_driver).
 */
static int __init bnx2_init(void)
{
	return pci_module_init(&bnx2_pci_driver);
}
6259
/* Module exit point: unregister the PCI driver, which triggers
 * bnx2_remove_one() for every bound device.
 */
static void __exit bnx2_cleanup(void)
{
	pci_unregister_driver(&bnx2_pci_driver);
}
6264
/* Hook module load/unload to the init/cleanup routines above. */
module_init(bnx2_init);
module_exit(bnx2_cleanup);
6267
6268
6269