1 /* bnx2.c: Broadcom NX2 network driver.
3 * Copyright (c) 2004, 2005, 2006 Broadcom Corporation
5 * This program is free software; you can redistribute it and/or modify
6 * it under the terms of the GNU General Public License as published by
7 * the Free Software Foundation.
9 * Written by: Michael Chan (mchan@broadcom.com)
12 #include <linux/config.h>
14 #include <linux/version.h>
15 #if (LINUX_VERSION_CODE < 0x020500)
16 #if defined(CONFIG_MODVERSIONS) && defined(MODULE) && ! defined(MODVERSIONS)
18 #include <linux/modversions.h>
22 #include <linux/module.h>
23 #if (LINUX_VERSION_CODE >= 0x020600)
24 #include <linux/moduleparam.h>
27 #include <linux/kernel.h>
28 #include <linux/timer.h>
29 #include <linux/errno.h>
30 #include <linux/ioport.h>
31 #include <linux/slab.h>
32 #include <linux/vmalloc.h>
33 #include <linux/interrupt.h>
34 #include <linux/pci.h>
35 #include <linux/init.h>
36 #include <linux/netdevice.h>
37 #include <linux/etherdevice.h>
38 #include <linux/skbuff.h>
39 #if (LINUX_VERSION_CODE >= 0x020600)
40 #include <linux/dma-mapping.h>
42 #include <asm/bitops.h>
45 #include <linux/delay.h>
46 #include <asm/byteorder.h>
48 #include <linux/time.h>
49 #include <linux/ethtool.h>
50 #include <linux/mii.h>
51 #ifdef NETIF_F_HW_VLAN_TX
52 #include <linux/if_vlan.h>
58 #include <net/checksum.h>
61 #define gso_size tso_size
64 #if (LINUX_VERSION_CODE >= 0x020600)
65 #include <linux/workqueue.h>
67 #ifndef BNX2_BOOT_DISK
68 #include <linux/crc32.h>
70 #include <linux/prefetch.h>
71 #include <linux/cache.h>
72 #include <linux/zlib.h>
77 #define DRV_MODULE_NAME "bnx2"
78 #define PFX DRV_MODULE_NAME ": "
79 #define DRV_MODULE_VERSION "1.4.43f"
80 #define DRV_MODULE_RELDATE "June 26, 2006"
82 #define RUN_AT(x) (jiffies + (x))
84 /* Time in jiffies before concluding the transmitter is hung. */
85 #define TX_TIMEOUT (5*HZ)
87 static const char version[] __devinitdata =
88 "Broadcom NetXtreme II Gigabit Ethernet Driver " DRV_MODULE_NAME " v" DRV_MODULE_VERSION " (" DRV_MODULE_RELDATE ")\n";
90 MODULE_AUTHOR("Michael Chan <mchan@broadcom.com>");
91 MODULE_DESCRIPTION("Broadcom NetXtreme II BCM5706/5708 Driver");
92 MODULE_LICENSE("GPL");
93 MODULE_VERSION(DRV_MODULE_VERSION);
95 #if (LINUX_VERSION_CODE >= 0x20600)
96 static int disable_msi = 0;
98 module_param(disable_msi, int, 0);
99 MODULE_PARM_DESC(disable_msi, "Disable Message Signaled Interrupt (MSI)");
112 /* indexed by board_t, above */
113 static const struct {
115 } board_info[] __devinitdata = {
116 { "Broadcom NetXtreme II BCM5706 1000Base-T" },
117 { "HP NC370T Multifunction Gigabit Server Adapter" },
118 { "HP NC370i Multifunction Gigabit Server Adapter" },
119 { "Broadcom NetXtreme II BCM5706 1000Base-SX" },
120 { "HP NC370F Multifunction Gigabit Server Adapter" },
121 { "Broadcom NetXtreme II BCM5708 1000Base-T" },
122 { "Broadcom NetXtreme II BCM5708 1000Base-SX" },
125 static struct pci_device_id bnx2_pci_tbl[] = {
126 { PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_NX2_5706,
127 PCI_VENDOR_ID_HP, 0x3101, 0, 0, NC370T },
128 { PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_NX2_5706,
129 PCI_VENDOR_ID_HP, 0x3106, 0, 0, NC370I },
130 { PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_NX2_5706,
131 PCI_ANY_ID, PCI_ANY_ID, 0, 0, BCM5706 },
132 { PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_NX2_5708,
133 PCI_ANY_ID, PCI_ANY_ID, 0, 0, BCM5708 },
134 { PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_NX2_5706S,
135 PCI_VENDOR_ID_HP, 0x3102, 0, 0, NC370F },
136 { PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_NX2_5706S,
137 PCI_ANY_ID, PCI_ANY_ID, 0, 0, BCM5706S },
138 { PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_NX2_5708S,
139 PCI_ANY_ID, PCI_ANY_ID, 0, 0, BCM5708S },
143 static struct flash_spec flash_table[] =
146 {0x00000000, 0x40830380, 0x009f0081, 0xa184a053, 0xaf000400,
147 1, SEEPROM_PAGE_BITS, SEEPROM_PAGE_SIZE,
148 SEEPROM_BYTE_ADDR_MASK, SEEPROM_TOTAL_SIZE,
150 /* Expansion entry 0001 */
151 {0x08000002, 0x4b808201, 0x00050081, 0x03840253, 0xaf020406,
152 0, SAIFUN_FLASH_PAGE_BITS, SAIFUN_FLASH_PAGE_SIZE,
153 SAIFUN_FLASH_BYTE_ADDR_MASK, 0,
155 /* Saifun SA25F010 (non-buffered flash) */
156 /* strap, cfg1, & write1 need updates */
157 {0x04000001, 0x47808201, 0x00050081, 0x03840253, 0xaf020406,
158 0, SAIFUN_FLASH_PAGE_BITS, SAIFUN_FLASH_PAGE_SIZE,
159 SAIFUN_FLASH_BYTE_ADDR_MASK, SAIFUN_FLASH_BASE_TOTAL_SIZE*2,
160 "Non-buffered flash (128kB)"},
161 /* Saifun SA25F020 (non-buffered flash) */
162 /* strap, cfg1, & write1 need updates */
163 {0x0c000003, 0x4f808201, 0x00050081, 0x03840253, 0xaf020406,
164 0, SAIFUN_FLASH_PAGE_BITS, SAIFUN_FLASH_PAGE_SIZE,
165 SAIFUN_FLASH_BYTE_ADDR_MASK, SAIFUN_FLASH_BASE_TOTAL_SIZE*4,
166 "Non-buffered flash (256kB)"},
167 /* Expansion entry 0100 */
168 {0x11000000, 0x53808201, 0x00050081, 0x03840253, 0xaf020406,
169 0, SAIFUN_FLASH_PAGE_BITS, SAIFUN_FLASH_PAGE_SIZE,
170 SAIFUN_FLASH_BYTE_ADDR_MASK, 0,
172 /* Entry 0101: ST M45PE10 (non-buffered flash, TetonII B0) */
173 {0x19000002, 0x5b808201, 0x000500db, 0x03840253, 0xaf020406,
174 0, ST_MICRO_FLASH_PAGE_BITS, ST_MICRO_FLASH_PAGE_SIZE,
175 ST_MICRO_FLASH_BYTE_ADDR_MASK, ST_MICRO_FLASH_BASE_TOTAL_SIZE*2,
176 "Entry 0101: ST M45PE10 (128kB non-bufferred)"},
177 /* Entry 0110: ST M45PE20 (non-buffered flash)*/
178 {0x15000001, 0x57808201, 0x000500db, 0x03840253, 0xaf020406,
179 0, ST_MICRO_FLASH_PAGE_BITS, ST_MICRO_FLASH_PAGE_SIZE,
180 ST_MICRO_FLASH_BYTE_ADDR_MASK, ST_MICRO_FLASH_BASE_TOTAL_SIZE*4,
181 "Entry 0110: ST M45PE20 (256kB non-bufferred)"},
182 /* Saifun SA25F005 (non-buffered flash) */
183 /* strap, cfg1, & write1 need updates */
184 {0x1d000003, 0x5f808201, 0x00050081, 0x03840253, 0xaf020406,
185 0, SAIFUN_FLASH_PAGE_BITS, SAIFUN_FLASH_PAGE_SIZE,
186 SAIFUN_FLASH_BYTE_ADDR_MASK, SAIFUN_FLASH_BASE_TOTAL_SIZE,
187 "Non-buffered flash (64kB)"},
189 {0x22000000, 0x62808380, 0x009f0081, 0xa184a053, 0xaf000400,
190 1, SEEPROM_PAGE_BITS, SEEPROM_PAGE_SIZE,
191 SEEPROM_BYTE_ADDR_MASK, SEEPROM_TOTAL_SIZE,
193 /* Expansion entry 1001 */
194 {0x2a000002, 0x6b808201, 0x00050081, 0x03840253, 0xaf020406,
195 0, SAIFUN_FLASH_PAGE_BITS, SAIFUN_FLASH_PAGE_SIZE,
196 SAIFUN_FLASH_BYTE_ADDR_MASK, 0,
198 /* Expansion entry 1010 */
199 {0x26000001, 0x67808201, 0x00050081, 0x03840253, 0xaf020406,
200 0, SAIFUN_FLASH_PAGE_BITS, SAIFUN_FLASH_PAGE_SIZE,
201 SAIFUN_FLASH_BYTE_ADDR_MASK, 0,
203 /* ATMEL AT45DB011B (buffered flash) */
204 {0x2e000003, 0x6e808273, 0x00570081, 0x68848353, 0xaf000400,
205 1, BUFFERED_FLASH_PAGE_BITS, BUFFERED_FLASH_PAGE_SIZE,
206 BUFFERED_FLASH_BYTE_ADDR_MASK, BUFFERED_FLASH_TOTAL_SIZE,
207 "Buffered flash (128kB)"},
208 /* Expansion entry 1100 */
209 {0x33000000, 0x73808201, 0x00050081, 0x03840253, 0xaf020406,
210 0, SAIFUN_FLASH_PAGE_BITS, SAIFUN_FLASH_PAGE_SIZE,
211 SAIFUN_FLASH_BYTE_ADDR_MASK, 0,
213 /* Expansion entry 1101 */
214 {0x3b000002, 0x7b808201, 0x00050081, 0x03840253, 0xaf020406,
215 0, SAIFUN_FLASH_PAGE_BITS, SAIFUN_FLASH_PAGE_SIZE,
216 SAIFUN_FLASH_BYTE_ADDR_MASK, 0,
218 /* Atmel Expansion entry 1110 */
219 {0x37000001, 0x76808273, 0x00570081, 0x68848353, 0xaf000400,
220 1, BUFFERED_FLASH_PAGE_BITS, BUFFERED_FLASH_PAGE_SIZE,
221 BUFFERED_FLASH_BYTE_ADDR_MASK, 0,
222 "Entry 1110 (Atmel)"},
223 /* ATMEL AT45DB021B (buffered flash) */
224 {0x3f000003, 0x7e808273, 0x00570081, 0x68848353, 0xaf000400,
225 1, BUFFERED_FLASH_PAGE_BITS, BUFFERED_FLASH_PAGE_SIZE,
226 BUFFERED_FLASH_BYTE_ADDR_MASK, BUFFERED_FLASH_TOTAL_SIZE*2,
227 "Buffered flash (256kB)"},
230 MODULE_DEVICE_TABLE(pci, bnx2_pci_tbl);
232 #ifdef BNX2_BOOT_DISK
233 u32 ether_crc_le(size_t len, unsigned char const *p)
237 #define CRCPOLY_LE 0xedb88320
241 for (i = 0; i < 8; i++)
242 crc = (crc >> 1) ^ ((crc & 1) ? CRCPOLY_LE : 0);
248 static inline u32 bnx2_tx_avail(struct bnx2 *bp)
250 u32 diff = TX_RING_IDX(bp->tx_prod) - TX_RING_IDX(bp->tx_cons);
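	/* The masked producer index can be numerically smaller than the
	 * masked consumer index after a ring wrap; fold the u16 underflow
	 * back into range below before computing the free count.
	 */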
252 if (diff > MAX_TX_DESC_CNT)
253 diff = (diff & MAX_TX_DESC_CNT) - 1;
254 return (bp->tx_ring_size - diff);
258 bnx2_reg_rd_ind(struct bnx2 *bp, u32 offset)
260 REG_WR(bp, BNX2_PCICFG_REG_WINDOW_ADDRESS, offset);
261 return (REG_RD(bp, BNX2_PCICFG_REG_WINDOW));
265 bnx2_reg_wr_ind(struct bnx2 *bp, u32 offset, u32 val)
267 REG_WR(bp, BNX2_PCICFG_REG_WINDOW_ADDRESS, offset);
268 REG_WR(bp, BNX2_PCICFG_REG_WINDOW, val);
272 bnx2_ctx_wr(struct bnx2 *bp, u32 cid_addr, u32 offset, u32 val)
275 REG_WR(bp, BNX2_CTX_DATA_ADR, offset);
276 REG_WR(bp, BNX2_CTX_DATA, val);
280 bnx2_read_phy(struct bnx2 *bp, u32 reg, u32 *val)
285 if (bp->phy_flags & PHY_INT_MODE_AUTO_POLLING_FLAG) {
286 val1 = REG_RD(bp, BNX2_EMAC_MDIO_MODE);
287 val1 &= ~BNX2_EMAC_MDIO_MODE_AUTO_POLL;
289 REG_WR(bp, BNX2_EMAC_MDIO_MODE, val1);
290 REG_RD(bp, BNX2_EMAC_MDIO_MODE);
295 val1 = (bp->phy_addr << 21) | (reg << 16) |
296 BNX2_EMAC_MDIO_COMM_COMMAND_READ | BNX2_EMAC_MDIO_COMM_DISEXT |
297 BNX2_EMAC_MDIO_COMM_START_BUSY;
298 REG_WR(bp, BNX2_EMAC_MDIO_COMM, val1);
300 for (i = 0; i < 50; i++) {
303 val1 = REG_RD(bp, BNX2_EMAC_MDIO_COMM);
304 if (!(val1 & BNX2_EMAC_MDIO_COMM_START_BUSY)) {
307 val1 = REG_RD(bp, BNX2_EMAC_MDIO_COMM);
308 val1 &= BNX2_EMAC_MDIO_COMM_DATA;
314 if (val1 & BNX2_EMAC_MDIO_COMM_START_BUSY) {
323 if (bp->phy_flags & PHY_INT_MODE_AUTO_POLLING_FLAG) {
324 val1 = REG_RD(bp, BNX2_EMAC_MDIO_MODE);
325 val1 |= BNX2_EMAC_MDIO_MODE_AUTO_POLL;
327 REG_WR(bp, BNX2_EMAC_MDIO_MODE, val1);
328 REG_RD(bp, BNX2_EMAC_MDIO_MODE);
337 bnx2_write_phy(struct bnx2 *bp, u32 reg, u32 val)
342 if (bp->phy_flags & PHY_INT_MODE_AUTO_POLLING_FLAG) {
343 val1 = REG_RD(bp, BNX2_EMAC_MDIO_MODE);
344 val1 &= ~BNX2_EMAC_MDIO_MODE_AUTO_POLL;
346 REG_WR(bp, BNX2_EMAC_MDIO_MODE, val1);
347 REG_RD(bp, BNX2_EMAC_MDIO_MODE);
352 val1 = (bp->phy_addr << 21) | (reg << 16) | val |
353 BNX2_EMAC_MDIO_COMM_COMMAND_WRITE |
354 BNX2_EMAC_MDIO_COMM_START_BUSY | BNX2_EMAC_MDIO_COMM_DISEXT;
355 REG_WR(bp, BNX2_EMAC_MDIO_COMM, val1);
357 for (i = 0; i < 50; i++) {
360 val1 = REG_RD(bp, BNX2_EMAC_MDIO_COMM);
361 if (!(val1 & BNX2_EMAC_MDIO_COMM_START_BUSY)) {
367 if (val1 & BNX2_EMAC_MDIO_COMM_START_BUSY)
372 if (bp->phy_flags & PHY_INT_MODE_AUTO_POLLING_FLAG) {
373 val1 = REG_RD(bp, BNX2_EMAC_MDIO_MODE);
374 val1 |= BNX2_EMAC_MDIO_MODE_AUTO_POLL;
376 REG_WR(bp, BNX2_EMAC_MDIO_MODE, val1);
377 REG_RD(bp, BNX2_EMAC_MDIO_MODE);
386 bnx2_disable_int(struct bnx2 *bp)
388 REG_WR(bp, BNX2_PCICFG_INT_ACK_CMD,
389 BNX2_PCICFG_INT_ACK_CMD_MASK_INT);
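	/* Read back to flush the posted write so the interrupt mask
	 * takes effect before returning.
	 */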
390 REG_RD(bp, BNX2_PCICFG_INT_ACK_CMD);
394 bnx2_enable_int(struct bnx2 *bp)
396 REG_WR(bp, BNX2_PCICFG_INT_ACK_CMD,
397 BNX2_PCICFG_INT_ACK_CMD_INDEX_VALID |
398 BNX2_PCICFG_INT_ACK_CMD_MASK_INT | bp->last_status_idx);
400 REG_WR(bp, BNX2_PCICFG_INT_ACK_CMD,
401 BNX2_PCICFG_INT_ACK_CMD_INDEX_VALID | bp->last_status_idx);
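	/* Request an immediate coalescing pass so any events that arrived
	 * while interrupts were masked are reported right away.
	 */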
403 REG_WR(bp, BNX2_HC_COMMAND, bp->hc_cmd | BNX2_HC_COMMAND_COAL_NOW);
407 bnx2_disable_int_sync(struct bnx2 *bp)
409 atomic_inc(&bp->intr_sem);
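	/* With intr_sem non-zero the ISR returns without scheduling NAPI,
	 * so an interrupt that races with the mask below is harmless.
	 */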
410 bnx2_disable_int(bp);
411 #if (LINUX_VERSION_CODE >= 0x2051c)
412 synchronize_irq(bp->pdev->irq);
419 bnx2_netif_stop(struct bnx2 *bp)
421 bnx2_disable_int_sync(bp);
422 if (netif_running(bp->dev)) {
423 netif_poll_disable(bp->dev);
424 netif_tx_disable(bp->dev);
425 bp->dev->trans_start = jiffies; /* prevent tx timeout */
430 bnx2_netif_start(struct bnx2 *bp)
432 if (atomic_dec_and_test(&bp->intr_sem)) {
433 if (netif_running(bp->dev)) {
434 netif_wake_queue(bp->dev);
435 netif_poll_enable(bp->dev);
442 bnx2_free_mem(struct bnx2 *bp)
446 if (bp->status_blk) {
447 pci_free_consistent(bp->pdev, bp->status_stats_size,
448 bp->status_blk, bp->status_blk_mapping);
449 bp->status_blk = NULL;
450 bp->stats_blk = NULL;
452 if (bp->tx_desc_ring) {
453 pci_free_consistent(bp->pdev,
454 sizeof(struct tx_bd) * TX_DESC_CNT,
455 bp->tx_desc_ring, bp->tx_desc_mapping);
456 bp->tx_desc_ring = NULL;
458 kfree(bp->tx_buf_ring);
459 bp->tx_buf_ring = NULL;
460 for (i = 0; i < bp->rx_max_ring; i++) {
461 if (bp->rx_desc_ring[i])
462 pci_free_consistent(bp->pdev,
463 sizeof(struct rx_bd) * RX_DESC_CNT,
465 bp->rx_desc_mapping[i]);
466 bp->rx_desc_ring[i] = NULL;
468 vfree(bp->rx_buf_ring);
469 bp->rx_buf_ring = NULL;
473 bnx2_alloc_mem(struct bnx2 *bp)
475 int i, status_blk_size;
477 bp->tx_buf_ring = kmalloc(sizeof(struct sw_bd) * TX_DESC_CNT,
479 if (bp->tx_buf_ring == NULL)
482 memset(bp->tx_buf_ring, 0, sizeof(struct sw_bd) * TX_DESC_CNT);
483 bp->tx_desc_ring = pci_alloc_consistent(bp->pdev,
484 sizeof(struct tx_bd) *
486 &bp->tx_desc_mapping);
487 if (bp->tx_desc_ring == NULL)
490 bp->rx_buf_ring = vmalloc(sizeof(struct sw_bd) * RX_DESC_CNT *
492 if (bp->rx_buf_ring == NULL)
495 memset(bp->rx_buf_ring, 0, sizeof(struct sw_bd) * RX_DESC_CNT *
498 for (i = 0; i < bp->rx_max_ring; i++) {
499 bp->rx_desc_ring[i] =
500 pci_alloc_consistent(bp->pdev,
501 sizeof(struct rx_bd) * RX_DESC_CNT,
502 &bp->rx_desc_mapping[i]);
503 if (bp->rx_desc_ring[i] == NULL)
508 /* Combine status and statistics blocks into one allocation. */
509 status_blk_size = L1_CACHE_ALIGN(sizeof(struct status_block));
510 bp->status_stats_size = status_blk_size +
511 sizeof(struct statistics_block);
513 bp->status_blk = pci_alloc_consistent(bp->pdev, bp->status_stats_size,
514 &bp->status_blk_mapping);
515 if (bp->status_blk == NULL)
518 memset(bp->status_blk, 0, bp->status_stats_size);
520 bp->stats_blk = (void *) ((unsigned long) bp->status_blk +
523 bp->stats_blk_mapping = bp->status_blk_mapping + status_blk_size;
533 bnx2_report_fw_link(struct bnx2 *bp)
535 u32 fw_link_status = 0;
540 switch (bp->line_speed) {
542 if (bp->duplex == DUPLEX_HALF)
543 fw_link_status = BNX2_LINK_STATUS_10HALF;
545 fw_link_status = BNX2_LINK_STATUS_10FULL;
548 if (bp->duplex == DUPLEX_HALF)
549 fw_link_status = BNX2_LINK_STATUS_100HALF;
551 fw_link_status = BNX2_LINK_STATUS_100FULL;
554 if (bp->duplex == DUPLEX_HALF)
555 fw_link_status = BNX2_LINK_STATUS_1000HALF;
557 fw_link_status = BNX2_LINK_STATUS_1000FULL;
560 if (bp->duplex == DUPLEX_HALF)
561 fw_link_status = BNX2_LINK_STATUS_2500HALF;
563 fw_link_status = BNX2_LINK_STATUS_2500FULL;
567 fw_link_status |= BNX2_LINK_STATUS_LINK_UP;
570 fw_link_status |= BNX2_LINK_STATUS_AN_ENABLED;
572 bnx2_read_phy(bp, MII_BMSR, &bmsr);
573 bnx2_read_phy(bp, MII_BMSR, &bmsr);
575 if (!(bmsr & BMSR_ANEGCOMPLETE) ||
576 bp->phy_flags & PHY_PARALLEL_DETECT_FLAG)
577 fw_link_status |= BNX2_LINK_STATUS_PARALLEL_DET;
579 fw_link_status |= BNX2_LINK_STATUS_AN_COMPLETE;
583 fw_link_status = BNX2_LINK_STATUS_LINK_DOWN;
585 REG_WR_IND(bp, bp->shmem_base + BNX2_LINK_STATUS, fw_link_status);
589 bnx2_report_link(struct bnx2 *bp)
592 netif_carrier_on(bp->dev);
593 printk(KERN_INFO PFX "%s NIC Link is Up, ", bp->dev->name);
595 printk("%d Mbps ", bp->line_speed);
597 if (bp->duplex == DUPLEX_FULL)
598 printk("full duplex");
600 printk("half duplex");
603 if (bp->flow_ctrl & FLOW_CTRL_RX) {
604 printk(", receive ");
605 if (bp->flow_ctrl & FLOW_CTRL_TX)
606 printk("& transmit ");
609 printk(", transmit ");
611 printk("flow control ON");
616 netif_carrier_off(bp->dev);
617 printk(KERN_ERR PFX "%s NIC Link is Down\n", bp->dev->name);
620 bnx2_report_fw_link(bp);
624 bnx2_resolve_flow_ctrl(struct bnx2 *bp)
626 u32 local_adv, remote_adv;
629 if ((bp->autoneg & (AUTONEG_SPEED | AUTONEG_FLOW_CTRL)) !=
630 (AUTONEG_SPEED | AUTONEG_FLOW_CTRL)) {
632 if (bp->duplex == DUPLEX_FULL) {
633 bp->flow_ctrl = bp->req_flow_ctrl;
638 if (bp->duplex != DUPLEX_FULL) {
642 if ((bp->phy_flags & PHY_SERDES_FLAG) &&
643 (CHIP_NUM(bp) == CHIP_NUM_5708)) {
646 bnx2_read_phy(bp, BCM5708S_1000X_STAT1, &val);
647 if (val & BCM5708S_1000X_STAT1_TX_PAUSE)
648 bp->flow_ctrl |= FLOW_CTRL_TX;
649 if (val & BCM5708S_1000X_STAT1_RX_PAUSE)
650 bp->flow_ctrl |= FLOW_CTRL_RX;
654 bnx2_read_phy(bp, MII_ADVERTISE, &local_adv);
655 bnx2_read_phy(bp, MII_LPA, &remote_adv);
657 if (bp->phy_flags & PHY_SERDES_FLAG) {
658 u32 new_local_adv = 0;
659 u32 new_remote_adv = 0;
661 if (local_adv & ADVERTISE_1000XPAUSE)
662 new_local_adv |= ADVERTISE_PAUSE_CAP;
663 if (local_adv & ADVERTISE_1000XPSE_ASYM)
664 new_local_adv |= ADVERTISE_PAUSE_ASYM;
665 if (remote_adv & ADVERTISE_1000XPAUSE)
666 new_remote_adv |= ADVERTISE_PAUSE_CAP;
667 if (remote_adv & ADVERTISE_1000XPSE_ASYM)
668 new_remote_adv |= ADVERTISE_PAUSE_ASYM;
670 local_adv = new_local_adv;
671 remote_adv = new_remote_adv;
674 /* See Table 28B-3 of 802.3ab-1999 spec. */
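	/* Outcomes implemented below:
	 *   local CAP          + remote CAP           -> TX and RX pause
	 *   local CAP + ASYM   + remote ASYM only     -> RX pause only
	 *   local ASYM only    + remote CAP + ASYM    -> TX pause only
	 */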
675 if (local_adv & ADVERTISE_PAUSE_CAP) {
676 if (local_adv & ADVERTISE_PAUSE_ASYM) {
677 if (remote_adv & ADVERTISE_PAUSE_CAP) {
678 bp->flow_ctrl = FLOW_CTRL_TX | FLOW_CTRL_RX;
680 else if (remote_adv & ADVERTISE_PAUSE_ASYM) {
681 bp->flow_ctrl = FLOW_CTRL_RX;
685 if (remote_adv & ADVERTISE_PAUSE_CAP) {
686 bp->flow_ctrl = FLOW_CTRL_TX | FLOW_CTRL_RX;
690 else if (local_adv & ADVERTISE_PAUSE_ASYM) {
691 if ((remote_adv & ADVERTISE_PAUSE_CAP) &&
692 (remote_adv & ADVERTISE_PAUSE_ASYM)) {
694 bp->flow_ctrl = FLOW_CTRL_TX;
700 bnx2_5708s_linkup(struct bnx2 *bp)
705 bnx2_read_phy(bp, BCM5708S_1000X_STAT1, &val);
706 switch (val & BCM5708S_1000X_STAT1_SPEED_MASK) {
707 case BCM5708S_1000X_STAT1_SPEED_10:
708 bp->line_speed = SPEED_10;
710 case BCM5708S_1000X_STAT1_SPEED_100:
711 bp->line_speed = SPEED_100;
713 case BCM5708S_1000X_STAT1_SPEED_1G:
714 bp->line_speed = SPEED_1000;
716 case BCM5708S_1000X_STAT1_SPEED_2G5:
717 bp->line_speed = SPEED_2500;
720 if (val & BCM5708S_1000X_STAT1_FD)
721 bp->duplex = DUPLEX_FULL;
723 bp->duplex = DUPLEX_HALF;
729 bnx2_5706s_linkup(struct bnx2 *bp)
731 u32 bmcr, local_adv, remote_adv, common;
734 bp->line_speed = SPEED_1000;
736 bnx2_read_phy(bp, MII_BMCR, &bmcr);
737 if (bmcr & BMCR_FULLDPLX) {
738 bp->duplex = DUPLEX_FULL;
741 bp->duplex = DUPLEX_HALF;
744 if (!(bmcr & BMCR_ANENABLE)) {
748 bnx2_read_phy(bp, MII_ADVERTISE, &local_adv);
749 bnx2_read_phy(bp, MII_LPA, &remote_adv);
751 common = local_adv & remote_adv;
752 if (common & (ADVERTISE_1000XHALF | ADVERTISE_1000XFULL)) {
754 if (common & ADVERTISE_1000XFULL) {
755 bp->duplex = DUPLEX_FULL;
758 bp->duplex = DUPLEX_HALF;
766 bnx2_copper_linkup(struct bnx2 *bp)
770 bnx2_read_phy(bp, MII_BMCR, &bmcr);
771 if (bmcr & BMCR_ANENABLE) {
772 u32 local_adv, remote_adv, common;
774 bnx2_read_phy(bp, MII_CTRL1000, &local_adv);
775 bnx2_read_phy(bp, MII_STAT1000, &remote_adv);
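		/* The link-partner 1000BASE-T ability bits in MII_STAT1000 sit
		 * two bit positions above the corresponding advertisement bits
		 * in MII_CTRL1000, hence the shift before masking.
		 */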
777 common = local_adv & (remote_adv >> 2);
778 if (common & ADVERTISE_1000FULL) {
779 bp->line_speed = SPEED_1000;
780 bp->duplex = DUPLEX_FULL;
782 else if (common & ADVERTISE_1000HALF) {
783 bp->line_speed = SPEED_1000;
784 bp->duplex = DUPLEX_HALF;
787 bnx2_read_phy(bp, MII_ADVERTISE, &local_adv);
788 bnx2_read_phy(bp, MII_LPA, &remote_adv);
790 common = local_adv & remote_adv;
791 if (common & ADVERTISE_100FULL) {
792 bp->line_speed = SPEED_100;
793 bp->duplex = DUPLEX_FULL;
795 else if (common & ADVERTISE_100HALF) {
796 bp->line_speed = SPEED_100;
797 bp->duplex = DUPLEX_HALF;
799 else if (common & ADVERTISE_10FULL) {
800 bp->line_speed = SPEED_10;
801 bp->duplex = DUPLEX_FULL;
803 else if (common & ADVERTISE_10HALF) {
804 bp->line_speed = SPEED_10;
805 bp->duplex = DUPLEX_HALF;
814 if (bmcr & BMCR_SPEED100) {
815 bp->line_speed = SPEED_100;
818 bp->line_speed = SPEED_10;
820 if (bmcr & BMCR_FULLDPLX) {
821 bp->duplex = DUPLEX_FULL;
824 bp->duplex = DUPLEX_HALF;
832 bnx2_set_mac_link(struct bnx2 *bp)
836 REG_WR(bp, BNX2_EMAC_TX_LENGTHS, 0x2620);
837 if (bp->link_up && (bp->line_speed == SPEED_1000) &&
838 (bp->duplex == DUPLEX_HALF)) {
839 REG_WR(bp, BNX2_EMAC_TX_LENGTHS, 0x26ff);
842 /* Configure the EMAC mode register. */
843 val = REG_RD(bp, BNX2_EMAC_MODE);
845 val &= ~(BNX2_EMAC_MODE_PORT | BNX2_EMAC_MODE_HALF_DUPLEX |
846 BNX2_EMAC_MODE_MAC_LOOP | BNX2_EMAC_MODE_FORCE_LINK |
850 switch (bp->line_speed) {
852 if (CHIP_NUM(bp) == CHIP_NUM_5708) {
853 val |= BNX2_EMAC_MODE_PORT_MII_10;
858 val |= BNX2_EMAC_MODE_PORT_MII;
861 val |= BNX2_EMAC_MODE_25G;
864 val |= BNX2_EMAC_MODE_PORT_GMII;
869 val |= BNX2_EMAC_MODE_PORT_GMII;
872 /* Set the MAC to operate in the appropriate duplex mode. */
873 if (bp->duplex == DUPLEX_HALF)
874 val |= BNX2_EMAC_MODE_HALF_DUPLEX;
875 REG_WR(bp, BNX2_EMAC_MODE, val);
877 /* Enable/disable rx PAUSE. */
878 bp->rx_mode &= ~BNX2_EMAC_RX_MODE_FLOW_EN;
880 if (bp->flow_ctrl & FLOW_CTRL_RX)
881 bp->rx_mode |= BNX2_EMAC_RX_MODE_FLOW_EN;
882 REG_WR(bp, BNX2_EMAC_RX_MODE, bp->rx_mode);
884 /* Enable/disable tx PAUSE. */
885 val = REG_RD(bp, BNX2_EMAC_TX_MODE);
886 val &= ~BNX2_EMAC_TX_MODE_FLOW_EN;
888 if (bp->flow_ctrl & FLOW_CTRL_TX)
889 val |= BNX2_EMAC_TX_MODE_FLOW_EN;
890 REG_WR(bp, BNX2_EMAC_TX_MODE, val);
892 /* Acknowledge the interrupt. */
893 REG_WR(bp, BNX2_EMAC_STATUS, BNX2_EMAC_STATUS_LINK_CHANGE);
899 bnx2_set_link(struct bnx2 *bp)
904 if (bp->loopback == MAC_LOOPBACK) {
909 link_up = bp->link_up;
911 bnx2_read_phy(bp, MII_BMSR, &bmsr);
912 bnx2_read_phy(bp, MII_BMSR, &bmsr);
914 if ((bp->phy_flags & PHY_SERDES_FLAG) &&
915 (CHIP_NUM(bp) == CHIP_NUM_5706)) {
918 val = REG_RD(bp, BNX2_EMAC_STATUS);
919 if (val & BNX2_EMAC_STATUS_LINK)
920 bmsr |= BMSR_LSTATUS;
922 bmsr &= ~BMSR_LSTATUS;
925 if (bmsr & BMSR_LSTATUS) {
928 if (bp->phy_flags & PHY_SERDES_FLAG) {
929 if (CHIP_NUM(bp) == CHIP_NUM_5706)
930 bnx2_5706s_linkup(bp);
931 else if (CHIP_NUM(bp) == CHIP_NUM_5708)
932 bnx2_5708s_linkup(bp);
935 bnx2_copper_linkup(bp);
937 bnx2_resolve_flow_ctrl(bp);
940 if ((bp->phy_flags & PHY_SERDES_FLAG) &&
941 (bp->autoneg & AUTONEG_SPEED)) {
945 bnx2_read_phy(bp, MII_BMCR, &bmcr);
946 if (!(bmcr & BMCR_ANENABLE)) {
947 bnx2_write_phy(bp, MII_BMCR, bmcr |
951 bp->phy_flags &= ~PHY_PARALLEL_DETECT_FLAG;
955 if (bp->link_up != link_up) {
956 bnx2_report_link(bp);
959 bnx2_set_mac_link(bp);
965 bnx2_reset_phy(struct bnx2 *bp)
970 bnx2_write_phy(bp, MII_BMCR, BMCR_RESET);
972 #define PHY_RESET_MAX_WAIT 100
973 for (i = 0; i < PHY_RESET_MAX_WAIT; i++) {
976 bnx2_read_phy(bp, MII_BMCR, &reg);
977 if (!(reg & BMCR_RESET)) {
982 if (i == PHY_RESET_MAX_WAIT) {
989 bnx2_phy_get_pause_adv(struct bnx2 *bp)
993 if ((bp->req_flow_ctrl & (FLOW_CTRL_RX | FLOW_CTRL_TX)) ==
994 (FLOW_CTRL_RX | FLOW_CTRL_TX)) {
996 if (bp->phy_flags & PHY_SERDES_FLAG) {
997 adv = ADVERTISE_1000XPAUSE;
1000 adv = ADVERTISE_PAUSE_CAP;
1003 else if (bp->req_flow_ctrl & FLOW_CTRL_TX) {
1004 if (bp->phy_flags & PHY_SERDES_FLAG) {
1005 adv = ADVERTISE_1000XPSE_ASYM;
1008 adv = ADVERTISE_PAUSE_ASYM;
1011 else if (bp->req_flow_ctrl & FLOW_CTRL_RX) {
1012 if (bp->phy_flags & PHY_SERDES_FLAG) {
1013 adv = ADVERTISE_1000XPAUSE | ADVERTISE_1000XPSE_ASYM;
1016 adv = ADVERTISE_PAUSE_CAP | ADVERTISE_PAUSE_ASYM;
1023 bnx2_setup_serdes_phy(struct bnx2 *bp)
1028 if (!(bp->autoneg & AUTONEG_SPEED)) {
1030 int force_link_down = 0;
1032 if (CHIP_NUM(bp) == CHIP_NUM_5708) {
1033 bnx2_read_phy(bp, BCM5708S_UP1, &up1);
1034 if (up1 & BCM5708S_UP1_2G5) {
1035 up1 &= ~BCM5708S_UP1_2G5;
1036 bnx2_write_phy(bp, BCM5708S_UP1, up1);
1037 force_link_down = 1;
1041 bnx2_read_phy(bp, MII_ADVERTISE, &adv);
1042 adv &= ~(ADVERTISE_1000XFULL | ADVERTISE_1000XHALF);
1044 bnx2_read_phy(bp, MII_BMCR, &bmcr);
1045 new_bmcr = bmcr & ~BMCR_ANENABLE;
1046 new_bmcr |= BMCR_SPEED1000;
1047 if (bp->req_duplex == DUPLEX_FULL) {
1048 adv |= ADVERTISE_1000XFULL;
1049 new_bmcr |= BMCR_FULLDPLX;
1052 adv |= ADVERTISE_1000XHALF;
1053 new_bmcr &= ~BMCR_FULLDPLX;
1055 if ((new_bmcr != bmcr) || (force_link_down)) {
1056 /* Force a link down visible on the other side */
1058 bnx2_write_phy(bp, MII_ADVERTISE, adv &
1059 ~(ADVERTISE_1000XFULL |
1060 ADVERTISE_1000XHALF));
1061 bnx2_write_phy(bp, MII_BMCR, bmcr |
1062 BMCR_ANRESTART | BMCR_ANENABLE);
1065 netif_carrier_off(bp->dev);
1066 bnx2_write_phy(bp, MII_BMCR, new_bmcr);
1068 bnx2_write_phy(bp, MII_ADVERTISE, adv);
1069 bnx2_write_phy(bp, MII_BMCR, new_bmcr);
1074 if (bp->phy_flags & PHY_2_5G_CAPABLE_FLAG) {
1075 bnx2_read_phy(bp, BCM5708S_UP1, &up1);
1076 up1 |= BCM5708S_UP1_2G5;
1077 bnx2_write_phy(bp, BCM5708S_UP1, up1);
1080 if (bp->advertising & ADVERTISED_1000baseT_Full)
1081 new_adv |= ADVERTISE_1000XFULL;
1083 new_adv |= bnx2_phy_get_pause_adv(bp);
1085 bnx2_read_phy(bp, MII_ADVERTISE, &adv);
1086 bnx2_read_phy(bp, MII_BMCR, &bmcr);
1088 bp->serdes_an_pending = 0;
1089 if ((adv != new_adv) || ((bmcr & BMCR_ANENABLE) == 0)) {
1090 /* Force a link down visible on the other side */
1094 bnx2_write_phy(bp, MII_BMCR, BMCR_LOOPBACK);
1095 for (i = 0; i < 110; i++) {
1100 bnx2_write_phy(bp, MII_ADVERTISE, new_adv);
1101 bnx2_write_phy(bp, MII_BMCR, bmcr | BMCR_ANRESTART |
1103 if (CHIP_NUM(bp) == CHIP_NUM_5706) {
1104 /* Speed up link-up time when the link partner
1105 * does not autonegotiate which is very common
1106 * in blade servers. Some blade servers use
1107 * IPMI for keyboard input and it's important
1108 * to minimize link disruptions. Autoneg. involves
1109 * exchanging base pages plus 3 next pages and
1110 * normally completes in about 120 msec.
1112 bp->current_interval = SERDES_AN_TIMEOUT;
1113 bp->serdes_an_pending = 1;
1114 mod_timer(&bp->timer, jiffies + bp->current_interval);
1121 #define ETHTOOL_ALL_FIBRE_SPEED \
1122 (ADVERTISED_1000baseT_Full)
1124 #define ETHTOOL_ALL_COPPER_SPEED \
1125 (ADVERTISED_10baseT_Half | ADVERTISED_10baseT_Full | \
1126 ADVERTISED_100baseT_Half | ADVERTISED_100baseT_Full | \
1127 ADVERTISED_1000baseT_Full)
1129 #define PHY_ALL_10_100_SPEED (ADVERTISE_10HALF | ADVERTISE_10FULL | \
1130 ADVERTISE_100HALF | ADVERTISE_100FULL | ADVERTISE_CSMA)
1132 #define PHY_ALL_1000_SPEED (ADVERTISE_1000HALF | ADVERTISE_1000FULL)
1135 bnx2_setup_copper_phy(struct bnx2 *bp)
1140 bnx2_read_phy(bp, MII_BMCR, &bmcr);
1142 if (bp->autoneg & AUTONEG_SPEED) {
1143 u32 adv_reg, adv1000_reg;
1144 u32 new_adv_reg = 0;
1145 u32 new_adv1000_reg = 0;
1147 bnx2_read_phy(bp, MII_ADVERTISE, &adv_reg);
1148 adv_reg &= (PHY_ALL_10_100_SPEED | ADVERTISE_PAUSE_CAP |
1149 ADVERTISE_PAUSE_ASYM);
1151 bnx2_read_phy(bp, MII_CTRL1000, &adv1000_reg);
1152 adv1000_reg &= PHY_ALL_1000_SPEED;
1154 if (bp->advertising & ADVERTISED_10baseT_Half)
1155 new_adv_reg |= ADVERTISE_10HALF;
1156 if (bp->advertising & ADVERTISED_10baseT_Full)
1157 new_adv_reg |= ADVERTISE_10FULL;
1158 if (bp->advertising & ADVERTISED_100baseT_Half)
1159 new_adv_reg |= ADVERTISE_100HALF;
1160 if (bp->advertising & ADVERTISED_100baseT_Full)
1161 new_adv_reg |= ADVERTISE_100FULL;
1162 if (bp->advertising & ADVERTISED_1000baseT_Full)
1163 new_adv1000_reg |= ADVERTISE_1000FULL;
1165 new_adv_reg |= ADVERTISE_CSMA;
1167 new_adv_reg |= bnx2_phy_get_pause_adv(bp);
1169 if ((adv1000_reg != new_adv1000_reg) ||
1170 (adv_reg != new_adv_reg) ||
1171 ((bmcr & BMCR_ANENABLE) == 0)) {
1173 bnx2_write_phy(bp, MII_ADVERTISE, new_adv_reg);
1174 bnx2_write_phy(bp, MII_CTRL1000, new_adv1000_reg);
1175 bnx2_write_phy(bp, MII_BMCR, BMCR_ANRESTART |
1178 else if (bp->link_up) {
1179 /* Flow ctrl may have changed from auto to forced */
1180 /* or vice-versa. */
1182 bnx2_resolve_flow_ctrl(bp);
1183 bnx2_set_mac_link(bp);
1189 if (bp->req_line_speed == SPEED_100) {
1190 new_bmcr |= BMCR_SPEED100;
1192 if (bp->req_duplex == DUPLEX_FULL) {
1193 new_bmcr |= BMCR_FULLDPLX;
1195 if (new_bmcr != bmcr) {
1199 bnx2_read_phy(bp, MII_BMSR, &bmsr);
1200 bnx2_read_phy(bp, MII_BMSR, &bmsr);
1202 if (bmsr & BMSR_LSTATUS) {
1203 /* Force link down */
1204 bnx2_write_phy(bp, MII_BMCR, BMCR_LOOPBACK);
1207 bnx2_read_phy(bp, MII_BMSR, &bmsr);
1208 bnx2_read_phy(bp, MII_BMSR, &bmsr);
1210 } while ((bmsr & BMSR_LSTATUS) && (i < 620));
1213 bnx2_write_phy(bp, MII_BMCR, new_bmcr);
1215 /* Normally, the new speed is setup after the link has
1216 * gone down and up again. In some cases, link will not go
1217 * down so we need to set up the new speed here.
1219 if (bmsr & BMSR_LSTATUS) {
1220 bp->line_speed = bp->req_line_speed;
1221 bp->duplex = bp->req_duplex;
1222 bnx2_resolve_flow_ctrl(bp);
1223 bnx2_set_mac_link(bp);
1230 bnx2_setup_phy(struct bnx2 *bp)
1232 if (bp->loopback == MAC_LOOPBACK)
1235 if (bp->phy_flags & PHY_SERDES_FLAG) {
1236 return (bnx2_setup_serdes_phy(bp));
1239 return (bnx2_setup_copper_phy(bp));
1244 bnx2_init_5708s_phy(struct bnx2 *bp)
1248 bnx2_write_phy(bp, BCM5708S_BLK_ADDR, BCM5708S_BLK_ADDR_DIG3);
1249 bnx2_write_phy(bp, BCM5708S_DIG_3_0, BCM5708S_DIG_3_0_USE_IEEE);
1250 bnx2_write_phy(bp, BCM5708S_BLK_ADDR, BCM5708S_BLK_ADDR_DIG);
1252 bnx2_read_phy(bp, BCM5708S_1000X_CTL1, &val);
1253 val |= BCM5708S_1000X_CTL1_FIBER_MODE | BCM5708S_1000X_CTL1_AUTODET_EN;
1254 bnx2_write_phy(bp, BCM5708S_1000X_CTL1, val);
1256 bnx2_read_phy(bp, BCM5708S_1000X_CTL2, &val);
1257 val |= BCM5708S_1000X_CTL2_PLLEL_DET_EN;
1258 bnx2_write_phy(bp, BCM5708S_1000X_CTL2, val);
1260 if (bp->phy_flags & PHY_2_5G_CAPABLE_FLAG) {
1261 bnx2_read_phy(bp, BCM5708S_UP1, &val);
1262 val |= BCM5708S_UP1_2G5;
1263 bnx2_write_phy(bp, BCM5708S_UP1, val);
1266 if ((CHIP_ID(bp) == CHIP_ID_5708_A0) ||
1267 (CHIP_ID(bp) == CHIP_ID_5708_B0) ||
1268 (CHIP_ID(bp) == CHIP_ID_5708_B1)) {
1269 /* increase tx signal amplitude */
1270 bnx2_write_phy(bp, BCM5708S_BLK_ADDR,
1271 BCM5708S_BLK_ADDR_TX_MISC);
1272 bnx2_read_phy(bp, BCM5708S_TX_ACTL1, &val);
1273 val &= ~BCM5708S_TX_ACTL1_DRIVER_VCM;
1274 bnx2_write_phy(bp, BCM5708S_TX_ACTL1, val);
1275 bnx2_write_phy(bp, BCM5708S_BLK_ADDR, BCM5708S_BLK_ADDR_DIG);
1278 val = REG_RD_IND(bp, bp->shmem_base + BNX2_PORT_HW_CFG_CONFIG) &
1279 BNX2_PORT_HW_CFG_CFG_TXCTL3_MASK;
1284 is_backplane = REG_RD_IND(bp, bp->shmem_base +
1285 BNX2_SHARED_HW_CFG_CONFIG);
1286 if (is_backplane & BNX2_SHARED_HW_CFG_PHY_BACKPLANE) {
1287 bnx2_write_phy(bp, BCM5708S_BLK_ADDR,
1288 BCM5708S_BLK_ADDR_TX_MISC);
1289 bnx2_write_phy(bp, BCM5708S_TX_ACTL3, val);
1290 bnx2_write_phy(bp, BCM5708S_BLK_ADDR,
1291 BCM5708S_BLK_ADDR_DIG);
1298 bnx2_init_5706s_phy(struct bnx2 *bp)
1300 bp->phy_flags &= ~PHY_PARALLEL_DETECT_FLAG;
1302 if (CHIP_NUM(bp) == CHIP_NUM_5706) {
1303 REG_WR(bp, BNX2_MISC_UNUSED0, 0x300);
1306 if (bp->dev->mtu > 1500) {
1309 /* Set extended packet length bit */
1310 bnx2_write_phy(bp, 0x18, 0x7);
1311 bnx2_read_phy(bp, 0x18, &val);
1312 bnx2_write_phy(bp, 0x18, (val & 0xfff8) | 0x4000);
1314 bnx2_write_phy(bp, 0x1c, 0x6c00);
1315 bnx2_read_phy(bp, 0x1c, &val);
1316 bnx2_write_phy(bp, 0x1c, (val & 0x3ff) | 0xec02);
1321 bnx2_write_phy(bp, 0x18, 0x7);
1322 bnx2_read_phy(bp, 0x18, &val);
1323 bnx2_write_phy(bp, 0x18, val & ~0x4007);
1325 bnx2_write_phy(bp, 0x1c, 0x6c00);
1326 bnx2_read_phy(bp, 0x1c, &val);
1327 bnx2_write_phy(bp, 0x1c, (val & 0x3fd) | 0xec00);
1334 bnx2_init_copper_phy(struct bnx2 *bp)
1338 bp->phy_flags |= PHY_CRC_FIX_FLAG;
1340 if (bp->phy_flags & PHY_CRC_FIX_FLAG) {
1341 bnx2_write_phy(bp, 0x18, 0x0c00);
1342 bnx2_write_phy(bp, 0x17, 0x000a);
1343 bnx2_write_phy(bp, 0x15, 0x310b);
1344 bnx2_write_phy(bp, 0x17, 0x201f);
1345 bnx2_write_phy(bp, 0x15, 0x9506);
1346 bnx2_write_phy(bp, 0x17, 0x401f);
1347 bnx2_write_phy(bp, 0x15, 0x14e2);
1348 bnx2_write_phy(bp, 0x18, 0x0400);
1351 if (bp->dev->mtu > 1500) {
1352 /* Set extended packet length bit */
1353 bnx2_write_phy(bp, 0x18, 0x7);
1354 bnx2_read_phy(bp, 0x18, &val);
1355 bnx2_write_phy(bp, 0x18, val | 0x4000);
1357 bnx2_read_phy(bp, 0x10, &val);
1358 bnx2_write_phy(bp, 0x10, val | 0x1);
1361 bnx2_write_phy(bp, 0x18, 0x7);
1362 bnx2_read_phy(bp, 0x18, &val);
1363 bnx2_write_phy(bp, 0x18, val & ~0x4007);
1365 bnx2_read_phy(bp, 0x10, &val);
1366 bnx2_write_phy(bp, 0x10, val & ~0x1);
1369 /* ethernet@wirespeed */
1370 bnx2_write_phy(bp, 0x18, 0x7007);
1371 bnx2_read_phy(bp, 0x18, &val);
1372 bnx2_write_phy(bp, 0x18, val | (1 << 15) | (1 << 4));
1378 bnx2_init_phy(struct bnx2 *bp)
1383 bp->phy_flags &= ~PHY_INT_MODE_MASK_FLAG;
1384 bp->phy_flags |= PHY_INT_MODE_LINK_READY_FLAG;
1386 REG_WR(bp, BNX2_EMAC_ATTENTION_ENA, BNX2_EMAC_ATTENTION_ENA_LINK);
1390 bnx2_read_phy(bp, MII_PHYSID1, &val);
1391 bp->phy_id = val << 16;
1392 bnx2_read_phy(bp, MII_PHYSID2, &val);
1393 bp->phy_id |= val & 0xffff;
1395 if (bp->phy_flags & PHY_SERDES_FLAG) {
1396 if (CHIP_NUM(bp) == CHIP_NUM_5706)
1397 rc = bnx2_init_5706s_phy(bp);
1398 else if (CHIP_NUM(bp) == CHIP_NUM_5708)
1399 rc = bnx2_init_5708s_phy(bp);
1402 rc = bnx2_init_copper_phy(bp);
1411 bnx2_set_mac_loopback(struct bnx2 *bp)
1415 mac_mode = REG_RD(bp, BNX2_EMAC_MODE);
1416 mac_mode &= ~BNX2_EMAC_MODE_PORT;
1417 mac_mode |= BNX2_EMAC_MODE_MAC_LOOP | BNX2_EMAC_MODE_FORCE_LINK;
1418 REG_WR(bp, BNX2_EMAC_MODE, mac_mode);
1423 static int bnx2_test_link(struct bnx2 *);
1426 bnx2_set_phy_loopback(struct bnx2 *bp)
1431 spin_lock_bh(&bp->phy_lock);
1432 rc = bnx2_write_phy(bp, MII_BMCR, BMCR_LOOPBACK | BMCR_FULLDPLX |
1434 spin_unlock_bh(&bp->phy_lock);
1438 for (i = 0; i < 10; i++) {
1439 if (bnx2_test_link(bp) == 0)
1444 mac_mode = REG_RD(bp, BNX2_EMAC_MODE);
1445 mac_mode &= ~(BNX2_EMAC_MODE_PORT | BNX2_EMAC_MODE_HALF_DUPLEX |
1446 BNX2_EMAC_MODE_MAC_LOOP | BNX2_EMAC_MODE_FORCE_LINK |
1447 BNX2_EMAC_MODE_25G);
1449 mac_mode |= BNX2_EMAC_MODE_PORT_GMII;
1450 REG_WR(bp, BNX2_EMAC_MODE, mac_mode);
1456 bnx2_fw_sync(struct bnx2 *bp, u32 msg_data, int silent)
1462 msg_data |= bp->fw_wr_seq;
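	/* Each message to the bootcode carries a sequence number; the
	 * firmware echoes it back in the FW mailbox to acknowledge.
	 */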
1464 REG_WR_IND(bp, bp->shmem_base + BNX2_DRV_MB, msg_data);
1466 /* wait for an acknowledgement. */
1467 for (i = 0; i < (FW_ACK_TIME_OUT_MS / 10); i++) {
1468 current->state = TASK_UNINTERRUPTIBLE;
1469 schedule_timeout(HZ / 100);
1471 val = REG_RD_IND(bp, bp->shmem_base + BNX2_FW_MB);
1473 if ((val & BNX2_FW_MSG_ACK) == (msg_data & BNX2_DRV_MSG_SEQ))
1476 if ((msg_data & BNX2_DRV_MSG_DATA) == BNX2_DRV_MSG_DATA_WAIT0)
1479 /* If we timed out, inform the firmware that this is the case. */
1480 if ((val & BNX2_FW_MSG_ACK) != (msg_data & BNX2_DRV_MSG_SEQ)) {
1482 printk(KERN_ERR PFX "fw sync timeout, reset code = "
1485 msg_data &= ~BNX2_DRV_MSG_CODE;
1486 msg_data |= BNX2_DRV_MSG_CODE_FW_TIMEOUT;
1488 REG_WR_IND(bp, bp->shmem_base + BNX2_DRV_MB, msg_data);
1493 if ((val & BNX2_FW_MSG_STATUS_MASK) != BNX2_FW_MSG_STATUS_OK)
1500 bnx2_init_context(struct bnx2 *bp)
1506 u32 vcid_addr, pcid_addr, offset;
1510 if (CHIP_ID(bp) == CHIP_ID_5706_A0) {
1513 vcid_addr = GET_PCID_ADDR(vcid);
1515 new_vcid = 0x60 + (vcid & 0xf0) + (vcid & 0x7);
1520 pcid_addr = GET_PCID_ADDR(new_vcid);
1523 vcid_addr = GET_CID_ADDR(vcid);
1524 pcid_addr = vcid_addr;
1527 REG_WR(bp, BNX2_CTX_VIRT_ADDR, 0x00);
1528 REG_WR(bp, BNX2_CTX_PAGE_TBL, pcid_addr);
1530 /* Zero out the context. */
1531 for (offset = 0; offset < PHY_CTX_SIZE; offset += 4) {
1532 CTX_WR(bp, 0x00, offset, 0);
1535 REG_WR(bp, BNX2_CTX_VIRT_ADDR, vcid_addr);
1536 REG_WR(bp, BNX2_CTX_PAGE_TBL, pcid_addr);
1541 bnx2_alloc_bad_rbuf(struct bnx2 *bp)
1547 good_mbuf = kmalloc(512 * sizeof(u16), GFP_KERNEL);
1548 if (good_mbuf == NULL) {
1549 printk(KERN_ERR PFX "Failed to allocate memory in "
1550 "bnx2_alloc_bad_rbuf\n");
1554 REG_WR(bp, BNX2_MISC_ENABLE_SET_BITS,
1555 BNX2_MISC_ENABLE_SET_BITS_RX_MBUF_ENABLE);
1559 /* Allocate a bunch of mbufs and save the good ones in an array. */
1560 val = REG_RD_IND(bp, BNX2_RBUF_STATUS1);
1561 while (val & BNX2_RBUF_STATUS1_FREE_COUNT) {
1562 REG_WR_IND(bp, BNX2_RBUF_COMMAND, BNX2_RBUF_COMMAND_ALLOC_REQ);
1564 val = REG_RD_IND(bp, BNX2_RBUF_FW_BUF_ALLOC);
1566 val &= BNX2_RBUF_FW_BUF_ALLOC_VALUE;
1568 /* The addresses with Bit 9 set are bad memory blocks. */
1569 if (!(val & (1 << 9))) {
1570 good_mbuf[good_mbuf_cnt] = (u16) val;
1574 val = REG_RD_IND(bp, BNX2_RBUF_STATUS1);
1577 /* Free the good ones back to the mbuf pool thus discarding
1578 * all the bad ones. */
1579 while (good_mbuf_cnt) {
1582 val = good_mbuf[good_mbuf_cnt];
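		/* Encode the free request: the mbuf handle appears in both the
		 * upper and lower fields of the register, with bit 0 set.
		 */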
1583 val = (val << 9) | val | 1;
1585 REG_WR_IND(bp, BNX2_RBUF_FW_BUF_FREE, val);
1592 bnx2_set_mac_addr(struct bnx2 *bp)
1595 u8 *mac_addr = bp->dev->dev_addr;
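	/* Program the station address as a 16-bit high word (MATCH0)
	 * and a 32-bit low word (MATCH1).
	 */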
1597 val = (mac_addr[0] << 8) | mac_addr[1];
1599 REG_WR(bp, BNX2_EMAC_MAC_MATCH0, val);
1601 val = (mac_addr[2] << 24) | (mac_addr[3] << 16) |
1602 (mac_addr[4] << 8) | mac_addr[5];
1604 REG_WR(bp, BNX2_EMAC_MAC_MATCH1, val);
1608 bnx2_alloc_rx_skb(struct bnx2 *bp, u16 index)
1610 struct sk_buff *skb;
1611 struct sw_bd *rx_buf = &bp->rx_buf_ring[index];
1613 struct rx_bd *rxbd = &bp->rx_desc_ring[RX_RING(index)][RX_IDX(index)];
1614 unsigned long align;
1616 skb = dev_alloc_skb(bp->rx_buf_size);
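	/* Keep the rx data buffer 8-byte aligned; if the allocated data
	 * pointer is misaligned, reserve enough bytes to round it up.
	 */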
1621 if (unlikely((align = (unsigned long) skb->data & 0x7))) {
1622 skb_reserve(skb, 8 - align);
1626 mapping = pci_map_single(bp->pdev, skb->data, bp->rx_buf_use_size,
1627 PCI_DMA_FROMDEVICE);
1630 pci_unmap_addr_set(rx_buf, mapping, mapping);
1632 rxbd->rx_bd_haddr_hi = (u64) mapping >> 32;
1633 rxbd->rx_bd_haddr_lo = (u64) mapping & 0xffffffff;
1635 bp->rx_prod_bseq += bp->rx_buf_use_size;
1641 bnx2_phy_int(struct bnx2 *bp)
1643 u32 new_link_state, old_link_state;
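	/* status_attn_bits reports the current link state and
	 * status_attn_bits_ack holds the last state the driver acknowledged;
	 * a mismatch means the link changed, and the set/clear command
	 * below re-synchronizes the ack bits.
	 */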
1645 new_link_state = bp->status_blk->status_attn_bits &
1646 STATUS_ATTN_BITS_LINK_STATE;
1647 old_link_state = bp->status_blk->status_attn_bits_ack &
1648 STATUS_ATTN_BITS_LINK_STATE;
1649 if (new_link_state != old_link_state) {
1650 if (new_link_state) {
1651 REG_WR(bp, BNX2_PCICFG_STATUS_BIT_SET_CMD,
1652 STATUS_ATTN_BITS_LINK_STATE);
1655 REG_WR(bp, BNX2_PCICFG_STATUS_BIT_CLEAR_CMD,
1656 STATUS_ATTN_BITS_LINK_STATE);
1663 bnx2_tx_int(struct bnx2 *bp)
1665 struct status_block *sblk = bp->status_blk;
1666 u16 hw_cons, sw_cons, sw_ring_cons;
1669 hw_cons = bp->hw_tx_cons = sblk->status_tx_quick_consumer_index0;
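	/* The last BD of each ring page is a chain pointer to the next page
	 * and never carries a packet, so step the consumer index past it.
	 */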
1670 if ((hw_cons & MAX_TX_DESC_CNT) == MAX_TX_DESC_CNT) {
1673 sw_cons = bp->tx_cons;
1675 while (sw_cons != hw_cons) {
1676 struct sw_bd *tx_buf;
1677 struct sk_buff *skb;
1680 sw_ring_cons = TX_RING_IDX(sw_cons);
1682 tx_buf = &bp->tx_buf_ring[sw_ring_cons];
1685 /* partial BD completions possible with TSO packets */
1686 if (skb_shinfo(skb)->gso_size) {
1687 u16 last_idx, last_ring_idx;
1689 last_idx = sw_cons +
1690 skb_shinfo(skb)->nr_frags + 1;
1691 last_ring_idx = sw_ring_cons +
1692 skb_shinfo(skb)->nr_frags + 1;
1693 if (unlikely(last_ring_idx >= MAX_TX_DESC_CNT)) {
1696 if (((s16) ((s16) last_idx - (s16) hw_cons)) > 0) {
1701 pci_unmap_single(bp->pdev, pci_unmap_addr(tx_buf, mapping),
1702 skb_headlen(skb), PCI_DMA_TODEVICE);
1705 last = skb_shinfo(skb)->nr_frags;
1707 for (i = 0; i < last; i++) {
1708 sw_cons = NEXT_TX_BD(sw_cons);
1710 pci_unmap_page(bp->pdev,
1712 &bp->tx_buf_ring[TX_RING_IDX(sw_cons)],
1714 skb_shinfo(skb)->frags[i].size,
1718 sw_cons = NEXT_TX_BD(sw_cons);
1720 tx_free_bd += last + 1;
1724 hw_cons = bp->hw_tx_cons =
1725 sblk->status_tx_quick_consumer_index0;
1727 if ((hw_cons & MAX_TX_DESC_CNT) == MAX_TX_DESC_CNT) {
1732 bp->tx_cons = sw_cons;
1734 if (unlikely(netif_queue_stopped(bp->dev))) {
1735 spin_lock(&bp->tx_lock);
1736 if ((netif_queue_stopped(bp->dev)) &&
1737 (bnx2_tx_avail(bp) > MAX_SKB_FRAGS)) {
1739 netif_wake_queue(bp->dev);
1741 spin_unlock(&bp->tx_lock);
1747 bnx2_reuse_rx_skb(struct bnx2 *bp, struct sk_buff *skb,
1750 struct sw_bd *cons_rx_buf, *prod_rx_buf;
1751 struct rx_bd *cons_bd, *prod_bd;
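	/* Recycle an rx buffer that could not be replaced: move its skb,
	 * DMA mapping and BD address from the consumer slot to the producer
	 * slot so the hardware can fill the same buffer again.
	 */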
1753 cons_rx_buf = &bp->rx_buf_ring[cons];
1754 prod_rx_buf = &bp->rx_buf_ring[prod];
1756 pci_dma_sync_single_for_device(bp->pdev,
1757 pci_unmap_addr(cons_rx_buf, mapping),
1758 bp->rx_offset + RX_COPY_THRESH, PCI_DMA_FROMDEVICE);
1760 bp->rx_prod_bseq += bp->rx_buf_use_size;
1762 prod_rx_buf->skb = skb;
1767 pci_unmap_addr_set(prod_rx_buf, mapping,
1768 pci_unmap_addr(cons_rx_buf, mapping));
1770 cons_bd = &bp->rx_desc_ring[RX_RING(cons)][RX_IDX(cons)];
1771 prod_bd = &bp->rx_desc_ring[RX_RING(prod)][RX_IDX(prod)];
1772 prod_bd->rx_bd_haddr_hi = cons_bd->rx_bd_haddr_hi;
1773 prod_bd->rx_bd_haddr_lo = cons_bd->rx_bd_haddr_lo;
1777 bnx2_rx_int(struct bnx2 *bp, int budget)
1779 struct status_block *sblk = bp->status_blk;
1780 u16 hw_cons, sw_cons, sw_ring_cons, sw_prod, sw_ring_prod;
1781 struct l2_fhdr *rx_hdr;
1784 hw_cons = bp->hw_rx_cons = sblk->status_rx_quick_consumer_index0;
1785 if ((hw_cons & MAX_RX_DESC_CNT) == MAX_RX_DESC_CNT) {
1788 sw_cons = bp->rx_cons;
1789 sw_prod = bp->rx_prod;
1791 /* Memory barrier necessary as speculative reads of the rx
1792 * buffer can be ahead of the index in the status block
1795 while (sw_cons != hw_cons) {
1798 struct sw_bd *rx_buf;
1799 struct sk_buff *skb;
1800 dma_addr_t dma_addr;
1802 sw_ring_cons = RX_RING_IDX(sw_cons);
1803 sw_ring_prod = RX_RING_IDX(sw_prod);
1805 rx_buf = &bp->rx_buf_ring[sw_ring_cons];
1810 dma_addr = pci_unmap_addr(rx_buf, mapping);
1812 pci_dma_sync_single_for_cpu(bp->pdev, dma_addr,
1813 bp->rx_offset + RX_COPY_THRESH, PCI_DMA_FROMDEVICE);
1815 rx_hdr = (struct l2_fhdr *) skb->data;
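		/* l2_fhdr_pkt_len includes the 4-byte frame CRC; strip it
		 * from the reported length.
		 */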
1816 len = rx_hdr->l2_fhdr_pkt_len - 4;
1818 if ((status = rx_hdr->l2_fhdr_status) &
1819 (L2_FHDR_ERRORS_BAD_CRC |
1820 L2_FHDR_ERRORS_PHY_DECODE |
1821 L2_FHDR_ERRORS_ALIGNMENT |
1822 L2_FHDR_ERRORS_TOO_SHORT |
1823 L2_FHDR_ERRORS_GIANT_FRAME)) {
1828 /* Since we don't have a jumbo ring, copy small packets
1831 if ((bp->dev->mtu > 1500) && (len <= RX_COPY_THRESH)) {
1832 struct sk_buff *new_skb;
1834 new_skb = dev_alloc_skb(len + 2);
1835 if (new_skb == NULL)
1839 memcpy(new_skb->data,
1840 skb->data + bp->rx_offset - 2,
1843 skb_reserve(new_skb, 2);
1844 skb_put(new_skb, len);
1845 new_skb->dev = bp->dev;
1847 bnx2_reuse_rx_skb(bp, skb,
1848 sw_ring_cons, sw_ring_prod);
1852 else if (bnx2_alloc_rx_skb(bp, sw_ring_prod) == 0) {
1853 pci_unmap_single(bp->pdev, dma_addr,
1854 bp->rx_buf_use_size, PCI_DMA_FROMDEVICE);
1856 skb_reserve(skb, bp->rx_offset);
1861 bnx2_reuse_rx_skb(bp, skb,
1862 sw_ring_cons, sw_ring_prod);
1866 skb->protocol = eth_type_trans(skb, bp->dev);
1868 if ((len > (bp->dev->mtu + ETH_HLEN)) &&
1869 (ntohs(skb->protocol) != 0x8100)) {
1876 skb->ip_summed = CHECKSUM_NONE;
1878 (status & (L2_FHDR_STATUS_TCP_SEGMENT |
1879 L2_FHDR_STATUS_UDP_DATAGRAM))) {
1881 if (likely((status & (L2_FHDR_ERRORS_TCP_XSUM |
1882 L2_FHDR_ERRORS_UDP_XSUM)) == 0))
1883 skb->ip_summed = CHECKSUM_UNNECESSARY;
1887 if ((status & L2_FHDR_STATUS_L2_VLAN_TAG) && (bp->vlgrp != 0)) {
1888 vlan_hwaccel_receive_skb(skb, bp->vlgrp,
1889 rx_hdr->l2_fhdr_vlan_tag);
1893 netif_receive_skb(skb);
1895 bp->dev->last_rx = jiffies;
1899 sw_cons = NEXT_RX_BD(sw_cons);
1900 sw_prod = NEXT_RX_BD(sw_prod);
1902 if (rx_pkt == budget)
1905 /* Refresh hw_cons to see if there is new work */
1906 if (sw_cons == hw_cons) {
1907 hw_cons = bp->hw_rx_cons =
1908 sblk->status_rx_quick_consumer_index0;
1909 if ((hw_cons & MAX_RX_DESC_CNT) == MAX_RX_DESC_CNT)
1914 bp->rx_cons = sw_cons;
1915 bp->rx_prod = sw_prod;
1917 REG_WR16(bp, MB_RX_CID_ADDR + BNX2_L2CTX_HOST_BDIDX, sw_prod);
1919 REG_WR(bp, MB_RX_CID_ADDR + BNX2_L2CTX_HOST_BSEQ, bp->rx_prod_bseq);
1927 #ifdef CONFIG_PCI_MSI
1928 /* MSI ISR - The only difference between this and the INTx ISR
1929 * is that the MSI interrupt is always serviced.
1932 bnx2_msi(int irq, void *dev_instance, struct pt_regs *regs)
1934 struct net_device *dev = dev_instance;
1935 struct bnx2 *bp = netdev_priv(dev);
1937 prefetch(bp->status_blk);
1938 REG_WR(bp, BNX2_PCICFG_INT_ACK_CMD,
1939 BNX2_PCICFG_INT_ACK_CMD_USE_INT_HC_PARAM |
1940 BNX2_PCICFG_INT_ACK_CMD_MASK_INT);
1942 /* Return here if interrupt is disabled. */
1943 if (unlikely(atomic_read(&bp->intr_sem) != 0))
1946 netif_rx_schedule(dev);
1953 bnx2_interrupt(int irq, void *dev_instance, struct pt_regs *regs)
1955 struct net_device *dev = dev_instance;
1956 struct bnx2 *bp = netdev_priv(dev);
1958 /* When using INTx, it is possible for the interrupt to arrive
1959 * at the CPU before the status block posted prior to the
1960 * interrupt. Reading a register will flush the status block.
1961 * When using MSI, the MSI message will always complete after
1962 * the status block write.
1964 if ((bp->status_blk->status_idx == bp->last_status_idx) &&
1965 (REG_RD(bp, BNX2_PCICFG_MISC_STATUS) &
1966 BNX2_PCICFG_MISC_STATUS_INTA_VALUE))
1969 REG_WR(bp, BNX2_PCICFG_INT_ACK_CMD,
1970 BNX2_PCICFG_INT_ACK_CMD_USE_INT_HC_PARAM |
1971 BNX2_PCICFG_INT_ACK_CMD_MASK_INT);
1973 /* Return here if interrupt is shared and is disabled. */
1974 if (unlikely(atomic_read(&bp->intr_sem) != 0))
1977 netif_rx_schedule(dev);
1983 bnx2_has_work(struct bnx2 *bp)
1985 struct status_block *sblk = bp->status_blk;
1987 if ((sblk->status_rx_quick_consumer_index0 != bp->hw_rx_cons) ||
1988 (sblk->status_tx_quick_consumer_index0 != bp->hw_tx_cons))
1991 if (((sblk->status_attn_bits & STATUS_ATTN_BITS_LINK_STATE) != 0) !=
1999 bnx2_poll(struct net_device *dev, int *budget)
2001 struct bnx2 *bp = netdev_priv(dev);
2003 if ((bp->status_blk->status_attn_bits &
2004 STATUS_ATTN_BITS_LINK_STATE) !=
2005 (bp->status_blk->status_attn_bits_ack &
2006 STATUS_ATTN_BITS_LINK_STATE)) {
2008 spin_lock(&bp->phy_lock);
2010 spin_unlock(&bp->phy_lock);
2012 /* This is needed to take care of transient status
2013 * during link changes.
2015 REG_WR(bp, BNX2_HC_COMMAND,
2016 bp->hc_cmd | BNX2_HC_COMMAND_COAL_NOW_WO_INT);
2017 REG_RD(bp, BNX2_HC_COMMAND);
2020 if (bp->status_blk->status_tx_quick_consumer_index0 != bp->hw_tx_cons)
2023 if (bp->status_blk->status_rx_quick_consumer_index0 != bp->hw_rx_cons) {
2024 int orig_budget = *budget;
2027 if (orig_budget > dev->quota)
2028 orig_budget = dev->quota;
2030 work_done = bnx2_rx_int(bp, orig_budget);
2031 *budget -= work_done;
2032 dev->quota -= work_done;
2035 bp->last_status_idx = bp->status_blk->status_idx;
2038 if (!bnx2_has_work(bp)) {
2039 netif_rx_complete(dev);
2040 if (likely(bp->flags & USING_MSI_FLAG)) {
2041 REG_WR(bp, BNX2_PCICFG_INT_ACK_CMD,
2042 BNX2_PCICFG_INT_ACK_CMD_INDEX_VALID |
2043 bp->last_status_idx);
2046 REG_WR(bp, BNX2_PCICFG_INT_ACK_CMD,
2047 BNX2_PCICFG_INT_ACK_CMD_INDEX_VALID |
2048 BNX2_PCICFG_INT_ACK_CMD_MASK_INT |
2049 bp->last_status_idx);
2051 REG_WR(bp, BNX2_PCICFG_INT_ACK_CMD,
2052 BNX2_PCICFG_INT_ACK_CMD_INDEX_VALID |
2053 bp->last_status_idx);
2060 /* Called with rtnl_lock from vlan functions and also netif_tx_lock
2061 * from set_multicast.
2064 bnx2_set_rx_mode(struct net_device *dev)
2066 struct bnx2 *bp = netdev_priv(dev);
2067 u32 rx_mode, sort_mode;
2070 spin_lock_bh(&bp->phy_lock);
2072 rx_mode = bp->rx_mode & ~(BNX2_EMAC_RX_MODE_PROMISCUOUS |
2073 BNX2_EMAC_RX_MODE_KEEP_VLAN_TAG);
2074 sort_mode = 1 | BNX2_RPM_SORT_USER0_BC_EN;
2076 if (!bp->vlgrp && !(bp->flags & ASF_ENABLE_FLAG))
2077 rx_mode |= BNX2_EMAC_RX_MODE_KEEP_VLAN_TAG;
2079 if (!(bp->flags & ASF_ENABLE_FLAG))
2080 rx_mode |= BNX2_EMAC_RX_MODE_KEEP_VLAN_TAG;
2082 if (dev->flags & IFF_PROMISC) {
2083 /* Promiscuous mode. */
2084 rx_mode |= BNX2_EMAC_RX_MODE_PROMISCUOUS;
2085 sort_mode |= BNX2_RPM_SORT_USER0_PROM_EN;
2087 else if (dev->flags & IFF_ALLMULTI) {
2088 for (i = 0; i < NUM_MC_HASH_REGISTERS; i++) {
2089 REG_WR(bp, BNX2_EMAC_MULTICAST_HASH0 + (i * 4),
2092 sort_mode |= BNX2_RPM_SORT_USER0_MC_EN;
2095 /* Accept one or more multicast(s). */
2096 struct dev_mc_list *mclist;
2097 u32 mc_filter[NUM_MC_HASH_REGISTERS];
2102 memset(mc_filter, 0, 4 * NUM_MC_HASH_REGISTERS);
2104 for (i = 0, mclist = dev->mc_list; mclist && i < dev->mc_count;
2105 i++, mclist = mclist->next) {
2107 crc = ether_crc_le(ETH_ALEN, mclist->dmi_addr);
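			/* Hash on the CRC of the address: the upper bits pick
			 * one of the NUM_MC_HASH_REGISTERS hash registers and
			 * the lower bits pick a bit within that register.
			 */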
2109 regidx = (bit & 0xe0) >> 5;
2111 mc_filter[regidx] |= (1 << bit);
2114 for (i = 0; i < NUM_MC_HASH_REGISTERS; i++) {
2115 REG_WR(bp, BNX2_EMAC_MULTICAST_HASH0 + (i * 4),
2119 sort_mode |= BNX2_RPM_SORT_USER0_MC_HSH_EN;
2122 if (rx_mode != bp->rx_mode) {
2123 bp->rx_mode = rx_mode;
2124 REG_WR(bp, BNX2_EMAC_RX_MODE, rx_mode);
2127 REG_WR(bp, BNX2_RPM_SORT_USER0, 0x0);
2128 REG_WR(bp, BNX2_RPM_SORT_USER0, sort_mode);
2129 REG_WR(bp, BNX2_RPM_SORT_USER0, sort_mode | BNX2_RPM_SORT_USER0_ENA);
2131 spin_unlock_bh(&bp->phy_lock);
2134 #define FW_BUF_SIZE 0x8000
2137 bnx2_gunzip_init(struct bnx2 *bp)
2139 if ((bp->gunzip_buf = vmalloc(FW_BUF_SIZE)) == NULL)
2142 if ((bp->strm = kmalloc(sizeof(*bp->strm), GFP_KERNEL)) == NULL)
2145 bp->strm->workspace = kmalloc(zlib_inflate_workspacesize(), GFP_KERNEL);
2146 if (bp->strm->workspace == NULL)
2156 vfree(bp->gunzip_buf);
2157 bp->gunzip_buf = NULL;
2160 printk(KERN_ERR PFX "%s: Cannot allocate firmware buffer for "
2161 "uncompression.\n", bp->dev->name);
2166 bnx2_gunzip_end(struct bnx2 *bp)
2168 kfree(bp->strm->workspace);
2173 if (bp->gunzip_buf) {
2174 vfree(bp->gunzip_buf);
2175 bp->gunzip_buf = NULL;
2180 bnx2_gunzip(struct bnx2 *bp, u8 *zbuf, int len, void **outbuf, int *outlen)
2184 /* check gzip header */
2185 if ((zbuf[0] != 0x1f) || (zbuf[1] != 0x8b) || (zbuf[2] != Z_DEFLATED))
2191 if (zbuf[3] & FNAME)
2192 while ((zbuf[n++] != 0) && (n < len));
2194 bp->strm->next_in = zbuf + n;
2195 bp->strm->avail_in = len - n;
2196 bp->strm->next_out = bp->gunzip_buf;
2197 bp->strm->avail_out = FW_BUF_SIZE;
2199 rc = zlib_inflateInit2(bp->strm, -MAX_WBITS);
2203 rc = zlib_inflate(bp->strm, Z_FINISH);
2205 *outlen = FW_BUF_SIZE - bp->strm->avail_out;
2206 *outbuf = bp->gunzip_buf;
2208 if ((rc != Z_OK) && (rc != Z_STREAM_END))
2209 printk(KERN_ERR PFX "%s: Firmware decompression error: %s\n",
2210 bp->dev->name, bp->strm->msg);
2212 zlib_inflateEnd(bp->strm);
2214 if (rc == Z_STREAM_END)
2221 load_rv2p_fw(struct bnx2 *bp, u32 *rv2p_code, u32 rv2p_code_len,
2228 for (i = 0; i < rv2p_code_len; i += 8) {
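		/* Each RV2P instruction is 64 bits: write the high and low
		 * words, then latch them into the selected processor at
		 * instruction index i / 8.
		 */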
2229 REG_WR(bp, BNX2_RV2P_INSTR_HIGH, cpu_to_le32(*rv2p_code));
2231 REG_WR(bp, BNX2_RV2P_INSTR_LOW, cpu_to_le32(*rv2p_code));
2234 if (rv2p_proc == RV2P_PROC1) {
2235 val = (i / 8) | BNX2_RV2P_PROC1_ADDR_CMD_RDWR;
2236 REG_WR(bp, BNX2_RV2P_PROC1_ADDR_CMD, val);
2239 val = (i / 8) | BNX2_RV2P_PROC2_ADDR_CMD_RDWR;
2240 REG_WR(bp, BNX2_RV2P_PROC2_ADDR_CMD, val);
2244 /* Reset the processor, un-stall is done later. */
2245 if (rv2p_proc == RV2P_PROC1) {
2246 REG_WR(bp, BNX2_RV2P_COMMAND, BNX2_RV2P_COMMAND_PROC1_RESET);
2249 REG_WR(bp, BNX2_RV2P_COMMAND, BNX2_RV2P_COMMAND_PROC2_RESET);
2254 load_cpu_fw(struct bnx2 *bp, struct cpu_reg *cpu_reg, struct fw_info *fw)
2260 val = REG_RD_IND(bp, cpu_reg->mode);
2261 val |= cpu_reg->mode_value_halt;
2262 REG_WR_IND(bp, cpu_reg->mode, val);
2263 REG_WR_IND(bp, cpu_reg->state, cpu_reg->state_value_clear);
2265 /* Load the Text area. */
2266 offset = cpu_reg->spad_base + (fw->text_addr - cpu_reg->mips_view_base);
2270 for (j = 0; j < (fw->text_len / 4); j++, offset += 4) {
2271 REG_WR_IND(bp, offset, cpu_to_le32(fw->text[j]));
2275 /* Load the Data area. */
2276 offset = cpu_reg->spad_base + (fw->data_addr - cpu_reg->mips_view_base);
2280 for (j = 0; j < (fw->data_len / 4); j++, offset += 4) {
2281 REG_WR_IND(bp, offset, fw->data[j]);
2285 /* Load the SBSS area. */
2286 offset = cpu_reg->spad_base + (fw->sbss_addr - cpu_reg->mips_view_base);
2290 for (j = 0; j < (fw->sbss_len / 4); j++, offset += 4) {
2291 REG_WR_IND(bp, offset, fw->sbss[j]);
2295 /* Load the BSS area. */
2296 offset = cpu_reg->spad_base + (fw->bss_addr - cpu_reg->mips_view_base);
2300 for (j = 0; j < (fw->bss_len/4); j++, offset += 4) {
2301 REG_WR_IND(bp, offset, fw->bss[j]);
2305 /* Load the Read-Only area. */
2306 offset = cpu_reg->spad_base +
2307 (fw->rodata_addr - cpu_reg->mips_view_base);
2311 for (j = 0; j < (fw->rodata_len / 4); j++, offset += 4) {
2312 REG_WR_IND(bp, offset, fw->rodata[j]);
2316 /* Clear the pre-fetch instruction. */
2317 REG_WR_IND(bp, cpu_reg->inst, 0);
2318 REG_WR_IND(bp, cpu_reg->pc, fw->start_addr);
2320 /* Start the CPU. */
2321 val = REG_RD_IND(bp, cpu_reg->mode);
2322 val &= ~cpu_reg->mode_value_halt;
2323 REG_WR_IND(bp, cpu_reg->state, cpu_reg->state_value_clear);
2324 REG_WR_IND(bp, cpu_reg->mode, val);
2328 bnx2_init_cpus(struct bnx2 *bp)
2330 struct cpu_reg cpu_reg;
2336 if ((rc = bnx2_gunzip_init(bp)) != 0)
2339 /* Initialize the RV2P processor. */
2340 rc = bnx2_gunzip(bp, bnx2_rv2p_proc1, sizeof(bnx2_rv2p_proc1), &text,
2345 load_rv2p_fw(bp, text, text_len, RV2P_PROC1);
2347 rc = bnx2_gunzip(bp, bnx2_rv2p_proc2, sizeof(bnx2_rv2p_proc2), &text,
2352 load_rv2p_fw(bp, text, text_len, RV2P_PROC2);
2354 /* Initialize the RX Processor. */
2355 cpu_reg.mode = BNX2_RXP_CPU_MODE;
2356 cpu_reg.mode_value_halt = BNX2_RXP_CPU_MODE_SOFT_HALT;
2357 cpu_reg.mode_value_sstep = BNX2_RXP_CPU_MODE_STEP_ENA;
2358 cpu_reg.state = BNX2_RXP_CPU_STATE;
2359 cpu_reg.state_value_clear = 0xffffff;
2360 cpu_reg.gpr0 = BNX2_RXP_CPU_REG_FILE;
2361 cpu_reg.evmask = BNX2_RXP_CPU_EVENT_MASK;
2362 cpu_reg.pc = BNX2_RXP_CPU_PROGRAM_COUNTER;
2363 cpu_reg.inst = BNX2_RXP_CPU_INSTRUCTION;
2364 cpu_reg.bp = BNX2_RXP_CPU_HW_BREAKPOINT;
2365 cpu_reg.spad_base = BNX2_RXP_SCRATCH;
2366 cpu_reg.mips_view_base = 0x8000000;
2368 fw.ver_major = bnx2_RXP_b06FwReleaseMajor;
2369 fw.ver_minor = bnx2_RXP_b06FwReleaseMinor;
2370 fw.ver_fix = bnx2_RXP_b06FwReleaseFix;
2371 fw.start_addr = bnx2_RXP_b06FwStartAddr;
2373 fw.text_addr = bnx2_RXP_b06FwTextAddr;
2374 fw.text_len = bnx2_RXP_b06FwTextLen;
2377 rc = bnx2_gunzip(bp, bnx2_RXP_b06FwText, sizeof(bnx2_RXP_b06FwText),
2384 fw.data_addr = bnx2_RXP_b06FwDataAddr;
2385 fw.data_len = bnx2_RXP_b06FwDataLen;
2387 fw.data = bnx2_RXP_b06FwData;
2389 fw.sbss_addr = bnx2_RXP_b06FwSbssAddr;
2390 fw.sbss_len = bnx2_RXP_b06FwSbssLen;
2392 fw.sbss = bnx2_RXP_b06FwSbss;
2394 fw.bss_addr = bnx2_RXP_b06FwBssAddr;
2395 fw.bss_len = bnx2_RXP_b06FwBssLen;
2397 fw.bss = bnx2_RXP_b06FwBss;
2399 fw.rodata_addr = bnx2_RXP_b06FwRodataAddr;
2400 fw.rodata_len = bnx2_RXP_b06FwRodataLen;
2401 fw.rodata_index = 0;
2402 fw.rodata = bnx2_RXP_b06FwRodata;
2404 load_cpu_fw(bp, &cpu_reg, &fw);
2406 /* Initialize the TX Processor. */
2407 cpu_reg.mode = BNX2_TXP_CPU_MODE;
2408 cpu_reg.mode_value_halt = BNX2_TXP_CPU_MODE_SOFT_HALT;
2409 cpu_reg.mode_value_sstep = BNX2_TXP_CPU_MODE_STEP_ENA;
2410 cpu_reg.state = BNX2_TXP_CPU_STATE;
2411 cpu_reg.state_value_clear = 0xffffff;
2412 cpu_reg.gpr0 = BNX2_TXP_CPU_REG_FILE;
2413 cpu_reg.evmask = BNX2_TXP_CPU_EVENT_MASK;
2414 cpu_reg.pc = BNX2_TXP_CPU_PROGRAM_COUNTER;
2415 cpu_reg.inst = BNX2_TXP_CPU_INSTRUCTION;
2416 cpu_reg.bp = BNX2_TXP_CPU_HW_BREAKPOINT;
2417 cpu_reg.spad_base = BNX2_TXP_SCRATCH;
2418 cpu_reg.mips_view_base = 0x8000000;
2420 fw.ver_major = bnx2_TXP_b06FwReleaseMajor;
2421 fw.ver_minor = bnx2_TXP_b06FwReleaseMinor;
2422 fw.ver_fix = bnx2_TXP_b06FwReleaseFix;
2423 fw.start_addr = bnx2_TXP_b06FwStartAddr;
2425 fw.text_addr = bnx2_TXP_b06FwTextAddr;
2426 fw.text_len = bnx2_TXP_b06FwTextLen;
2429 rc = bnx2_gunzip(bp, bnx2_TXP_b06FwText, sizeof(bnx2_TXP_b06FwText),
2436 fw.data_addr = bnx2_TXP_b06FwDataAddr;
2437 fw.data_len = bnx2_TXP_b06FwDataLen;
2439 fw.data = bnx2_TXP_b06FwData;
2441 fw.sbss_addr = bnx2_TXP_b06FwSbssAddr;
2442 fw.sbss_len = bnx2_TXP_b06FwSbssLen;
2444 fw.sbss = bnx2_TXP_b06FwSbss;
2446 fw.bss_addr = bnx2_TXP_b06FwBssAddr;
2447 fw.bss_len = bnx2_TXP_b06FwBssLen;
2449 fw.bss = bnx2_TXP_b06FwBss;
2451 fw.rodata_addr = bnx2_TXP_b06FwRodataAddr;
2452 fw.rodata_len = bnx2_TXP_b06FwRodataLen;
2453 fw.rodata_index = 0;
2454 fw.rodata = bnx2_TXP_b06FwRodata;
2456 load_cpu_fw(bp, &cpu_reg, &fw);
2458 /* Initialize the TX Patch-up Processor. */
2459 cpu_reg.mode = BNX2_TPAT_CPU_MODE;
2460 cpu_reg.mode_value_halt = BNX2_TPAT_CPU_MODE_SOFT_HALT;
2461 cpu_reg.mode_value_sstep = BNX2_TPAT_CPU_MODE_STEP_ENA;
2462 cpu_reg.state = BNX2_TPAT_CPU_STATE;
2463 cpu_reg.state_value_clear = 0xffffff;
2464 cpu_reg.gpr0 = BNX2_TPAT_CPU_REG_FILE;
2465 cpu_reg.evmask = BNX2_TPAT_CPU_EVENT_MASK;
2466 cpu_reg.pc = BNX2_TPAT_CPU_PROGRAM_COUNTER;
2467 cpu_reg.inst = BNX2_TPAT_CPU_INSTRUCTION;
2468 cpu_reg.bp = BNX2_TPAT_CPU_HW_BREAKPOINT;
2469 cpu_reg.spad_base = BNX2_TPAT_SCRATCH;
2470 cpu_reg.mips_view_base = 0x8000000;
2472 fw.ver_major = bnx2_TPAT_b06FwReleaseMajor;
2473 fw.ver_minor = bnx2_TPAT_b06FwReleaseMinor;
2474 fw.ver_fix = bnx2_TPAT_b06FwReleaseFix;
2475 fw.start_addr = bnx2_TPAT_b06FwStartAddr;
2477 fw.text_addr = bnx2_TPAT_b06FwTextAddr;
2478 fw.text_len = bnx2_TPAT_b06FwTextLen;
2481 rc = bnx2_gunzip(bp, bnx2_TPAT_b06FwText, sizeof(bnx2_TPAT_b06FwText),
2488 fw.data_addr = bnx2_TPAT_b06FwDataAddr;
2489 fw.data_len = bnx2_TPAT_b06FwDataLen;
2491 fw.data = bnx2_TPAT_b06FwData;
2493 fw.sbss_addr = bnx2_TPAT_b06FwSbssAddr;
2494 fw.sbss_len = bnx2_TPAT_b06FwSbssLen;
2496 fw.sbss = bnx2_TPAT_b06FwSbss;
2498 fw.bss_addr = bnx2_TPAT_b06FwBssAddr;
2499 fw.bss_len = bnx2_TPAT_b06FwBssLen;
2501 fw.bss = bnx2_TPAT_b06FwBss;
2503 fw.rodata_addr = bnx2_TPAT_b06FwRodataAddr;
2504 fw.rodata_len = bnx2_TPAT_b06FwRodataLen;
2505 fw.rodata_index = 0;
2506 fw.rodata = bnx2_TPAT_b06FwRodata;
2508 load_cpu_fw(bp, &cpu_reg, &fw);
2510 /* Initialize the Completion Processor. */
2511 cpu_reg.mode = BNX2_COM_CPU_MODE;
2512 cpu_reg.mode_value_halt = BNX2_COM_CPU_MODE_SOFT_HALT;
2513 cpu_reg.mode_value_sstep = BNX2_COM_CPU_MODE_STEP_ENA;
2514 cpu_reg.state = BNX2_COM_CPU_STATE;
2515 cpu_reg.state_value_clear = 0xffffff;
2516 cpu_reg.gpr0 = BNX2_COM_CPU_REG_FILE;
2517 cpu_reg.evmask = BNX2_COM_CPU_EVENT_MASK;
2518 cpu_reg.pc = BNX2_COM_CPU_PROGRAM_COUNTER;
2519 cpu_reg.inst = BNX2_COM_CPU_INSTRUCTION;
2520 cpu_reg.bp = BNX2_COM_CPU_HW_BREAKPOINT;
2521 cpu_reg.spad_base = BNX2_COM_SCRATCH;
2522 cpu_reg.mips_view_base = 0x8000000;
2524 fw.ver_major = bnx2_COM_b06FwReleaseMajor;
2525 fw.ver_minor = bnx2_COM_b06FwReleaseMinor;
2526 fw.ver_fix = bnx2_COM_b06FwReleaseFix;
2527 fw.start_addr = bnx2_COM_b06FwStartAddr;
2529 fw.text_addr = bnx2_COM_b06FwTextAddr;
2530 fw.text_len = bnx2_COM_b06FwTextLen;
2533 rc = bnx2_gunzip(bp, bnx2_COM_b06FwText, sizeof(bnx2_COM_b06FwText),
2540 fw.data_addr = bnx2_COM_b06FwDataAddr;
2541 fw.data_len = bnx2_COM_b06FwDataLen;
2543 fw.data = bnx2_COM_b06FwData;
2545 fw.sbss_addr = bnx2_COM_b06FwSbssAddr;
2546 fw.sbss_len = bnx2_COM_b06FwSbssLen;
2548 fw.sbss = bnx2_COM_b06FwSbss;
2550 fw.bss_addr = bnx2_COM_b06FwBssAddr;
2551 fw.bss_len = bnx2_COM_b06FwBssLen;
2553 fw.bss = bnx2_COM_b06FwBss;
2555 fw.rodata_addr = bnx2_COM_b06FwRodataAddr;
2556 fw.rodata_len = bnx2_COM_b06FwRodataLen;
2557 fw.rodata_index = 0;
2558 fw.rodata = bnx2_COM_b06FwRodata;
2560 load_cpu_fw(bp, &cpu_reg, &fw);
2563 bnx2_gunzip_end(bp);
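/* Move the controller between D0 and D3hot.  On the way into D3hot the
 * MAC is optionally set up for Wake-on-LAN and the firmware is notified
 * through bnx2_fw_sync() before the PCI power state is changed.
 */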
2568 bnx2_set_power_state(struct bnx2 *bp, pci_power_t state)
2572 pci_read_config_word(bp->pdev, bp->pm_cap + PCI_PM_CTRL, &pmcsr);
2578 pci_write_config_word(bp->pdev, bp->pm_cap + PCI_PM_CTRL,
2579 (pmcsr & ~PCI_PM_CTRL_STATE_MASK) |
2580 PCI_PM_CTRL_PME_STATUS);
2582 if (pmcsr & PCI_PM_CTRL_STATE_MASK) {
2583 /* delay required during transition out of D3hot */
2584 current->state = TASK_UNINTERRUPTIBLE;
2585 schedule_timeout(HZ / 50);
2588 val = REG_RD(bp, BNX2_EMAC_MODE);
2589 val |= BNX2_EMAC_MODE_MPKT_RCVD | BNX2_EMAC_MODE_ACPI_RCVD;
2590 val &= ~BNX2_EMAC_MODE_MPKT;
2591 REG_WR(bp, BNX2_EMAC_MODE, val);
2593 val = REG_RD(bp, BNX2_RPM_CONFIG);
2594 val &= ~BNX2_RPM_CONFIG_ACPI_ENA;
2595 REG_WR(bp, BNX2_RPM_CONFIG, val);
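/* Entering D3hot with Wake-on-LAN enabled: temporarily restrict
 * autonegotiation to 10/100, program the MAC to accept magic packets
 * and all multicast frames, and keep the EMAC/RPM blocks running so
 * the chip can wake the host.
 */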
2606 autoneg = bp->autoneg;
2607 advertising = bp->advertising;
2609 bp->autoneg = AUTONEG_SPEED;
2610 bp->advertising = ADVERTISED_10baseT_Half |
2611 ADVERTISED_10baseT_Full |
2612 ADVERTISED_100baseT_Half |
2613 ADVERTISED_100baseT_Full |
2616 bnx2_setup_copper_phy(bp);
2618 bp->autoneg = autoneg;
2619 bp->advertising = advertising;
2621 bnx2_set_mac_addr(bp);
2623 val = REG_RD(bp, BNX2_EMAC_MODE);
2625 /* Enable port mode. */
2626 val &= ~BNX2_EMAC_MODE_PORT;
2627 val |= BNX2_EMAC_MODE_PORT_MII |
2628 BNX2_EMAC_MODE_MPKT_RCVD |
2629 BNX2_EMAC_MODE_ACPI_RCVD |
2630 BNX2_EMAC_MODE_MPKT;
2632 REG_WR(bp, BNX2_EMAC_MODE, val);
2634 /* receive all multicast */
2635 for (i = 0; i < NUM_MC_HASH_REGISTERS; i++) {
2636 REG_WR(bp, BNX2_EMAC_MULTICAST_HASH0 + (i * 4),
2639 REG_WR(bp, BNX2_EMAC_RX_MODE,
2640 BNX2_EMAC_RX_MODE_SORT_MODE);
2642 val = 1 | BNX2_RPM_SORT_USER0_BC_EN |
2643 BNX2_RPM_SORT_USER0_MC_EN;
2644 REG_WR(bp, BNX2_RPM_SORT_USER0, 0x0);
2645 REG_WR(bp, BNX2_RPM_SORT_USER0, val);
2646 REG_WR(bp, BNX2_RPM_SORT_USER0, val |
2647 BNX2_RPM_SORT_USER0_ENA);
2649 /* Need to enable EMAC and RPM for WOL. */
2650 REG_WR(bp, BNX2_MISC_ENABLE_SET_BITS,
2651 BNX2_MISC_ENABLE_SET_BITS_RX_PARSER_MAC_ENABLE |
2652 BNX2_MISC_ENABLE_SET_BITS_TX_HEADER_Q_ENABLE |
2653 BNX2_MISC_ENABLE_SET_BITS_EMAC_ENABLE);
2655 val = REG_RD(bp, BNX2_RPM_CONFIG);
2656 val &= ~BNX2_RPM_CONFIG_ACPI_ENA;
2657 REG_WR(bp, BNX2_RPM_CONFIG, val);
2659 wol_msg = BNX2_DRV_MSG_CODE_SUSPEND_WOL;
2662 wol_msg = BNX2_DRV_MSG_CODE_SUSPEND_NO_WOL;
2665 if (!(bp->flags & NO_WOL_FLAG))
2666 bnx2_fw_sync(bp, BNX2_DRV_MSG_DATA_WAIT3 | wol_msg, 0);
2668 pmcsr &= ~PCI_PM_CTRL_STATE_MASK;
2669 if ((CHIP_ID(bp) == CHIP_ID_5706_A0) ||
2670 (CHIP_ID(bp) == CHIP_ID_5706_A1)) {
2679 pmcsr |= PCI_PM_CTRL_PME_ENABLE;
2681 pci_write_config_word(bp->pdev, bp->pm_cap + PCI_PM_CTRL,
2684 /* No more memory access after this point until
2685 * device is brought back to D0.
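/* NVRAM access is arbitrated through BNX2_NVM_SW_ARB: set the request
 * bit, then poll until the corresponding grant bit (ARB2) is asserted
 * or NVRAM_TIMEOUT_COUNT iterations have elapsed.
 */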
2697 bnx2_acquire_nvram_lock(struct bnx2 *bp)
2702 /* Request access to the flash interface. */
2703 REG_WR(bp, BNX2_NVM_SW_ARB, BNX2_NVM_SW_ARB_ARB_REQ_SET2);
2704 for (j = 0; j < NVRAM_TIMEOUT_COUNT; j++) {
2705 val = REG_RD(bp, BNX2_NVM_SW_ARB);
2706 if (val & BNX2_NVM_SW_ARB_ARB_ARB2)
2712 if (j >= NVRAM_TIMEOUT_COUNT)
2719 bnx2_release_nvram_lock(struct bnx2 *bp)
2724 /* Relinquish nvram interface. */
2725 REG_WR(bp, BNX2_NVM_SW_ARB, BNX2_NVM_SW_ARB_ARB_REQ_CLR2);
2727 for (j = 0; j < NVRAM_TIMEOUT_COUNT; j++) {
2728 val = REG_RD(bp, BNX2_NVM_SW_ARB);
2729 if (!(val & BNX2_NVM_SW_ARB_ARB_ARB2))
2735 if (j >= NVRAM_TIMEOUT_COUNT)
2743 bnx2_enable_nvram_write(struct bnx2 *bp)
2747 val = REG_RD(bp, BNX2_MISC_CFG);
2748 REG_WR(bp, BNX2_MISC_CFG, val | BNX2_MISC_CFG_NVM_WR_EN_PCI);
2750 if (!bp->flash_info->buffered) {
2753 REG_WR(bp, BNX2_NVM_COMMAND, BNX2_NVM_COMMAND_DONE);
2754 REG_WR(bp, BNX2_NVM_COMMAND,
2755 BNX2_NVM_COMMAND_WREN | BNX2_NVM_COMMAND_DOIT);
2757 for (j = 0; j < NVRAM_TIMEOUT_COUNT; j++) {
2760 val = REG_RD(bp, BNX2_NVM_COMMAND);
2761 if (val & BNX2_NVM_COMMAND_DONE)
2765 if (j >= NVRAM_TIMEOUT_COUNT)
2772 bnx2_disable_nvram_write(struct bnx2 *bp)
2776 val = REG_RD(bp, BNX2_MISC_CFG);
2777 REG_WR(bp, BNX2_MISC_CFG, val & ~BNX2_MISC_CFG_NVM_WR_EN);
2782 bnx2_enable_nvram_access(struct bnx2 *bp)
2786 val = REG_RD(bp, BNX2_NVM_ACCESS_ENABLE);
2787 /* Enable both bits, even on read. */
2788 REG_WR(bp, BNX2_NVM_ACCESS_ENABLE,
2789 val | BNX2_NVM_ACCESS_ENABLE_EN | BNX2_NVM_ACCESS_ENABLE_WR_EN);
2793 bnx2_disable_nvram_access(struct bnx2 *bp)
2797 val = REG_RD(bp, BNX2_NVM_ACCESS_ENABLE);
2798 /* Disable both bits, even after read. */
2799 REG_WR(bp, BNX2_NVM_ACCESS_ENABLE,
2800 val & ~(BNX2_NVM_ACCESS_ENABLE_EN |
2801 BNX2_NVM_ACCESS_ENABLE_WR_EN));
2805 bnx2_nvram_erase_page(struct bnx2 *bp, u32 offset)
2810 if (bp->flash_info->buffered)
2811 /* Buffered flash, no erase needed */
2814 /* Build an erase command */
2815 cmd = BNX2_NVM_COMMAND_ERASE | BNX2_NVM_COMMAND_WR |
2816 BNX2_NVM_COMMAND_DOIT;
2818 /* Need to clear DONE bit separately. */
2819 REG_WR(bp, BNX2_NVM_COMMAND, BNX2_NVM_COMMAND_DONE);
2821 /* Address of the NVRAM page to erase. */
2822 REG_WR(bp, BNX2_NVM_ADDR, offset & BNX2_NVM_ADDR_NVM_ADDR_VALUE);
2824 /* Issue an erase command. */
2825 REG_WR(bp, BNX2_NVM_COMMAND, cmd);
2827 /* Wait for completion. */
2828 for (j = 0; j < NVRAM_TIMEOUT_COUNT; j++) {
2833 val = REG_RD(bp, BNX2_NVM_COMMAND);
2834 if (val & BNX2_NVM_COMMAND_DONE)
2838 if (j >= NVRAM_TIMEOUT_COUNT)
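/* Read a single 32-bit word from NVRAM.  cmd_flags carries the
 * FIRST/LAST markers used to bracket multi-word transfers; buffered
 * flash parts need the offset converted to page/offset form first.
 */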
2845 bnx2_nvram_read_dword(struct bnx2 *bp, u32 offset, u8 *ret_val, u32 cmd_flags)
2850 /* Build the command word. */
2851 cmd = BNX2_NVM_COMMAND_DOIT | cmd_flags;
2853 /* Convert the linear offset to a buffered-flash page/offset address. */
2854 if (bp->flash_info->buffered) {
2855 offset = ((offset / bp->flash_info->page_size) <<
2856 bp->flash_info->page_bits) +
2857 (offset % bp->flash_info->page_size);
2860 /* Need to clear DONE bit separately. */
2861 REG_WR(bp, BNX2_NVM_COMMAND, BNX2_NVM_COMMAND_DONE);
2863 /* Address of the NVRAM to read from. */
2864 REG_WR(bp, BNX2_NVM_ADDR, offset & BNX2_NVM_ADDR_NVM_ADDR_VALUE);
2866 /* Issue a read command. */
2867 REG_WR(bp, BNX2_NVM_COMMAND, cmd);
2869 /* Wait for completion. */
2870 for (j = 0; j < NVRAM_TIMEOUT_COUNT; j++) {
2875 val = REG_RD(bp, BNX2_NVM_COMMAND);
2876 if (val & BNX2_NVM_COMMAND_DONE) {
2877 val = REG_RD(bp, BNX2_NVM_READ);
2879 val = be32_to_cpu(val);
2880 memcpy(ret_val, &val, 4);
2884 if (j >= NVRAM_TIMEOUT_COUNT)
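/* Write a single 32-bit word to NVRAM, using the same FIRST/LAST
 * command flag convention as the dword read above.
 */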
2892 bnx2_nvram_write_dword(struct bnx2 *bp, u32 offset, u8 *val, u32 cmd_flags)
2897 /* Build the command word. */
2898 cmd = BNX2_NVM_COMMAND_DOIT | BNX2_NVM_COMMAND_WR | cmd_flags;
2900 /* Convert the linear offset to a buffered-flash page/offset address. */
2901 if (bp->flash_info->buffered) {
2902 offset = ((offset / bp->flash_info->page_size) <<
2903 bp->flash_info->page_bits) +
2904 (offset % bp->flash_info->page_size);
2907 /* Need to clear DONE bit separately. */
2908 REG_WR(bp, BNX2_NVM_COMMAND, BNX2_NVM_COMMAND_DONE);
2910 memcpy(&val32, val, 4);
2911 val32 = cpu_to_be32(val32);
2913 /* Write the data. */
2914 REG_WR(bp, BNX2_NVM_WRITE, val32);
2916 /* Address of the NVRAM to write to. */
2917 REG_WR(bp, BNX2_NVM_ADDR, offset & BNX2_NVM_ADDR_NVM_ADDR_VALUE);
2919 /* Issue the write command. */
2920 REG_WR(bp, BNX2_NVM_COMMAND, cmd);
2922 /* Wait for completion. */
2923 for (j = 0; j < NVRAM_TIMEOUT_COUNT; j++) {
2926 if (REG_RD(bp, BNX2_NVM_COMMAND) & BNX2_NVM_COMMAND_DONE)
2929 if (j >= NVRAM_TIMEOUT_COUNT)
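/* Identify the attached flash/EEPROM by matching the strapping bits in
 * NVM_CFG1 against flash_table, and program the NVM configuration
 * registers if the interface has not been reconfigured yet.
 */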
2936 bnx2_init_nvram(struct bnx2 *bp)
2939 int j, entry_count, rc;
2940 struct flash_spec *flash;
2942 /* Determine the selected interface. */
2943 val = REG_RD(bp, BNX2_NVM_CFG1);
2945 entry_count = sizeof(flash_table) / sizeof(struct flash_spec);
2948 if (val & 0x40000000) {
2949 /* Flash interface has been reconfigured */
2950 for (j = 0, flash = &flash_table[0]; j < entry_count;
2952 if ((val & FLASH_BACKUP_STRAP_MASK) ==
2953 (flash->config1 & FLASH_BACKUP_STRAP_MASK)) {
2954 bp->flash_info = flash;
2961 /* Flash interface has not been reconfigured yet */
2963 if (val & (1 << 23))
2964 mask = FLASH_BACKUP_STRAP_MASK;
2966 mask = FLASH_STRAP_MASK;
2968 for (j = 0, flash = &flash_table[0]; j < entry_count;
2971 if ((val & mask) == (flash->strapping & mask)) {
2972 bp->flash_info = flash;
2974 /* Request access to the flash interface. */
2975 if ((rc = bnx2_acquire_nvram_lock(bp)) != 0)
2978 /* Enable access to flash interface */
2979 bnx2_enable_nvram_access(bp);
2981 /* Reconfigure the flash interface */
2982 REG_WR(bp, BNX2_NVM_CFG1, flash->config1);
2983 REG_WR(bp, BNX2_NVM_CFG2, flash->config2);
2984 REG_WR(bp, BNX2_NVM_CFG3, flash->config3);
2985 REG_WR(bp, BNX2_NVM_WRITE1, flash->write1);
2987 /* Disable access to flash interface */
2988 bnx2_disable_nvram_access(bp);
2989 bnx2_release_nvram_lock(bp);
2994 } /* if (val & 0x40000000) */
2996 if (j == entry_count) {
2997 bp->flash_info = NULL;
2998 printk(KERN_ALERT PFX "Unknown flash/EEPROM type.\n");
3002 val = REG_RD_IND(bp, bp->shmem_base + BNX2_SHARED_HW_CFG_CONFIG2);
3003 val &= BNX2_SHARED_HW_CFG2_NVM_SIZE_MASK;
3005 bp->flash_size = val;
3007 bp->flash_size = bp->flash_info->total_size;
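/* Read an arbitrary byte range from NVRAM.  The hardware transfers
 * whole dwords only, so unaligned starts and ends are handled by
 * reading the surrounding dwords and copying out the requested bytes.
 */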
3013 bnx2_nvram_read(struct bnx2 *bp, u32 offset, u8 *ret_buf,
3017 u32 cmd_flags, offset32, len32, extra;
3022 /* Request access to the flash interface. */
3023 if ((rc = bnx2_acquire_nvram_lock(bp)) != 0)
3026 /* Enable access to flash interface */
3027 bnx2_enable_nvram_access(bp);
3040 pre_len = 4 - (offset & 3);
3042 if (pre_len >= len32) {
3044 cmd_flags = BNX2_NVM_COMMAND_FIRST |
3045 BNX2_NVM_COMMAND_LAST;
3048 cmd_flags = BNX2_NVM_COMMAND_FIRST;
3051 rc = bnx2_nvram_read_dword(bp, offset32, buf, cmd_flags);
3056 memcpy(ret_buf, buf + (offset & 3), pre_len);
3063 extra = 4 - (len32 & 3);
3064 len32 = (len32 + 4) & ~3;
3071 cmd_flags = BNX2_NVM_COMMAND_LAST;
3073 cmd_flags = BNX2_NVM_COMMAND_FIRST |
3074 BNX2_NVM_COMMAND_LAST;
3076 rc = bnx2_nvram_read_dword(bp, offset32, buf, cmd_flags);
3078 memcpy(ret_buf, buf, 4 - extra);
3080 else if (len32 > 0) {
3083 /* Read the first word. */
3087 cmd_flags = BNX2_NVM_COMMAND_FIRST;
3089 rc = bnx2_nvram_read_dword(bp, offset32, ret_buf, cmd_flags);
3091 /* Advance to the next dword. */
3096 while (len32 > 4 && rc == 0) {
3097 rc = bnx2_nvram_read_dword(bp, offset32, ret_buf, 0);
3099 /* Advance to the next dword. */
3108 cmd_flags = BNX2_NVM_COMMAND_LAST;
3109 rc = bnx2_nvram_read_dword(bp, offset32, buf, cmd_flags);
3111 memcpy(ret_buf, buf, 4 - extra);
3114 /* Disable access to flash interface */
3115 bnx2_disable_nvram_access(bp);
3117 bnx2_release_nvram_lock(bp);
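/* Write an arbitrary byte range to NVRAM.  Unaligned edges are padded
 * with the existing contents, and non-buffered flash is updated a page
 * at a time: read the page, erase it, then write it back with the new
 * data merged in.
 */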
3123 bnx2_nvram_write(struct bnx2 *bp, u32 offset, u8 *data_buf,
3126 u32 written, offset32, len32;
3127 u8 *buf, start[4], end[4], *flash_buffer = NULL;
3129 int align_start, align_end;
3134 align_start = align_end = 0;
3136 if ((align_start = (offset32 & 3))) {
3138 len32 += align_start;
3139 if ((rc = bnx2_nvram_read(bp, offset32, start, 4)))
3144 if ((len32 > 4) || !align_start) {
3145 align_end = 4 - (len32 & 3);
3147 if ((rc = bnx2_nvram_read(bp, offset32 + len32 - 4,
3154 if (align_start || align_end) {
3155 buf = kmalloc(len32, GFP_KERNEL);
3159 memcpy(buf, start, 4);
3162 memcpy(buf + len32 - 4, end, 4);
3164 memcpy(buf + align_start, data_buf, buf_size);
3167 if (bp->flash_info->buffered == 0) {
3168 flash_buffer = kmalloc(264, GFP_KERNEL);
3169 if (flash_buffer == NULL) {
3171 goto nvram_write_end;
3176 while ((written < len32) && (rc == 0)) {
3177 u32 page_start, page_end, data_start, data_end;
3178 u32 addr, cmd_flags;
3181 /* Find the page_start addr */
3182 page_start = offset32 + written;
3183 page_start -= (page_start % bp->flash_info->page_size);
3184 /* Find the page_end addr */
3185 page_end = page_start + bp->flash_info->page_size;
3186 /* Find the data_start addr */
3187 data_start = (written == 0) ? offset32 : page_start;
3188 /* Find the data_end addr */
3189 data_end = (page_end > offset32 + len32) ?
3190 (offset32 + len32) : page_end;
3192 /* Request access to the flash interface. */
3193 if ((rc = bnx2_acquire_nvram_lock(bp)) != 0)
3194 goto nvram_write_end;
3196 /* Enable access to flash interface */
3197 bnx2_enable_nvram_access(bp);
3199 cmd_flags = BNX2_NVM_COMMAND_FIRST;
3200 if (bp->flash_info->buffered == 0) {
3203 /* Read the whole page into the buffer
3204 * (non-buffered flash only) */
3205 for (j = 0; j < bp->flash_info->page_size; j += 4) {
3206 if (j == (bp->flash_info->page_size - 4)) {
3207 cmd_flags |= BNX2_NVM_COMMAND_LAST;
3209 rc = bnx2_nvram_read_dword(bp,
3215 goto nvram_write_end;
3221 /* Enable writes to flash interface (unlock write-protect) */
3222 if ((rc = bnx2_enable_nvram_write(bp)) != 0)
3223 goto nvram_write_end;
3225 /* Erase the page */
3226 if ((rc = bnx2_nvram_erase_page(bp, page_start)) != 0)
3227 goto nvram_write_end;
3229 /* Re-enable writes for the actual write */
3230 bnx2_enable_nvram_write(bp);
3232 /* Loop to write back the buffer data from page_start to
3235 if (bp->flash_info->buffered == 0) {
3236 for (addr = page_start; addr < data_start;
3237 addr += 4, i += 4) {
3239 rc = bnx2_nvram_write_dword(bp, addr,
3240 &flash_buffer[i], cmd_flags);
3243 goto nvram_write_end;
3249 /* Loop to write the new data from data_start to data_end */
3250 for (addr = data_start; addr < data_end; addr += 4, i += 4) {
3251 if ((addr == page_end - 4) ||
3252 ((bp->flash_info->buffered) &&
3253 (addr == data_end - 4))) {
3255 cmd_flags |= BNX2_NVM_COMMAND_LAST;
3257 rc = bnx2_nvram_write_dword(bp, addr, buf,
3261 goto nvram_write_end;
3267 /* Loop to write back the buffer data from data_end
3269 if (bp->flash_info->buffered == 0) {
3270 for (addr = data_end; addr < page_end;
3271 addr += 4, i += 4) {
3273 if (addr == page_end-4) {
3274 cmd_flags = BNX2_NVM_COMMAND_LAST;
3276 rc = bnx2_nvram_write_dword(bp, addr,
3277 &flash_buffer[i], cmd_flags);
3280 goto nvram_write_end;
3286 /* Disable writes to flash interface (lock write-protect) */
3287 bnx2_disable_nvram_write(bp);
3289 /* Disable access to flash interface */
3290 bnx2_disable_nvram_access(bp);
3291 bnx2_release_nvram_lock(bp);
3293 /* Increment written */
3294 written += data_end - data_start;
3298 if (bp->flash_info->buffered == 0)
3299 kfree(flash_buffer);
3301 if (align_start || align_end)
3307 bnx2_reset_chip(struct bnx2 *bp, u32 reset_code)
3312 /* Wait for the current PCI transaction to complete before
3313 * issuing a reset. */
3314 REG_WR(bp, BNX2_MISC_ENABLE_CLR_BITS,
3315 BNX2_MISC_ENABLE_CLR_BITS_TX_DMA_ENABLE |
3316 BNX2_MISC_ENABLE_CLR_BITS_DMA_ENGINE_ENABLE |
3317 BNX2_MISC_ENABLE_CLR_BITS_RX_DMA_ENABLE |
3318 BNX2_MISC_ENABLE_CLR_BITS_HOST_COALESCE_ENABLE);
3319 val = REG_RD(bp, BNX2_MISC_ENABLE_CLR_BITS);
3322 /* Wait for the firmware to tell us it is ok to issue a reset. */
3323 bnx2_fw_sync(bp, BNX2_DRV_MSG_DATA_WAIT0 | reset_code, 1);
3325 /* Deposit a driver reset signature so the firmware knows that
3326 * this is a soft reset. */
3327 REG_WR_IND(bp, bp->shmem_base + BNX2_DRV_RESET_SIGNATURE,
3328 BNX2_DRV_RESET_SIGNATURE_MAGIC);
3330 /* Do a dummy read to force the chip to complete all current transactions
3331 * before we issue a reset. */
3332 val = REG_RD(bp, BNX2_MISC_ID);
3334 val = BNX2_PCICFG_MISC_CONFIG_CORE_RST_REQ |
3335 BNX2_PCICFG_MISC_CONFIG_REG_WINDOW_ENA |
3336 BNX2_PCICFG_MISC_CONFIG_TARGET_MB_WORD_SWAP;
3339 REG_WR(bp, BNX2_PCICFG_MISC_CONFIG, val);
3341 if ((CHIP_ID(bp) == CHIP_ID_5706_A0) ||
3342 (CHIP_ID(bp) == CHIP_ID_5706_A1)) {
3343 current->state = TASK_UNINTERRUPTIBLE;
3344 schedule_timeout(HZ / 50);
3347 /* Reset takes approximately 30 usec */
3348 for (i = 0; i < 10; i++) {
3349 val = REG_RD(bp, BNX2_PCICFG_MISC_CONFIG);
3350 if ((val & (BNX2_PCICFG_MISC_CONFIG_CORE_RST_REQ |
3351 BNX2_PCICFG_MISC_CONFIG_CORE_RST_BSY)) == 0) {
3357 if (val & (BNX2_PCICFG_MISC_CONFIG_CORE_RST_REQ |
3358 BNX2_PCICFG_MISC_CONFIG_CORE_RST_BSY)) {
3359 printk(KERN_ERR PFX "Chip reset did not complete\n");
3363 /* Make sure byte swapping is properly configured. */
3364 val = REG_RD(bp, BNX2_PCI_SWAP_DIAG0);
3365 if (val != 0x01020304) {
3366 printk(KERN_ERR PFX "Chip not in correct endian mode\n");
3370 /* Wait for the firmware to finish its initialization. */
3371 rc = bnx2_fw_sync(bp, BNX2_DRV_MSG_DATA_WAIT1 | reset_code, 0);
3375 if (CHIP_ID(bp) == CHIP_ID_5706_A0) {
3376 /* Adjust the voltage regulator two steps lower. The default
3377 * of this register is 0x0000000e. */
3378 REG_WR(bp, BNX2_MISC_VREG_CONTROL, 0x000000fa);
3380 /* Remove bad rbuf memory from the free pool. */
3381 rc = bnx2_alloc_bad_rbuf(bp);
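/* Bring the freshly reset chip to an operational state: program the DMA
 * byte/word swapping, load the on-chip CPU firmware, set up NVRAM and
 * the host coalescing parameters, and install the initial receive
 * filter before telling the firmware that initialization is done.
 */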
3388 bnx2_init_chip(struct bnx2 *bp)
3393 /* Make sure the interrupt is not active. */
3394 REG_WR(bp, BNX2_PCICFG_INT_ACK_CMD, BNX2_PCICFG_INT_ACK_CMD_MASK_INT);
3396 val = BNX2_DMA_CONFIG_DATA_BYTE_SWAP |
3397 BNX2_DMA_CONFIG_DATA_WORD_SWAP |
3399 BNX2_DMA_CONFIG_CNTL_BYTE_SWAP |
3401 BNX2_DMA_CONFIG_CNTL_WORD_SWAP |
3402 DMA_READ_CHANS << 12 |
3403 DMA_WRITE_CHANS << 16;
3405 val |= (0x2 << 20) | (1 << 11);
3407 if ((bp->flags & PCIX_FLAG) && (bp->bus_speed_mhz == 133))
3410 if ((CHIP_NUM(bp) == CHIP_NUM_5706) &&
3411 (CHIP_ID(bp) != CHIP_ID_5706_A0) && !(bp->flags & PCIX_FLAG))
3412 val |= BNX2_DMA_CONFIG_CNTL_PING_PONG_DMA;
3414 REG_WR(bp, BNX2_DMA_CONFIG, val);
3416 if (CHIP_ID(bp) == CHIP_ID_5706_A0) {
3417 val = REG_RD(bp, BNX2_TDMA_CONFIG);
3418 val |= BNX2_TDMA_CONFIG_ONE_DMA;
3419 REG_WR(bp, BNX2_TDMA_CONFIG, val);
3422 if (bp->flags & PCIX_FLAG) {
3425 pci_read_config_word(bp->pdev, bp->pcix_cap + PCI_X_CMD,
3427 pci_write_config_word(bp->pdev, bp->pcix_cap + PCI_X_CMD,
3428 val16 & ~PCI_X_CMD_ERO);
3431 REG_WR(bp, BNX2_MISC_ENABLE_SET_BITS,
3432 BNX2_MISC_ENABLE_SET_BITS_HOST_COALESCE_ENABLE |
3433 BNX2_MISC_ENABLE_STATUS_BITS_RX_V2P_ENABLE |
3434 BNX2_MISC_ENABLE_STATUS_BITS_CONTEXT_ENABLE);
3436 /* Initialize context mapping and zero out the quick contexts. The
3437 * context block must have already been enabled. */
3438 bnx2_init_context(bp);
3440 if ((rc = bnx2_init_cpus(bp)) != 0)
3443 bnx2_init_nvram(bp);
3445 bnx2_set_mac_addr(bp);
3447 val = REG_RD(bp, BNX2_MQ_CONFIG);
3448 val &= ~BNX2_MQ_CONFIG_KNL_BYP_BLK_SIZE;
3449 val |= BNX2_MQ_CONFIG_KNL_BYP_BLK_SIZE_256;
3450 REG_WR(bp, BNX2_MQ_CONFIG, val);
3452 val = 0x10000 + (MAX_CID_CNT * MB_KERNEL_CTX_SIZE);
3453 REG_WR(bp, BNX2_MQ_KNL_BYP_WIND_START, val);
3454 REG_WR(bp, BNX2_MQ_KNL_WIND_END, val);
3456 val = (BCM_PAGE_BITS - 8) << 24;
3457 REG_WR(bp, BNX2_RV2P_CONFIG, val);
3459 /* Configure page size. */
3460 val = REG_RD(bp, BNX2_TBDR_CONFIG);
3461 val &= ~BNX2_TBDR_CONFIG_PAGE_SIZE;
3462 val |= (BCM_PAGE_BITS - 8) << 24 | 0x40;
3463 REG_WR(bp, BNX2_TBDR_CONFIG, val);
3465 val = bp->mac_addr[0] +
3466 (bp->mac_addr[1] << 8) +
3467 (bp->mac_addr[2] << 16) +
3469 (bp->mac_addr[4] << 8) +
3470 (bp->mac_addr[5] << 16);
3471 REG_WR(bp, BNX2_EMAC_BACKOFF_SEED, val);
3473 /* Program the MTU. Also include 4 bytes for CRC32. */
3474 val = bp->dev->mtu + ETH_HLEN + 4;
3475 if (val > (MAX_ETHERNET_PACKET_SIZE + 4))
3476 val |= BNX2_EMAC_RX_MTU_SIZE_JUMBO_ENA;
3477 REG_WR(bp, BNX2_EMAC_RX_MTU_SIZE, val);
3479 bp->last_status_idx = 0;
3480 bp->rx_mode = BNX2_EMAC_RX_MODE_SORT_MODE;
3482 /* Set up how to generate a link change interrupt. */
3483 REG_WR(bp, BNX2_EMAC_ATTENTION_ENA, BNX2_EMAC_ATTENTION_ENA_LINK);
3485 REG_WR(bp, BNX2_HC_STATUS_ADDR_L,
3486 (u64) bp->status_blk_mapping & 0xffffffff);
3487 REG_WR(bp, BNX2_HC_STATUS_ADDR_H, (u64) bp->status_blk_mapping >> 32);
3489 REG_WR(bp, BNX2_HC_STATISTICS_ADDR_L,
3490 (u64) bp->stats_blk_mapping & 0xffffffff);
3491 REG_WR(bp, BNX2_HC_STATISTICS_ADDR_H,
3492 (u64) bp->stats_blk_mapping >> 32);
3494 REG_WR(bp, BNX2_HC_TX_QUICK_CONS_TRIP,
3495 (bp->tx_quick_cons_trip_int << 16) | bp->tx_quick_cons_trip);
3497 REG_WR(bp, BNX2_HC_RX_QUICK_CONS_TRIP,
3498 (bp->rx_quick_cons_trip_int << 16) | bp->rx_quick_cons_trip);
3500 REG_WR(bp, BNX2_HC_COMP_PROD_TRIP,
3501 (bp->comp_prod_trip_int << 16) | bp->comp_prod_trip);
3503 REG_WR(bp, BNX2_HC_TX_TICKS, (bp->tx_ticks_int << 16) | bp->tx_ticks);
3505 REG_WR(bp, BNX2_HC_RX_TICKS, (bp->rx_ticks_int << 16) | bp->rx_ticks);
3507 REG_WR(bp, BNX2_HC_COM_TICKS,
3508 (bp->com_ticks_int << 16) | bp->com_ticks);
3510 REG_WR(bp, BNX2_HC_CMD_TICKS,
3511 (bp->cmd_ticks_int << 16) | bp->cmd_ticks);
3513 REG_WR(bp, BNX2_HC_STATS_TICKS, bp->stats_ticks & 0xffff00);
3514 REG_WR(bp, BNX2_HC_STAT_COLLECT_TICKS, 0xbb8); /* 3ms */
3516 if (CHIP_ID(bp) == CHIP_ID_5706_A1)
3517 REG_WR(bp, BNX2_HC_CONFIG, BNX2_HC_CONFIG_COLLECT_STATS);
3519 REG_WR(bp, BNX2_HC_CONFIG, BNX2_HC_CONFIG_RX_TMR_MODE |
3520 BNX2_HC_CONFIG_TX_TMR_MODE |
3521 BNX2_HC_CONFIG_COLLECT_STATS);
3524 /* Clear internal stats counters. */
3525 REG_WR(bp, BNX2_HC_COMMAND, BNX2_HC_COMMAND_CLR_STAT_NOW);
3527 REG_WR(bp, BNX2_HC_ATTN_BITS_ENABLE, STATUS_ATTN_BITS_LINK_STATE);
3529 if (REG_RD_IND(bp, bp->shmem_base + BNX2_PORT_FEATURE) &
3530 BNX2_PORT_FEATURE_ASF_ENABLED)
3531 bp->flags |= ASF_ENABLE_FLAG;
3533 /* Initialize the receive filter. */
3534 bnx2_set_rx_mode(bp->dev);
3536 rc = bnx2_fw_sync(bp, BNX2_DRV_MSG_DATA_WAIT2 | BNX2_DRV_MSG_CODE_RESET,
3539 REG_WR(bp, BNX2_MISC_ENABLE_SET_BITS, 0x5ffffff);
3540 REG_RD(bp, BNX2_MISC_ENABLE_SET_BITS);
3544 bp->hc_cmd = REG_RD(bp, BNX2_HC_COMMAND);
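/* Set up the transmit ring: the extra buffer descriptor at the end of
 * the ring points back to the ring base, and the ring type and base
 * address are written into the TX context with CTX_WR.
 */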
3551 bnx2_init_tx_ring(struct bnx2 *bp)
3556 txbd = &bp->tx_desc_ring[MAX_TX_DESC_CNT];
3558 txbd->tx_bd_haddr_hi = (u64) bp->tx_desc_mapping >> 32;
3559 txbd->tx_bd_haddr_lo = (u64) bp->tx_desc_mapping & 0xffffffff;
3564 bp->tx_prod_bseq = 0;
3566 val = BNX2_L2CTX_TYPE_TYPE_L2;
3567 val |= BNX2_L2CTX_TYPE_SIZE_L2;
3568 CTX_WR(bp, GET_CID_ADDR(TX_CID), BNX2_L2CTX_TYPE, val);
3570 val = BNX2_L2CTX_CMD_TYPE_TYPE_L2;
3572 CTX_WR(bp, GET_CID_ADDR(TX_CID), BNX2_L2CTX_CMD_TYPE, val);
3574 val = (u64) bp->tx_desc_mapping >> 32;
3575 CTX_WR(bp, GET_CID_ADDR(TX_CID), BNX2_L2CTX_TBDR_BHADDR_HI, val);
3577 val = (u64) bp->tx_desc_mapping & 0xffffffff;
3578 CTX_WR(bp, GET_CID_ADDR(TX_CID), BNX2_L2CTX_TBDR_BHADDR_LO, val);
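/* Set up the receive ring: size the rx buffers for the current MTU,
 * chain the descriptor pages together, write the ring base address into
 * the RX context, and pre-fill the ring with SKBs.
 */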
3582 bnx2_init_rx_ring(struct bnx2 *bp)
3586 u16 prod, ring_prod;
3589 /* 8 for CRC and VLAN */
3590 bp->rx_buf_use_size = bp->dev->mtu + ETH_HLEN + bp->rx_offset + 8;
3591 /* 8 for alignment */
3592 bp->rx_buf_size = bp->rx_buf_use_size + 8;
3594 ring_prod = prod = bp->rx_prod = 0;
3597 bp->rx_prod_bseq = 0;
3599 for (i = 0; i < bp->rx_max_ring; i++) {
3602 rxbd = &bp->rx_desc_ring[i][0];
3603 for (j = 0; j < MAX_RX_DESC_CNT; j++, rxbd++) {
3604 rxbd->rx_bd_len = bp->rx_buf_use_size;
3605 rxbd->rx_bd_flags = RX_BD_FLAGS_START | RX_BD_FLAGS_END;
3607 if (i == (bp->rx_max_ring - 1))
3611 rxbd->rx_bd_haddr_hi = (u64) bp->rx_desc_mapping[j] >> 32;
3612 rxbd->rx_bd_haddr_lo = (u64) bp->rx_desc_mapping[j] &
3616 val = BNX2_L2CTX_CTX_TYPE_CTX_BD_CHN_TYPE_VALUE;
3617 val |= BNX2_L2CTX_CTX_TYPE_SIZE_L2;
3619 CTX_WR(bp, GET_CID_ADDR(RX_CID), BNX2_L2CTX_CTX_TYPE, val);
3621 val = (u64) bp->rx_desc_mapping[0] >> 32;
3622 CTX_WR(bp, GET_CID_ADDR(RX_CID), BNX2_L2CTX_NX_BDHADDR_HI, val);
3624 val = (u64) bp->rx_desc_mapping[0] & 0xffffffff;
3625 CTX_WR(bp, GET_CID_ADDR(RX_CID), BNX2_L2CTX_NX_BDHADDR_LO, val);
3627 for (i = 0; i < bp->rx_ring_size; i++) {
3628 if (bnx2_alloc_rx_skb(bp, ring_prod) < 0) {
3631 prod = NEXT_RX_BD(prod);
3632 ring_prod = RX_RING_IDX(prod);
3636 REG_WR16(bp, MB_RX_CID_ADDR + BNX2_L2CTX_HOST_BDIDX, prod);
3638 REG_WR(bp, MB_RX_CID_ADDR + BNX2_L2CTX_HOST_BSEQ, bp->rx_prod_bseq);
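/* Convert the requested RX ring size into the number of descriptor
 * pages needed, rounded up to the next power of two.
 */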
3642 bnx2_set_rx_ring_size(struct bnx2 *bp, u32 size)
3646 bp->rx_ring_size = size;
3648 while (size > MAX_RX_DESC_CNT) {
3649 size -= MAX_RX_DESC_CNT;
3652 /* round to next power of 2 */
3654 while ((max & num_rings) == 0)
3657 if (num_rings != max)
3660 bp->rx_max_ring = max;
3661 bp->rx_max_ring_idx = (bp->rx_max_ring * RX_DESC_CNT) - 1;
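/* Free any SKBs still attached to the TX ring, unmapping the linear
 * data and every fragment page before dropping each packet.
 */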
3665 bnx2_free_tx_skbs(struct bnx2 *bp)
3669 if (bp->tx_buf_ring == NULL)
3672 for (i = 0; i < TX_DESC_CNT; ) {
3673 struct sw_bd *tx_buf = &bp->tx_buf_ring[i];
3674 struct sk_buff *skb = tx_buf->skb;
3682 pci_unmap_single(bp->pdev, pci_unmap_addr(tx_buf, mapping),
3683 skb_headlen(skb), PCI_DMA_TODEVICE);
3687 last = skb_shinfo(skb)->nr_frags;
3688 for (j = 0; j < last; j++) {
3689 tx_buf = &bp->tx_buf_ring[i + j + 1];
3690 pci_unmap_page(bp->pdev,
3691 pci_unmap_addr(tx_buf, mapping),
3692 skb_shinfo(skb)->frags[j].size,
3695 dev_kfree_skb_any(skb);
3702 bnx2_free_rx_skbs(struct bnx2 *bp)
3706 if (bp->rx_buf_ring == NULL)
3709 for (i = 0; i < bp->rx_max_ring_idx; i++) {
3710 struct sw_bd *rx_buf = &bp->rx_buf_ring[i];
3711 struct sk_buff *skb = rx_buf->skb;
3716 pci_unmap_single(bp->pdev, pci_unmap_addr(rx_buf, mapping),
3717 bp->rx_buf_use_size, PCI_DMA_FROMDEVICE);
3721 dev_kfree_skb_any(skb);
3726 bnx2_free_skbs(struct bnx2 *bp)
3728 bnx2_free_tx_skbs(bp);
3729 bnx2_free_rx_skbs(bp);
3733 bnx2_reset_nic(struct bnx2 *bp, u32 reset_code)
3737 rc = bnx2_reset_chip(bp, reset_code);
3742 if ((rc = bnx2_init_chip(bp)) != 0)
3745 bnx2_init_tx_ring(bp);
3746 bnx2_init_rx_ring(bp);
3751 bnx2_init_nic(struct bnx2 *bp)
3755 if ((rc = bnx2_reset_nic(bp, BNX2_DRV_MSG_CODE_RESET)) != 0)
3764 bnx2_test_registers(struct bnx2 *bp)
3768 static const struct {
3774 { 0x006c, 0, 0x00000000, 0x0000003f },
3775 { 0x0090, 0, 0xffffffff, 0x00000000 },
3776 { 0x0094, 0, 0x00000000, 0x00000000 },
3778 { 0x0404, 0, 0x00003f00, 0x00000000 },
3779 { 0x0418, 0, 0x00000000, 0xffffffff },
3780 { 0x041c, 0, 0x00000000, 0xffffffff },
3781 { 0x0420, 0, 0x00000000, 0x80ffffff },
3782 { 0x0424, 0, 0x00000000, 0x00000000 },
3783 { 0x0428, 0, 0x00000000, 0x00000001 },
3784 { 0x0450, 0, 0x00000000, 0x0000ffff },
3785 { 0x0454, 0, 0x00000000, 0xffffffff },
3786 { 0x0458, 0, 0x00000000, 0xffffffff },
3788 { 0x0808, 0, 0x00000000, 0xffffffff },
3789 { 0x0854, 0, 0x00000000, 0xffffffff },
3790 { 0x0868, 0, 0x00000000, 0x77777777 },
3791 { 0x086c, 0, 0x00000000, 0x77777777 },
3792 { 0x0870, 0, 0x00000000, 0x77777777 },
3793 { 0x0874, 0, 0x00000000, 0x77777777 },
3795 { 0x0c00, 0, 0x00000000, 0x00000001 },
3796 { 0x0c04, 0, 0x00000000, 0x03ff0001 },
3797 { 0x0c08, 0, 0x0f0ff073, 0x00000000 },
3799 { 0x1000, 0, 0x00000000, 0x00000001 },
3800 { 0x1004, 0, 0x00000000, 0x000f0001 },
3802 { 0x1408, 0, 0x01c00800, 0x00000000 },
3803 { 0x149c, 0, 0x8000ffff, 0x00000000 },
3804 { 0x14a8, 0, 0x00000000, 0x000001ff },
3805 { 0x14ac, 0, 0x0fffffff, 0x10000000 },
3806 { 0x14b0, 0, 0x00000002, 0x00000001 },
3807 { 0x14b8, 0, 0x00000000, 0x00000000 },
3808 { 0x14c0, 0, 0x00000000, 0x00000009 },
3809 { 0x14c4, 0, 0x00003fff, 0x00000000 },
3810 { 0x14cc, 0, 0x00000000, 0x00000001 },
3811 { 0x14d0, 0, 0xffffffff, 0x00000000 },
3813 { 0x1800, 0, 0x00000000, 0x00000001 },
3814 { 0x1804, 0, 0x00000000, 0x00000003 },
3816 { 0x2800, 0, 0x00000000, 0x00000001 },
3817 { 0x2804, 0, 0x00000000, 0x00003f01 },
3818 { 0x2808, 0, 0x0f3f3f03, 0x00000000 },
3819 { 0x2810, 0, 0xffff0000, 0x00000000 },
3820 { 0x2814, 0, 0xffff0000, 0x00000000 },
3821 { 0x2818, 0, 0xffff0000, 0x00000000 },
3822 { 0x281c, 0, 0xffff0000, 0x00000000 },
3823 { 0x2834, 0, 0xffffffff, 0x00000000 },
3824 { 0x2840, 0, 0x00000000, 0xffffffff },
3825 { 0x2844, 0, 0x00000000, 0xffffffff },
3826 { 0x2848, 0, 0xffffffff, 0x00000000 },
3827 { 0x284c, 0, 0xf800f800, 0x07ff07ff },
3829 { 0x2c00, 0, 0x00000000, 0x00000011 },
3830 { 0x2c04, 0, 0x00000000, 0x00030007 },
3832 { 0x3c00, 0, 0x00000000, 0x00000001 },
3833 { 0x3c04, 0, 0x00000000, 0x00070000 },
3834 { 0x3c08, 0, 0x00007f71, 0x07f00000 },
3835 { 0x3c0c, 0, 0x1f3ffffc, 0x00000000 },
3836 { 0x3c10, 0, 0xffffffff, 0x00000000 },
3837 { 0x3c14, 0, 0x00000000, 0xffffffff },
3838 { 0x3c18, 0, 0x00000000, 0xffffffff },
3839 { 0x3c1c, 0, 0xfffff000, 0x00000000 },
3840 { 0x3c20, 0, 0xffffff00, 0x00000000 },
3842 { 0x5004, 0, 0x00000000, 0x0000007f },
3843 { 0x5008, 0, 0x0f0007ff, 0x00000000 },
3844 { 0x500c, 0, 0xf800f800, 0x07ff07ff },
3846 { 0x5c00, 0, 0x00000000, 0x00000001 },
3847 { 0x5c04, 0, 0x00000000, 0x0003000f },
3848 { 0x5c08, 0, 0x00000003, 0x00000000 },
3849 { 0x5c0c, 0, 0x0000fff8, 0x00000000 },
3850 { 0x5c10, 0, 0x00000000, 0xffffffff },
3851 { 0x5c80, 0, 0x00000000, 0x0f7113f1 },
3852 { 0x5c84, 0, 0x00000000, 0x0000f333 },
3853 { 0x5c88, 0, 0x00000000, 0x00077373 },
3854 { 0x5c8c, 0, 0x00000000, 0x0007f737 },
3856 { 0x6808, 0, 0x0000ff7f, 0x00000000 },
3857 { 0x680c, 0, 0xffffffff, 0x00000000 },
3858 { 0x6810, 0, 0xffffffff, 0x00000000 },
3859 { 0x6814, 0, 0xffffffff, 0x00000000 },
3860 { 0x6818, 0, 0xffffffff, 0x00000000 },
3861 { 0x681c, 0, 0xffffffff, 0x00000000 },
3862 { 0x6820, 0, 0x00ff00ff, 0x00000000 },
3863 { 0x6824, 0, 0x00ff00ff, 0x00000000 },
3864 { 0x6828, 0, 0x00ff00ff, 0x00000000 },
3865 { 0x682c, 0, 0x03ff03ff, 0x00000000 },
3866 { 0x6830, 0, 0x03ff03ff, 0x00000000 },
3867 { 0x6834, 0, 0x03ff03ff, 0x00000000 },
3868 { 0x6838, 0, 0x03ff03ff, 0x00000000 },
3869 { 0x683c, 0, 0x0000ffff, 0x00000000 },
3870 { 0x6840, 0, 0x00000ff0, 0x00000000 },
3871 { 0x6844, 0, 0x00ffff00, 0x00000000 },
3872 { 0x684c, 0, 0xffffffff, 0x00000000 },
3873 { 0x6850, 0, 0x7f7f7f7f, 0x00000000 },
3874 { 0x6854, 0, 0x7f7f7f7f, 0x00000000 },
3875 { 0x6858, 0, 0x7f7f7f7f, 0x00000000 },
3876 { 0x685c, 0, 0x7f7f7f7f, 0x00000000 },
3877 { 0x6908, 0, 0x00000000, 0x0001ff0f },
3878 { 0x690c, 0, 0x00000000, 0x0ffe00f0 },
3880 { 0xffff, 0, 0x00000000, 0x00000000 },
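/* For each register in the table above, write all zeros and then all
 * ones, checking that read/write bits respond and read-only bits keep
 * their original value; the saved value is restored afterwards.
 */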
3884 for (i = 0; reg_tbl[i].offset != 0xffff; i++) {
3885 u32 offset, rw_mask, ro_mask, save_val, val;
3887 offset = (u32) reg_tbl[i].offset;
3888 rw_mask = reg_tbl[i].rw_mask;
3889 ro_mask = reg_tbl[i].ro_mask;
3891 save_val = readl(bp->regview + offset);
3893 writel(0, bp->regview + offset);
3895 val = readl(bp->regview + offset);
3896 if ((val & rw_mask) != 0) {
3900 if ((val & ro_mask) != (save_val & ro_mask)) {
3904 writel(0xffffffff, bp->regview + offset);
3906 val = readl(bp->regview + offset);
3907 if ((val & rw_mask) != rw_mask) {
3911 if ((val & ro_mask) != (save_val & ro_mask)) {
3915 writel(save_val, bp->regview + offset);
3919 writel(save_val, bp->regview + offset);
3927 bnx2_do_mem_test(struct bnx2 *bp, u32 start, u32 size)
3929 static const u32 test_pattern[] = { 0x00000000, 0xffffffff, 0x55555555,
3930 0xaaaaaaaa , 0xaa55aa55, 0x55aa55aa };
3933 for (i = 0; i < sizeof(test_pattern) / 4; i++) {
3936 for (offset = 0; offset < size; offset += 4) {
3938 REG_WR_IND(bp, start + offset, test_pattern[i]);
3940 if (REG_RD_IND(bp, start + offset) !=
3950 bnx2_test_memory(struct bnx2 *bp)
3954 static const struct {
3958 { 0x60000, 0x4000 },
3959 { 0xa0000, 0x3000 },
3960 { 0xe0000, 0x4000 },
3961 { 0x120000, 0x4000 },
3962 { 0x1a0000, 0x4000 },
3963 { 0x160000, 0x4000 },
3967 for (i = 0; mem_tbl[i].offset != 0xffffffff; i++) {
3968 if ((ret = bnx2_do_mem_test(bp, mem_tbl[i].offset,
3969 mem_tbl[i].len)) != 0) {
3977 #define BNX2_MAC_LOOPBACK 0
3978 #define BNX2_PHY_LOOPBACK 1
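/* Loopback self-test: put the MAC or PHY in loopback mode, transmit one
 * self-addressed packet, and verify that it comes back on the RX ring
 * with the expected length and payload.
 */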
3981 bnx2_run_loopback(struct bnx2 *bp, int loopback_mode)
3983 unsigned int pkt_size, num_pkts, i;
3984 struct sk_buff *skb, *rx_skb;
3985 unsigned char *packet;
3986 u16 rx_start_idx, rx_idx;
3989 struct sw_bd *rx_buf;
3990 struct l2_fhdr *rx_hdr;
3993 if (loopback_mode == BNX2_MAC_LOOPBACK) {
3994 bp->loopback = MAC_LOOPBACK;
3995 bnx2_set_mac_loopback(bp);
3997 else if (loopback_mode == BNX2_PHY_LOOPBACK) {
3999 bnx2_set_phy_loopback(bp);
4005 skb = dev_alloc_skb(pkt_size);
4008 packet = skb_put(skb, pkt_size);
4009 memcpy(packet, bp->mac_addr, 6);
4010 memset(packet + 6, 0x0, 8);
4011 for (i = 14; i < pkt_size; i++)
4012 packet[i] = (unsigned char) (i & 0xff);
4014 map = pci_map_single(bp->pdev, skb->data, pkt_size,
4017 REG_WR(bp, BNX2_HC_COMMAND,
4018 bp->hc_cmd | BNX2_HC_COMMAND_COAL_NOW_WO_INT);
4020 REG_RD(bp, BNX2_HC_COMMAND);
4023 rx_start_idx = bp->status_blk->status_rx_quick_consumer_index0;
4027 txbd = &bp->tx_desc_ring[TX_RING_IDX(bp->tx_prod)];
4029 txbd->tx_bd_haddr_hi = (u64) map >> 32;
4030 txbd->tx_bd_haddr_lo = (u64) map & 0xffffffff;
4031 txbd->tx_bd_mss_nbytes = pkt_size;
4032 txbd->tx_bd_vlan_tag_flags = TX_BD_FLAGS_START | TX_BD_FLAGS_END;
4035 bp->tx_prod = NEXT_TX_BD(bp->tx_prod);
4036 bp->tx_prod_bseq += pkt_size;
4038 REG_WR16(bp, MB_TX_CID_ADDR + BNX2_L2CTX_TX_HOST_BIDX, bp->tx_prod);
4039 REG_WR(bp, MB_TX_CID_ADDR + BNX2_L2CTX_TX_HOST_BSEQ, bp->tx_prod_bseq);
4043 REG_WR(bp, BNX2_HC_COMMAND,
4044 bp->hc_cmd | BNX2_HC_COMMAND_COAL_NOW_WO_INT);
4046 REG_RD(bp, BNX2_HC_COMMAND);
4050 pci_unmap_single(bp->pdev, map, pkt_size, PCI_DMA_TODEVICE);
4053 if (bp->status_blk->status_tx_quick_consumer_index0 != bp->tx_prod) {
4054 goto loopback_test_done;
4057 rx_idx = bp->status_blk->status_rx_quick_consumer_index0;
4058 if (rx_idx != rx_start_idx + num_pkts) {
4059 goto loopback_test_done;
4062 rx_buf = &bp->rx_buf_ring[rx_start_idx];
4063 rx_skb = rx_buf->skb;
4065 rx_hdr = (struct l2_fhdr *) rx_skb->data;
4066 skb_reserve(rx_skb, bp->rx_offset);
4068 pci_dma_sync_single_for_cpu(bp->pdev,
4069 pci_unmap_addr(rx_buf, mapping),
4070 bp->rx_buf_size, PCI_DMA_FROMDEVICE);
4072 if (rx_hdr->l2_fhdr_status &
4073 (L2_FHDR_ERRORS_BAD_CRC |
4074 L2_FHDR_ERRORS_PHY_DECODE |
4075 L2_FHDR_ERRORS_ALIGNMENT |
4076 L2_FHDR_ERRORS_TOO_SHORT |
4077 L2_FHDR_ERRORS_GIANT_FRAME)) {
4079 goto loopback_test_done;
4082 if ((rx_hdr->l2_fhdr_pkt_len - 4) != pkt_size) {
4083 goto loopback_test_done;
4086 for (i = 14; i < pkt_size; i++) {
4087 if (*(rx_skb->data + i) != (unsigned char) (i & 0xff)) {
4088 goto loopback_test_done;
4099 #define BNX2_MAC_LOOPBACK_FAILED 1
4100 #define BNX2_PHY_LOOPBACK_FAILED 2
4101 #define BNX2_LOOPBACK_FAILED (BNX2_MAC_LOOPBACK_FAILED | \
4102 BNX2_PHY_LOOPBACK_FAILED)
4105 bnx2_test_loopback(struct bnx2 *bp)
4109 if (!netif_running(bp->dev))
4110 return BNX2_LOOPBACK_FAILED;
4112 bnx2_reset_nic(bp, BNX2_DRV_MSG_CODE_RESET);
4113 spin_lock_bh(&bp->phy_lock);
4115 spin_unlock_bh(&bp->phy_lock);
4116 if (bnx2_run_loopback(bp, BNX2_MAC_LOOPBACK))
4117 rc |= BNX2_MAC_LOOPBACK_FAILED;
4118 if (bnx2_run_loopback(bp, BNX2_PHY_LOOPBACK))
4119 rc |= BNX2_PHY_LOOPBACK_FAILED;
4123 #define NVRAM_SIZE 0x200
4124 #define CRC32_RESIDUAL 0xdebb20e3
4127 bnx2_test_nvram(struct bnx2 *bp)
4129 u32 buf[NVRAM_SIZE / 4];
4130 u8 *data = (u8 *) buf;
4134 if ((rc = bnx2_nvram_read(bp, 0, data, 4)) != 0)
4135 goto test_nvram_done;
4137 magic = be32_to_cpu(buf[0]);
4138 if (magic != 0x669955aa) {
4140 goto test_nvram_done;
4143 if ((rc = bnx2_nvram_read(bp, 0x100, data, NVRAM_SIZE)) != 0)
4144 goto test_nvram_done;
4146 csum = ether_crc_le(0x100, data);
4147 if (csum != CRC32_RESIDUAL) {
4149 goto test_nvram_done;
4152 csum = ether_crc_le(0x100, data + 0x100);
4153 if (csum != CRC32_RESIDUAL) {
4162 bnx2_test_link(struct bnx2 *bp)
4166 spin_lock_bh(&bp->phy_lock);
4167 bnx2_read_phy(bp, MII_BMSR, &bmsr);
4168 bnx2_read_phy(bp, MII_BMSR, &bmsr);
4169 spin_unlock_bh(&bp->phy_lock);
4171 if (bmsr & BMSR_LSTATUS) {
4178 bnx2_test_intr(struct bnx2 *bp)
4183 if (!netif_running(bp->dev))
4186 status_idx = REG_RD(bp, BNX2_PCICFG_INT_ACK_CMD) & 0xffff;
4188 /* This register is not touched during run-time. */
4189 REG_WR(bp, BNX2_HC_COMMAND, bp->hc_cmd | BNX2_HC_COMMAND_COAL_NOW);
4190 REG_RD(bp, BNX2_HC_COMMAND);
4192 for (i = 0; i < 10; i++) {
4193 if ((REG_RD(bp, BNX2_PCICFG_INT_ACK_CMD) & 0xffff) !=
4199 current->state = TASK_INTERRUPTIBLE;
4200 schedule_timeout(HZ / 100);
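/* Periodic driver timer: sends the keep-alive pulse to the firmware and,
 * on 5706 SerDes ports, implements the parallel-detect workaround for
 * link partners that do not autonegotiate.
 */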
4209 bnx2_timer(unsigned long data)
4211 struct bnx2 *bp = (struct bnx2 *) data;
4214 if (!netif_running(bp->dev))
4217 if (atomic_read(&bp->intr_sem) != 0)
4218 goto bnx2_restart_timer;
4220 msg = (u32) ++bp->fw_drv_pulse_wr_seq;
4221 REG_WR_IND(bp, bp->shmem_base + BNX2_DRV_PULSE_MB, msg);
4223 bp->stats_blk->stat_FwRxDrop = REG_RD_IND(bp, BNX2_FW_RX_DROP_COUNT);
4225 if ((bp->phy_flags & PHY_SERDES_FLAG) &&
4226 (CHIP_NUM(bp) == CHIP_NUM_5706)) {
4228 spin_lock(&bp->phy_lock);
4229 if (bp->serdes_an_pending) {
4230 bp->serdes_an_pending--;
4232 else if ((bp->link_up == 0) && (bp->autoneg & AUTONEG_SPEED)) {
4235 bp->current_interval = bp->timer_interval;
4237 bnx2_read_phy(bp, MII_BMCR, &bmcr);
4239 if (bmcr & BMCR_ANENABLE) {
4242 bnx2_write_phy(bp, 0x1c, 0x7c00);
4243 bnx2_read_phy(bp, 0x1c, &phy1);
4245 bnx2_write_phy(bp, 0x17, 0x0f01);
4246 bnx2_read_phy(bp, 0x15, &phy2);
4247 bnx2_write_phy(bp, 0x17, 0x0f01);
4248 bnx2_read_phy(bp, 0x15, &phy2);
4250 if ((phy1 & 0x10) && /* SIGNAL DETECT */
4251 !(phy2 & 0x20)) { /* no CONFIG */
4253 bmcr &= ~BMCR_ANENABLE;
4254 bmcr |= BMCR_SPEED1000 |
4256 bnx2_write_phy(bp, MII_BMCR, bmcr);
4258 PHY_PARALLEL_DETECT_FLAG;
4262 else if ((bp->link_up) && (bp->autoneg & AUTONEG_SPEED) &&
4263 (bp->phy_flags & PHY_PARALLEL_DETECT_FLAG)) {
4266 bnx2_write_phy(bp, 0x17, 0x0f01);
4267 bnx2_read_phy(bp, 0x15, &phy2);
4271 bnx2_read_phy(bp, MII_BMCR, &bmcr);
4272 bmcr |= BMCR_ANENABLE;
4273 bnx2_write_phy(bp, MII_BMCR, bmcr);
4275 bp->phy_flags &= ~PHY_PARALLEL_DETECT_FLAG;
4280 bp->current_interval = bp->timer_interval;
4282 spin_unlock(&bp->phy_lock);
4286 mod_timer(&bp->timer, jiffies + bp->current_interval);
4289 /* Called with rtnl_lock */
4291 bnx2_open(struct net_device *dev)
4293 struct bnx2 *bp = netdev_priv(dev);
4296 bnx2_set_power_state(bp, PCI_D0);
4297 bnx2_disable_int(bp);
4299 rc = bnx2_alloc_mem(bp);
4303 #ifdef CONFIG_PCI_MSI
4304 if ((CHIP_ID(bp) != CHIP_ID_5706_A0) &&
4305 (CHIP_ID(bp) != CHIP_ID_5706_A1) &&
4308 if (pci_enable_msi(bp->pdev) == 0) {
4309 bp->flags |= USING_MSI_FLAG;
4310 rc = request_irq(bp->pdev->irq, bnx2_msi, 0, dev->name,
4314 rc = request_irq(bp->pdev->irq, bnx2_interrupt,
4315 SA_SHIRQ, dev->name, dev);
4321 rc = request_irq(bp->pdev->irq, bnx2_interrupt, SA_SHIRQ,
4329 rc = bnx2_init_nic(bp);
4332 free_irq(bp->pdev->irq, dev);
4333 #ifdef CONFIG_PCI_MSI
4334 if (bp->flags & USING_MSI_FLAG) {
4335 pci_disable_msi(bp->pdev);
4336 bp->flags &= ~USING_MSI_FLAG;
4344 mod_timer(&bp->timer, jiffies + bp->current_interval);
4346 atomic_set(&bp->intr_sem, 0);
4348 bnx2_enable_int(bp);
4350 #ifdef CONFIG_PCI_MSI
4351 if (bp->flags & USING_MSI_FLAG) {
4352 /* Test MSI to make sure it is working.
4353 * If the MSI test fails, fall back to INTx mode.
4355 if (bnx2_test_intr(bp) != 0) {
4356 printk(KERN_WARNING PFX "%s: No interrupt was generated"
4357 " using MSI, switching to INTx mode. Please"
4358 " report this failure to the PCI maintainer"
4359 " and include system chipset information.\n",
4362 bnx2_disable_int(bp);
4363 free_irq(bp->pdev->irq, dev);
4364 pci_disable_msi(bp->pdev);
4365 bp->flags &= ~USING_MSI_FLAG;
4367 rc = bnx2_init_nic(bp);
4370 rc = request_irq(bp->pdev->irq, bnx2_interrupt,
4371 SA_SHIRQ, dev->name, dev);
4376 del_timer_sync(&bp->timer);
4379 bnx2_enable_int(bp);
4382 if (bp->flags & USING_MSI_FLAG) {
4383 printk(KERN_INFO PFX "%s: using MSI\n", dev->name);
4387 netif_start_queue(dev);
4393 bnx2_reset_task(void *data)
4395 struct bnx2 *bp = data;
4397 if (!netif_running(bp->dev))
4400 bp->in_reset_task = 1;
4401 bnx2_netif_stop(bp);
4405 atomic_set(&bp->intr_sem, 1);
4406 bnx2_netif_start(bp);
4407 bp->in_reset_task = 0;
4411 bnx2_tx_timeout(struct net_device *dev)
4413 struct bnx2 *bp = netdev_priv(dev);
4415 /* This allows the netif to be shut down gracefully before resetting */
4416 #if (LINUX_VERSION_CODE >= 0x20600)
4417 schedule_work(&bp->reset_task);
4419 schedule_task(&bp->reset_task);
4424 /* Called with rtnl_lock */
4426 bnx2_vlan_rx_register(struct net_device *dev, struct vlan_group *vlgrp)
4428 struct bnx2 *bp = netdev_priv(dev);
4430 bnx2_netif_stop(bp);
4433 bnx2_set_rx_mode(dev);
4435 bnx2_netif_start(bp);
4438 /* Called with rtnl_lock */
4440 bnx2_vlan_rx_kill_vid(struct net_device *dev, uint16_t vid)
4442 struct bnx2 *bp = netdev_priv(dev);
4444 bnx2_netif_stop(bp);
4447 bp->vlgrp->vlan_devices[vid] = NULL;
4448 bnx2_set_rx_mode(dev);
4450 bnx2_netif_start(bp);
4454 /* Test for DMA addresses > 40-bit.
4455 * Only 64-bit systems without an IOMMU require DMA address checking.
4457 static inline int bnx2_40bit_overflow_test(struct bnx2 *bp, dma_addr_t mapping,
4460 #if defined(CONFIG_HIGHMEM) && (BITS_PER_LONG == 64)
4461 if (CHIP_NUM(bp) == CHIP_NUM_5708)
4462 return (((u64) mapping + len) > DMA_40BIT_MASK);
4469 #if defined(CONFIG_HIGHMEM) && (BITS_PER_LONG == 64)
4470 /* Work around 40-bit hardware DMA bugs. */
4471 static int bnx2_dma_hwbug_workaround(struct bnx2 *bp, struct sk_buff **skb,
4472 u16 *last_plus_one, u32 base_flags,
4475 struct sk_buff *new_skb = skb_copy(*skb, GFP_ATOMIC);
4476 dma_addr_t new_addr = 0;
4478 u16 hw_prod = *last_plus_one;
4479 u16 start, hw_start, prod;
4481 prod = TX_RING_IDX(hw_prod);
4482 start = prod - 1 - skb_shinfo(*skb)->nr_frags;
4483 hw_start = hw_prod - 1 - skb_shinfo(*skb)->nr_frags;
4484 start &= MAX_TX_DESC_CNT;
4495 /* New SKB is guaranteed to be linear. */
4496 new_addr = pci_map_single(bp->pdev, new_skb->data, new_skb->len,
4498 txbd = &bp->tx_desc_ring[start];
4500 txbd->tx_bd_haddr_hi = (u64) new_addr >> 32;
4501 txbd->tx_bd_haddr_lo = (u64) new_addr & 0xffffffff;
4502 txbd->tx_bd_mss_nbytes = new_skb->len | (mss << 16);
4503 txbd->tx_bd_vlan_tag_flags = base_flags | TX_BD_FLAGS_START |
4506 *last_plus_one = NEXT_TX_BD(hw_start);
4509 /* Now clean up the sw ring entries. */
4511 while (start != prod) {
4515 len = skb_headlen(*skb);
4517 len = skb_shinfo(*skb)->frags[i-1].size;
4519 pci_unmap_single(bp->pdev,
4520 pci_unmap_addr(&bp->tx_buf_ring[start],
4522 len, PCI_DMA_TODEVICE);
4524 bp->tx_buf_ring[start].skb = new_skb;
4525 pci_unmap_addr_set(&bp->tx_buf_ring[start], mapping,
4528 hw_start = NEXT_TX_BD(hw_start);
4529 start = TX_RING_IDX(hw_start);
4533 dev_kfree_skb(*skb);
4541 /* Called with netif_tx_lock.
4542 * hard_start_xmit is pseudo-lockless - a lock is only required when
4543 * the tx queue is full. This way, we get the benefit of lockless
4544 * operations most of the time without the complexity of handling
4545 * netif_stop_queue/wake_queue race conditions.
4548 bnx2_start_xmit(struct sk_buff *skb, struct net_device *dev)
4550 struct bnx2 *bp = netdev_priv(dev);
4553 struct sw_bd *tx_buf;
4554 u32 len, vlan_tag_flags, last_frag, mss;
4555 u16 prod, ring_prod;
4556 int i, would_hit_hwbug = 0;
4558 if (unlikely(bnx2_tx_avail(bp) < (skb_shinfo(skb)->nr_frags + 1))) {
4559 netif_stop_queue(dev);
4560 printk(KERN_ERR PFX "%s: BUG! Tx ring full when queue awake!\n",
4563 return NETDEV_TX_BUSY;
4565 len = skb_headlen(skb);
4567 ring_prod = TX_RING_IDX(prod);
4570 if (skb->ip_summed == CHECKSUM_HW) {
4571 vlan_tag_flags |= TX_BD_FLAGS_TCP_UDP_CKSUM;
4575 if (bp->vlgrp != 0 && vlan_tx_tag_present(skb)) {
4577 (TX_BD_FLAGS_VLAN_TAG | (vlan_tx_tag_get(skb) << 16));
4581 if ((mss = skb_shinfo(skb)->gso_size) &&
4582 (skb->len > (bp->dev->mtu + ETH_HLEN))) {
4583 u32 tcp_opt_len, ip_tcp_len;
4585 #if (LINUX_VERSION_CODE > 0x2060b)
4586 if (skb_header_cloned(skb) &&
4587 pskb_expand_head(skb, 0, 0, GFP_ATOMIC)) {
4589 return NETDEV_TX_OK;
4593 tcp_opt_len = ((skb->h.th->doff - 5) * 4);
4594 vlan_tag_flags |= TX_BD_FLAGS_SW_LSO;
4597 if (skb->h.th->doff > 5) {
4598 tcp_opt_len = (skb->h.th->doff - 5) << 2;
4600 ip_tcp_len = (skb->nh.iph->ihl << 2) + sizeof(struct tcphdr);
4602 skb->nh.iph->check = 0;
4603 skb->nh.iph->tot_len = htons(mss + ip_tcp_len + tcp_opt_len);
4605 ~csum_tcpudp_magic(skb->nh.iph->saddr,
4609 if (tcp_opt_len || (skb->nh.iph->ihl > 5)) {
4610 vlan_tag_flags |= ((skb->nh.iph->ihl - 5) +
4611 (tcp_opt_len >> 2)) << 8;
4620 mapping = pci_map_single(bp->pdev, skb->data, len, PCI_DMA_TODEVICE);
4622 tx_buf = &bp->tx_buf_ring[ring_prod];
4624 pci_unmap_addr_set(tx_buf, mapping, mapping);
4626 txbd = &bp->tx_desc_ring[ring_prod];
4628 txbd->tx_bd_haddr_hi = (u64) mapping >> 32;
4629 txbd->tx_bd_haddr_lo = (u64) mapping & 0xffffffff;
4630 txbd->tx_bd_mss_nbytes = len | (mss << 16);
4631 txbd->tx_bd_vlan_tag_flags = vlan_tag_flags | TX_BD_FLAGS_START;
4633 last_frag = skb_shinfo(skb)->nr_frags;
4635 for (i = 0; i < last_frag; i++) {
4636 skb_frag_t *frag = &skb_shinfo(skb)->frags[i];
4638 prod = NEXT_TX_BD(prod);
4639 ring_prod = TX_RING_IDX(prod);
4640 txbd = &bp->tx_desc_ring[ring_prod];
4643 mapping = pci_map_page(bp->pdev, frag->page, frag->page_offset,
4644 len, PCI_DMA_TODEVICE);
4645 pci_unmap_addr_set(&bp->tx_buf_ring[ring_prod],
4648 txbd->tx_bd_haddr_hi = (u64) mapping >> 32;
4649 txbd->tx_bd_haddr_lo = (u64) mapping & 0xffffffff;
4650 txbd->tx_bd_mss_nbytes = len | (mss << 16);
4651 txbd->tx_bd_vlan_tag_flags = vlan_tag_flags;
4653 if (bnx2_40bit_overflow_test(bp, mapping, len))
4654 would_hit_hwbug = 1;
4657 txbd->tx_bd_vlan_tag_flags |= TX_BD_FLAGS_END;
4659 prod = NEXT_TX_BD(prod);
4661 #if defined(CONFIG_HIGHMEM) && (BITS_PER_LONG == 64)
4662 if (unlikely(would_hit_hwbug)) {
4663 /* If the workaround fails due to memory/mapping
4664 * failure, silently drop this packet.
4666 if (bnx2_dma_hwbug_workaround(bp, &skb, &prod,
4667 vlan_tag_flags, mss))
4668 return NETDEV_TX_OK;
4673 bp->tx_prod_bseq += skb->len;
4675 REG_WR16(bp, MB_TX_CID_ADDR + BNX2_L2CTX_TX_HOST_BIDX, prod);
4676 REG_WR(bp, MB_TX_CID_ADDR + BNX2_L2CTX_TX_HOST_BSEQ, bp->tx_prod_bseq);
4681 dev->trans_start = jiffies;
4683 if (unlikely(bnx2_tx_avail(bp) <= MAX_SKB_FRAGS)) {
4684 spin_lock(&bp->tx_lock);
4685 netif_stop_queue(dev);
4687 if (bnx2_tx_avail(bp) > MAX_SKB_FRAGS)
4688 netif_wake_queue(dev);
4689 spin_unlock(&bp->tx_lock);
4692 return NETDEV_TX_OK;
4695 /* Called with rtnl_lock */
4697 bnx2_close(struct net_device *dev)
4699 struct bnx2 *bp = netdev_priv(dev);
4702 /* Calling flush_scheduled_work() may deadlock because
4703 * linkwatch_event() may be on the workqueue and it will try to get
4704 * the rtnl_lock which we are holding.
4706 while (bp->in_reset_task) {
4707 current->state = TASK_UNINTERRUPTIBLE;
4708 schedule_timeout(1);
4711 bnx2_netif_stop(bp);
4712 del_timer_sync(&bp->timer);
4713 if (bp->flags & NO_WOL_FLAG)
4714 reset_code = BNX2_DRV_MSG_CODE_UNLOAD_LNK_DN;
4716 reset_code = BNX2_DRV_MSG_CODE_SUSPEND_WOL;
4718 reset_code = BNX2_DRV_MSG_CODE_SUSPEND_NO_WOL;
4719 bnx2_reset_chip(bp, reset_code);
4720 free_irq(bp->pdev->irq, dev);
4721 #ifdef CONFIG_PCI_MSI
4722 if (bp->flags & USING_MSI_FLAG) {
4723 pci_disable_msi(bp->pdev);
4724 bp->flags &= ~USING_MSI_FLAG;
4730 netif_carrier_off(bp->dev);
4731 bnx2_set_power_state(bp, PCI_D3hot);
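/* The hardware keeps its statistics counters as hi/lo 32-bit pairs; on
 * 64-bit hosts GET_NET_STATS folds both halves into a single unsigned
 * long, while 32-bit hosts use the 32-bit variant.
 */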
4735 #define GET_NET_STATS64(ctr) \
4736 (unsigned long) ((unsigned long) (ctr##_hi) << 32) + \
4737 (unsigned long) (ctr##_lo)
4739 #define GET_NET_STATS32(ctr) \
4742 #if (BITS_PER_LONG == 64)
4743 #define GET_NET_STATS GET_NET_STATS64
4745 #define GET_NET_STATS GET_NET_STATS32
4748 static struct net_device_stats *
4749 bnx2_get_stats(struct net_device *dev)
4751 struct bnx2 *bp = netdev_priv(dev);
4752 struct statistics_block *stats_blk = bp->stats_blk;
4753 struct net_device_stats *net_stats = &bp->net_stats;
4755 if (bp->stats_blk == NULL) {
4758 net_stats->rx_packets =
4759 GET_NET_STATS(stats_blk->stat_IfHCInUcastPkts) +
4760 GET_NET_STATS(stats_blk->stat_IfHCInMulticastPkts) +
4761 GET_NET_STATS(stats_blk->stat_IfHCInBroadcastPkts);
4763 net_stats->tx_packets =
4764 GET_NET_STATS(stats_blk->stat_IfHCOutUcastPkts) +
4765 GET_NET_STATS(stats_blk->stat_IfHCOutMulticastPkts) +
4766 GET_NET_STATS(stats_blk->stat_IfHCOutBroadcastPkts);
4768 net_stats->rx_bytes =
4769 GET_NET_STATS(stats_blk->stat_IfHCInOctets);
4771 net_stats->tx_bytes =
4772 GET_NET_STATS(stats_blk->stat_IfHCOutOctets);
4774 net_stats->multicast =
4775 GET_NET_STATS(stats_blk->stat_IfHCOutMulticastPkts);
4777 net_stats->collisions =
4778 (unsigned long) stats_blk->stat_EtherStatsCollisions;
4780 net_stats->rx_length_errors =
4781 (unsigned long) (stats_blk->stat_EtherStatsUndersizePkts +
4782 stats_blk->stat_EtherStatsOverrsizePkts);
4784 net_stats->rx_over_errors =
4785 (unsigned long) stats_blk->stat_IfInMBUFDiscards;
4787 net_stats->rx_frame_errors =
4788 (unsigned long) stats_blk->stat_Dot3StatsAlignmentErrors;
4790 net_stats->rx_crc_errors =
4791 (unsigned long) stats_blk->stat_Dot3StatsFCSErrors;
4793 net_stats->rx_errors = net_stats->rx_length_errors +
4794 net_stats->rx_over_errors + net_stats->rx_frame_errors +
4795 net_stats->rx_crc_errors;
4797 net_stats->tx_aborted_errors =
4798 (unsigned long) (stats_blk->stat_Dot3StatsExcessiveCollisions +
4799 stats_blk->stat_Dot3StatsLateCollisions);
4801 if ((CHIP_NUM(bp) == CHIP_NUM_5706) ||
4802 (CHIP_ID(bp) == CHIP_ID_5708_A0))
4803 net_stats->tx_carrier_errors = 0;
4805 net_stats->tx_carrier_errors =
4807 stats_blk->stat_Dot3StatsCarrierSenseErrors;
4810 net_stats->tx_errors =
4812 stats_blk->stat_emac_tx_stat_dot3statsinternalmactransmiterrors
4814 net_stats->tx_aborted_errors +
4815 net_stats->tx_carrier_errors;
4817 net_stats->rx_missed_errors =
4818 (unsigned long) (stats_blk->stat_IfInMBUFDiscards +
4819 stats_blk->stat_FwRxDrop);
4824 /* All ethtool functions called with rtnl_lock */
4827 bnx2_get_settings(struct net_device *dev, struct ethtool_cmd *cmd)
4829 struct bnx2 *bp = netdev_priv(dev);
4831 cmd->supported = SUPPORTED_Autoneg;
4832 if (bp->phy_flags & PHY_SERDES_FLAG) {
4833 cmd->supported |= SUPPORTED_1000baseT_Full |
4836 cmd->port = PORT_FIBRE;
4839 cmd->supported |= SUPPORTED_10baseT_Half |
4840 SUPPORTED_10baseT_Full |
4841 SUPPORTED_100baseT_Half |
4842 SUPPORTED_100baseT_Full |
4843 SUPPORTED_1000baseT_Full |
4846 cmd->port = PORT_TP;
4849 cmd->advertising = bp->advertising;
4851 if (bp->autoneg & AUTONEG_SPEED) {
4852 cmd->autoneg = AUTONEG_ENABLE;
4855 cmd->autoneg = AUTONEG_DISABLE;
4858 if (netif_carrier_ok(dev)) {
4859 cmd->speed = bp->line_speed;
4860 cmd->duplex = bp->duplex;
4867 cmd->transceiver = XCVR_INTERNAL;
4868 cmd->phy_address = bp->phy_addr;
4874 bnx2_set_settings(struct net_device *dev, struct ethtool_cmd *cmd)
4876 struct bnx2 *bp = netdev_priv(dev);
4877 u8 autoneg = bp->autoneg;
4878 u8 req_duplex = bp->req_duplex;
4879 u16 req_line_speed = bp->req_line_speed;
4880 u32 advertising = bp->advertising;
4882 if (cmd->autoneg == AUTONEG_ENABLE) {
4883 autoneg |= AUTONEG_SPEED;
4885 cmd->advertising &= ETHTOOL_ALL_COPPER_SPEED;
4887 /* allow advertising a single speed */
4888 if ((cmd->advertising == ADVERTISED_10baseT_Half) ||
4889 (cmd->advertising == ADVERTISED_10baseT_Full) ||
4890 (cmd->advertising == ADVERTISED_100baseT_Half) ||
4891 (cmd->advertising == ADVERTISED_100baseT_Full)) {
4893 if (bp->phy_flags & PHY_SERDES_FLAG)
4896 advertising = cmd->advertising;
4899 else if (cmd->advertising == ADVERTISED_1000baseT_Full) {
4900 advertising = cmd->advertising;
4902 else if (cmd->advertising == ADVERTISED_1000baseT_Half) {
4906 if (bp->phy_flags & PHY_SERDES_FLAG) {
4907 advertising = ETHTOOL_ALL_FIBRE_SPEED;
4910 advertising = ETHTOOL_ALL_COPPER_SPEED;
4913 advertising |= ADVERTISED_Autoneg;
4916 if (bp->phy_flags & PHY_SERDES_FLAG) {
4917 if ((cmd->speed != SPEED_1000) ||
4918 (cmd->duplex != DUPLEX_FULL)) {
4922 else if (cmd->speed == SPEED_1000) {
4925 autoneg &= ~AUTONEG_SPEED;
4926 req_line_speed = cmd->speed;
4927 req_duplex = cmd->duplex;
4931 bp->autoneg = autoneg;
4932 bp->advertising = advertising;
4933 bp->req_line_speed = req_line_speed;
4934 bp->req_duplex = req_duplex;
4936 spin_lock_bh(&bp->phy_lock);
4940 spin_unlock_bh(&bp->phy_lock);
4946 bnx2_get_drvinfo(struct net_device *dev, struct ethtool_drvinfo *info)
4948 struct bnx2 *bp = netdev_priv(dev);
4950 strcpy(info->driver, DRV_MODULE_NAME);
4951 strcpy(info->version, DRV_MODULE_VERSION);
4952 strcpy(info->bus_info, pci_name(bp->pdev));
4953 info->fw_version[0] = ((bp->fw_ver & 0xff000000) >> 24) + '0';
4954 info->fw_version[2] = ((bp->fw_ver & 0xff0000) >> 16) + '0';
4955 info->fw_version[4] = ((bp->fw_ver & 0xff00) >> 8) + '0';
4956 info->fw_version[1] = info->fw_version[3] = '.';
4957 info->fw_version[5] = 0;
4960 #define BNX2_REGDUMP_LEN (32 * 1024)
4963 bnx2_get_regs_len(struct net_device *dev)
4965 return BNX2_REGDUMP_LEN;
4969 bnx2_get_regs(struct net_device *dev, struct ethtool_regs *regs, void *_p)
4971 u32 *p = _p, i, offset;
4973 struct bnx2 *bp = netdev_priv(dev);
4974 u32 reg_boundaries[] = { 0x0000, 0x0098, 0x0400, 0x045c,
4975 0x0800, 0x0880, 0x0c00, 0x0c10,
4976 0x0c30, 0x0d08, 0x1000, 0x101c,
4977 0x1040, 0x1048, 0x1080, 0x10a4,
4978 0x1400, 0x1490, 0x1498, 0x14f0,
4979 0x1500, 0x155c, 0x1580, 0x15dc,
4980 0x1600, 0x1658, 0x1680, 0x16d8,
4981 0x1800, 0x1820, 0x1840, 0x1854,
4982 0x1880, 0x1894, 0x1900, 0x1984,
4983 0x1c00, 0x1c0c, 0x1c40, 0x1c54,
4984 0x1c80, 0x1c94, 0x1d00, 0x1d84,
4985 0x2000, 0x2030, 0x23c0, 0x2400,
4986 0x2800, 0x2820, 0x2830, 0x2850,
4987 0x2b40, 0x2c10, 0x2fc0, 0x3058,
4988 0x3c00, 0x3c94, 0x4000, 0x4010,
4989 0x4080, 0x4090, 0x43c0, 0x4458,
4990 0x4c00, 0x4c18, 0x4c40, 0x4c54,
4991 0x4fc0, 0x5010, 0x53c0, 0x5444,
4992 0x5c00, 0x5c18, 0x5c80, 0x5c90,
4993 0x5fc0, 0x6000, 0x6400, 0x6428,
4994 0x6800, 0x6848, 0x684c, 0x6860,
4995 0x6888, 0x6910, 0x8000 };
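/* Dump the registers between each [start, end) pair in reg_boundaries,
 * skipping the holes between ranges; because the output buffer is
 * indexed by register offset, the skipped areas read back as zero.
 */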
4999 memset(p, 0, BNX2_REGDUMP_LEN);
5001 if (!netif_running(bp->dev))
5005 offset = reg_boundaries[0];
5007 while (offset < BNX2_REGDUMP_LEN) {
5008 *p++ = REG_RD(bp, offset);
5010 if (offset == reg_boundaries[i + 1]) {
5011 offset = reg_boundaries[i + 2];
5012 p = (u32 *) (orig_p + offset);
5019 bnx2_get_wol(struct net_device *dev, struct ethtool_wolinfo *wol)
5021 struct bnx2 *bp = netdev_priv(dev);
5023 if (bp->flags & NO_WOL_FLAG) {
5028 wol->supported = WAKE_MAGIC;
5030 wol->wolopts = WAKE_MAGIC;
5034 memset(&wol->sopass, 0, sizeof(wol->sopass));
5038 bnx2_set_wol(struct net_device *dev, struct ethtool_wolinfo *wol)
5040 struct bnx2 *bp = netdev_priv(dev);
5042 if (wol->wolopts & ~WAKE_MAGIC)
5045 if (wol->wolopts & WAKE_MAGIC) {
5046 if (bp->flags & NO_WOL_FLAG)
5058 bnx2_nway_reset(struct net_device *dev)
5060 struct bnx2 *bp = netdev_priv(dev);
5063 if (!(bp->autoneg & AUTONEG_SPEED)) {
5067 spin_lock_bh(&bp->phy_lock);
5069 /* Force a link down visible on the other side */
5070 if (bp->phy_flags & PHY_SERDES_FLAG) {
5071 bnx2_write_phy(bp, MII_BMCR, BMCR_LOOPBACK);
5072 spin_unlock_bh(&bp->phy_lock);
5074 current->state = TASK_UNINTERRUPTIBLE;
5075 schedule_timeout(HZ / 50);
5077 spin_lock_bh(&bp->phy_lock);
5078 if (CHIP_NUM(bp) == CHIP_NUM_5706) {
5079 bp->current_interval = SERDES_AN_TIMEOUT;
5080 bp->serdes_an_pending = 1;
5081 mod_timer(&bp->timer, jiffies + bp->current_interval);
5085 bnx2_read_phy(bp, MII_BMCR, &bmcr);
5086 bmcr &= ~BMCR_LOOPBACK;
5087 bnx2_write_phy(bp, MII_BMCR, bmcr | BMCR_ANRESTART | BMCR_ANENABLE);
5089 spin_unlock_bh(&bp->phy_lock);
5094 #if (LINUX_VERSION_CODE >= 0x20418)
5096 bnx2_get_eeprom_len(struct net_device *dev)
5098 struct bnx2 *bp = netdev_priv(dev);
5100 if (bp->flash_info == NULL)
5103 return (int) bp->flash_size;
5107 #ifdef ETHTOOL_GEEPROM
5109 bnx2_get_eeprom(struct net_device *dev, struct ethtool_eeprom *eeprom,
5112 struct bnx2 *bp = netdev_priv(dev);
5115 /* parameters already validated in ethtool_get_eeprom */
5117 rc = bnx2_nvram_read(bp, eeprom->offset, eebuf, eeprom->len);
5123 #ifdef ETHTOOL_SEEPROM
5125 bnx2_set_eeprom(struct net_device *dev, struct ethtool_eeprom *eeprom,
5128 struct bnx2 *bp = netdev_priv(dev);
5131 /* parameters already validated in ethtool_set_eeprom */
5133 rc = bnx2_nvram_write(bp, eeprom->offset, eebuf, eeprom->len);
5140 bnx2_get_coalesce(struct net_device *dev, struct ethtool_coalesce *coal)
5142 struct bnx2 *bp = netdev_priv(dev);
5144 memset(coal, 0, sizeof(struct ethtool_coalesce));
5146 coal->rx_coalesce_usecs = bp->rx_ticks;
5147 coal->rx_max_coalesced_frames = bp->rx_quick_cons_trip;
5148 coal->rx_coalesce_usecs_irq = bp->rx_ticks_int;
5149 coal->rx_max_coalesced_frames_irq = bp->rx_quick_cons_trip_int;
5151 coal->tx_coalesce_usecs = bp->tx_ticks;
5152 coal->tx_max_coalesced_frames = bp->tx_quick_cons_trip;
5153 coal->tx_coalesce_usecs_irq = bp->tx_ticks_int;
5154 coal->tx_max_coalesced_frames_irq = bp->tx_quick_cons_trip_int;
5156 coal->stats_block_coalesce_usecs = bp->stats_ticks;
5162 bnx2_set_coalesce(struct net_device *dev, struct ethtool_coalesce *coal)
5164 struct bnx2 *bp = netdev_priv(dev);
5166 bp->rx_ticks = (u16) coal->rx_coalesce_usecs;
5167 if (bp->rx_ticks > 0x3ff) bp->rx_ticks = 0x3ff;
5169 bp->rx_quick_cons_trip = (u16) coal->rx_max_coalesced_frames;
5170 if (bp->rx_quick_cons_trip > 0xff) bp->rx_quick_cons_trip = 0xff;
5172 bp->rx_ticks_int = (u16) coal->rx_coalesce_usecs_irq;
5173 if (bp->rx_ticks_int > 0x3ff) bp->rx_ticks_int = 0x3ff;
5175 bp->rx_quick_cons_trip_int = (u16) coal->rx_max_coalesced_frames_irq;
5176 if (bp->rx_quick_cons_trip_int > 0xff)
5177 bp->rx_quick_cons_trip_int = 0xff;
5179 bp->tx_ticks = (u16) coal->tx_coalesce_usecs;
5180 if (bp->tx_ticks > 0x3ff) bp->tx_ticks = 0x3ff;
5182 bp->tx_quick_cons_trip = (u16) coal->tx_max_coalesced_frames;
5183 if (bp->tx_quick_cons_trip > 0xff) bp->tx_quick_cons_trip = 0xff;
5185 bp->tx_ticks_int = (u16) coal->tx_coalesce_usecs_irq;
5186 if (bp->tx_ticks_int > 0x3ff) bp->tx_ticks_int = 0x3ff;
5188 bp->tx_quick_cons_trip_int = (u16) coal->tx_max_coalesced_frames_irq;
5189 if (bp->tx_quick_cons_trip_int > 0xff) bp->tx_quick_cons_trip_int = 0xff;
5192 bp->stats_ticks = coal->stats_block_coalesce_usecs;
5193 if (bp->stats_ticks > 0xffff00) bp->stats_ticks = 0xffff00;
5194 bp->stats_ticks &= 0xffff00;
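/* The low 8 bits of the statistics tick count are masked off; presumably
 * they are reserved in the host-coalescing stats ticks register, making the
 * effective granularity 256 usec. */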
5196 if (netif_running(bp->dev)) {
5197 bnx2_netif_stop(bp);
5199 bnx2_netif_start(bp);
5206 bnx2_get_ringparam(struct net_device *dev, struct ethtool_ringparam *ering)
5208 struct bnx2 *bp = netdev_priv(dev);
5210 ering->rx_max_pending = MAX_TOTAL_RX_DESC_CNT;
5211 ering->rx_mini_max_pending = 0;
5212 ering->rx_jumbo_max_pending = 0;
5214 ering->rx_pending = bp->rx_ring_size;
5215 ering->rx_mini_pending = 0;
5216 ering->rx_jumbo_pending = 0;
5218 ering->tx_max_pending = MAX_TX_DESC_CNT;
5219 ering->tx_pending = bp->tx_ring_size;
5223 bnx2_set_ringparam(struct net_device *dev, struct ethtool_ringparam *ering)
5225 struct bnx2 *bp = netdev_priv(dev);
5227 if ((ering->rx_pending > MAX_TOTAL_RX_DESC_CNT) ||
5228 (ering->tx_pending > MAX_TX_DESC_CNT) ||
5229 (ering->tx_pending <= MAX_SKB_FRAGS)) {
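/* A single skb can consume up to MAX_SKB_FRAGS + 1 buffer descriptors
 * (one per fragment plus the linear part), so the TX ring must be larger
 * than MAX_SKB_FRAGS. */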
5233 if (netif_running(bp->dev)) {
5234 bnx2_netif_stop(bp);
5235 bnx2_reset_chip(bp, BNX2_DRV_MSG_CODE_RESET);
5240 bnx2_set_rx_ring_size(bp, ering->rx_pending);
5241 bp->tx_ring_size = ering->tx_pending;
5243 if (netif_running(bp->dev)) {
5246 rc = bnx2_alloc_mem(bp);
5250 bnx2_netif_start(bp);
5257 bnx2_get_pauseparam(struct net_device *dev, struct ethtool_pauseparam *epause)
5259 struct bnx2 *bp = netdev_priv(dev);
5261 epause->autoneg = ((bp->autoneg & AUTONEG_FLOW_CTRL) != 0);
5262 epause->rx_pause = ((bp->flow_ctrl & FLOW_CTRL_RX) != 0);
5263 epause->tx_pause = ((bp->flow_ctrl & FLOW_CTRL_TX) != 0);
5267 bnx2_set_pauseparam(struct net_device *dev, struct ethtool_pauseparam *epause)
5269 struct bnx2 *bp = netdev_priv(dev);
5271 bp->req_flow_ctrl = 0;
5272 if (epause->rx_pause)
5273 bp->req_flow_ctrl |= FLOW_CTRL_RX;
5274 if (epause->tx_pause)
5275 bp->req_flow_ctrl |= FLOW_CTRL_TX;
5277 if (epause->autoneg) {
5278 bp->autoneg |= AUTONEG_FLOW_CTRL;
5281 bp->autoneg &= ~AUTONEG_FLOW_CTRL;
5284 spin_lock_bh(&bp->phy_lock);
5288 spin_unlock_bh(&bp->phy_lock);
5294 bnx2_get_rx_csum(struct net_device *dev)
5296 struct bnx2 *bp = netdev_priv(dev);
5302 bnx2_set_rx_csum(struct net_device *dev, u32 data)
5304 struct bnx2 *bp = netdev_priv(dev);
5310 #define BNX2_NUM_STATS 46
5313 char string[ETH_GSTRING_LEN];
5314 } bnx2_stats_str_arr[BNX2_NUM_STATS] = {
5316 { "rx_error_bytes" },
5318 { "tx_error_bytes" },
5319 { "rx_ucast_packets" },
5320 { "rx_mcast_packets" },
5321 { "rx_bcast_packets" },
5322 { "tx_ucast_packets" },
5323 { "tx_mcast_packets" },
5324 { "tx_bcast_packets" },
5325 { "tx_mac_errors" },
5326 { "tx_carrier_errors" },
5327 { "rx_crc_errors" },
5328 { "rx_align_errors" },
5329 { "tx_single_collisions" },
5330 { "tx_multi_collisions" },
5332 { "tx_excess_collisions" },
5333 { "tx_late_collisions" },
5334 { "tx_total_collisions" },
5337 { "rx_undersize_packets" },
5338 { "rx_oversize_packets" },
5339 { "rx_64_byte_packets" },
5340 { "rx_65_to_127_byte_packets" },
5341 { "rx_128_to_255_byte_packets" },
5342 { "rx_256_to_511_byte_packets" },
5343 { "rx_512_to_1023_byte_packets" },
5344 { "rx_1024_to_1522_byte_packets" },
5345 { "rx_1523_to_9022_byte_packets" },
5346 { "tx_64_byte_packets" },
5347 { "tx_65_to_127_byte_packets" },
5348 { "tx_128_to_255_byte_packets" },
5349 { "tx_256_to_511_byte_packets" },
5350 { "tx_512_to_1023_byte_packets" },
5351 { "tx_1024_to_1522_byte_packets" },
5352 { "tx_1523_to_9022_byte_packets" },
5353 { "rx_xon_frames" },
5354 { "rx_xoff_frames" },
5355 { "tx_xon_frames" },
5356 { "tx_xoff_frames" },
5357 { "rx_mac_ctrl_frames" },
5358 { "rx_filtered_packets" },
5360 { "rx_fw_discards" },
5363 #define STATS_OFFSET32(offset_name) (offsetof(struct statistics_block, offset_name) / 4)
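/* The statistics block is read as an array of u32s in
 * bnx2_get_ethtool_stats(), so this macro turns a field's byte offset into
 * a 32-bit word index, e.g. STATS_OFFSET32(stat_IfHCInOctets_hi) is the
 * index of that counter's high word. */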
5365 static const unsigned long bnx2_stats_offset_arr[BNX2_NUM_STATS] = {
5366 STATS_OFFSET32(stat_IfHCInOctets_hi),
5367 STATS_OFFSET32(stat_IfHCInBadOctets_hi),
5368 STATS_OFFSET32(stat_IfHCOutOctets_hi),
5369 STATS_OFFSET32(stat_IfHCOutBadOctets_hi),
5370 STATS_OFFSET32(stat_IfHCInUcastPkts_hi),
5371 STATS_OFFSET32(stat_IfHCInMulticastPkts_hi),
5372 STATS_OFFSET32(stat_IfHCInBroadcastPkts_hi),
5373 STATS_OFFSET32(stat_IfHCOutUcastPkts_hi),
5374 STATS_OFFSET32(stat_IfHCOutMulticastPkts_hi),
5375 STATS_OFFSET32(stat_IfHCOutBroadcastPkts_hi),
5376 STATS_OFFSET32(stat_emac_tx_stat_dot3statsinternalmactransmiterrors),
5377 STATS_OFFSET32(stat_Dot3StatsCarrierSenseErrors),
5378 STATS_OFFSET32(stat_Dot3StatsFCSErrors),
5379 STATS_OFFSET32(stat_Dot3StatsAlignmentErrors),
5380 STATS_OFFSET32(stat_Dot3StatsSingleCollisionFrames),
5381 STATS_OFFSET32(stat_Dot3StatsMultipleCollisionFrames),
5382 STATS_OFFSET32(stat_Dot3StatsDeferredTransmissions),
5383 STATS_OFFSET32(stat_Dot3StatsExcessiveCollisions),
5384 STATS_OFFSET32(stat_Dot3StatsLateCollisions),
5385 STATS_OFFSET32(stat_EtherStatsCollisions),
5386 STATS_OFFSET32(stat_EtherStatsFragments),
5387 STATS_OFFSET32(stat_EtherStatsJabbers),
5388 STATS_OFFSET32(stat_EtherStatsUndersizePkts),
5389 STATS_OFFSET32(stat_EtherStatsOverrsizePkts),
5390 STATS_OFFSET32(stat_EtherStatsPktsRx64Octets),
5391 STATS_OFFSET32(stat_EtherStatsPktsRx65Octetsto127Octets),
5392 STATS_OFFSET32(stat_EtherStatsPktsRx128Octetsto255Octets),
5393 STATS_OFFSET32(stat_EtherStatsPktsRx256Octetsto511Octets),
5394 STATS_OFFSET32(stat_EtherStatsPktsRx512Octetsto1023Octets),
5395 STATS_OFFSET32(stat_EtherStatsPktsRx1024Octetsto1522Octets),
5396 STATS_OFFSET32(stat_EtherStatsPktsRx1523Octetsto9022Octets),
5397 STATS_OFFSET32(stat_EtherStatsPktsTx64Octets),
5398 STATS_OFFSET32(stat_EtherStatsPktsTx65Octetsto127Octets),
5399 STATS_OFFSET32(stat_EtherStatsPktsTx128Octetsto255Octets),
5400 STATS_OFFSET32(stat_EtherStatsPktsTx256Octetsto511Octets),
5401 STATS_OFFSET32(stat_EtherStatsPktsTx512Octetsto1023Octets),
5402 STATS_OFFSET32(stat_EtherStatsPktsTx1024Octetsto1522Octets),
5403 STATS_OFFSET32(stat_EtherStatsPktsTx1523Octetsto9022Octets),
5404 STATS_OFFSET32(stat_XonPauseFramesReceived),
5405 STATS_OFFSET32(stat_XoffPauseFramesReceived),
5406 STATS_OFFSET32(stat_OutXonSent),
5407 STATS_OFFSET32(stat_OutXoffSent),
5408 STATS_OFFSET32(stat_MacControlFramesReceived),
5409 STATS_OFFSET32(stat_IfInFramesL2FilterDiscards),
5410 STATS_OFFSET32(stat_IfInMBUFDiscards),
5411 STATS_OFFSET32(stat_FwRxDrop),
5414 /* stat_IfHCInBadOctets and stat_Dot3StatsCarrierSenseErrors are
5415 * skipped because of errata.
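/* Per-counter widths used by bnx2_get_ethtool_stats(): 0 means the counter
 * is skipped and reported as zero, 4 means a single 32-bit word, and 8 means
 * a 64-bit counter stored as two consecutive 32-bit words (high word first). */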
5417 static u8 bnx2_5706_stats_len_arr[BNX2_NUM_STATS] = {
5418 8,0,8,8,8,8,8,8,8,8,
5419 4,0,4,4,4,4,4,4,4,4,
5420 4,4,4,4,4,4,4,4,4,4,
5421 4,4,4,4,4,4,4,4,4,4,
5425 static u8 bnx2_5708_stats_len_arr[BNX2_NUM_STATS] = {
5426 8,0,8,8,8,8,8,8,8,8,
5427 4,4,4,4,4,4,4,4,4,4,
5428 4,4,4,4,4,4,4,4,4,4,
5429 4,4,4,4,4,4,4,4,4,4,
5433 #define BNX2_NUM_TESTS 6
5436 char string[ETH_GSTRING_LEN];
5437 } bnx2_tests_str_arr[BNX2_NUM_TESTS] = {
5438 { "register_test (offline)" },
5439 { "memory_test (offline)" },
5440 { "loopback_test (offline)" },
5441 { "nvram_test (online)" },
5442 { "interrupt_test (online)" },
5443 { "link_test (online)" },
5447 bnx2_self_test_count(struct net_device *dev)
5449 return BNX2_NUM_TESTS;
5453 bnx2_self_test(struct net_device *dev, struct ethtool_test *etest, u64 *buf)
5455 struct bnx2 *bp = netdev_priv(dev);
5457 memset(buf, 0, sizeof(u64) * BNX2_NUM_TESTS);
5458 if (etest->flags & ETH_TEST_FL_OFFLINE) {
5459 bnx2_netif_stop(bp);
5460 bnx2_reset_chip(bp, BNX2_DRV_MSG_CODE_DIAG);
5463 if (bnx2_test_registers(bp) != 0) {
5465 etest->flags |= ETH_TEST_FL_FAILED;
5467 if (bnx2_test_memory(bp) != 0) {
5469 etest->flags |= ETH_TEST_FL_FAILED;
5471 if ((buf[2] = bnx2_test_loopback(bp)) != 0)
5472 etest->flags |= ETH_TEST_FL_FAILED;
5474 if (!netif_running(bp->dev)) {
5475 bnx2_reset_chip(bp, BNX2_DRV_MSG_CODE_RESET);
5479 bnx2_netif_start(bp);
5482 /* wait for link up */
5483 current->state = TASK_INTERRUPTIBLE;
5484 schedule_timeout(3 * HZ);
5485 if ((!bp->link_up) && !(bp->phy_flags & PHY_SERDES_FLAG)) {
5486 current->state = TASK_INTERRUPTIBLE;
5487 schedule_timeout(4 * HZ);
5491 if (bnx2_test_nvram(bp) != 0) {
5493 etest->flags |= ETH_TEST_FL_FAILED;
5495 if (bnx2_test_intr(bp) != 0) {
5497 etest->flags |= ETH_TEST_FL_FAILED;
5500 if (bnx2_test_link(bp) != 0) {
5502 etest->flags |= ETH_TEST_FL_FAILED;
5508 bnx2_get_strings(struct net_device *dev, u32 stringset, u8 *buf)
5510 switch (stringset) {
5512 memcpy(buf, bnx2_stats_str_arr,
5513 sizeof(bnx2_stats_str_arr));
5516 memcpy(buf, bnx2_tests_str_arr,
5517 sizeof(bnx2_tests_str_arr));
5523 bnx2_get_stats_count(struct net_device *dev)
5525 return BNX2_NUM_STATS;
5529 bnx2_get_ethtool_stats(struct net_device *dev,
5530 struct ethtool_stats *stats, u64 *buf)
5532 struct bnx2 *bp = netdev_priv(dev);
5534 u32 *hw_stats = (u32 *) bp->stats_blk;
5535 u8 *stats_len_arr = NULL;
5537 if (hw_stats == NULL) {
5538 memset(buf, 0, sizeof(u64) * BNX2_NUM_STATS);
5542 if ((CHIP_ID(bp) == CHIP_ID_5706_A0) ||
5543 (CHIP_ID(bp) == CHIP_ID_5706_A1) ||
5544 (CHIP_ID(bp) == CHIP_ID_5706_A2) ||
5545 (CHIP_ID(bp) == CHIP_ID_5708_A0))
5546 stats_len_arr = bnx2_5706_stats_len_arr;
5548 stats_len_arr = bnx2_5708_stats_len_arr;
5550 for (i = 0; i < BNX2_NUM_STATS; i++) {
5551 if (stats_len_arr[i] == 0) {
5552 /* skip this counter */
5556 if (stats_len_arr[i] == 4) {
5557 /* 4-byte counter */
5559 buf[i] = (u64) *(hw_stats + bnx2_stats_offset_arr[i]);
5562 /* 8-byte counter */
5563 buf[i] = (((u64) *(hw_stats +
5564 bnx2_stats_offset_arr[i])) << 32) +
5565 *(hw_stats + bnx2_stats_offset_arr[i] + 1);
5570 bnx2_phys_id(struct net_device *dev, u32 data)
5572 struct bnx2 *bp = netdev_priv(dev);
5579 save = REG_RD(bp, BNX2_MISC_CFG);
5580 REG_WR(bp, BNX2_MISC_CFG, BNX2_MISC_CFG_LEDMODE_MAC);
5582 for (i = 0; i < (data * 2); i++) {
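/* Alternate every half second between all LED outputs forced off (override
 * bit only) and all speed/traffic LEDs forced on, so the port LED blinks
 * for roughly 'data' seconds or until a signal is pending. */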
5584 REG_WR(bp, BNX2_EMAC_LED, BNX2_EMAC_LED_OVERRIDE);
5587 REG_WR(bp, BNX2_EMAC_LED, BNX2_EMAC_LED_OVERRIDE |
5588 BNX2_EMAC_LED_1000MB_OVERRIDE |
5589 BNX2_EMAC_LED_100MB_OVERRIDE |
5590 BNX2_EMAC_LED_10MB_OVERRIDE |
5591 BNX2_EMAC_LED_TRAFFIC_OVERRIDE |
5592 BNX2_EMAC_LED_TRAFFIC);
5594 current->state = TASK_INTERRUPTIBLE;
5595 schedule_timeout(HZ / 2);
5596 if (signal_pending(current))
5599 REG_WR(bp, BNX2_EMAC_LED, 0);
5600 REG_WR(bp, BNX2_MISC_CFG, save);
5604 static struct ethtool_ops bnx2_ethtool_ops = {
5605 .get_settings = bnx2_get_settings,
5606 .set_settings = bnx2_set_settings,
5607 .get_drvinfo = bnx2_get_drvinfo,
5608 .get_regs_len = bnx2_get_regs_len,
5609 .get_regs = bnx2_get_regs,
5610 .get_wol = bnx2_get_wol,
5611 .set_wol = bnx2_set_wol,
5612 .nway_reset = bnx2_nway_reset,
5613 .get_link = ethtool_op_get_link,
5614 #if (LINUX_VERSION_CODE >= 0x20418)
5615 .get_eeprom_len = bnx2_get_eeprom_len,
5617 #ifdef ETHTOOL_GEEPROM
5618 .get_eeprom = bnx2_get_eeprom,
5620 #ifdef ETHTOOL_SEEPROM
5621 .set_eeprom = bnx2_set_eeprom,
5623 .get_coalesce = bnx2_get_coalesce,
5624 .set_coalesce = bnx2_set_coalesce,
5625 .get_ringparam = bnx2_get_ringparam,
5626 .set_ringparam = bnx2_set_ringparam,
5627 .get_pauseparam = bnx2_get_pauseparam,
5628 .set_pauseparam = bnx2_set_pauseparam,
5629 .get_rx_csum = bnx2_get_rx_csum,
5630 .set_rx_csum = bnx2_set_rx_csum,
5631 .get_tx_csum = ethtool_op_get_tx_csum,
5632 #if (LINUX_VERSION_CODE >= 0x20418)
5633 .set_tx_csum = ethtool_op_set_tx_csum,
5635 .get_sg = ethtool_op_get_sg,
5636 .set_sg = ethtool_op_set_sg,
5638 .get_tso = ethtool_op_get_tso,
5639 .set_tso = ethtool_op_set_tso,
5641 .self_test_count = bnx2_self_test_count,
5642 .self_test = bnx2_self_test,
5643 .get_strings = bnx2_get_strings,
5644 .phys_id = bnx2_phys_id,
5645 .get_stats_count = bnx2_get_stats_count,
5646 .get_ethtool_stats = bnx2_get_ethtool_stats,
5647 #ifdef ETHTOOL_GPERMADDR
5648 .get_perm_addr = ethtool_op_get_perm_addr,
5652 /* Called with rtnl_lock */
5654 bnx2_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd)
5656 #if (LINUX_VERSION_CODE >= 0x020607)
5657 struct mii_ioctl_data *data = if_mii(ifr);
5659 struct mii_ioctl_data *data = (struct mii_ioctl_data *) &ifr->ifr_ifru;
5661 struct bnx2 *bp = netdev_priv(dev);
5666 data->phy_id = bp->phy_addr;
5672 spin_lock_bh(&bp->phy_lock);
5673 err = bnx2_read_phy(bp, data->reg_num & 0x1f, &mii_regval);
5674 spin_unlock_bh(&bp->phy_lock);
5676 data->val_out = mii_regval;
5682 if (!capable(CAP_NET_ADMIN))
5685 spin_lock_bh(&bp->phy_lock);
5686 err = bnx2_write_phy(bp, data->reg_num & 0x1f, data->val_in);
5687 spin_unlock_bh(&bp->phy_lock);
5698 /* Called with rtnl_lock */
5700 bnx2_change_mac_addr(struct net_device *dev, void *p)
5702 struct sockaddr *addr = p;
5703 struct bnx2 *bp = netdev_priv(dev);
5705 if (!is_valid_ether_addr(addr->sa_data))
5708 memcpy(dev->dev_addr, addr->sa_data, dev->addr_len);
5709 if (netif_running(dev))
5710 bnx2_set_mac_addr(bp);
5715 /* Called with rtnl_lock */
5717 bnx2_change_mtu(struct net_device *dev, int new_mtu)
5719 struct bnx2 *bp = netdev_priv(dev);
5721 if (((new_mtu + ETH_HLEN) > MAX_ETHERNET_JUMBO_PACKET_SIZE) ||
5722 ((new_mtu + ETH_HLEN) < MIN_ETHERNET_PACKET_SIZE))
5726 if (netif_running(dev)) {
5727 bnx2_netif_stop(bp);
5731 bnx2_netif_start(bp);
5736 #if defined(HAVE_POLL_CONTROLLER) || defined(CONFIG_NET_POLL_CONTROLLER)
5738 poll_bnx2(struct net_device *dev)
5740 struct bnx2 *bp = netdev_priv(dev);
5742 #if defined(RED_HAT_LINUX_KERNEL) && (LINUX_VERSION_CODE < 0x020600)
5744 bnx2_interrupt(bp->pdev->irq, dev, NULL);
5745 if (dev->poll_list.prev) {
5748 bnx2_poll(dev, &budget);
5754 disable_irq(bp->pdev->irq);
5755 bnx2_interrupt(bp->pdev->irq, dev, NULL);
5756 enable_irq(bp->pdev->irq);
5761 static int __devinit
5762 bnx2_init_board(struct pci_dev *pdev, struct net_device *dev)
5765 unsigned long mem_len;
5768 u64 dma_mask, persist_dma_mask;
5770 SET_MODULE_OWNER(dev);
5771 #if (LINUX_VERSION_CODE >= 0x20419)
5772 SET_NETDEV_DEV(dev, &pdev->dev);
5774 bp = netdev_priv(dev);
5779 /* enable device (incl. PCI PM wakeup), and bus-mastering */
5780 rc = pci_enable_device(pdev);
5782 printk(KERN_ERR PFX "Cannot enable PCI device, aborting.\n");
5786 if (!(pci_resource_flags(pdev, 0) & IORESOURCE_MEM)) {
5787 printk(KERN_ERR PFX "Cannot find PCI device base address, "
5790 goto err_out_disable;
5793 rc = pci_request_regions(pdev, DRV_MODULE_NAME);
5795 printk(KERN_ERR PFX "Cannot obtain PCI resources, aborting.\n");
5796 goto err_out_disable;
5799 pci_set_master(pdev);
5801 bp->pm_cap = pci_find_capability(pdev, PCI_CAP_ID_PM);
5802 if (bp->pm_cap == 0) {
5803 printk(KERN_ERR PFX "Cannot find power management capability, "
5806 goto err_out_release;
5809 bp->pcix_cap = pci_find_capability(pdev, PCI_CAP_ID_PCIX);
5810 if (bp->pcix_cap == 0) {
5811 printk(KERN_ERR PFX "Cannot find PCIX capability, aborting.\n");
5813 goto err_out_release;
5819 spin_lock_init(&bp->phy_lock);
5820 spin_lock_init(&bp->tx_lock);
5821 #if (LINUX_VERSION_CODE >= 0x20600)
5822 INIT_WORK(&bp->reset_task, bnx2_reset_task, bp);
5824 INIT_TQUEUE(&bp->reset_task, bnx2_reset_task, bp);
5827 dev->base_addr = dev->mem_start = pci_resource_start(pdev, 0);
5828 mem_len = MB_GET_CID_ADDR(17);
5829 dev->mem_end = dev->mem_start + mem_len;
5830 dev->irq = pdev->irq;
5832 bp->regview = ioremap_nocache(dev->base_addr, mem_len);
5835 printk(KERN_ERR PFX "Cannot map register space, aborting.\n");
5837 goto err_out_release;
5840 /* Configure byte swap and enable write to the reg_window registers.
5841 * Rely on the CPU to do target byte swapping on big-endian systems;
5842 * the chip's target access swapping will not swap all accesses.
5844 pci_write_config_dword(bp->pdev, BNX2_PCICFG_MISC_CONFIG,
5845 BNX2_PCICFG_MISC_CONFIG_REG_WINDOW_ENA |
5846 BNX2_PCICFG_MISC_CONFIG_TARGET_MB_WORD_SWAP);
5848 bnx2_set_power_state(bp, PCI_D0);
5850 bp->chip_id = REG_RD(bp, BNX2_MISC_ID);
5852 /* 5708 cannot support DMA addresses > 40-bit.
5853 * On 64-bit systems with IOMMU, use 40-bit dma_mask.
5854 * On 64-bit systems without IOMMU, use 64-bit dma_mask and
5855 * do DMA address check in bnx2_start_xmit().
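/* Note that only the streaming DMA mask is raised to 64 bits under
 * CONFIG_HIGHMEM; the coherent mask used for the rings and status block
 * (persist_dma_mask) stays at 40 bits on the 5708 in all cases. */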
5857 if (CHIP_NUM(bp) == CHIP_NUM_5708) {
5858 persist_dma_mask = dma_mask = DMA_40BIT_MASK;
5859 #ifdef CONFIG_HIGHMEM
5860 dma_mask = DMA_64BIT_MASK;
5863 persist_dma_mask = dma_mask = DMA_64BIT_MASK;
5865 /* Configure DMA attributes. */
5866 if (pci_set_dma_mask(pdev, dma_mask) == 0) {
5867 dev->features |= NETIF_F_HIGHDMA;
5868 rc = pci_set_consistent_dma_mask(pdev, persist_dma_mask);
5870 printk(KERN_ERR PFX "pci_set_consistent_dma_mask "
5871 "failed, aborting.\n");
5874 } else if ((rc = pci_set_dma_mask(pdev, DMA_32BIT_MASK)) != 0) {
5875 printk(KERN_ERR PFX "System does not support DMA, aborting.\n");
5879 /* Get bus information. */
5880 reg = REG_RD(bp, BNX2_PCICFG_MISC_STATUS);
5881 if (reg & BNX2_PCICFG_MISC_STATUS_PCIX_DET) {
5884 bp->flags |= PCIX_FLAG;
5886 clkreg = REG_RD(bp, BNX2_PCICFG_PCI_CLOCK_CONTROL_BITS);
5888 clkreg &= BNX2_PCICFG_PCI_CLOCK_CONTROL_BITS_PCI_CLK_SPD_DET;
5890 case BNX2_PCICFG_PCI_CLOCK_CONTROL_BITS_PCI_CLK_SPD_DET_133MHZ:
5891 bp->bus_speed_mhz = 133;
5894 case BNX2_PCICFG_PCI_CLOCK_CONTROL_BITS_PCI_CLK_SPD_DET_95MHZ:
5895 bp->bus_speed_mhz = 100;
5898 case BNX2_PCICFG_PCI_CLOCK_CONTROL_BITS_PCI_CLK_SPD_DET_66MHZ:
5899 case BNX2_PCICFG_PCI_CLOCK_CONTROL_BITS_PCI_CLK_SPD_DET_80MHZ:
5900 bp->bus_speed_mhz = 66;
5903 case BNX2_PCICFG_PCI_CLOCK_CONTROL_BITS_PCI_CLK_SPD_DET_48MHZ:
5904 case BNX2_PCICFG_PCI_CLOCK_CONTROL_BITS_PCI_CLK_SPD_DET_55MHZ:
5905 bp->bus_speed_mhz = 50;
5908 case BNX2_PCICFG_PCI_CLOCK_CONTROL_BITS_PCI_CLK_SPD_DET_LOW:
5909 case BNX2_PCICFG_PCI_CLOCK_CONTROL_BITS_PCI_CLK_SPD_DET_32MHZ:
5910 case BNX2_PCICFG_PCI_CLOCK_CONTROL_BITS_PCI_CLK_SPD_DET_38MHZ:
5911 bp->bus_speed_mhz = 33;
5916 if (reg & BNX2_PCICFG_MISC_STATUS_M66EN)
5917 bp->bus_speed_mhz = 66;
5919 bp->bus_speed_mhz = 33;
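/* In conventional PCI mode the clock-speed detect field is not used; the
 * M66EN status bit alone selects between 66 and 33 MHz. */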
5922 if (reg & BNX2_PCICFG_MISC_STATUS_32BIT_DET)
5923 bp->flags |= PCI_32BIT_FLAG;
5925 /* 5706A0 may falsely detect SERR and PERR. */
5926 if (CHIP_ID(bp) == CHIP_ID_5706_A0) {
5927 reg = REG_RD(bp, PCI_COMMAND);
5928 reg &= ~(PCI_COMMAND_SERR | PCI_COMMAND_PARITY);
5929 REG_WR(bp, PCI_COMMAND, reg);
5931 else if ((CHIP_ID(bp) == CHIP_ID_5706_A1) &&
5932 !(bp->flags & PCIX_FLAG)) {
5934 printk(KERN_ERR PFX "5706 A1 can only be used in a PCIX bus, "
5939 bnx2_init_nvram(bp);
5941 reg = REG_RD_IND(bp, BNX2_SHM_HDR_SIGNATURE);
5943 if ((reg & BNX2_SHM_HDR_SIGNATURE_SIG_MASK) ==
5944 BNX2_SHM_HDR_SIGNATURE_SIG)
5945 bp->shmem_base = REG_RD_IND(bp, BNX2_SHM_HDR_ADDR_0);
5947 bp->shmem_base = HOST_VIEW_SHMEM_BASE;
5949 /* Get the permanent MAC address. First we need to make sure the
5950 * firmware is actually running.
5952 reg = REG_RD_IND(bp, bp->shmem_base + BNX2_DEV_INFO_SIGNATURE);
5954 if ((reg & BNX2_DEV_INFO_SIGNATURE_MAGIC_MASK) !=
5955 BNX2_DEV_INFO_SIGNATURE_MAGIC) {
5956 printk(KERN_ERR PFX "Firmware not running, aborting.\n");
5961 bp->fw_ver = REG_RD_IND(bp, bp->shmem_base + BNX2_DEV_INFO_BC_REV);
5963 reg = REG_RD_IND(bp, bp->shmem_base + BNX2_PORT_HW_CFG_MAC_UPPER);
5964 bp->mac_addr[0] = (u8) (reg >> 8);
5965 bp->mac_addr[1] = (u8) reg;
5967 reg = REG_RD_IND(bp, bp->shmem_base + BNX2_PORT_HW_CFG_MAC_LOWER);
5968 bp->mac_addr[2] = (u8) (reg >> 24);
5969 bp->mac_addr[3] = (u8) (reg >> 16);
5970 bp->mac_addr[4] = (u8) (reg >> 8);
5971 bp->mac_addr[5] = (u8) reg;
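/* The permanent MAC address lives in shared memory as two 32-bit words:
 * MAC_UPPER carries bytes 0-1 in its low 16 bits and MAC_LOWER carries
 * bytes 2-5, most significant byte first. */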
5973 bp->tx_ring_size = MAX_TX_DESC_CNT;
5974 bnx2_set_rx_ring_size(bp, 100);
5978 bp->rx_offset = sizeof(struct l2_fhdr) + 2;
5980 bp->tx_quick_cons_trip_int = 20;
5981 bp->tx_quick_cons_trip = 20;
5982 bp->tx_ticks_int = 80;
5985 bp->rx_quick_cons_trip_int = 6;
5986 bp->rx_quick_cons_trip = 6;
5987 bp->rx_ticks_int = 18;
5990 bp->stats_ticks = 1000000 & 0xffff00;
5992 bp->timer_interval = HZ;
5993 bp->current_interval = HZ;
5997 /* Disable WOL support if we are running on a SERDES chip. */
5998 if (CHIP_BOND_ID(bp) & CHIP_BOND_ID_SERDES_BIT) {
5999 bp->phy_flags |= PHY_SERDES_FLAG;
6000 bp->flags |= NO_WOL_FLAG;
6001 if (CHIP_NUM(bp) == CHIP_NUM_5708) {
6003 reg = REG_RD_IND(bp, bp->shmem_base +
6004 BNX2_SHARED_HW_CFG_CONFIG);
6005 if (reg & BNX2_SHARED_HW_CFG_PHY_2_5G)
6006 bp->phy_flags |= PHY_2_5G_CAPABLE_FLAG;
6010 if ((CHIP_ID(bp) == CHIP_ID_5708_A0) ||
6011 (CHIP_ID(bp) == CHIP_ID_5708_B0) ||
6012 (CHIP_ID(bp) == CHIP_ID_5708_B1))
6013 bp->flags |= NO_WOL_FLAG;
6015 if (CHIP_ID(bp) == CHIP_ID_5706_A0) {
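/* 5706 A0 apparently cannot use a separate set of coalescing parameters
 * while servicing an interrupt, so the *_int values simply mirror the
 * normal ones. */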
6016 bp->tx_quick_cons_trip_int =
6017 bp->tx_quick_cons_trip;
6018 bp->tx_ticks_int = bp->tx_ticks;
6019 bp->rx_quick_cons_trip_int =
6020 bp->rx_quick_cons_trip;
6021 bp->rx_ticks_int = bp->rx_ticks;
6022 bp->comp_prod_trip_int = bp->comp_prod_trip;
6023 bp->com_ticks_int = bp->com_ticks;
6024 bp->cmd_ticks_int = bp->cmd_ticks;
6027 bp->autoneg = AUTONEG_SPEED | AUTONEG_FLOW_CTRL;
6028 bp->req_line_speed = 0;
6029 if (bp->phy_flags & PHY_SERDES_FLAG) {
6030 bp->advertising = ETHTOOL_ALL_FIBRE_SPEED | ADVERTISED_Autoneg;
6032 reg = REG_RD_IND(bp, bp->shmem_base + BNX2_PORT_HW_CFG_CONFIG);
6033 reg &= BNX2_PORT_HW_CFG_CFG_DFLT_LINK_MASK;
6034 if (reg == BNX2_PORT_HW_CFG_CFG_DFLT_LINK_1G) {
6036 bp->req_line_speed = bp->line_speed = SPEED_1000;
6037 bp->req_duplex = DUPLEX_FULL;
6041 bp->advertising = ETHTOOL_ALL_COPPER_SPEED | ADVERTISED_Autoneg;
6044 bp->req_flow_ctrl = FLOW_CTRL_RX | FLOW_CTRL_TX;
6046 init_timer(&bp->timer);
6047 bp->timer.expires = RUN_AT(bp->timer_interval);
6048 bp->timer.data = (unsigned long) bp;
6049 bp->timer.function = bnx2_timer;
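/* bnx2_timer() re-arms itself every timer_interval (one second, set above)
 * and performs periodic housekeeping such as the 5706 SerDes
 * autonegotiation fallback armed via serdes_an_pending in
 * bnx2_nway_reset(). */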
6055 iounmap(bp->regview);
6060 pci_release_regions(pdev);
6063 pci_disable_device(pdev);
6064 pci_set_drvdata(pdev, NULL);
6070 static int __devinit
6071 bnx2_init_one(struct pci_dev *pdev, const struct pci_device_id *ent)
6073 static int version_printed = 0;
6074 struct net_device *dev = NULL;
6078 if (version_printed++ == 0)
6079 printk(KERN_INFO "%s", version);
6081 /* dev zeroed in init_etherdev */
6082 #if (LINUX_VERSION_CODE >= 0x20418)
6083 dev = alloc_etherdev(sizeof(*bp));
6085 dev = init_etherdev(NULL, sizeof(*bp));
6091 rc = bnx2_init_board(pdev, dev);
6093 #if (LINUX_VERSION_CODE >= 0x20418)
6096 unregister_netdev(dev);
6102 dev->open = bnx2_open;
6103 dev->hard_start_xmit = bnx2_start_xmit;
6104 dev->stop = bnx2_close;
6105 dev->get_stats = bnx2_get_stats;
6106 dev->set_multicast_list = bnx2_set_rx_mode;
6107 dev->do_ioctl = bnx2_ioctl;
6108 dev->set_mac_address = bnx2_change_mac_addr;
6109 dev->change_mtu = bnx2_change_mtu;
6110 dev->tx_timeout = bnx2_tx_timeout;
6111 dev->watchdog_timeo = TX_TIMEOUT;
6113 dev->vlan_rx_register = bnx2_vlan_rx_register;
6114 dev->vlan_rx_kill_vid = bnx2_vlan_rx_kill_vid;
6116 dev->poll = bnx2_poll;
6117 dev->ethtool_ops = &bnx2_ethtool_ops;
6120 bp = netdev_priv(dev);
6122 #if defined(HAVE_POLL_CONTROLLER) || defined(CONFIG_NET_POLL_CONTROLLER)
6123 dev->poll_controller = poll_bnx2;
6126 #if (LINUX_VERSION_CODE >= 0x20418)
6127 if ((rc = register_netdev(dev))) {
6128 printk(KERN_ERR PFX "Cannot register net device\n");
6130 iounmap(bp->regview);
6131 pci_release_regions(pdev);
6132 pci_disable_device(pdev);
6133 pci_set_drvdata(pdev, NULL);
6139 pci_set_drvdata(pdev, dev);
6141 memcpy(dev->dev_addr, bp->mac_addr, 6);
6142 #ifdef ETHTOOL_GPERMADDR
6143 memcpy(dev->perm_addr, bp->mac_addr, 6);
6145 bp->name = board_info[ent->driver_data].name;
6146 printk(KERN_INFO "%s: %s (%c%d) PCI%s %s %dMHz found at mem %lx, "
6150 ((CHIP_ID(bp) & 0xf000) >> 12) + 'A',
6151 ((CHIP_ID(bp) & 0x0ff0) >> 4),
6152 ((bp->flags & PCIX_FLAG) ? "-X" : ""),
6153 ((bp->flags & PCI_32BIT_FLAG) ? "32-bit" : "64-bit"),
6158 printk("node addr ");
6159 for (i = 0; i < 6; i++)
6160 printk("%2.2x", dev->dev_addr[i]);
6163 dev->features |= NETIF_F_SG | NETIF_F_IP_CSUM;
6165 dev->features |= NETIF_F_HW_VLAN_TX | NETIF_F_HW_VLAN_RX;
6168 dev->features |= NETIF_F_TSO;
6171 netif_carrier_off(bp->dev);
6176 static void __devexit
6177 bnx2_remove_one(struct pci_dev *pdev)
6179 struct net_device *dev = pci_get_drvdata(pdev);
6180 struct bnx2 *bp = netdev_priv(dev);
6182 #if (LINUX_VERSION_CODE >= 0x20600)
6183 flush_scheduled_work();
6186 unregister_netdev(dev);
6189 iounmap(bp->regview);
6191 #if (LINUX_VERSION_CODE >= 0x20418)
6196 pci_release_regions(pdev);
6197 pci_disable_device(pdev);
6198 pci_set_drvdata(pdev, NULL);
6202 bnx2_suspend(struct pci_dev *pdev, pm_message_t state)
6204 struct net_device *dev = pci_get_drvdata(pdev);
6205 struct bnx2 *bp = netdev_priv(dev);
6208 if (!netif_running(dev))
6211 bnx2_netif_stop(bp);
6212 netif_device_detach(dev);
6213 del_timer_sync(&bp->timer);
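/* Tell the bootcode why the driver is going down; the reset code chosen
 * here determines whether the chip stays armed for wake-on-LAN while
 * suspended. */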
6214 if (bp->flags & NO_WOL_FLAG)
6215 reset_code = BNX2_DRV_MSG_CODE_UNLOAD;
6217 reset_code = BNX2_DRV_MSG_CODE_SUSPEND_WOL;
6219 reset_code = BNX2_DRV_MSG_CODE_SUSPEND_NO_WOL;
6220 bnx2_reset_chip(bp, reset_code);
6222 #if (LINUX_VERSION_CODE < 0x2060b)
6223 bnx2_set_power_state(bp, state);
6225 bnx2_set_power_state(bp, pci_choose_state(pdev, state));
6231 bnx2_resume(struct pci_dev *pdev)
6233 struct net_device *dev = pci_get_drvdata(pdev);
6234 struct bnx2 *bp = netdev_priv(dev);
6236 if (!netif_running(dev))
6239 bnx2_set_power_state(bp, PCI_D0);
6240 netif_device_attach(dev);
6242 bnx2_netif_start(bp);
6246 static struct pci_driver bnx2_pci_driver = {
6247 .name = DRV_MODULE_NAME,
6248 .id_table = bnx2_pci_tbl,
6249 .probe = bnx2_init_one,
6250 .remove = __devexit_p(bnx2_remove_one),
6251 .suspend = bnx2_suspend,
6252 .resume = bnx2_resume,
6255 static int __init bnx2_init(void)
6257 return pci_module_init(&bnx2_pci_driver);
6260 static void __exit bnx2_cleanup(void)
6262 pci_unregister_driver(&bnx2_pci_driver);
6265 module_init(bnx2_init);
6266 module_exit(bnx2_cleanup);