1 /******************************************************************************/
3 /* Broadcom BCM5700 Linux Network Driver, Copyright (c) 2000 - 2005 Broadcom */
5 /* All rights reserved. */
7 /* This program is free software; you can redistribute it and/or modify */
8 /* it under the terms of the GNU General Public License as published by */
9 /* the Free Software Foundation, located in the file LICENSE. */
11 /******************************************************************************/
/* Driver identification strings, printed in the load-time banner. */
14 char bcm5700_driver[] = "bcm5700";
15 char bcm5700_version[] = "8.3.14a";
16 char bcm5700_date[] = "(11/2/05)";
21 /* A few user-configurable values. */
24 /* Used to pass the full-duplex flag, etc. */
/* Per-adapter module parameters, indexed by unit number (up to MAX_UNITS).
   line_speed == 0 selects autonegotiation of the link speed. */
25 static int line_speed[MAX_UNITS] = {0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0};
26 static int auto_speed[MAX_UNITS] = {1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1};
27 static int full_duplex[MAX_UNITS] = {1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1};
/* 802.3x pause-frame flow control enables (default on). */
28 static int rx_flow_control[MAX_UNITS] = {1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1};
29 static int tx_flow_control[MAX_UNITS] = {1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1};
30 static int auto_flow_control[MAX_UNITS] = {1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1};
31 #if T3_JUMBO_RCV_RCB_ENTRY_COUNT
32 static int mtu[MAX_UNITS] = {1500,1500,1500,1500,1500,1500,1500,1500,1500,1500,1500,1500,1500,1500,1500,1500}; /* Jumbo MTU for interfaces. */
/* Checksum offload and scatter/gather enables (default on). */
34 static int tx_checksum[MAX_UNITS] = {1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1};
35 static int rx_checksum[MAX_UNITS] = {1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1};
36 static int scatter_gather[MAX_UNITS] = {1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1};
/* Per-unit descriptor ring sizes; defaults come from the LM layer constants. */
38 #define TX_DESC_CNT DEFAULT_TX_PACKET_DESC_COUNT
39 static unsigned int tx_pkt_desc_cnt[MAX_UNITS] =
40 {TX_DESC_CNT,TX_DESC_CNT,TX_DESC_CNT,TX_DESC_CNT,TX_DESC_CNT,
41 TX_DESC_CNT,TX_DESC_CNT,TX_DESC_CNT,TX_DESC_CNT,TX_DESC_CNT,
42 TX_DESC_CNT,TX_DESC_CNT,TX_DESC_CNT,TX_DESC_CNT,TX_DESC_CNT,
45 #define RX_DESC_CNT DEFAULT_STD_RCV_DESC_COUNT
46 static unsigned int rx_std_desc_cnt[MAX_UNITS] =
47 {RX_DESC_CNT,RX_DESC_CNT,RX_DESC_CNT,RX_DESC_CNT,RX_DESC_CNT,
48 RX_DESC_CNT,RX_DESC_CNT,RX_DESC_CNT,RX_DESC_CNT,RX_DESC_CNT,
49 RX_DESC_CNT,RX_DESC_CNT,RX_DESC_CNT,RX_DESC_CNT,RX_DESC_CNT,
/* Jumbo receive ring is only compiled in when the chip family supports it. */
52 #if T3_JUMBO_RCV_RCB_ENTRY_COUNT
53 #define JBO_DESC_CNT DEFAULT_JUMBO_RCV_DESC_COUNT
54 static unsigned int rx_jumbo_desc_cnt[MAX_UNITS] =
55 {JBO_DESC_CNT,JBO_DESC_CNT,JBO_DESC_CNT,JBO_DESC_CNT,JBO_DESC_CNT,
56 JBO_DESC_CNT,JBO_DESC_CNT,JBO_DESC_CNT,JBO_DESC_CNT,JBO_DESC_CNT,
57 JBO_DESC_CNT,JBO_DESC_CNT,JBO_DESC_CNT,JBO_DESC_CNT,JBO_DESC_CNT,
/* Interrupt coalescing knobs.  When NAPI rx polling is compiled in the
   adaptive scheme defaults to off (0); otherwise it defaults to on (1). */
62 #ifdef BCM_NAPI_RXPOLL
63 static unsigned int adaptive_coalesce[MAX_UNITS] =
64 {0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0};
66 static unsigned int adaptive_coalesce[MAX_UNITS] =
67 {1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1};
/* Tick and max-frame thresholds for rx, tx and statistics coalescing. */
70 #define RX_COAL_TK DEFAULT_RX_COALESCING_TICKS
71 static unsigned int rx_coalesce_ticks[MAX_UNITS] =
72 {RX_COAL_TK,RX_COAL_TK,RX_COAL_TK,RX_COAL_TK,RX_COAL_TK,
73 RX_COAL_TK, RX_COAL_TK,RX_COAL_TK,RX_COAL_TK,RX_COAL_TK,
74 RX_COAL_TK,RX_COAL_TK, RX_COAL_TK,RX_COAL_TK,RX_COAL_TK,
77 #define RX_COAL_FM DEFAULT_RX_MAX_COALESCED_FRAMES
78 static unsigned int rx_max_coalesce_frames[MAX_UNITS] =
79 {RX_COAL_FM,RX_COAL_FM,RX_COAL_FM,RX_COAL_FM,RX_COAL_FM,
80 RX_COAL_FM,RX_COAL_FM,RX_COAL_FM,RX_COAL_FM,RX_COAL_FM,
81 RX_COAL_FM,RX_COAL_FM,RX_COAL_FM,RX_COAL_FM,RX_COAL_FM,
84 #define TX_COAL_TK DEFAULT_TX_COALESCING_TICKS
85 static unsigned int tx_coalesce_ticks[MAX_UNITS] =
86 {TX_COAL_TK,TX_COAL_TK,TX_COAL_TK,TX_COAL_TK,TX_COAL_TK,
87 TX_COAL_TK, TX_COAL_TK,TX_COAL_TK,TX_COAL_TK,TX_COAL_TK,
88 TX_COAL_TK,TX_COAL_TK, TX_COAL_TK,TX_COAL_TK,TX_COAL_TK,
91 #define TX_COAL_FM DEFAULT_TX_MAX_COALESCED_FRAMES
92 static unsigned int tx_max_coalesce_frames[MAX_UNITS] =
93 {TX_COAL_FM,TX_COAL_FM,TX_COAL_FM,TX_COAL_FM,TX_COAL_FM,
94 TX_COAL_FM,TX_COAL_FM,TX_COAL_FM,TX_COAL_FM,TX_COAL_FM,
95 TX_COAL_FM,TX_COAL_FM,TX_COAL_FM,TX_COAL_FM,TX_COAL_FM,
98 #define ST_COAL_TK DEFAULT_STATS_COALESCING_TICKS
99 static unsigned int stats_coalesce_ticks[MAX_UNITS] =
100 {ST_COAL_TK,ST_COAL_TK,ST_COAL_TK,ST_COAL_TK,ST_COAL_TK,
101 ST_COAL_TK,ST_COAL_TK,ST_COAL_TK,ST_COAL_TK,ST_COAL_TK,
102 ST_COAL_TK,ST_COAL_TK,ST_COAL_TK,ST_COAL_TK,ST_COAL_TK,
/* Wake-on-LAN off by default; TCP segmentation offload on by default. */
107 static int enable_wol[MAX_UNITS] = {0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0};
110 static int enable_tso[MAX_UNITS] = {1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1};
112 #ifdef BCM_NIC_SEND_BD
/* Presumably selects on-chip (NIC memory) send buffer descriptors — see
   NIC_SEND_BD_FLAG usage in bcm5700_init_board. */
113 static int nic_tx_bd[MAX_UNITS] = {0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0};
116 static int vlan_tag_mode[MAX_UNITS] = {0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0};
/* Workaround switches, all off by default. */
118 static int delay_link[MAX_UNITS] = {0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0};
119 static int disable_d3hot[MAX_UNITS] = {0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0};
121 #if defined(CONFIG_PCI_MSI) || defined(CONFIG_PCI_USE_VECTOR)
122 static int disable_msi[MAX_UNITS] = {0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0};
/* Nonzero once a chipset known to break MSI has been detected. */
123 static int bcm_msi_chipset_bug = 0;
/* Microseconds per system timer tick. */
126 #define BCM_TIMER_GRANULARITY (1000000 / HZ)
128 /* Operational parameters that usually are not changed. */
129 /* Time in jiffies before concluding the transmitter is hung. */
130 #define TX_TIMEOUT (2*HZ)
/* Compatibility shims for pre-2.3.13 / pre-2.3.43 kernels that lack the
   modern pci_resource_start() and netif_*() interfaces. */
132 #if (LINUX_VERSION_CODE < 0x02030d)
133 #define pci_resource_start(dev, bar) (dev->base_address[bar] & PCI_BASE_ADDRESS_MEM_MASK)
134 #elif (LINUX_VERSION_CODE < 0x02032b)
135 #define pci_resource_start(dev, bar) (dev->resource[bar] & PCI_BASE_ADDRESS_MEM_MASK)
138 #if (LINUX_VERSION_CODE < 0x02032b)
139 #define dev_kfree_skb_irq(skb) dev_kfree_skb(skb)
/* NOTE(review): two-statement macro without do { } while (0) — unsafe in
   an un-braced if/else; left as-is for byte compatibility. */
140 #define netif_wake_queue(dev) clear_bit(0, &dev->tbusy); mark_bh(NET_BH)
141 #define netif_stop_queue(dev) set_bit(0, &dev->tbusy)
143 static inline void netif_start_queue(struct net_device *dev)
150 #define netif_queue_stopped(dev) dev->tbusy
151 #define netif_running(dev) dev->start
/* Emulate tasklets with the immediate bottom-half task queue on old kernels. */
153 static inline void tasklet_schedule(struct tasklet_struct *tasklet)
155 queue_task(tasklet, &tq_immediate);
156 mark_bh(IMMEDIATE_BH);
159 static inline void tasklet_init(struct tasklet_struct *tasklet,
160 void (*func)(unsigned long),
163 tasklet->next = NULL;
165 tasklet->routine = (void (*)(void *))func;
166 tasklet->data = (void *)data;
169 #define tasklet_kill(tasklet)
/* Pre-2.3 kernels have no struct pci_device_id or the new-style PCI
   driver-model helpers; provide minimal stand-ins so one source builds
   across kernel generations. */
173 #if (LINUX_VERSION_CODE < 0x020300)
174 struct pci_device_id {
175 unsigned int vendor, device; /* Vendor and device ID or PCI_ANY_ID */
176 unsigned int subvendor, subdevice; /* Subsystem ID's or PCI_ANY_ID */
177 unsigned int class, class_mask; /* (class,subclass,prog-if) triplet */
178 unsigned long driver_data; /* Data private to the driver */
183 #define pci_set_drvdata(pdev, dev)
184 #define pci_get_drvdata(pdev) 0
186 #define pci_enable_device(pdev) 0
188 #define __devinit __init
189 #define __devinitdata __initdata
192 #define SET_MODULE_OWNER(dev)
193 #define MODULE_DEVICE_TABLE(pci, pci_tbl)
197 #if (LINUX_VERSION_CODE < 0x020411)
/* __devexit_p() is a no-op wrapper on kernels that predate it. */
199 #define __devexit_p(x) x
203 #ifndef MODULE_LICENSE
204 #define MODULE_LICENSE(license)
/* Old kernels: interrupt handlers return void, so IRQ_RETVAL vanishes. */
208 typedef void irqreturn_t;
209 #define IRQ_RETVAL(x)
/* Consistent-DMA compatibility: kernels before 2.3.42 have no
   pci_alloc_consistent(), so fall back to kmalloc + virt_to_bus. */
212 #if (LINUX_VERSION_CODE < 0x02032a)
213 static inline void *pci_alloc_consistent(struct pci_dev *pdev, size_t size,
214 dma_addr_t *dma_handle)
218 /* Maximum in slab.c */
222 virt_ptr = kmalloc(size, GFP_KERNEL);
223 *dma_handle = virt_to_bus(virt_ptr);
226 #define pci_free_consistent(dev, size, ptr, dma_ptr) kfree(ptr)
228 #endif /*#if (LINUX_VERSION_CODE < 0x02032a) */
/* DMA mask constants.  On old non-RedHat kernels pci_set_dma_mask() takes
   a dma_addr_t, which may be only 32 bits wide, so the "64-bit" mask is
   deliberately clamped to 0xffffffff in that branch. */
231 #if (LINUX_VERSION_CODE < 0x02040d)
233 #if (LINUX_VERSION_CODE >= 0x020409) && defined(RED_HAT_LINUX_KERNEL)
235 #define BCM_32BIT_DMA_MASK ((u64) 0x00000000ffffffffULL)
236 #define BCM_64BIT_DMA_MASK ((u64) 0xffffffffffffffffULL)
239 /* pci_set_dma_mask is using dma_addr_t */
241 #define BCM_32BIT_DMA_MASK ((dma_addr_t) 0xffffffff)
242 #define BCM_64BIT_DMA_MASK ((dma_addr_t) 0xffffffff)
246 #else /* (LINUX_VERSION_CODE < 0x02040d) */
248 #define BCM_32BIT_DMA_MASK ((u64) 0x00000000ffffffffULL)
249 #define BCM_64BIT_DMA_MASK ((u64) 0xffffffffffffffffULL)
/* Stubs for kernels predating the DMA-mask and region-request APIs. */
252 #if (LINUX_VERSION_CODE < 0x020329)
253 #define pci_set_dma_mask(pdev, mask) (0)
255 #if (LINUX_VERSION_CODE < 0x020403)
257 pci_set_dma_mask(struct pci_dev *dev, dma_addr_t mask)
259 if(! pci_dma_supported(dev, mask))
262 dev->dma_mask = mask;
269 #if (LINUX_VERSION_CODE < 0x020547)
270 #define pci_set_consistent_dma_mask(pdev, mask) (0)
273 #if (LINUX_VERSION_CODE < 0x020402)
274 #define pci_request_regions(pdev, name) (0)
275 #define pci_release_regions(pdev)
278 #if ! defined(spin_is_locked)
279 #define spin_is_locked(lock) (test_bit(0,(lock)))
/* Global-lock helpers: all locking is conditional on do_global_lock,
   which is set per device at probe time (see bcm5700_init_board). */
282 #define BCM5700_LOCK(pUmDevice, flags) \
283 if ((pUmDevice)->do_global_lock) { \
284 spin_lock_irqsave(&(pUmDevice)->global_lock, flags); \
287 #define BCM5700_UNLOCK(pUmDevice, flags) \
288 if ((pUmDevice)->do_global_lock) { \
289 spin_unlock_irqrestore(&(pUmDevice)->global_lock, flags);\
293 bcm5700_intr_lock(PUM_DEVICE_BLOCK pUmDevice)
295 if (pUmDevice->do_global_lock) {
296 spin_lock(&pUmDevice->global_lock);
301 bcm5700_intr_unlock(PUM_DEVICE_BLOCK pUmDevice)
303 if (pUmDevice->do_global_lock) {
304 spin_unlock(&pUmDevice->global_lock);
/* Disable chip interrupts and bump the nesting count; interrupts come
   back only when bcm5700_intr_on() brings intr_sem back to zero. */
309 bcm5700_intr_off(PUM_DEVICE_BLOCK pUmDevice)
311 atomic_inc(&pUmDevice->intr_sem);
312 LM_DisableInterrupt(&pUmDevice->lm_dev);
313 #if (LINUX_VERSION_CODE >= 0x2051c)
314 synchronize_irq(pUmDevice->dev->irq);
/* Second disable after synchronize_irq — presumably guards against the
   ISR having re-enabled interrupts while draining; verify against ISR. */
318 LM_DisableInterrupt(&pUmDevice->lm_dev);
322 bcm5700_intr_on(PUM_DEVICE_BLOCK pUmDevice)
324 if (atomic_dec_and_test(&pUmDevice->intr_sem)) {
325 LM_EnableInterrupt(&pUmDevice->lm_dev);
330 * Broadcom NIC Extension support
341 #endif /* NICE_SUPPORT */
/* Packet descriptor size exported to the shared LM (lower module) layer. */
343 int MM_Packet_Desc_Size = sizeof(UM_PACKET);
346 MODULE_AUTHOR("Michael Chan <mchan at broadcom dot com> and Gary Zambrano <zambrano at broadcom dot com>");
347 MODULE_DESCRIPTION("BCM5700 Driver");
348 MODULE_LICENSE("GPL");
/* Module parameter declarations: kernels before 2.6.5 use the legacy
   MODULE_PARM() macro; 2.6.10+ use module_param_array() below.  The two
   lists must be kept in sync. */
350 #if (LINUX_VERSION_CODE < 0x020605)
352 MODULE_PARM(debug, "i");
353 MODULE_PARM(line_speed, "1-" __MODULE_STRING(MAX_UNITS) "i");
354 MODULE_PARM(auto_speed, "1-" __MODULE_STRING(MAX_UNITS) "i");
355 MODULE_PARM(full_duplex, "1-" __MODULE_STRING(MAX_UNITS) "i");
356 MODULE_PARM(rx_flow_control, "1-" __MODULE_STRING(MAX_UNITS) "i");
357 MODULE_PARM(tx_flow_control, "1-" __MODULE_STRING(MAX_UNITS) "i");
358 MODULE_PARM(auto_flow_control, "1-" __MODULE_STRING(MAX_UNITS) "i");
359 #if T3_JUMBO_RCV_RCB_ENTRY_COUNT
360 MODULE_PARM(mtu, "1-" __MODULE_STRING(MAX_UNITS) "i");
362 MODULE_PARM(tx_checksum, "1-" __MODULE_STRING(MAX_UNITS) "i");
363 MODULE_PARM(rx_checksum, "1-" __MODULE_STRING(MAX_UNITS) "i");
364 MODULE_PARM(scatter_gather, "1-" __MODULE_STRING(MAX_UNITS) "i");
365 MODULE_PARM(tx_pkt_desc_cnt, "1-" __MODULE_STRING(MAX_UNITS) "i");
366 MODULE_PARM(rx_std_desc_cnt, "1-" __MODULE_STRING(MAX_UNITS) "i");
367 #if T3_JUMBO_RCV_RCB_ENTRY_COUNT
368 MODULE_PARM(rx_jumbo_desc_cnt, "1-" __MODULE_STRING(MAX_UNITS) "i");
371 MODULE_PARM(adaptive_coalesce, "1-" __MODULE_STRING(MAX_UNITS) "i");
372 MODULE_PARM(rx_coalesce_ticks, "1-" __MODULE_STRING(MAX_UNITS) "i");
373 MODULE_PARM(rx_max_coalesce_frames, "1-" __MODULE_STRING(MAX_UNITS) "i");
374 MODULE_PARM(tx_coalesce_ticks, "1-" __MODULE_STRING(MAX_UNITS) "i");
375 MODULE_PARM(tx_max_coalesce_frames, "1-" __MODULE_STRING(MAX_UNITS) "i");
376 MODULE_PARM(stats_coalesce_ticks, "1-" __MODULE_STRING(MAX_UNITS) "i");
379 MODULE_PARM(enable_wol, "1-" __MODULE_STRING(MAX_UNITS) "i");
382 MODULE_PARM(enable_tso, "1-" __MODULE_STRING(MAX_UNITS) "i");
384 #ifdef BCM_NIC_SEND_BD
385 MODULE_PARM(nic_tx_bd, "1-" __MODULE_STRING(MAX_UNITS) "i");
388 MODULE_PARM(vlan_tag_mode, "1-" __MODULE_STRING(MAX_UNITS) "i");
390 MODULE_PARM(delay_link, "1-" __MODULE_STRING(MAX_UNITS) "i");
391 MODULE_PARM(disable_d3hot, "1-" __MODULE_STRING(MAX_UNITS) "i");
393 #if defined(CONFIG_PCI_MSI) || defined(CONFIG_PCI_USE_VECTOR)
394 MODULE_PARM(disable_msi, "1-" __MODULE_STRING(MAX_UNITS) "i");
399 #if (LINUX_VERSION_CODE >= 0x020605) && (LINUX_VERSION_CODE < 0x02060a)
/* 2.6.10+ parameter declarations; "numvar" receives the element count. */
407 #if (LINUX_VERSION_CODE >= 0x2060a)
413 module_param_array(line_speed, int, numvar, 0);
414 module_param_array(auto_speed, int, numvar, 0);
415 module_param_array(full_duplex, int, numvar, 0);
416 module_param_array(rx_flow_control, int, numvar, 0);
417 module_param_array(tx_flow_control, int, numvar, 0);
418 module_param_array(auto_flow_control, int, numvar, 0);
419 #if T3_JUMBO_RCV_RCB_ENTRY_COUNT
420 module_param_array(mtu, int, numvar, 0);
422 module_param_array(tx_checksum, int, numvar, 0);
423 module_param_array(rx_checksum, int, numvar, 0);
424 module_param_array(scatter_gather, int, numvar, 0);
425 module_param_array(tx_pkt_desc_cnt, int, numvar, 0);
426 module_param_array(rx_std_desc_cnt, int, numvar, 0);
427 #if T3_JUMBO_RCV_RCB_ENTRY_COUNT
428 module_param_array(rx_jumbo_desc_cnt, int, numvar, 0);
431 module_param_array(adaptive_coalesce, int, numvar, 0);
432 module_param_array(rx_coalesce_ticks, int, numvar, 0);
433 module_param_array(rx_max_coalesce_frames, int, numvar, 0);
434 module_param_array(tx_coalesce_ticks, int, numvar, 0);
435 module_param_array(tx_max_coalesce_frames, int, numvar, 0);
436 module_param_array(stats_coalesce_ticks, int, numvar, 0);
439 module_param_array(enable_wol, int, numvar, 0);
442 module_param_array(enable_tso, int, numvar, 0);
444 #ifdef BCM_NIC_SEND_BD
445 module_param_array(nic_tx_bd, int, numvar, 0);
448 module_param_array(vlan_tag_mode, int, numvar, 0);
450 module_param_array(delay_link, int, numvar, 0);
451 module_param_array(disable_d3hot, int, numvar, 0);
453 #if defined(CONFIG_PCI_MSI) || defined(CONFIG_PCI_USE_VECTOR)
454 module_param_array(disable_msi, int, numvar, 0);
/* Convert a relative timeout (in jiffies) to an absolute expiry time. */
463 #define RUN_AT(x) (jiffies + (x))
465 char kernel_version[] = UTS_RELEASE;
467 #define PCI_SUPPORT_VER2
469 #if ! defined(CAP_NET_ADMIN)
470 #define capable(CAP_XXX) (suser())
/* Debug level: a compile-time TIGON3_DEBUG overrides the default of 0. */
473 #define tigon3_debug debug
475 static int tigon3_debug = TIGON3_DEBUG;
477 static int tigon3_debug = 0;
/* Forward declarations for the netdev entry points and internal helpers. */
481 int bcm5700_open(struct net_device *dev);
482 STATIC void bcm5700_timer(unsigned long data);
483 STATIC void bcm5700_stats_timer(unsigned long data);
484 STATIC void bcm5700_reset(struct net_device *dev);
485 STATIC int bcm5700_start_xmit(struct sk_buff *skb, struct net_device *dev);
486 STATIC irqreturn_t bcm5700_interrupt(int irq, void *dev_instance, struct pt_regs *regs);
488 STATIC void bcm5700_tasklet(unsigned long data);
490 STATIC int bcm5700_close(struct net_device *dev);
491 STATIC struct net_device_stats *bcm5700_get_stats(struct net_device *dev);
492 STATIC int bcm5700_ioctl(struct net_device *dev, struct ifreq *rq, int cmd);
493 STATIC void bcm5700_do_rx_mode(struct net_device *dev);
494 STATIC void bcm5700_set_rx_mode(struct net_device *dev);
495 STATIC int bcm5700_set_mac_addr(struct net_device *dev, void *p);
496 #if T3_JUMBO_RCV_RCB_ENTRY_COUNT
497 STATIC int bcm5700_change_mtu(struct net_device *dev, int new_mtu);
499 #ifdef BCM_NAPI_RXPOLL
500 STATIC int bcm5700_poll(struct net_device *dev, int *budget);
502 STATIC int replenish_rx_buffers(PUM_DEVICE_BLOCK pUmDevice, int max);
503 STATIC int bcm5700_freemem(struct net_device *dev);
505 STATIC int bcm5700_freemem2(UM_DEVICE_BLOCK *pUmDevice, int index);
508 #ifndef BCM_NAPI_RXPOLL
509 STATIC int bcm5700_adapt_coalesce(PUM_DEVICE_BLOCK pUmDevice);
512 STATIC void bcm5700_set_vlan_mode(UM_DEVICE_BLOCK *pUmDevice);
513 STATIC int bcm5700_init_counters(PUM_DEVICE_BLOCK pUmDevice);
515 STATIC void bcm5700_vlan_rx_register(struct net_device *dev, struct vlan_group *vlgrp);
516 STATIC void bcm5700_vlan_rx_kill_vid(struct net_device *dev, uint16_t vid);
518 void bcm5700_shutdown(UM_DEVICE_BLOCK *pUmDevice);
519 void bcm5700_free_remaining_rx_bufs(UM_DEVICE_BLOCK *pUmDevice);
520 void bcm5700_validate_param_range(UM_DEVICE_BLOCK *pUmDevice, int *param,
521 char *param_name, int min, int max, int deflt);
523 #if defined(HAVE_POLL_CONTROLLER) || defined(CONFIG_NET_POLL_CONTROLLER)
524 STATIC void poll_bcm5700(struct net_device *dev);
527 /* A list of all installed bcm5700 devices. */
528 static struct net_device *root_tigon3_dev = NULL;
/* 32-bit ioctl translation for 64-bit kernels (NICE private ioctls). */
530 #if defined(CONFIG_SPARC64) || defined(CONFIG_X86_64) ||defined(CONFIG_PPC64)
533 #if (LINUX_VERSION_CODE < 0x20500)
534 extern int register_ioctl32_conversion(unsigned int cmd,
535 int (*handler)(unsigned int, unsigned int, unsigned long,
537 int unregister_ioctl32_conversion(unsigned int cmd);
539 #include <linux/ioctl32.h>
542 #define BCM_IOCTL32 1
544 atomic_t bcm5700_load_count = ATOMIC_INIT(0);
/* Copy a 32-bit userland nice_req into the native 64-bit form, locate the
   named interface on the driver's device list, dispatch bcm5700_ioctl(),
   then copy results back to userland. */
547 bcm5700_ioctl32(unsigned int fd, unsigned int cmd, unsigned long arg,
551 struct net_device *tmp_dev = root_tigon3_dev;
553 struct nice_req* nrq;
554 struct ifreq_nice32 {
562 if (!capable(CAP_NET_ADMIN))
565 if (mm_copy_from_user(&nrq32, (char *) arg, 32))
568 memcpy(rq.ifr_name, nrq32.ifnr_name, 16);
570 nrq = (struct nice_req*) &rq.ifr_ifru;
571 nrq->cmd = nrq32.cmd;
572 if (nrq->cmd == NICE_CMD_GET_STATS_BLOCK) {
/* Widen the 32-bit user address into a 64-bit kernel pointer. */
573 nrq->nrq_stats_useraddr = (void *) ((__u64) nrq32.nrq1);
574 nrq->nrq_stats_size = nrq32.nrq2;
577 memcpy(&nrq->nrq_speed, &nrq32.nrq1, 12);
/* Walk the device list looking for the interface named in the request. */
580 if (strcmp(rq.ifr_name, tmp_dev->name) == 0) {
581 ret = bcm5700_ioctl(tmp_dev, &rq, cmd);
583 if (nrq->cmd == NICE_CMD_GET_STATS_BLOCK)
586 memcpy(&nrq32.nrq1, &nrq->nrq_speed, 12);
587 if (mm_copy_to_user((char *) arg, &nrq32, 32))
592 tmp_dev = ((UM_DEVICE_BLOCK *)(tmp_dev->priv))->next_module;
596 #endif /* NICE_SUPPORT */
678 /* indexed by board_t, above */
/* Human-readable adapter names; entry order must match the board_t enum
   used as driver_data in bcm5700_pci_tbl below. */
681 } board_info[] __devinitdata = {
682 { "Broadcom BCM5700 1000Base-T" },
683 { "Broadcom BCM5700 1000Base-SX" },
684 { "Broadcom BCM5700 1000Base-SX" },
685 { "Broadcom BCM5700 1000Base-T" },
686 { "Broadcom BCM5700" },
687 { "Broadcom BCM5701 1000Base-T" },
688 { "Broadcom BCM5701 1000Base-T" },
689 { "Broadcom BCM5701 1000Base-T" },
690 { "Broadcom BCM5701 1000Base-SX" },
691 { "Broadcom BCM5701 1000Base-T" },
692 { "Broadcom BCM5701 1000Base-T" },
693 { "Broadcom BCM5701" },
694 { "Broadcom BCM5702 1000Base-T" },
695 { "Broadcom BCM5703 1000Base-T" },
696 { "Broadcom BCM5703 1000Base-SX" },
697 { "Broadcom B5703 1000Base-SX" },
698 { "3Com 3C996 10/100/1000 Server NIC" },
699 { "3Com 3C996 10/100/1000 Server NIC" },
700 { "3Com 3C996 Gigabit Fiber-SX Server NIC" },
701 { "3Com 3C996 Gigabit Fiber-SX Server NIC" },
702 { "3Com 3C996B Gigabit Server NIC" },
703 { "3Com 3C997 Gigabit Server NIC" },
704 { "3Com 3C997 Gigabit Fiber-SX Server NIC" },
705 { "3Com 3C1000 Gigabit NIC" },
706 { "3Com 3C1000B-T 10/100/1000 PCI" },
707 { "3Com 3C940 Gigabit LOM (21X21)" },
708 { "3Com 3C942 Gigabit LOM (31X31)" },
709 { "3Com 3C998-T Dual Port 10/100/1000 PCI-X Server NIC" },
710 { "3Com 3C998-SX Dual Port 1000-SX PCI-X Server NIC" },
711 { "3Com 3C999-T Quad Port 10/100/1000 PCI-X Server NIC" },
712 { "HP NC6770 Gigabit Server Adapter" },
713 { "NC1020 HP ProLiant Gigabit Server Adapter 32 PCI" },
714 { "HP ProLiant NC 150T PCI 4-port Gigabit Combo Switch Adapter" },
715 { "HP NC7760 Gigabit Server Adapter" },
716 { "HP NC7761 Gigabit Server Adapter" },
717 { "HP NC7770 Gigabit Server Adapter" },
718 { "HP NC7771 Gigabit Server Adapter" },
719 { "HP NC7780 Gigabit Server Adapter" },
720 { "HP NC7781 Gigabit Server Adapter" },
721 { "HP NC7772 Gigabit Server Adapter" },
722 { "HP NC7782 Gigabit Server Adapter" },
723 { "HP NC7783 Gigabit Server Adapter" },
724 { "HP ProLiant NC 320T PCI Express Gigabit Server Adapter" },
725 { "HP ProLiant NC 320i PCI Express Gigabit Server Adapter" },
726 { "HP NC325i Integrated Dual Port PCI Express Gigabit Server Adapter" },
727 { "HP NC324i Integrated Dual Port PCI Express Gigabit Server Adapter" },
728 { "HP NC326i Integrated Dual Port PCI Express Gigabit Server Adapter" },
729 { "Broadcom BCM5704 CIOB-E 1000Base-T" },
730 { "Broadcom BCM5704 1000Base-T" },
731 { "Broadcom BCM5704 1000Base-SX" },
732 { "Broadcom BCM5705 1000Base-T" },
733 { "Broadcom BCM5705M 1000Base-T" },
734 { "Broadcom 570x 10/100 Integrated Controller" },
735 { "Broadcom BCM5901 100Base-TX" },
736 { "Broadcom NetXtreme Gigabit Ethernet for hp" },
737 { "Broadcom BCM5788 NetLink 1000Base-T" },
738 { "Broadcom BCM5789 NetLink 1000Base-T PCI Express" },
739 { "Broadcom BCM5750 1000Base-T PCI" },
740 { "Broadcom BCM5750M 1000Base-T PCI" },
741 { "Broadcom BCM5720 1000Base-T PCI" },
742 { "Broadcom BCM5751 1000Base-T PCI Express" },
743 { "Broadcom BCM5751M 1000Base-T PCI Express" },
744 { "Broadcom BCM5751F 100Base-TX PCI Express" },
745 { "Broadcom BCM5721 1000Base-T PCI Express" },
746 { "Broadcom BCM5753 1000Base-T PCI Express" },
747 { "Broadcom BCM5753M 1000Base-T PCI Express" },
748 { "Broadcom BCM5753F 100Base-TX PCI Express" },
749 { "Broadcom BCM5781 NetLink 1000Base-T PCI Express" },
750 { "Broadcom BCM5752 1000Base-T PCI Express" },
751 { "Broadcom BCM5752M 1000Base-T PCI Express" },
752 { "Broadcom BCM5714 1000Base-T " },
753 { "Broadcom BCM5780 1000Base-T" },
754 { "Broadcom BCM5780S 1000Base-SX" },
755 { "Broadcom BCM5715 1000Base-T " },
756 { "Broadcom BCM5903M Gigabit Ethernet " },
/* PCI probe table: {vendor, device, subvendor, subdevice, class,
   class_mask, board_t index into board_info}.  Rows with specific
   subsystem IDs appear before the PCI_ANY_ID catch-all for the same
   device ID, so keep that ordering when adding entries. */
760 static struct pci_device_id bcm5700_pci_tbl[] __devinitdata = {
761 {0x14e4, 0x1644, 0x14e4, 0x1644, 0, 0, BCM5700A6 },
762 {0x14e4, 0x1644, 0x14e4, 0x2, 0, 0, BCM5700T6 },
763 {0x14e4, 0x1644, 0x14e4, 0x3, 0, 0, BCM5700A9 },
764 {0x14e4, 0x1644, 0x14e4, 0x4, 0, 0, BCM5700T9 },
765 {0x14e4, 0x1644, 0x1028, 0xd1, 0, 0, BCM5700 },
766 {0x14e4, 0x1644, 0x1028, 0x0106, 0, 0, BCM5700 },
767 {0x14e4, 0x1644, 0x1028, 0x0109, 0, 0, BCM5700 },
768 {0x14e4, 0x1644, 0x1028, 0x010a, 0, 0, BCM5700 },
769 {0x14e4, 0x1644, 0x10b7, 0x1000, 0, 0, TC996T },
770 {0x14e4, 0x1644, 0x10b7, 0x1001, 0, 0, TC996ST },
771 {0x14e4, 0x1644, 0x10b7, 0x1002, 0, 0, TC996SSX },
772 {0x14e4, 0x1644, 0x10b7, 0x1003, 0, 0, TC997T },
773 {0x14e4, 0x1644, 0x10b7, 0x1005, 0, 0, TC997SX },
774 {0x14e4, 0x1644, 0x10b7, 0x1008, 0, 0, TC942BR01 },
775 {0x14e4, 0x1644, PCI_ANY_ID, PCI_ANY_ID, 0, 0, BCM5700 },
776 {0x14e4, 0x1645, 0x14e4, 1, 0, 0, BCM5701A5 },
777 {0x14e4, 0x1645, 0x14e4, 5, 0, 0, BCM5701T1 },
778 {0x14e4, 0x1645, 0x14e4, 6, 0, 0, BCM5701T8 },
779 {0x14e4, 0x1645, 0x14e4, 7, 0, 0, BCM5701A7 },
780 {0x14e4, 0x1645, 0x14e4, 8, 0, 0, BCM5701A10 },
781 {0x14e4, 0x1645, 0x14e4, 0x8008, 0, 0, BCM5701A12 },
782 {0x14e4, 0x1645, 0x0e11, 0xc1, 0, 0, NC6770 },
783 {0x14e4, 0x1645, 0x0e11, 0x7c, 0, 0, NC7770 },
784 {0x14e4, 0x1645, 0x0e11, 0x85, 0, 0, NC7780 },
785 {0x14e4, 0x1645, 0x1028, 0x0121, 0, 0, BCM5701 },
786 {0x14e4, 0x1645, 0x10b7, 0x1004, 0, 0, TC996SX },
787 {0x14e4, 0x1645, 0x10b7, 0x1006, 0, 0, TC996BT },
788 {0x14e4, 0x1645, 0x10b7, 0x1007, 0, 0, TC1000T },
789 {0x14e4, 0x1645, 0x10b7, 0x1008, 0, 0, TC940BR01 },
790 {0x14e4, 0x1645, PCI_ANY_ID, PCI_ANY_ID, 0, 0, BCM5701 },
791 {0x14e4, 0x1646, 0x14e4, 0x8009, 0, 0, BCM5702 },
792 {0x14e4, 0x1646, PCI_ANY_ID, PCI_ANY_ID, 0, 0, BCM5702 },
793 {0x14e4, 0x16a6, 0x14e4, 0x8009, 0, 0, BCM5702 },
794 {0x14e4, 0x16a6, 0x14e4, 0x000c, 0, 0, BCM5702 },
795 {0x14e4, 0x16a6, 0x0e11, 0xbb, 0, 0, NC7760 },
796 {0x14e4, 0x16a6, PCI_ANY_ID, PCI_ANY_ID, 0, 0, BCM5702 },
797 {0x14e4, 0x16c6, 0x10b7, 0x1100, 0, 0, TC1000BT },
798 {0x14e4, 0x16c6, PCI_ANY_ID, PCI_ANY_ID, 0, 0, BCM5702 },
799 {0x14e4, 0x1647, 0x14e4, 0x0009, 0, 0, BCM5703 },
800 {0x14e4, 0x1647, 0x14e4, 0x000a, 0, 0, BCM5703A31 },
801 {0x14e4, 0x1647, 0x14e4, 0x000b, 0, 0, BCM5703 },
802 {0x14e4, 0x1647, 0x14e4, 0x800a, 0, 0, BCM5703 },
803 {0x14e4, 0x1647, PCI_ANY_ID, PCI_ANY_ID, 0, 0, BCM5703 },
804 {0x14e4, 0x16a7, 0x14e4, 0x0009, 0, 0, BCM5703 },
805 {0x14e4, 0x16a7, 0x14e4, 0x000a, 0, 0, BCM5703A31 },
806 {0x14e4, 0x16a7, 0x14e4, 0x000b, 0, 0, BCM5703 },
807 {0x14e4, 0x16a7, 0x14e4, 0x800a, 0, 0, BCM5703 },
808 {0x14e4, 0x16a7, 0x0e11, 0xca, 0, 0, NC7771 },
809 {0x14e4, 0x16a7, 0x0e11, 0xcb, 0, 0, NC7781 },
810 {0x14e4, 0x16a7, 0x1014, 0x0281, 0, 0, BCM5703ARBUCKLE },
811 {0x14e4, 0x16a7, PCI_ANY_ID, PCI_ANY_ID, 0, 0, BCM5703 },
812 {0x14e4, 0x16c7, 0x14e4, 0x000a, 0, 0, BCM5703A31 },
813 {0x14e4, 0x16c7, 0x0e11, 0xca, 0, 0, NC7771 },
814 {0x14e4, 0x16c7, 0x0e11, 0xcb, 0, 0, NC7781 },
815 {0x14e4, 0x16c7, PCI_ANY_ID, PCI_ANY_ID, 0, 0, BCM5703 },
816 {0x14e4, 0x1648, 0x0e11, 0xcf, 0, 0, NC7772 },
817 {0x14e4, 0x1648, 0x0e11, 0xd0, 0, 0, NC7782 },
818 {0x14e4, 0x1648, 0x0e11, 0xd1, 0, 0, NC7783 },
819 {0x14e4, 0x1648, 0x10b7, 0x2000, 0, 0, TC998T },
820 {0x14e4, 0x1648, 0x10b7, 0x3000, 0, 0, TC999T },
821 {0x14e4, 0x1648, 0x1166, 0x1648, 0, 0, BCM5704CIOBE },
822 {0x14e4, 0x1648, PCI_ANY_ID, PCI_ANY_ID, 0, 0, BCM5704 },
823 {0x14e4, 0x1649, PCI_ANY_ID, PCI_ANY_ID, 0, 0, BCM5704S },
824 {0x14e4, 0x16a8, 0x14e4, 0x16a8, 0, 0, BCM5704S },
825 {0x14e4, 0x16a8, 0x10b7, 0x2001, 0, 0, TC998SX },
826 {0x14e4, 0x16a8, PCI_ANY_ID, PCI_ANY_ID, 0, 0, BCM5704S },
827 {0x14e4, 0x1653, 0x0e11, 0x00e3, 0, 0, NC7761 },
828 {0x14e4, 0x1653, PCI_ANY_ID, PCI_ANY_ID, 0, 0, BCM5705 },
829 {0x14e4, 0x1654, 0x0e11, 0x00e3, 0, 0, NC7761 },
830 {0x14e4, 0x1654, 0x103c, 0x3100, 0, 0, NC1020 },
831 {0x14e4, 0x1654, 0x103c, 0x3226, 0, 0, NC150T },
832 {0x14e4, 0x1654, PCI_ANY_ID, PCI_ANY_ID, 0, 0, BCM5705 },
833 {0x14e4, 0x165d, PCI_ANY_ID, PCI_ANY_ID, 0, 0, BCM5705M },
834 {0x14e4, 0x165e, PCI_ANY_ID, PCI_ANY_ID, 0, 0, BCM5705M },
835 {0x14e4, 0x166e, PCI_ANY_ID, PCI_ANY_ID, 0, 0, BCM5705F },
836 {0x14e4, 0x1696, PCI_ANY_ID, PCI_ANY_ID, 0, 0, BCM5782 },
837 {0x14e4, 0x169c, PCI_ANY_ID, PCI_ANY_ID, 0, 0, BCM5788 },
838 {0x14e4, 0x169d, PCI_ANY_ID, PCI_ANY_ID, 0, 0, BCM5789 },
839 {0x14e4, 0x170d, PCI_ANY_ID, PCI_ANY_ID, 0, 0, BCM5901 },
840 {0x14e4, 0x170e, PCI_ANY_ID, PCI_ANY_ID, 0, 0, BCM5901 },
841 {0x14e4, 0x1676, PCI_ANY_ID, PCI_ANY_ID, 0, 0, BCM5750 },
842 {0x14e4, 0x167c, PCI_ANY_ID, PCI_ANY_ID, 0, 0, BCM5750M },
843 {0x14e4, 0x1677, PCI_ANY_ID, PCI_ANY_ID, 0, 0, BCM5751 },
844 {0x14e4, 0x167d, PCI_ANY_ID, PCI_ANY_ID, 0, 0, BCM5751M },
845 {0x14e4, 0x167e, PCI_ANY_ID, PCI_ANY_ID, 0, 0, BCM5751F },
846 {0x14e4, 0x1658, PCI_ANY_ID, PCI_ANY_ID, 0, 0, BCM5720 },
847 {0x14e4, 0x1659, 0x103c, 0x7031, 0, 0, NC320T },
848 {0x14e4, 0x1659, 0x103c, 0x7032, 0, 0, NC320T },
849 {0x14e4, 0x166a, 0x103c, 0x7035, 0, 0, NC325I },
850 {0x14e4, 0x166b, 0x103c, 0x7036, 0, 0, NC325I },
851 {0x14e4, 0x1668, 0x103c, 0x7039, 0, 0, NC324I },
852 {0x14e4, 0x1669, 0x103c, 0x703a, 0, 0, NC324I },
853 {0x14e4, 0x1678, 0x103c, 0x703e, 0, 0, NC326I },
854 {0x14e4, 0x1679, 0x103c, 0x703c, 0, 0, NC326I },
855 {0x14e4, 0x1659, PCI_ANY_ID, PCI_ANY_ID, 0, 0, BCM5721 },
856 {0x14e4, 0x16f7, PCI_ANY_ID, PCI_ANY_ID, 0, 0, BCM5753 },
857 {0x14e4, 0x16fd, PCI_ANY_ID, PCI_ANY_ID, 0, 0, BCM5753M },
858 {0x14e4, 0x16fe, PCI_ANY_ID, PCI_ANY_ID, 0, 0, BCM5753F },
859 {0x14e4, 0x16dd, PCI_ANY_ID, PCI_ANY_ID, 0, 0, BCM5781 },
860 {0x14e4, 0x1600, PCI_ANY_ID, PCI_ANY_ID, 0, 0, BCM5752 },
861 {0x14e4, 0x1601, PCI_ANY_ID, PCI_ANY_ID, 0, 0, BCM5752M },
862 {0x14e4, 0x1668, PCI_ANY_ID, PCI_ANY_ID, 0, 0, BCM5714 },
863 {0x14e4, 0x166a, PCI_ANY_ID, PCI_ANY_ID, 0, 0, BCM5780 },
864 {0x14e4, 0x166b, PCI_ANY_ID, PCI_ANY_ID, 0, 0, BCM5780S },
865 {0x14e4, 0x1678, PCI_ANY_ID, PCI_ANY_ID, 0, 0, BCM5715 },
866 {0x14e4, 0x16ff, PCI_ANY_ID, PCI_ANY_ID, 0, 0, BCM5903M },
870 MODULE_DEVICE_TABLE(pci, bcm5700_pci_tbl);
/* /proc support hooks, implemented in a separate source file. */
873 extern int bcm5700_proc_create(void);
874 extern int bcm5700_proc_create_dev(struct net_device *dev);
875 extern int bcm5700_proc_remove_dev(struct net_device *dev);
876 extern int bcm5700_proc_remove_notifier(void);
/* ID table used with pci_dev_present() to detect an AMD-762 northbridge,
   which needs the posted-write workaround applied in bcm5700_init_board. */
879 #if (LINUX_VERSION_CODE >= 0x2060a)
880 static struct pci_device_id pci_AMD762id[]={
881 { PCI_DEVICE(PCI_VENDOR_ID_AMD,
882 PCI_DEVICE_ID_AMD_FE_GATE_700C) },
887 /*******************************************************************************
888 *******************************************************************************
/* Returns the netdev feature flag to advertise for checksum offload;
   unconditionally IP checksum offload regardless of chip revision. */
891 int get_csum_flag(LM_UINT32 ChipRevId)
893 return NETIF_F_IP_CSUM;
896 /*******************************************************************************
897 *******************************************************************************
899 This function returns true if the device passed to it is attached to an
900 ICH-ICH4. If the chip is not attached to an ICH, or is attached to an ICH5
901 or newer, it returns false.
903 This function determines which bridge it is attached to by scanning the pci
904 bus looking for bridge chips (hdr_type=1). When a bridge chip is detected,
905 the bridge's subordinate's secondary bus number is compared with this
906 device's bus number. If they match, then the device is attached to this
907 bridge. The bridge's device id is compared to a list of known device ids for
908 ICH-ICH4. Since many older ICH's (ICH2-ICH7) share the same device id, the
909 chip revision must also be checked to determine if the chip is older than an
912 To scan the bus, one of two functions is used depending on the kernel
913 version. For 2.4 kernels, the pci_find_device function is used. This
914 function has been deprecated in the 2.6 kernel and replaced with the
915 function pci_get_device. The macro walk_pci_bus determines which function to
916 use when the driver is built.
/* Bus-walk abstraction: 2.6.10+ uses the refcounted pci_get_device()
   (unwalk_pci_bus drops the reference); older kernels use
   pci_find_device(), which needs no cleanup. */
919 #if (LINUX_VERSION_CODE >= 0x2060a)
920 #define walk_pci_bus(d) while ((d = pci_get_device( \
921 PCI_ANY_ID, PCI_ANY_ID, d)) != NULL)
923 #define unwalk_pci_bus(d) pci_dev_put(d)
926 #define walk_pci_bus(d) while ((d = pci_find_device( \
927 PCI_ANY_ID, PCI_ANY_ID, d)) != NULL)
928 #define unwalk_pci_bus(d)
/* Revisions at or above this value are ICH5 or newer (no workaround). */
932 #define ICH5_CHIP_VERSION 0xc0
/* Known Intel ICH-ICH4 bridge IDs; scanned until vendor == 0 by
   attached_to_ICH4_or_older(). */
934 static struct pci_device_id pci_ICHtable[] = {
935 {0x8086, 0x2418}, /* PCI_DEVICE_ID_INTEL_82801AA_8 */
936 {0x8086, 0x2428}, /* PCI_DEVICE_ID_INTEL_82801AB_8 */
937 {0x8086, 0x244e}, /* PCI_DEVICE_ID_INTEL_82801BA_6 */
938 {0x8086, 0x2448}, /* PCI_DEVICE_ID_INTEL_82801BA_11 */
/* Returns nonzero when pdev sits behind an Intel ICH-ICH4 bridge (see the
   block comment above for the detection strategy). */
942 int attached_to_ICH4_or_older( struct pci_dev *pdev)
944 struct pci_dev *tmp_pdev = NULL;
945 struct pci_device_id *ich_table;
/* Find the bridge (header type 1) whose secondary bus is ours. */
948 walk_pci_bus (tmp_pdev) {
949 if ((tmp_pdev->hdr_type == 1) &&
950 (tmp_pdev->subordinate != NULL) &&
951 (tmp_pdev->subordinate->secondary == pdev->bus->number)) {
/* Match the bridge against the known ICH-ICH4 ID table. */
953 ich_table = pci_ICHtable;
955 while (ich_table->vendor) {
956 if ((ich_table->vendor == tmp_pdev->vendor) &&
957 (ich_table->device == tmp_pdev->device)) {
958 pci_read_config_byte( tmp_pdev,
960 PCI_REVISION_ID, &chip_rev);
/* Revision below the ICH5 cutoff: workaround required. */
962 if (chip_rev < ICH5_CHIP_VERSION) {
963 unwalk_pci_bus( tmp_pdev);
/* One-time board initialization: allocate the net_device, enable and map
   the PCI device, choose DMA masks, apply chipset workarounds, and query
   the chip through LM_GetAdapterInfo(). */
974 static int __devinit bcm5700_init_board(struct pci_dev *pdev,
975 struct net_device **dev_out,
978 struct net_device *dev;
979 PUM_DEVICE_BLOCK pUmDevice;
980 PLM_DEVICE_BLOCK pDevice;
985 /* dev zeroed in init_etherdev */
986 #if (LINUX_VERSION_CODE >= 0x20600)
987 dev = alloc_etherdev(sizeof(*pUmDevice));
989 dev = init_etherdev(NULL, sizeof(*pUmDevice));
992 printk (KERN_ERR "%s: unable to alloc new ethernet\n",
996 SET_MODULE_OWNER(dev);
997 #if (LINUX_VERSION_CODE >= 0x20600)
998 SET_NETDEV_DEV(dev, &pdev->dev);
1000 pUmDevice = (PUM_DEVICE_BLOCK) dev->priv;
1002 /* enable device (incl. PCI PM wakeup), and bus-mastering */
1003 rc = pci_enable_device (pdev);
1007 rc = pci_request_regions(pdev, bcm5700_driver);
1011 pci_set_master(pdev);
/* Prefer 64-bit DMA (DAC) when supported, else fall back to 32-bit. */
1013 if (pci_set_dma_mask(pdev, BCM_64BIT_DMA_MASK) == 0) {
1014 pUmDevice->using_dac = 1;
1015 if (pci_set_consistent_dma_mask(pdev, BCM_64BIT_DMA_MASK) != 0)
1017 printk(KERN_ERR "pci_set_consistent_dma_mask failed\n");
1018 pci_release_regions(pdev);
1022 else if (pci_set_dma_mask(pdev, BCM_32BIT_DMA_MASK) == 0) {
1023 pUmDevice->using_dac = 0;
1026 printk(KERN_ERR "System does not support DMA\n");
1027 pci_release_regions(pdev);
/* Link this device onto the driver's global list (root_tigon3_dev). */
1031 pUmDevice->dev = dev;
1032 pUmDevice->pdev = pdev;
1033 pUmDevice->mem_list_num = 0;
1034 pUmDevice->next_module = root_tigon3_dev;
1035 pUmDevice->index = board_idx;
1036 root_tigon3_dev = dev;
1038 spin_lock_init(&pUmDevice->global_lock);
1040 spin_lock_init(&pUmDevice->undi_lock);
1042 spin_lock_init(&pUmDevice->phy_lock);
1044 pDevice = (PLM_DEVICE_BLOCK) pUmDevice;
1046 pDevice->FunctNum = PCI_FUNC(pUmDevice->pdev->devfn);
/* Clamp the user-supplied MTU to [1500, 9000] before using it. */
1048 #if T3_JUMBO_RCV_RCB_ENTRY_COUNT
1049 if (board_idx < MAX_UNITS) {
1050 bcm5700_validate_param_range(pUmDevice, &mtu[board_idx], "mtu", 1500, 9000, 1500);
1051 dev->mtu = mtu[board_idx];
1054 /* If we're attached to an ICH4 or older, we may need to implement a
1055 workaround for special cycles described in the BCM5704/357 Errata.
1056 This workaround is only needed on 5703-A1/2 or 5704-A0 chips that
1057 are attached to a PCI-X bus. The NIC chip type and bus are checked
1058 later in the driver and the flag cleared if the workaround is not
1059 needed. The workaround is enabled by setting the flag UNDI_FIX_FLAG
1060 which causes the driver to use indirect pci-config cycles when
1061 accessing the low-priority mailboxes (MB_REG_WR/RD).
1064 if (attached_to_ICH4_or_older( pdev)) {
1065 pDevice->Flags |= UNDI_FIX_FLAG;
/* AMD-762 northbridge workaround: flush posted writes and avoid
   NIC-memory send BDs when that bridge is present. */
1068 #if (LINUX_VERSION_CODE >= 0x2060a)
1069 if(pci_dev_present(pci_AMD762id)){
1070 pDevice->Flags |= FLUSH_POSTED_WRITE_FLAG;
1071 pDevice->Flags &= ~NIC_SEND_BD_FLAG;
1074 if (pci_find_device(0x1022, 0x700c, NULL)) {
1075 /* AMD762 writes I/O out of order */
1076 /* Setting bit 1 in 762's register 0x4C still doesn't work */
1078 pDevice->Flags |= FLUSH_POSTED_WRITE_FLAG;
1079 pDevice->Flags &= ~NIC_SEND_BD_FLAG;
1082 if (LM_GetAdapterInfo(pDevice) != LM_STATUS_SUCCESS) {
/* Chips without jumbo support fall back to the standard 1500-byte MTU. */
1087 if ( (pDevice->Flags & JUMBO_CAPABLE_FLAG) == 0 ) {
1088 if (dev->mtu > 1500) {
1090 printk(KERN_WARNING "%s-%d: Jumbo mtu sizes not supported, using mtu=1500\n", bcm5700_driver, pUmDevice->index);
1094 pUmDevice->do_global_lock = 0;
1095 if (T3_ASIC_REV(pUmDevice->lm_dev.ChipRevId) == T3_ASIC_REV_5700) {
1096 /* The 5700 chip works best without interleaved register */
1097 /* accesses on certain machines. */
1098 pUmDevice->do_global_lock = 1;
/* 5701 on a plain PCI (non PCI-X) bus gets no rx-buffer offset; everything
   else uses a 2-byte offset — presumably to align the IP header, as with
   the usual NET_IP_ALIGN practice; confirm against the rx path. */
1101 if ((T3_ASIC_REV(pUmDevice->lm_dev.ChipRevId) == T3_ASIC_REV_5701) &&
1102 ((pDevice->PciState & T3_PCI_STATE_NOT_PCI_X_BUS) == 0)) {
1104 pUmDevice->rx_buf_align = 0;
1107 pUmDevice->rx_buf_align = 2;
1109 dev->mem_start = pci_resource_start(pdev, 0);
1110 dev->mem_end = dev->mem_start + sizeof(T3_STD_MEM_MAP);
1111 dev->irq = pdev->irq;
/* Error path: release everything acquired above. */
1117 pci_release_regions(pdev);
1118 bcm5700_freemem(dev);
1121 #if (LINUX_VERSION_CODE < 0x020600)
1122 unregister_netdev(dev);
/*
 * bcm5700_print_ver() - emit the one-time driver load banner: driver name,
 * an optional "NICE extension" note, then version and release date.
 * NOTE(review): preprocessor guards and braces are elided in this view of
 * the file; the NICE printk is presumably conditionally compiled.
 */
1130 static int __devinit
1131 bcm5700_print_ver(void)
1133 	printk(KERN_INFO "Broadcom Gigabit Ethernet Driver %s ",
1136 		printk("with Broadcom NIC Extension (NICE) ");
1138 	printk("ver. %s %s\n", bcm5700_version, bcm5700_date);
/*
 * bcm5700_init_one() - PCI probe callback for one adapter.
 * Prints the banner once, initializes the board via bcm5700_init_board(),
 * wires up the classic (pre-net_device_ops) netdev entry points, registers
 * the net_device, logs the detected PHY/SerDes and enabled offload
 * features, applies an AMD762 northbridge PCI-ordering workaround, and
 * records whether the platform has a known-broken MSI chipset.
 * NOTE(review): many original lines (braces, #else/#endif, error returns)
 * are elided from this view; comments below describe only what is visible.
 */
1142 static int __devinit
1143 bcm5700_init_one(struct pci_dev *pdev,
1144 	const struct pci_device_id *ent)
1146 	struct net_device *dev = NULL;
1147 	PUM_DEVICE_BLOCK pUmDevice;
1148 	PLM_DEVICE_BLOCK pDevice;
	/* Statics persist across probe calls: banner prints once, and each
	   probed board advances the shared index. */
1150 	static int board_idx = -1;
1151 	static int printed_version = 0;
1152 	struct pci_dev *pci_dev;
1156 	if (!printed_version) {
1157 		bcm5700_print_ver();
1159 		bcm5700_proc_create();
1161 		printed_version = 1;
1164 	i = bcm5700_init_board(pdev, &dev, board_idx);
	/* First device to load registers the 32-bit ioctl translation. */
1173 	if (atomic_read(&bcm5700_load_count) == 0) {
1174 		register_ioctl32_conversion(SIOCNICE, bcm5700_ioctl32);
1176 	atomic_inc(&bcm5700_load_count);
	/* Hook up the legacy net_device function pointers. */
1178 	dev->open = bcm5700_open;
1179 	dev->hard_start_xmit = bcm5700_start_xmit;
1180 	dev->stop = bcm5700_close;
1181 	dev->get_stats = bcm5700_get_stats;
1182 	dev->set_multicast_list = bcm5700_set_rx_mode;
1183 	dev->do_ioctl = bcm5700_ioctl;
1184 	dev->set_mac_address = &bcm5700_set_mac_addr;
1185 #if T3_JUMBO_RCV_RCB_ENTRY_COUNT
1186 	dev->change_mtu = &bcm5700_change_mtu;
1188 #if (LINUX_VERSION_CODE >= 0x20400)
1189 	dev->tx_timeout = bcm5700_reset;
1190 	dev->watchdog_timeo = TX_TIMEOUT;
1193 	dev->vlan_rx_register = &bcm5700_vlan_rx_register;
1194 	dev->vlan_rx_kill_vid = &bcm5700_vlan_rx_kill_vid;
1196 #ifdef BCM_NAPI_RXPOLL
1197 	dev->poll = bcm5700_poll;
1201 	pUmDevice = (PUM_DEVICE_BLOCK) dev->priv;
1202 	pDevice = (PLM_DEVICE_BLOCK) pUmDevice;
1204 	dev->base_addr = pci_resource_start(pdev, 0);
1205 	dev->irq = pdev->irq;
1206 #if defined(HAVE_POLL_CONTROLLER) || defined(CONFIG_NET_POLL_CONTROLLER)
1207 	dev->poll_controller = poll_bcm5700;
1210 #if (LINUX_VERSION_CODE >= 0x20600)
1211 	if ((i = register_netdev(dev))) {
1212 		printk(KERN_ERR "%s: Cannot register net device\n",
		/* Registration failed: unmap registers, release BARs and
		   free driver memory before bailing out. */
1214 		if (pUmDevice->lm_dev.pMappedMemBase)
1215 			iounmap(pUmDevice->lm_dev.pMappedMemBase);
1216 		pci_release_regions(pdev);
1217 		bcm5700_freemem(dev);
1224 	pci_set_drvdata(pdev, dev);
1226 	memcpy(dev->dev_addr, pDevice->NodeAddress, 6);
	/* NOTE(review): the trailing comma operator below joins this
	   statement with the printk; harmless here but reads like a typo
	   for a semicolon — confirm against upstream source. */
1227 	pUmDevice->name = board_info[ent->driver_data].name,
1228 	printk(KERN_INFO "%s: %s found at mem %lx, IRQ %d, ",
1229 		dev->name, pUmDevice->name, dev->base_addr,
1231 	printk("node addr ");
1232 	for (i = 0; i < 6; i++) {
1233 		printk("%2.2x", dev->dev_addr[i]);
	/* Identify the attached PHY/SerDes purely for the boot log, keyed
	   on the masked PHY id (falling back to TBI/fiber flags). */
1237 	printk(KERN_INFO "%s: ", dev->name);
1238 	if ((pDevice->PhyId & PHY_ID_MASK) == PHY_BCM5400_PHY_ID)
1239 		printk("Broadcom BCM5400 Copper ");
1240 	else if ((pDevice->PhyId & PHY_ID_MASK) == PHY_BCM5401_PHY_ID)
1241 		printk("Broadcom BCM5401 Copper ");
1242 	else if ((pDevice->PhyId & PHY_ID_MASK) == PHY_BCM5411_PHY_ID)
1243 		printk("Broadcom BCM5411 Copper ");
1244 	else if (((pDevice->PhyId & PHY_ID_MASK) == PHY_BCM5701_PHY_ID) &&
1245 		!(pDevice->TbiFlags & ENABLE_TBI_FLAG)) {
1246 		printk("Broadcom BCM5701 Integrated Copper ");
1248 	else if ((pDevice->PhyId & PHY_ID_MASK) == PHY_BCM5703_PHY_ID) {
1249 		printk("Broadcom BCM5703 Integrated ");
1250 		if (pDevice->TbiFlags & ENABLE_TBI_FLAG)
1255 	else if ((pDevice->PhyId & PHY_ID_MASK) == PHY_BCM5704_PHY_ID) {
1256 		printk("Broadcom BCM5704 Integrated ");
1257 		if (pDevice->TbiFlags & ENABLE_TBI_FLAG)
1262 	else if (pDevice->PhyFlags & PHY_IS_FIBER){
1263 		if(( pDevice->PhyId & PHY_ID_MASK ) == PHY_BCM5780_PHY_ID)
1264 			printk("Broadcom BCM5780S Integrated Serdes ");
1267 	else if ((pDevice->PhyId & PHY_ID_MASK) == PHY_BCM5705_PHY_ID)
1268 		printk("Broadcom BCM5705 Integrated Copper ");
1269 	else if ((pDevice->PhyId & PHY_ID_MASK) == PHY_BCM5750_PHY_ID)
1270 		printk("Broadcom BCM5750 Integrated Copper ");
1272 	else if ((pDevice->PhyId & PHY_ID_MASK) == PHY_BCM5714_PHY_ID)
1273 		printk("Broadcom BCM5714 Integrated Copper ");
1274 	else if ((pDevice->PhyId & PHY_ID_MASK) == PHY_BCM5780_PHY_ID)
1275 		printk("Broadcom BCM5780 Integrated Copper ");
1277 	else if ((pDevice->PhyId & PHY_ID_MASK) == PHY_BCM5752_PHY_ID)
1278 		printk("Broadcom BCM5752 Integrated Copper ");
1279 	else if ((pDevice->PhyId & PHY_ID_MASK) == PHY_BCM8002_PHY_ID)
1280 		printk("Broadcom BCM8002 SerDes ");
1281 	else if (pDevice->TbiFlags & ENABLE_TBI_FLAG) {
1282 		if (T3_ASIC_REV(pDevice->ChipRevId) == T3_ASIC_REV_5703) {
1283 			printk("Broadcom BCM5703 Integrated SerDes ");
1285 		else if (T3_ASIC_REV(pDevice->ChipRevId) == T3_ASIC_REV_5704) {
1286 			printk("Broadcom BCM5704 Integrated SerDes ");
1289 			printk("Agilent HDMP-1636 SerDes ");
1295 	printk("transceiver found\n");
1297 #if (LINUX_VERSION_CODE >= 0x20400)
	/* Advertise offload features permitted by chip capability and the
	   per-board module parameters. */
1298 	if (scatter_gather[board_idx]) {
1299 		dev->features |= NETIF_F_SG;
1300 		if (pUmDevice->using_dac && !(pDevice->Flags & BCM5788_FLAG))
1301 			dev->features |= NETIF_F_HIGHDMA;
1303 	if ((pDevice->TaskOffloadCap & LM_TASK_OFFLOAD_TX_TCP_CHECKSUM) &&
1304 		tx_checksum[board_idx]) {
1306 		dev->features |= get_csum_flag( pDevice->ChipRevId);
1309 	dev->features |= NETIF_F_HW_VLAN_TX | NETIF_F_HW_VLAN_RX;
1312 	/* On 5714/15/80 chips, Jumbo Frames and TSO cannot both be enabled at
1313 	   the same time. Since only one of these features can be enable at a
1314 	   time, we'll enable only Jumbo Frames and disable TSO when the user
1315 	   tries to enable both.
1317 	dev->features &= ~NETIF_F_TSO;
1319 	if ((pDevice->TaskToOffload & LM_TASK_OFFLOAD_TCP_SEGMENTATION) &&
1320 		(enable_tso[board_idx])) {
1321 		if (T3_ASIC_5714_FAMILY(pDevice->ChipRevId) &&
1322 			(dev->mtu > 1500)) {
1323 			printk(KERN_ALERT "%s: Jumbo Frames and TSO cannot simultaneously be enabled. Jumbo Frames enabled. TSO disabled.\n", dev->name);
1325 			dev->features |= NETIF_F_TSO;
1329 	printk(KERN_INFO "%s: Scatter-gather %s, 64-bit DMA %s, Tx Checksum %s, ",
1331 		(char *) ((dev->features & NETIF_F_SG) ? "ON" : "OFF"),
1332 		(char *) ((dev->features & NETIF_F_HIGHDMA) ? "ON" : "OFF"),
1333 		(char *) ((dev->features & get_csum_flag( pDevice->ChipRevId)) ? "ON" : "OFF"));
	/* 5700 B0 silicon cannot do rx checksum offload. */
1335 	if ((pDevice->ChipRevId != T3_CHIP_ID_5700_B0) &&
1336 		rx_checksum[board_idx])
1337 		printk("Rx Checksum ON");
1339 		printk("Rx Checksum OFF");
1341 	printk(", 802.1Q VLAN ON");
1344 	if (dev->features & NETIF_F_TSO) {
1349 #ifdef BCM_NAPI_RXPOLL
1350 	printk(", NAPI ON");
1355 	bcm5700_proc_create_dev(dev);
1358 	tasklet_init(&pUmDevice->tasklet, bcm5700_tasklet,
1359 		(unsigned long) pUmDevice);
	/* 5704 dual-MAC channel control == 3 means hardware-based teaming,
	   which this OS does not support — warn the administrator. */
1361 	if (T3_ASIC_REV(pDevice->ChipRevId) == T3_ASIC_REV_5704) {
1362 		if ((REG_RD(pDevice, PciCfg.DualMacCtrl) &
1363 			T3_DUAL_MAC_CH_CTRL_MASK) == 3) {
1365 			printk(KERN_WARNING "%s: Device is configured for Hardware Based Teaming which is not supported with this operating system. Please consult the user diagnostic guide to disable Turbo Teaming.\n", dev->name);
	/* AMD762 northbridge (1022:700c): set bit 1 of config reg 0x4c to
	   force PCI ordering compliance if not already set. */
1369 #if (LINUX_VERSION_CODE > 0x20605)
1371 	if ((pci_dev = pci_get_device(0x1022, 0x700c, NULL))) {
1373 	if ((pci_dev = pci_find_device(0x1022, 0x700c, NULL))) {
1377 		/* Found AMD 762 North bridge */
1378 		pci_read_config_dword(pci_dev, 0x4c, &val);
1379 		if ((val & 0x02) == 0) {
1380 			pci_write_config_dword(pci_dev, 0x4c, val | 0x02);
1381 			printk(KERN_INFO "%s: Setting AMD762 Northbridge to enable PCI ordering compliance\n", bcm5700_driver);
1385 #if (LINUX_VERSION_CODE > 0x20605)
1387 	pci_dev_put(pci_dev);
1389 #if defined(CONFIG_PCI_MSI) || defined(CONFIG_PCI_USE_VECTOR)
	/* Bridge 1066:0017 marks a chipset with broken MSI; remember it so
	   bcm5700_open() falls back to INTx. */
1391 	if ((pci_dev = pci_get_device(0x1066, 0x0017, NULL))) {
1392 		bcm_msi_chipset_bug = 1;
1394 	pci_dev_put(pci_dev);
/*
 * bcm5700_remove_one() - PCI remove callback; undoes bcm5700_init_one().
 * Removes the /proc entry, drops the module load count (unregistering the
 * 32-bit ioctl translation when the last device goes away), unregisters
 * the net_device, unmaps the register BAR, releases PCI regions and clears
 * the drvdata pointer.  (Memory freeing lines are elided from this view.)
 */
1402 static void __devexit
1403 bcm5700_remove_one (struct pci_dev *pdev)
1405 	struct net_device *dev = pci_get_drvdata (pdev);
1406 	PUM_DEVICE_BLOCK pUmDevice = (PUM_DEVICE_BLOCK)dev->priv;
1409 	bcm5700_proc_remove_dev(dev);
1412 	atomic_dec(&bcm5700_load_count);
1413 	if (atomic_read(&bcm5700_load_count) == 0)
1414 		unregister_ioctl32_conversion(SIOCNICE);
1416 	unregister_netdev(dev);
1418 	if (pUmDevice->lm_dev.pMappedMemBase)
1419 		iounmap(pUmDevice->lm_dev.pMappedMemBase);
1421 	pci_release_regions(pdev);
1423 #if (LINUX_VERSION_CODE < 0x020600)
1429 	pci_set_drvdata(pdev, NULL);
1433 int b57_test_intr(UM_DEVICE_BLOCK *pUmDevice);
/*
 * bcm5700_open() - netdev open entry point.
 * Sets up timer-tick counters (delayed link indication, adaptive coalesce,
 * optional TBI polling, ASF heartbeat), tries MSI on chips known to
 * support it, requests the IRQ, initializes the adapter via the LM_ layer,
 * programs the MAC address, runs an MSI self-test (falling back to INTx on
 * failure), starts the periodic and statistics timers and wakes the tx
 * queue.  Return-type line and several error paths are elided from this
 * view of the file.
 */
1436 bcm5700_open(struct net_device *dev)
1438 	PUM_DEVICE_BLOCK pUmDevice = (PUM_DEVICE_BLOCK)dev->priv;
1439 	PLM_DEVICE_BLOCK pDevice = (PLM_DEVICE_BLOCK) pUmDevice;
1442 	if (pUmDevice->suspended){
	/* delay for 6 seconds */
1445 	/* delay for 6 seconds */
1446 	pUmDevice->delayed_link_ind = (6 * HZ) / pUmDevice->timer_interval;
1449 #ifndef BCM_NAPI_RXPOLL
1450 	pUmDevice->adaptive_expiry = HZ / pUmDevice->timer_interval;
1454 #ifdef INCLUDE_TBI_SUPPORT
	/* Fiber/TBI links without autoneg interrupts are polled from the
	   timer; 5703 is polled 4x as often. */
1455 	if ((pDevice->TbiFlags & ENABLE_TBI_FLAG) &&
1456 	    (pDevice->TbiFlags & TBI_POLLING_FLAGS)) {
1457 		pUmDevice->poll_tbi_interval = HZ / pUmDevice->timer_interval;
1458 		if (T3_ASIC_REV(pDevice->ChipRevId) == T3_ASIC_REV_5703) {
1459 			pUmDevice->poll_tbi_interval /= 4;
1461 		pUmDevice->poll_tbi_expiry = pUmDevice->poll_tbi_interval;
1464 	/* set this timer for 2 seconds */
1465 	pUmDevice->asf_heartbeat = (2 * HZ) / pUmDevice->timer_interval;
1467 #if defined(CONFIG_PCI_MSI) || defined(CONFIG_PCI_USE_VECTOR)
	/* MSI only on 575X-plus ASICs, excluding revisions with known MSI
	   errata, and only when the chipset itself is not MSI-broken. */
1470 	if ( ( (T3_ASIC_IS_575X_PLUS(pDevice->ChipRevId) ) &&
1471 		(T3_ASIC_REV(pDevice->ChipRevId) != T3_ASIC_REV_5714_A0 ) &&
1472 		(T3_CHIP_REV(pDevice->ChipRevId) != T3_CHIP_REV_5750_AX ) &&
1473 		(T3_CHIP_REV(pDevice->ChipRevId) != T3_CHIP_REV_5750_BX ) ) &&
1474 		!bcm_msi_chipset_bug ){
1476 		if (disable_msi[pUmDevice->index]==1){
1477 			/* do nothing-it's not turned on */
1479 			pDevice->Flags |= USING_MSI_FLAG;
1481 			REG_WR(pDevice, Msi.Mode,  2 );
1483 			rc = pci_enable_msi(pUmDevice->pdev);
			/* pci_enable_msi() failed: revert to INTx mode. */
1486 				pDevice->Flags &= ~ USING_MSI_FLAG;
1487 				REG_WR(pDevice, Msi.Mode, 1 );
1495 	if ((rc= request_irq(pUmDevice->pdev->irq, &bcm5700_interrupt, SA_SHIRQ, dev->name, dev)))
1498 #if defined(CONFIG_PCI_MSI) || defined(CONFIG_PCI_USE_VECTOR)
		/* IRQ request failed while in MSI mode: tear MSI down. */
1500 		if(pDevice->Flags & USING_MSI_FLAG) {
1502 			pci_disable_msi(pUmDevice->pdev);
1503 			pDevice->Flags &= ~USING_MSI_FLAG;
1504 			REG_WR(pDevice, Msi.Mode, 1 );
1511 	pUmDevice->opened = 1;
1512 	if (LM_InitializeAdapter(pDevice) != LM_STATUS_SUCCESS) {
1513 		pUmDevice->opened = 0;
1514 		free_irq(dev->irq, dev);
1515 		bcm5700_freemem(dev);
1519 	bcm5700_set_vlan_mode(pUmDevice);
1520 	bcm5700_init_counters(pUmDevice);
1522 	if (pDevice->Flags & UNDI_FIX_FLAG) {
1523 		printk(KERN_INFO "%s: Using indirect register access\n", dev->name);
	/* An administratively-set MAC differs from the NVRAM one: program
	   it if valid, otherwise fall back to the hardware address. */
1526 	if (memcmp(dev->dev_addr, pDevice->NodeAddress, 6))
1528 		/* Do not use invalid eth addrs: any multicast & all zeros */
1529 		if( is_valid_ether_addr(dev->dev_addr) ){
1530 			LM_SetMacAddress(pDevice, dev->dev_addr);
1534 			printk(KERN_INFO "%s: Invalid administered node address\n",dev->name);
1535 			memcpy(dev->dev_addr, pDevice->NodeAddress, 6);
1539 	if (tigon3_debug > 1)
1540 		printk(KERN_DEBUG "%s: tigon3_open() irq %d.\n", dev->name, dev->irq);
1542 	QQ_InitQueue(&pUmDevice->rx_out_of_buf_q.Container,
1543 		MAX_RX_PACKET_DESC_COUNT);
1546 #if (LINUX_VERSION_CODE < 0x020300)
1550 	atomic_set(&pUmDevice->intr_sem, 0);
1552 	LM_EnableInterrupt(pDevice);
1554 #if defined(CONFIG_PCI_MSI) || defined(CONFIG_PCI_USE_VECTOR)
1556 	if (pDevice->Flags & USING_MSI_FLAG){
1558 		/* int test to check support on older machines */
1559 		if (b57_test_intr(pUmDevice) != 1) {
			/* MSI delivery test failed: disable MSI, reset the
			   adapter and re-request a shared INTx interrupt. */
1561 			LM_DisableInterrupt(pDevice);
1562 			free_irq(pUmDevice->pdev->irq, dev);
1563 			pci_disable_msi(pUmDevice->pdev);
1564 			REG_WR(pDevice, Msi.Mode, 1 );
1565 			pDevice->Flags &= ~USING_MSI_FLAG;
1567 			rc = LM_ResetAdapter(pDevice);
1568 			printk(KERN_ALERT " The MSI support in this system is not functional.\n");
1570 			if (rc == LM_STATUS_SUCCESS)
1576 			rc = request_irq(pUmDevice->pdev->irq, &bcm5700_interrupt,
1577 				SA_SHIRQ, dev->name, dev);
1582 				bcm5700_freemem(dev);
1583 				pUmDevice->opened = 0;
1588 			pDevice->InitDone = TRUE;
1589 			atomic_set(&pUmDevice->intr_sem, 0);
1590 			LM_EnableInterrupt(pDevice);
	/* Periodic housekeeping timer (see bcm5700_timer). */
1595 	init_timer(&pUmDevice->timer);
1596 	pUmDevice->timer.expires = RUN_AT(pUmDevice->timer_interval);
1597 	pUmDevice->timer.data = (unsigned long)dev;
1598 	pUmDevice->timer.function = &bcm5700_timer;
1599 	add_timer(&pUmDevice->timer);
	/* 5705+ chips need a separate timer to harvest statistics. */
1601 	if (T3_ASIC_IS_5705_BEYOND(pDevice->ChipRevId)) {
1602 		init_timer(&pUmDevice->statstimer);
1603 		pUmDevice->statstimer.expires = RUN_AT(pUmDevice->statstimer_interval);
1604 		pUmDevice->statstimer.data = (unsigned long)dev;
1605 		pUmDevice->statstimer.function = &bcm5700_stats_timer;
1606 		add_timer(&pUmDevice->statstimer);
1609 	if(pDevice->Flags & USING_MSI_FLAG)
1610 		printk(KERN_INFO "%s: Using Message Signaled Interrupt (MSI)  \n", dev->name);
1612 		printk(KERN_INFO "%s: Using PCI INTX interrupt \n", dev->name);
1614 	netif_start_queue(dev);
/*
 * bcm5700_stats_timer() - periodic statistics harvester for 5705+ chips.
 * When the device is open, not suspended, interrupts are not masked by
 * the intr_sem and the link is up, pulls hardware stats via LM_GetStats()
 * under the global lock, then re-arms itself.
 */
1621 bcm5700_stats_timer(unsigned long data)
1623 	struct net_device *dev = (struct net_device *)data;
1624 	PUM_DEVICE_BLOCK pUmDevice = (PUM_DEVICE_BLOCK)dev->priv;
1625 	PLM_DEVICE_BLOCK pDevice = (PLM_DEVICE_BLOCK) pUmDevice;
1626 	unsigned long flags = 0;
1628 	if (!pUmDevice->opened)
1631 	if (!atomic_read(&pUmDevice->intr_sem) &&
1632 	    !pUmDevice->suspended &&
1633 	    (pDevice->LinkStatus == LM_STATUS_LINK_ACTIVE)) {
1634 		BCM5700_LOCK(pUmDevice, flags);
1635 		LM_GetStats(pDevice);
1636 		BCM5700_UNLOCK(pUmDevice, flags);
1639 	pUmDevice->statstimer.expires = RUN_AT(pUmDevice->statstimer_interval);
1641 	add_timer(&pUmDevice->statstimer);
/*
 * bcm5700_timer() - the driver's periodic housekeeping tick.
 * Responsibilities visible here: optional TBI link polling, delayed link
 * indication, crc counter aging, forcing an interrupt when a status block
 * update went unserviced, detecting a hung tx queue (old kernels),
 * adaptive interrupt coalescing, emergency rx buffer replenish, ASF
 * driver-alive heartbeat, and 5714-family fiber link checking.  Always
 * re-arms itself at the end.  (Several lines, including some returns and
 * braces, are elided from this view.)
 */
1646 bcm5700_timer(unsigned long data)
1648 	struct net_device *dev = (struct net_device *)data;
1649 	PUM_DEVICE_BLOCK pUmDevice = (PUM_DEVICE_BLOCK)dev->priv;
1650 	PLM_DEVICE_BLOCK pDevice = (PLM_DEVICE_BLOCK) pUmDevice;
1651 	unsigned long flags = 0;
1654 	if (!pUmDevice->opened)
	/* Interrupts masked or device suspended: just re-arm and wait. */
1657 	if (atomic_read(&pUmDevice->intr_sem) || pUmDevice->suspended) {
1658 		pUmDevice->timer.expires = RUN_AT(pUmDevice->timer_interval);
1659 		add_timer(&pUmDevice->timer);
1663 #ifdef INCLUDE_TBI_SUPPORT
	/* Poll the TBI/SerDes MAC status; reconfigure the PHY when the
	   link state and PCS sync/signal bits disagree. */
1664 	if ((pDevice->TbiFlags & TBI_POLLING_FLAGS) &&
1665 	    (--pUmDevice->poll_tbi_expiry <= 0)) {
1667 		BCM5700_PHY_LOCK(pUmDevice, flags);
1668 		value32 = REG_RD(pDevice, MacCtrl.Status);
1669 		if (((pDevice->LinkStatus == LM_STATUS_LINK_ACTIVE) &&
1670 			((value32 & (MAC_STATUS_LINK_STATE_CHANGED |
1671 				MAC_STATUS_CFG_CHANGED)) ||
1672 			!(value32 & MAC_STATUS_PCS_SYNCED)))
1674 			((pDevice->LinkStatus != LM_STATUS_LINK_ACTIVE) &&
1675 			(value32 & (MAC_STATUS_PCS_SYNCED |
1676 				MAC_STATUS_SIGNAL_DETECTED))))
1678 			LM_SetupPhy(pDevice);
1680 		BCM5700_PHY_UNLOCK(pUmDevice, flags);
1681 		pUmDevice->poll_tbi_expiry = pUmDevice->poll_tbi_interval;
	/* Report link status to the stack once the delay set in open()
	   (6 seconds) counts down to one. */
1686 	if (pUmDevice->delayed_link_ind > 0) {
1687 		if (pUmDevice->delayed_link_ind == 1)
1688 			MM_IndicateStatus(pDevice, pDevice->LinkStatus);
1690 		pUmDevice->delayed_link_ind--;
1693 	if (pUmDevice->crc_counter_expiry > 0)
1694 		pUmDevice->crc_counter_expiry--;
	/* Not currently in the ISR: kick the hardware if a status block
	   update is pending but no interrupt was delivered. */
1696 	if (!pUmDevice->interrupt) {
1697 		if (!(pDevice->Flags & USE_TAGGED_STATUS_FLAG)) {
1698 			BCM5700_LOCK(pUmDevice, flags);
1699 			if (pDevice->pStatusBlkVirt->Status & STATUS_BLOCK_UPDATED) {
1700 				/* This will generate an interrupt */
1701 				REG_WR(pDevice, Grc.LocalCtrl,
1702 					pDevice->GrcLocalCtrl |
1703 					GRC_MISC_LOCAL_CTRL_SET_INT);
1706 				REG_WR(pDevice, HostCoalesce.Mode,
1707 					pDevice->CoalesceMode |
1708 					HOST_COALESCE_ENABLE |
			/* Write DMA engine stopped — treat as dead chip. */
1711 			if (!(REG_RD(pDevice, DmaWrite.Mode) &
1712 				DMA_WRITE_MODE_ENABLE)) {
1713 				BCM5700_UNLOCK(pUmDevice, flags);
1717 			BCM5700_UNLOCK(pUmDevice, flags);
1719 		if (pUmDevice->tx_queued) {
1720 			pUmDevice->tx_queued = 0;
1721 			netif_wake_queue(dev);
1724 #if (LINUX_VERSION_CODE < 0x02032b)
		/* Old kernels have no watchdog: detect a hung tx here. */
1725 		if ((QQ_GetEntryCnt(&pDevice->TxPacketFreeQ.Container) !=
1726 			pDevice->TxPacketDescCnt) &&
1727 			((jiffies - dev->trans_start) > TX_TIMEOUT)) {
1729 			printk(KERN_WARNING "%s: Tx hung\n", dev->name);
1735 #ifndef BCM_NAPI_RXPOLL
	/* Re-evaluate interrupt coalescing parameters once per second. */
1736 	if (pUmDevice->adaptive_coalesce) {
1737 		pUmDevice->adaptive_expiry--;
1738 		if (pUmDevice->adaptive_expiry == 0) {
1739 			pUmDevice->adaptive_expiry = HZ /
1740 				pUmDevice->timer_interval;
1741 			bcm5700_adapt_coalesce(pUmDevice);
1746 	if (QQ_GetEntryCnt(&pUmDevice->rx_out_of_buf_q.Container) >
1747 		(unsigned int) pUmDevice->rx_buf_repl_panic_thresh) {
1748 		/* Generate interrupt and let isr allocate buffers */
1749 		REG_WR(pDevice, HostCoalesce.Mode, pDevice->CoalesceMode |
1750 			HOST_COALESCE_ENABLE | HOST_COALESCE_NOW);
	/* ASF heartbeat: every 2 seconds tell firmware the driver is
	   alive, via either the UNDI/PCIX-safe mailbox path or direct
	   NIC memory writes, then raise the RX CPU event. */
1754 	if (pDevice->AsfFlags & ASF_ENABLED) {
1755 		pUmDevice->asf_heartbeat--;
1756 		if (pUmDevice->asf_heartbeat == 0) {
1757 			if( (pDevice->Flags & UNDI_FIX_FLAG) ||
1758 				(pDevice->Flags & ENABLE_PCIX_FIX_FLAG)) {
1759 				MEM_WR_OFFSET(pDevice, T3_CMD_MAILBOX,
1760 					T3_CMD_NICDRV_ALIVE2);
1761 				MEM_WR_OFFSET(pDevice, T3_CMD_LENGTH_MAILBOX,
1763 				MEM_WR_OFFSET(pDevice, T3_CMD_DATA_MAILBOX, 5);
1766 					(T3_NIC_MBUF_POOL_ADDR +
1768 					T3_CMD_NICDRV_ALIVE2, 1);
1770 					(T3_NIC_MBUF_POOL_ADDR +
1771 					T3_CMD_LENGTH_MAILBOX),4,1);
1773 					(T3_NIC_MBUF_POOL_ADDR +
1774 					T3_CMD_DATA_MAILBOX),5,1);
1777 			value32 = REG_RD(pDevice, Grc.RxCpuEvent);
1778 			REG_WR(pDevice, Grc.RxCpuEvent, value32 | BIT_14);
1779 			pUmDevice->asf_heartbeat = (2 * HZ) /
1780 				pUmDevice->timer_interval;
1785 	if (pDevice->PhyFlags & PHY_IS_FIBER){
1786 		BCM5700_PHY_LOCK(pUmDevice, flags);
1787 		LM_5714_FamFiberCheckLink(pDevice);
1788 		BCM5700_PHY_UNLOCK(pUmDevice, flags);
1791 	pUmDevice->timer.expires = RUN_AT(pUmDevice->timer_interval);
1792 	add_timer(&pUmDevice->timer);
/*
 * bcm5700_init_counters() - reset the per-device software counters kept
 * for adaptive coalescing (current coalesce settings seeded from the LM
 * device defaults, last rx/tx packet counts) and the per-feature activity
 * counters (zero-copy, checksum, high-memory, TSO, crc).
 */
1796 bcm5700_init_counters(PUM_DEVICE_BLOCK pUmDevice)
1799 #ifndef BCM_NAPI_RXPOLL
1800 	LM_DEVICE_BLOCK *pDevice = &pUmDevice->lm_dev;
	/* Seed "current" coalesce values from the device defaults so
	   bcm5700_adapt_coalesce() has a correct starting point. */
1802 	pUmDevice->rx_curr_coalesce_frames = pDevice->RxMaxCoalescedFrames;
1803 	pUmDevice->rx_curr_coalesce_ticks = pDevice->RxCoalescingTicks;
1804 	pUmDevice->tx_curr_coalesce_frames = pDevice->TxMaxCoalescedFrames;
1805 	pUmDevice->rx_last_cnt = 0;
1806 	pUmDevice->tx_last_cnt = 0;
1809 	pUmDevice->phy_crc_count = 0;
1811 	pUmDevice->tx_zc_count = 0;
1812 	pUmDevice->tx_chksum_count = 0;
1813 	pUmDevice->tx_himem_count = 0;
1814 	pUmDevice->rx_good_chksum_count = 0;
1815 	pUmDevice->rx_bad_chksum_count = 0;
1818 	pUmDevice->tso_pkt_count = 0;
1824 #ifndef BCM_NAPI_RXPOLL
/*
 * bcm5700_do_adapt_coalesce() - apply a new set of interrupt-coalescing
 * parameters (rx/tx frame counts, rx ticks, rx frames during interrupt)
 * to both the software mirror fields and the HostCoalesce registers.
 * Bails out without changing anything if the global lock is already held
 * (i.e. the ISR is running) to avoid contending with interrupt work.
 */
1826 bcm5700_do_adapt_coalesce(PUM_DEVICE_BLOCK pUmDevice,
1827 	int rx_frames, int rx_ticks, int tx_frames, int rx_frames_intr)
1829 	unsigned long flags = 0;
1830 	LM_DEVICE_BLOCK *pDevice = &pUmDevice->lm_dev;
1832 	if (pUmDevice->do_global_lock) {
		/* Skip this round rather than spin against the ISR. */
1833 		if (spin_is_locked(&pUmDevice->global_lock))
1835 		spin_lock_irqsave(&pUmDevice->global_lock, flags);
1837 	pUmDevice->rx_curr_coalesce_frames = rx_frames;
1838 	pUmDevice->rx_curr_coalesce_ticks = rx_ticks;
1839 	pUmDevice->tx_curr_coalesce_frames = tx_frames;
1840 	pUmDevice->rx_curr_coalesce_frames_intr = rx_frames_intr;
1841 	REG_WR(pDevice, HostCoalesce.RxMaxCoalescedFrames, rx_frames);
1843 	REG_WR(pDevice, HostCoalesce.RxCoalescingTicks, rx_ticks);
1845 	REG_WR(pDevice, HostCoalesce.TxMaxCoalescedFrames, tx_frames);
1847 	REG_WR(pDevice, HostCoalesce.RxMaxCoalescedFramesDuringInt,
1850 	BCM5700_UNLOCK(pUmDevice, flags);
/*
 * bcm5700_adapt_coalesce() - once-per-second adaptive tuning of interrupt
 * coalescing.  Computes a weighted packets/sec estimate from the hardware
 * unicast rx/tx counters and switches between three preset coalesce
 * profiles (LO / DEFAULT / HI) when the traffic level crosses the
 * corresponding thresholds.  Only reprograms the hardware when the target
 * profile actually differs from the current one.
 */
1855 bcm5700_adapt_coalesce(PUM_DEVICE_BLOCK pUmDevice)
1857 	PLM_DEVICE_BLOCK pDevice = &pUmDevice->lm_dev;
1858 	uint rx_curr_cnt, tx_curr_cnt, rx_delta, tx_delta, total_delta;
1860 	rx_curr_cnt = pDevice->pStatsBlkVirt->ifHCInUcastPkts.Low;
1861 	tx_curr_cnt = pDevice->pStatsBlkVirt->ifHCOutUcastPkts.Low;
	/* NOTE(review): rx uses <= but tx uses < — rx also skips the
	   zero-traffic case, tx does not; looks intentional but confirm. */
1862 	if ((rx_curr_cnt <= pUmDevice->rx_last_cnt) ||
1863 	    (tx_curr_cnt < pUmDevice->tx_last_cnt)) {
1865 		/* skip if there is counter rollover */
1866 		pUmDevice->rx_last_cnt = rx_curr_cnt;
1867 		pUmDevice->tx_last_cnt = tx_curr_cnt;
1871 	rx_delta = rx_curr_cnt - pUmDevice->rx_last_cnt;
1872 	tx_delta = tx_curr_cnt - pUmDevice->tx_last_cnt;
	/* Weighted average (rx counted twice) scaled by 2 —
	   i.e. (2*rx + tx) * 2/3. */
1873 	total_delta = (((rx_delta + rx_delta) + tx_delta) / 3) << 1;
1875 	pUmDevice->rx_last_cnt = rx_curr_cnt;
1876 	pUmDevice->tx_last_cnt = tx_curr_cnt;
1878 	if (total_delta < ADAPTIVE_LO_PKT_THRESH) {
1879 		if (pUmDevice->rx_curr_coalesce_frames !=
1880 			ADAPTIVE_LO_RX_MAX_COALESCED_FRAMES) {
1882 			bcm5700_do_adapt_coalesce(pUmDevice,
1883 				ADAPTIVE_LO_RX_MAX_COALESCED_FRAMES,
1884 				ADAPTIVE_LO_RX_COALESCING_TICKS,
1885 				ADAPTIVE_LO_TX_MAX_COALESCED_FRAMES,
1886 				ADAPTIVE_LO_RX_MAX_COALESCED_FRAMES_DURING_INT);
1889 	else if (total_delta < ADAPTIVE_HI_PKT_THRESH) {
1890 		if (pUmDevice->rx_curr_coalesce_frames !=
1891 			DEFAULT_RX_MAX_COALESCED_FRAMES) {
1893 			bcm5700_do_adapt_coalesce(pUmDevice,
1894 				DEFAULT_RX_MAX_COALESCED_FRAMES,
1895 				DEFAULT_RX_COALESCING_TICKS,
1896 				DEFAULT_TX_MAX_COALESCED_FRAMES,
1897 				DEFAULT_RX_MAX_COALESCED_FRAMES_DURING_INT);
1901 		if (pUmDevice->rx_curr_coalesce_frames !=
1902 			ADAPTIVE_HI_RX_MAX_COALESCED_FRAMES) {
1904 			bcm5700_do_adapt_coalesce(pUmDevice,
1905 				ADAPTIVE_HI_RX_MAX_COALESCED_FRAMES,
1906 				ADAPTIVE_HI_RX_COALESCING_TICKS,
1907 				ADAPTIVE_HI_TX_MAX_COALESCED_FRAMES,
1908 				ADAPTIVE_HI_RX_MAX_COALESCED_FRAMES_DURING_INT);
/*
 * bcm5700_reset() - tx-timeout handler (installed as dev->tx_timeout).
 * Disables TSO if a TSO-related tx stall is suspected, stops the queue,
 * masks interrupts, fully resets and reinitializes the adapter under the
 * PHY lock, restores rx/vlan modes and counters, reprograms the MAC
 * address if an administered address was set, then re-enables interrupts
 * and wakes the queue.
 */
1917 bcm5700_reset(struct net_device *dev)
1919 	PUM_DEVICE_BLOCK pUmDevice = (PUM_DEVICE_BLOCK)dev->priv;
1920 	PLM_DEVICE_BLOCK pDevice = (PLM_DEVICE_BLOCK) pUmDevice;
1921 	unsigned long flags;
	/* A hang while the tx ring was full with TSO active suggests the
	   known TSO lockup; turn TSO off for the rest of the session. */
1925 	if( (dev->features & NETIF_F_TSO) &&
1926 		(pUmDevice->tx_full) ) {
1928 		dev->features &= ~NETIF_F_TSO;
1932 	netif_stop_queue(dev);
1933 	bcm5700_intr_off(pUmDevice);
1934 	BCM5700_PHY_LOCK(pUmDevice, flags);
1935 	LM_ResetAdapter(pDevice);
1936 	pDevice->InitDone = TRUE;
1937 	bcm5700_do_rx_mode(dev);
1938 	bcm5700_set_vlan_mode(pUmDevice);
1939 	bcm5700_init_counters(pUmDevice);
1940 	if (memcmp(dev->dev_addr, pDevice->NodeAddress, 6)) {
1941 		LM_SetMacAddress(pDevice, dev->dev_addr);
1943 	BCM5700_PHY_UNLOCK(pUmDevice, flags);
	/* intr_sem = 1 defers servicing until bcm5700_intr_on() drops it. */
1944 	atomic_set(&pUmDevice->intr_sem, 1);
1945 	bcm5700_intr_on(pUmDevice);
1946 	netif_wake_queue(dev);
/*
 * bcm5700_set_vlan_mode() - decide whether the hardware should strip
 * 802.1Q tags on receive and program the receive mask accordingly.
 * AUTO_STRIP resolves to FORCED_STRIP when ASF firmware is active
 * (firmware needs untagged frames), otherwise NORMAL_STRIP.  In normal
 * mode tags are kept unless a vlan group (or NICE rx handler) is
 * registered; forced mode always strips.  The mask is written to the
 * hardware only when it actually changed.
 */
1950 bcm5700_set_vlan_mode(UM_DEVICE_BLOCK *pUmDevice)
1952 	LM_DEVICE_BLOCK *pDevice = &pUmDevice->lm_dev;
1953 	LM_UINT32 ReceiveMask = pDevice->ReceiveMask;
1954 	int vlan_tag_mode = pUmDevice->vlan_tag_mode;
1956 	if (vlan_tag_mode == VLAN_TAG_MODE_AUTO_STRIP) {
1957 		if (pDevice->AsfFlags & ASF_ENABLED) {
1958 			vlan_tag_mode = VLAN_TAG_MODE_FORCED_STRIP;
1961 			vlan_tag_mode = VLAN_TAG_MODE_NORMAL_STRIP;
1964 	if (vlan_tag_mode == VLAN_TAG_MODE_NORMAL_STRIP) {
1965 		ReceiveMask |= LM_KEEP_VLAN_TAG;
1967 		if (pUmDevice->vlgrp)
1968 			ReceiveMask &= ~LM_KEEP_VLAN_TAG;
1971 		if (pUmDevice->nice_rx)
1972 			ReceiveMask &= ~LM_KEEP_VLAN_TAG;
1975 	else if (vlan_tag_mode == VLAN_TAG_MODE_FORCED_STRIP) {
1976 		ReceiveMask &= ~LM_KEEP_VLAN_TAG;
1978 	if (ReceiveMask != pDevice->ReceiveMask)
1980 		LM_SetReceiveMask(pDevice, ReceiveMask);
/*
 * bcm5700_poll_wait() - under NAPI, sleep in 1-jiffy increments until the
 * in-progress rx poll (lm_dev.RxPoll) completes.  Used before changing
 * state (e.g. vlan registration) that the poll routine reads.
 */
1985 bcm5700_poll_wait(UM_DEVICE_BLOCK *pUmDevice)
1987 #ifdef BCM_NAPI_RXPOLL
1988 	while (pUmDevice->lm_dev.RxPoll) {
1989 		current->state = TASK_INTERRUPTIBLE;
1990 		schedule_timeout(1);
/*
 * bcm5700_vlan_rx_register() - vlan group attach callback.  Quiesces the
 * device (interrupts off, wait for any NAPI poll to finish), records the
 * new group, recomputes the tag-stripping mode, then re-enables interrupts.
 */
1998 bcm5700_vlan_rx_register(struct net_device *dev, struct vlan_group *vlgrp)
2000 	PUM_DEVICE_BLOCK pUmDevice = (PUM_DEVICE_BLOCK) dev->priv;
2002 	bcm5700_intr_off(pUmDevice);
2003 	bcm5700_poll_wait(pUmDevice);
2004 	pUmDevice->vlgrp = vlgrp;
2005 	bcm5700_set_vlan_mode(pUmDevice);
2006 	bcm5700_intr_on(pUmDevice);
/*
 * bcm5700_vlan_rx_kill_vid() - vlan id removal callback.  Quiesces the
 * device, clears the per-vid slot in the registered vlan group (if any),
 * and re-enables interrupts.
 */
2010 bcm5700_vlan_rx_kill_vid(struct net_device *dev, uint16_t vid)
2012 	PUM_DEVICE_BLOCK pUmDevice = (PUM_DEVICE_BLOCK) dev->priv;
2014 	bcm5700_intr_off(pUmDevice);
2015 	bcm5700_poll_wait(pUmDevice);
2016 	if (pUmDevice->vlgrp) {
2017 		pUmDevice->vlgrp->vlan_devices[vid] = NULL;
2019 	bcm5700_intr_on(pUmDevice);
/*
 * bcm5700_start_xmit() - netdev hard_start_xmit: map an skb onto an LM
 * packet descriptor and hand it to the hardware.
 * Steps visible here: drop/defer when link is down or ISR is active,
 * pop a free tx descriptor (stopping the queue when exhausted), record
 * checksum-offload and vlan-tag info, build the TSO pseudo-header when
 * tso_size is set, then LM_SendPacket() under the global lock.
 * (Error-return lines and some braces are elided from this view.)
 */
2024 bcm5700_start_xmit(struct sk_buff *skb, struct net_device *dev)
2026 	PUM_DEVICE_BLOCK pUmDevice = (PUM_DEVICE_BLOCK)dev->priv;
2027 	PLM_DEVICE_BLOCK pDevice = (PLM_DEVICE_BLOCK) pUmDevice;
2029 	PUM_PACKET pUmPacket;
2030 	unsigned long flags = 0;
2033 	vlan_tag_t *vlan_tag;
2037 	uint16_t ip_tcp_len, tcp_opt_len, tcp_seg_flags;
2040 	if ((pDevice->LinkStatus == LM_STATUS_LINK_DOWN) ||
2041 		!pDevice->InitDone || pUmDevice->suspended)
2047 #if (LINUX_VERSION_CODE < 0x02032b)
2048 	if (test_and_set_bit(0, &dev->tbusy)) {
	/* ISR is running and we use the global lock: defer this packet;
	   the ISR (or timer) will wake the queue via tx_queued. */
2053 	if (pUmDevice->do_global_lock && pUmDevice->interrupt) {
2054 		netif_stop_queue(dev);
2055 		pUmDevice->tx_queued = 1;
2056 		if (!pUmDevice->interrupt) {
2057 			netif_wake_queue(dev);
2058 			pUmDevice->tx_queued = 0;
2063 	pPacket = (PLM_PACKET)
2064 		QQ_PopHead(&pDevice->TxPacketFreeQ.Container);
	/* No free descriptor: stop the queue, re-check for a race where
	   one was freed between the pop and the stop. */
2066 		netif_stop_queue(dev);
2067 		pUmDevice->tx_full = 1;
2068 		if (QQ_GetEntryCnt(&pDevice->TxPacketFreeQ.Container)) {
2069 			netif_wake_queue(dev);
2070 			pUmDevice->tx_full = 0;
2074 	pUmPacket = (PUM_PACKET) pPacket;
2075 	pUmPacket->skbuff = skb;
2077 	if (skb->ip_summed == CHECKSUM_HW) {
2078 		pPacket->Flags = SND_BD_FLAG_TCP_UDP_CKSUM;
2080 		pUmDevice->tx_chksum_count++;
2087 	frag_no = skb_shinfo(skb)->nr_frags;
	/* Not enough send BDs for all fragments: stop and re-check. */
2091 	if (atomic_read(&pDevice->SendBdLeft) < (frag_no + 1)) {
2092 		netif_stop_queue(dev);
2093 		pUmDevice->tx_full = 1;
2094 		QQ_PushHead(&pDevice->TxPacketFreeQ.Container, pPacket);
2095 		if (atomic_read(&pDevice->SendBdLeft) >= (frag_no + 1)) {
2096 			netif_wake_queue(dev);
2097 			pUmDevice->tx_full = 0;
2102 	pPacket->u.Tx.FragCount = frag_no + 1;
2104 	if (pPacket->u.Tx.FragCount > 1)
2105 		pUmDevice->tx_zc_count++;
	/* Vlan tag from the kernel vlan layer, or (legacy NICE path) from
	   a signature-marked tag smuggled in skb->cb[]. */
2109 	if (pUmDevice->vlgrp && vlan_tx_tag_present(skb)) {
2110 		pPacket->VlanTag = vlan_tx_tag_get(skb);
2111 		pPacket->Flags |= SND_BD_FLAG_VLAN_TAG;
2115 	vlan_tag = (vlan_tag_t *) &skb->cb[0];
2116 	if (vlan_tag->signature == 0x5555) {
2117 		pPacket->VlanTag = vlan_tag->tag;
2118 		pPacket->Flags |= SND_BD_FLAG_VLAN_TAG;
2119 		vlan_tag->signature = 0;
	/* TSO: prepare IP/TCP headers the way the firmware expects. */
2124 	if ((mss = (LM_UINT32) skb_shinfo(skb)->tso_size) &&
2125 		(skb->len > pDevice->TxMtu)) {
2127 #if (LINUX_VERSION_CODE >= 0x02060c)
		/* Headers must be writable; unclone before editing them. */
2129 		if (skb_header_cloned(skb) &&
2130 			pskb_expand_head(skb, 0, 0, GFP_ATOMIC)) {
2136 		pUmDevice->tso_pkt_count++;
2138 		pPacket->Flags |= SND_BD_FLAG_CPU_PRE_DMA |
2139 			SND_BD_FLAG_CPU_POST_DMA;
2142 		if (skb->h.th->doff > 5) {
2143 			tcp_opt_len = (skb->h.th->doff - 5) << 2;
2145 		ip_tcp_len = (skb->nh.iph->ihl << 2) + sizeof(struct tcphdr);
2146 		skb->nh.iph->check = 0;
		/* 575X+ firmware computes the full checksum itself; older
		   chips need the pseudo-header checksum precomputed. */
2148 		if ( T3_ASIC_IS_575X_PLUS(pDevice->ChipRevId) ){
2149 			skb->h.th->check = 0;
2150 			pPacket->Flags &= ~SND_BD_FLAG_TCP_UDP_CKSUM;
2153 			skb->h.th->check = ~csum_tcpudp_magic(
2154 				skb->nh.iph->saddr, skb->nh.iph->daddr,
2158 		skb->nh.iph->tot_len = htons(mss + ip_tcp_len + tcp_opt_len);
		/* Encode IP/TCP option lengths into the mss field; the bit
		   position differs between 5705+ and older ASICs. */
2161 		if (tcp_opt_len || (skb->nh.iph->ihl > 5)) {
2162 			if ( T3_ASIC_IS_5705_BEYOND(pDevice->ChipRevId) ){
2164 					((skb->nh.iph->ihl - 5) +
2165 					(tcp_opt_len >> 2)) << 11;
2169 					((skb->nh.iph->ihl - 5) +
2170 					(tcp_opt_len >> 2)) << 12;
2173 		pPacket->u.Tx.MaxSegmentSize = mss | tcp_seg_flags;
2177 		pPacket->u.Tx.MaxSegmentSize = 0;
2180 	BCM5700_LOCK(pUmDevice, flags);
2181 	LM_SendPacket(pDevice, pPacket);
2182 	BCM5700_UNLOCK(pUmDevice, flags);
2184 #if (LINUX_VERSION_CODE < 0x02032b)
2185 	netif_wake_queue(dev);
2187 	dev->trans_start = jiffies;
2193 #ifdef BCM_NAPI_RXPOLL
/*
 * bcm5700_poll() - NAPI poll routine (old *budget/quota API).
 * Services up to min(*budget, dev->quota) rx packets via LM_ServiceRxPoll,
 * replenishes rx buffers, indicates packets to the stack, and when the
 * budget was not exhausted completes the poll: re-enables rx interrupts,
 * then re-kicks the hardware if a status-block update slipped in while
 * interrupts were off.
 */
2195 bcm5700_poll(struct net_device *dev, int *budget)
2197 	int orig_budget = *budget;
2199 	UM_DEVICE_BLOCK *pUmDevice = (UM_DEVICE_BLOCK *) dev->priv;
2200 	LM_DEVICE_BLOCK *pDevice = &pUmDevice->lm_dev;
2201 	unsigned long flags = 0;
2204 	if (orig_budget > dev->quota)
2205 		orig_budget = dev->quota;
2207 	BCM5700_LOCK(pUmDevice, flags);
2208 	work_done = LM_ServiceRxPoll(pDevice, orig_budget);
2209 	*budget -= work_done;
2210 	dev->quota -= work_done;
2212 	if (QQ_GetEntryCnt(&pUmDevice->rx_out_of_buf_q.Container)) {
2213 		replenish_rx_buffers(pUmDevice, 0);
2215 	BCM5700_UNLOCK(pUmDevice, flags);
2217 	MM_IndicateRxPackets(pDevice);
2218 	BCM5700_LOCK(pUmDevice, flags);
2219 	LM_QueueRxPackets(pDevice);
2220 	BCM5700_UNLOCK(pUmDevice, flags);
	/* Done early (or interrupts masked/suspended): leave polling mode. */
2222 	if ((work_done < orig_budget) || atomic_read(&pUmDevice->intr_sem) ||
2223 		pUmDevice->suspended) {
2225 		netif_rx_complete(dev);
2226 		BCM5700_LOCK(pUmDevice, flags);
2227 		REG_WR(pDevice, Grc.Mode, pDevice->GrcMode);
2228 		pDevice->RxPoll = FALSE;
		/* NOTE(review): RxPoll was just set FALSE above, so this
		   branch looks unreachable as shown; lines are elided here
		   — confirm ordering against the upstream source. */
2229 		if (pDevice->RxPoll) {
2230 			BCM5700_UNLOCK(pUmDevice, flags);
2233 		/* Take care of possible missed rx interrupts */
2234 		REG_RD_BACK(pDevice, Grc.Mode); /* flush the register write */
2235 		tag = pDevice->pStatusBlkVirt->StatusTag;
2236 		if ((pDevice->pStatusBlkVirt->Status & STATUS_BLOCK_UPDATED) ||
2237 			(pDevice->pStatusBlkVirt->Idx[0].RcvProdIdx !=
2238 			pDevice->RcvRetConIdx)) {
2240 			REG_WR(pDevice, HostCoalesce.Mode,
2241 				pDevice->CoalesceMode | HOST_COALESCE_ENABLE |
2244 		/* If a new status block is pending in the WDMA state machine */
2245 		/* before the register write to enable the rx interrupt, */
2246 		/* the new status block may DMA with no interrupt. In this */
2247 		/* scenario, the tag read above will be older than the tag in */
2248 		/* the pending status block and writing the older tag will */
2249 		/* cause interrupt to be generated. */
2250 		else if (pDevice->Flags & USE_TAGGED_STATUS_FLAG) {
2251 			MB_REG_WR(pDevice, Mailbox.Interrupt[0].Low,
2253 		/* Make sure we service tx in case some tx interrupts */
		/* Half the send ring consumed: force a coalesce event so
		   tx completions get processed promptly. */
2255 		if (atomic_read(&pDevice->SendBdLeft) <
2256 			(T3_SEND_RCB_ENTRY_COUNT / 2)) {
2257 			REG_WR(pDevice, HostCoalesce.Mode,
2258 				pDevice->CoalesceMode |
2259 				HOST_COALESCE_ENABLE |
2263 		BCM5700_UNLOCK(pUmDevice, flags);
2268 #endif /* BCM_NAPI_RXPOLL */
/*
 * bcm5700_interrupt() - shared IRQ handler.
 * Early-outs: device not initialized, interrupts software-masked
 * (intr_sem — ack and leave), or re-entered (duplicate-entry guard).
 * Claims the interrupt when using MSI, when the status block says
 * UPDATED, or when the PCI state register shows the line asserted.
 * Services events in a bounded loop — tagged-status mode loops until the
 * status tag stops changing, legacy mode re-reads the UPDATED bit — then
 * replenishes rx buffers (inline past the panic threshold, else via
 * tasklet), requeues rx packets, and finally wakes a deferred tx queue.
 * (Some braces, returns and #else branches are elided from this view.)
 */
2271 bcm5700_interrupt(int irq, void *dev_instance, struct pt_regs *regs)
2273 	struct net_device *dev = (struct net_device *)dev_instance;
2274 	PUM_DEVICE_BLOCK pUmDevice = (PUM_DEVICE_BLOCK)dev->priv;
2275 	PLM_DEVICE_BLOCK pDevice = (PLM_DEVICE_BLOCK) pUmDevice;
2276 	LM_UINT32 oldtag, newtag;
2277 	int i, max_intr_loop;
2281 	unsigned int handled = 1;
2283 	if (!pDevice->InitDone) {
2285 		return IRQ_RETVAL(handled);
2288 	bcm5700_intr_lock(pUmDevice);
	/* Software-masked: ack the mailbox (keep hw quiet) and leave. */
2289 	if (atomic_read(&pUmDevice->intr_sem)) {
2290 		MB_REG_WR(pDevice, Mailbox.Interrupt[0].Low, 1);
2291 		bcm5700_intr_unlock(pUmDevice);
2293 		return IRQ_RETVAL(handled);
2296 	if (test_and_set_bit(0, (void*)&pUmDevice->interrupt)) {
2297 		printk(KERN_ERR "%s: Duplicate entry of the interrupt handler\n",
2299 		bcm5700_intr_unlock(pUmDevice);
2301 		return IRQ_RETVAL(handled);
	/* Is this interrupt ours?  MSI is never shared; otherwise check
	   the status block and the PCI INTA state bit. */
2304 	if ((pDevice->Flags & USING_MSI_FLAG) ||
2305 		(pDevice->pStatusBlkVirt->Status & STATUS_BLOCK_UPDATED) ||
2306 		!(REG_RD(pDevice,PciCfg.PciState) & T3_PCI_STATE_INTERRUPT_NOT_ACTIVE) )
		/* One-shot probe used by b57_test_intr() to verify MSI. */
2309 		if (pUmDevice->intr_test) {
2310 			if (!(REG_RD(pDevice, PciCfg.PciState) &
2311 				T3_PCI_STATE_INTERRUPT_NOT_ACTIVE) ||
2312 				pDevice->Flags & USING_MSI_FLAG ) {
2313 				pUmDevice->intr_test_result = 1;
2315 			pUmDevice->intr_test = 0;
2318 #ifdef BCM_NAPI_RXPOLL
		/* Tagged status: mask via mailbox, loop while the firmware
		   keeps bumping the status tag, then ack with the last tag. */
2323 		if (pDevice->Flags & USE_TAGGED_STATUS_FLAG) {
2324 			MB_REG_WR(pDevice, Mailbox.Interrupt[0].Low, 1);
2325 			oldtag = pDevice->pStatusBlkVirt->StatusTag;
2327 			for (i = 0; ; i++) {
2328 				pDevice->pStatusBlkVirt->Status &= ~STATUS_BLOCK_UPDATED;
2330 				LM_ServiceInterrupts(pDevice);
2331 				newtag = pDevice->pStatusBlkVirt->StatusTag;
2332 				if ((newtag == oldtag) || (i > max_intr_loop)) {
2333 					MB_REG_WR(pDevice, Mailbox.Interrupt[0].Low, oldtag << 24);
2334 					pDevice->LastTag = oldtag;
2335 					if (pDevice->Flags & UNDI_FIX_FLAG) {
2336 						REG_WR(pDevice, Grc.LocalCtrl,
2337 							pDevice->GrcLocalCtrl | 0x2);
		/* Legacy mode: mask, service, unmask, and re-check the
		   UPDATED bit (bounded by max_intr_loop). */
2350 				MB_REG_WR(pDevice, Mailbox.Interrupt[0].Low, 1);
2351 				pDevice->pStatusBlkVirt->Status &= ~STATUS_BLOCK_UPDATED;
2352 				LM_ServiceInterrupts(pDevice);
2353 				MB_REG_WR(pDevice, Mailbox.Interrupt[0].Low, 0);
2354 				dummy = MB_REG_RD(pDevice, Mailbox.Interrupt[0].Low);
2357 			while ((pDevice->pStatusBlkVirt->Status & STATUS_BLOCK_UPDATED) &&
2358 				(i < max_intr_loop));
2360 			if (pDevice->Flags & UNDI_FIX_FLAG) {
2361 				REG_WR(pDevice, Grc.LocalCtrl,
2362 					pDevice->GrcLocalCtrl | 0x2);
2368 		/* not my interrupt */
	/* Replenish rx buffers: inline (bounded by the isr limit) when
	   past the panic threshold, otherwise defer to the tasklet. */
2373 		repl_buf_count = QQ_GetEntryCnt(&pUmDevice->rx_out_of_buf_q.Container);
2374 		if (((repl_buf_count > pUmDevice->rx_buf_repl_panic_thresh) ||
2375 			pDevice->QueueAgain) &&
2376 			(!test_and_set_bit(0, &pUmDevice->tasklet_busy))) {
2378 			replenish_rx_buffers(pUmDevice, pUmDevice->rx_buf_repl_isr_limit);
2379 			clear_bit(0, (void*)&pUmDevice->tasklet_busy);
2381 		else if ((repl_buf_count > pUmDevice->rx_buf_repl_thresh) &&
2382 			!pUmDevice->tasklet_pending) {
2384 			pUmDevice->tasklet_pending = 1;
2385 			tasklet_schedule(&pUmDevice->tasklet);
2388 #ifdef BCM_NAPI_RXPOLL
2389 		if (!pDevice->RxPoll &&
2390 			QQ_GetEntryCnt(&pUmDevice->rx_out_of_buf_q.Container)) {
2391 			pDevice->RxPoll = 1;
2392 			MM_ScheduleRxPoll(pDevice);
2395 		if (QQ_GetEntryCnt(&pUmDevice->rx_out_of_buf_q.Container)) {
2396 			replenish_rx_buffers(pUmDevice, 0);
2399 		if (QQ_GetEntryCnt(&pDevice->RxPacketFreeQ.Container) ||
2400 			pDevice->QueueAgain) {
2402 			LM_QueueRxPackets(pDevice);
2407 	clear_bit(0, (void*)&pUmDevice->interrupt);
2408 	bcm5700_intr_unlock(pUmDevice);
	/* Wake a tx queue that start_xmit deferred while we were here. */
2409 	if (pUmDevice->tx_queued) {
2410 		pUmDevice->tx_queued = 0;
2411 		netif_wake_queue(dev);
2413 	return IRQ_RETVAL(handled);
/*
 * bcm5700_tasklet() - deferred RX buffer replenishment.
 *
 * Scheduled from the ISR when the out-of-buffer queue exceeds the soft
 * threshold.  Guards against re-entry with tasklet_busy (the comment below
 * notes RH 7.2 beta tasklets could re-enter), then refills RX buffers under
 * the device lock, but only while the interface is open and not suspended.
 * NOTE(review): some source lines are missing from this excerpt (original
 * line numbers jump 2426 -> 2430), likely an early return after the guard.
 */
2419 bcm5700_tasklet(unsigned long data)
2421 PUM_DEVICE_BLOCK pUmDevice = (PUM_DEVICE_BLOCK)data;
2422 unsigned long flags = 0;
2424 /* RH 7.2 Beta 3 tasklets are reentrant */
2425 if (test_and_set_bit(0, &pUmDevice->tasklet_busy)) {
2426 pUmDevice->tasklet_pending = 0;
2430 pUmDevice->tasklet_pending = 0;
2431 if (pUmDevice->opened && !pUmDevice->suspended) {
2432 BCM5700_LOCK(pUmDevice, flags);
2433 replenish_rx_buffers(pUmDevice, 0);
2434 BCM5700_UNLOCK(pUmDevice, flags);
2437 clear_bit(0, &pUmDevice->tasklet_busy);
/*
 * bcm5700_close() - net_device close (ifdown) handler.
 *
 * Stops the TX queue, shuts the chip down, deletes the periodic timers,
 * frees the IRQ (and tears down MSI if it was enabled), drops to power
 * state D3, frees DMA/driver memory, and re-initializes the RX free queue
 * for a future open.  NOTE(review): excerpt is missing lines (e.g. the
 * bodies after the ASF/WOL checks at 2455/2458), so the exact power-down
 * conditions are not fully visible here.
 */
2442 bcm5700_close(struct net_device *dev)
2445 PUM_DEVICE_BLOCK pUmDevice = (PUM_DEVICE_BLOCK)dev->priv;
2446 PLM_DEVICE_BLOCK pDevice = (PLM_DEVICE_BLOCK) pUmDevice;
2448 #if (LINUX_VERSION_CODE < 0x02032b)
2451 netif_stop_queue(dev);
2452 pUmDevice->opened = 0;
/* Only fully power down when ASF management firmware is not running. */
2455 if( !(pDevice->AsfFlags & ASF_ENABLED) )
2458 if( enable_wol[pUmDevice->index] == 0 )
2460 printk(KERN_INFO "%s: %s NIC Link is DOWN\n", bcm5700_driver, dev->name);
2462 if (tigon3_debug > 1)
2463 printk(KERN_DEBUG "%s: Shutting down Tigon3\n",
2466 LM_MulticastClear(pDevice);
2467 bcm5700_shutdown(pUmDevice);
/* 5705+ chips keep statistics via a software timer; stop it. */
2469 if (T3_ASIC_IS_5705_BEYOND(pDevice->ChipRevId)) {
2470 del_timer_sync(&pUmDevice->statstimer);
2473 del_timer_sync(&pUmDevice->timer);
2475 free_irq(pUmDevice->pdev->irq, dev);
2477 #if defined(CONFIG_PCI_MSI) || defined(CONFIG_PCI_USE_VECTOR)
2479 if(pDevice->Flags & USING_MSI_FLAG) {
2480 pci_disable_msi(pUmDevice->pdev);
2481 REG_WR(pDevice, Msi.Mode, 1 );
2482 pDevice->Flags &= ~USING_MSI_FLAG;
2488 #if (LINUX_VERSION_CODE < 0x020300)
2493 LM_SetPowerState(pDevice, LM_POWER_STATE_D3);
2496 bcm5700_freemem(dev);
/* Reset the RX packet free queue so a subsequent open starts clean. */
2498 QQ_InitQueue(&pDevice->RxPacketFreeQ.Container,
2499 MAX_RX_PACKET_DESC_COUNT);
/*
 * bcm5700_freemem() - release all memory tracked in the per-device lists.
 *
 * Entries with a recorded size of 0 were kmalloc'ed; all others are DMA
 * consistent allocations freed with pci_free_consistent().  Clears the
 * cached status/stats block pointers and the list count afterwards, and
 * (when the device is closed) also releases the ioctl-allocated regions
 * in mem_list2 via bcm5700_freemem2().
 */
2505 bcm5700_freemem(struct net_device *dev)
2508 PUM_DEVICE_BLOCK pUmDevice = (PUM_DEVICE_BLOCK)dev->priv;
2509 LM_DEVICE_BLOCK *pDevice = &pUmDevice->lm_dev;
2511 for (i = 0; i < pUmDevice->mem_list_num; i++) {
/* Size 0 marks a kmalloc'ed entry; nonzero marks DMA-consistent memory. */
2512 if (pUmDevice->mem_size_list[i] == 0) {
2513 kfree(pUmDevice->mem_list[i]);
2516 pci_free_consistent(pUmDevice->pdev,
2517 (size_t) pUmDevice->mem_size_list[i],
2518 pUmDevice->mem_list[i],
2519 pUmDevice->dma_list[i]);
/* Dangling-pointer hygiene: blocks above may back these virtual addrs. */
2523 pDevice->pStatusBlkVirt = 0;
2524 pDevice->pStatsBlkVirt = 0;
2525 pUmDevice->mem_list_num = 0;
2528 if (!pUmDevice->opened) {
2529 for (i = 0; i < MAX_MEM2; i++) {
2530 if (pUmDevice->mem_size_list2[i]) {
2531 bcm5700_freemem2(pUmDevice, i);
2540 /* Frees consistent memory allocated through ioctl */
2541 /* The memory to be freed is in mem_list2[index] */
/*
 * bcm5700_freemem2() - undo the page-reservation done at allocation time
 * (ClearPageReserved on 2.6+, mem_map_unreserve on older kernels) for each
 * page of the region, then release the DMA-consistent buffer and zero the
 * recorded size so the slot can be reused.  The in-code comment warns the
 * virt_to_page() walk may not be portable to all architectures.
 */
2543 bcm5700_freemem2(UM_DEVICE_BLOCK *pUmDevice, int index)
2545 #if (LINUX_VERSION_CODE >= 0x020400)
2547 struct page *pg, *last_pg;
2549 /* Probably won't work on some architectures */
2550 ptr = pUmDevice->mem_list2[index],
2551 pg = virt_to_page(ptr);
2552 last_pg = virt_to_page(ptr + pUmDevice->mem_size_list2[index] - 1);
2554 #if (LINUX_VERSION_CODE > 0x020500)
2555 ClearPageReserved(pg);
2557 mem_map_unreserve(pg);
2562 pci_free_consistent(pUmDevice->pdev,
2563 (size_t) pUmDevice->mem_size_list2[index],
2564 pUmDevice->mem_list2[index],
2565 pUmDevice->dma_list2[index]);
/* Size 0 marks the slot free (see bcm5700_freemem's closed-device sweep). */
2566 pUmDevice->mem_size_list2[index] = 0;
/*
 * bcm5700_crc_count() - running CRC error count for the device.
 *
 * On 5700/5701 copper chips the hardware statistics block under-reports
 * CRC errors, so the count is read from PHY register 0x14 (after enabling
 * the counter via bit 15 of register 0x1e) and accumulated in software.
 * PHY access is rate-limited to once per 5 seconds via crc_counter_expiry,
 * and a raw 0xffff read is discarded as probable MDIO bus corruption.
 * All other chips return the dot3StatsFCSErrors counter from the stats
 * block (0 if the stats block is not yet mapped).
 */
2573 bcm5700_crc_count(PUM_DEVICE_BLOCK pUmDevice)
2575 PLM_DEVICE_BLOCK pDevice = &pUmDevice->lm_dev;
2577 PT3_STATS_BLOCK pStats = (PT3_STATS_BLOCK) pDevice->pStatsBlkVirt;
2578 unsigned long flags;
2580 if ((T3_ASIC_REV(pDevice->ChipRevId) == T3_ASIC_REV_5700 ||
2581 T3_ASIC_REV(pDevice->ChipRevId) == T3_ASIC_REV_5701) &&
2582 !(pDevice->TbiFlags & ENABLE_TBI_FLAG)) {
2584 if (!pUmDevice->opened || !pDevice->InitDone)
2590 /* regulate MDIO access during run time */
2591 if (pUmDevice->crc_counter_expiry > 0)
2592 return pUmDevice->phy_crc_count;
2594 pUmDevice->crc_counter_expiry = (5 * HZ) /
2595 pUmDevice->timer_interval;
2597 BCM5700_PHY_LOCK(pUmDevice, flags);
/* Bit 15 of PHY reg 0x1e enables the CRC error counter in reg 0x14. */
2598 LM_ReadPhy(pDevice, 0x1e, &Value32);
2599 if ((Value32 & 0x8000) == 0)
2600 LM_WritePhy(pDevice, 0x1e, Value32 | 0x8000);
2601 LM_ReadPhy(pDevice, 0x14, &Value32);
2602 BCM5700_PHY_UNLOCK(pUmDevice, flags);
2603 /* Sometimes data on the MDIO bus can be corrupted */
2604 if (Value32 != 0xffff)
2605 pUmDevice->phy_crc_count += Value32;
2606 return pUmDevice->phy_crc_count;
2608 else if (pStats == 0) {
2612 return (MM_GETSTATS64(pStats->dot3StatsFCSErrors));
/*
 * bcm5700_rx_err_count() - aggregate RX error total for get_stats.
 *
 * Sum of the software-maintained CRC count (see bcm5700_crc_count) plus
 * the alignment, undersize, fragment, oversize and jabber counters from
 * the hardware statistics block.
 * NOTE(review): the excerpt skips lines 2621-2623; the original likely
 * guards against a NULL pStats there — not visible in this view.
 */
2617 bcm5700_rx_err_count(UM_DEVICE_BLOCK *pUmDevice)
2619 LM_DEVICE_BLOCK *pDevice = &pUmDevice->lm_dev;
2620 T3_STATS_BLOCK *pStats = (T3_STATS_BLOCK *) pDevice->pStatsBlkVirt;
2624 return (bcm5700_crc_count(pUmDevice) +
2625 MM_GETSTATS64(pStats->dot3StatsAlignmentErrors) +
2626 MM_GETSTATS64(pStats->etherStatsUndersizePkts) +
2627 MM_GETSTATS64(pStats->etherStatsFragments) +
2628 MM_GETSTATS64(pStats->dot3StatsFramesTooLong) +
2629 MM_GETSTATS64(pStats->etherStatsJabbers));
/*
 * bcm5700_get_stats() - net_device get_stats handler.
 *
 * Translates the chip's T3_STATS_BLOCK counters into the kernel's
 * struct net_device_stats, combining unicast/multicast/broadcast counts
 * into rx/tx_packets and deriving the CRC and total RX error fields from
 * the helper functions above.  Returns &pUmDevice->stats (return statement
 * not visible in this truncated excerpt).
 */
2632 STATIC struct net_device_stats *
2633 bcm5700_get_stats(struct net_device *dev)
2635 PUM_DEVICE_BLOCK pUmDevice = (PUM_DEVICE_BLOCK)dev->priv;
2636 PLM_DEVICE_BLOCK pDevice = (PLM_DEVICE_BLOCK) pUmDevice;
2637 PT3_STATS_BLOCK pStats = (PT3_STATS_BLOCK) pDevice->pStatsBlkVirt;
2638 struct net_device_stats *p_netstats = &pUmDevice->stats;
2643 /* Get stats from LM */
2644 p_netstats->rx_packets =
2645 MM_GETSTATS(pStats->ifHCInUcastPkts) +
2646 MM_GETSTATS(pStats->ifHCInMulticastPkts) +
2647 MM_GETSTATS(pStats->ifHCInBroadcastPkts);
2648 p_netstats->tx_packets =
2649 MM_GETSTATS(pStats->ifHCOutUcastPkts) +
2650 MM_GETSTATS(pStats->ifHCOutMulticastPkts) +
2651 MM_GETSTATS(pStats->ifHCOutBroadcastPkts);
2652 p_netstats->rx_bytes = MM_GETSTATS(pStats->ifHCInOctets);
2653 p_netstats->tx_bytes = MM_GETSTATS(pStats->ifHCOutOctets);
2654 p_netstats->tx_errors =
2655 MM_GETSTATS(pStats->dot3StatsInternalMacTransmitErrors) +
2656 MM_GETSTATS(pStats->dot3StatsCarrierSenseErrors) +
2657 MM_GETSTATS(pStats->ifOutDiscards) +
2658 MM_GETSTATS(pStats->ifOutErrors);
2659 p_netstats->multicast = MM_GETSTATS(pStats->ifHCInMulticastPkts);
2660 p_netstats->collisions = MM_GETSTATS(pStats->etherStatsCollisions);
2661 p_netstats->rx_length_errors =
2662 MM_GETSTATS(pStats->dot3StatsFramesTooLong) +
2663 MM_GETSTATS(pStats->etherStatsUndersizePkts);
2664 p_netstats->rx_over_errors = MM_GETSTATS(pStats->nicNoMoreRxBDs);
2665 p_netstats->rx_frame_errors =
2666 MM_GETSTATS(pStats->dot3StatsAlignmentErrors);
/* CRC/RX error totals come from helpers; 5700/5701 need PHY-based counts. */
2667 p_netstats->rx_crc_errors = (unsigned long)
2668 bcm5700_crc_count(pUmDevice);
2669 p_netstats->rx_errors = (unsigned long)
2670 bcm5700_rx_err_count(pUmDevice);
2672 p_netstats->tx_aborted_errors = MM_GETSTATS(pStats->ifOutDiscards);
2673 p_netstats->tx_carrier_errors =
2674 MM_GETSTATS(pStats->dot3StatsCarrierSenseErrors);
/*
 * b57_suspend_chip() - quiesce the device before an offline self-test or
 * power-management suspend.
 *
 * If the interface is open: mask interrupts, drop the carrier, stop the TX
 * queue, kill the replenish tasklet and wait for any in-flight poll/ISR to
 * finish.  Then mark the device suspended and reset the chip via
 * LM_ShutdownChip(LM_SUSPEND_RESET).  Paired with b57_resume_chip().
 */
2680 b57_suspend_chip(UM_DEVICE_BLOCK *pUmDevice)
2682 LM_DEVICE_BLOCK *pDevice = &pUmDevice->lm_dev;
2684 if (pUmDevice->opened) {
2685 bcm5700_intr_off(pUmDevice);
2686 netif_carrier_off(pUmDevice->dev);
2687 netif_stop_queue(pUmDevice->dev);
2689 tasklet_kill(&pUmDevice->tasklet);
2691 bcm5700_poll_wait(pUmDevice);
2693 pUmDevice->suspended = 1;
2694 LM_ShutdownChip(pDevice, LM_SUSPEND_RESET);
/*
 * b57_resume_chip() - counterpart of b57_suspend_chip().
 *
 * Clears the suspended flag and, if the interface is open, re-initializes
 * the chip via bcm5700_reset(); otherwise performs a shutdown reset.
 * NOTE(review): lines 2706-2707 are missing from this excerpt, so the
 * else-branch pairing of the LM_ShutdownChip call is inferred, not shown.
 */
2698 b57_resume_chip(UM_DEVICE_BLOCK *pUmDevice)
2700 LM_DEVICE_BLOCK *pDevice = &pUmDevice->lm_dev;
2702 if (pUmDevice->suspended) {
2703 pUmDevice->suspended = 0;
2704 if (pUmDevice->opened) {
2705 bcm5700_reset(pUmDevice->dev);
2708 LM_ShutdownChip(pDevice, LM_SHUTDOWN_RESET);
2713 /* Returns 0 on failure, 1 on success */
/*
 * b57_test_intr() - interrupt self-test (used by ethtool ETHTOOL_TEST).
 *
 * Arms pUmDevice->intr_test, forces an interrupt by writing the host
 * coalescing mode register, then polls up to 10 times (sleeping 1 ms per
 * iteration) for the ISR to set intr_test_result (see bcm5700_interrupt's
 * intr_test path).  Requires the interface to be open.
 */
2715 b57_test_intr(UM_DEVICE_BLOCK *pUmDevice)
2717 LM_DEVICE_BLOCK *pDevice = &pUmDevice->lm_dev;
2720 if (!pUmDevice->opened)
2722 pUmDevice->intr_test_result = 0;
2723 pUmDevice->intr_test = 1;
2725 REG_WR(pDevice, HostCoalesce.Mode,
2726 pDevice->CoalesceMode | HOST_COALESCE_ENABLE |
2729 for (j = 0; j < 10; j++) {
2730 if (pUmDevice->intr_test_result){
/* Re-trigger each iteration in case the first write was missed. */
2734 REG_WR(pDevice, HostCoalesce.Mode,
2735 pDevice->CoalesceMode | HOST_COALESCE_ENABLE |
2738 MM_Sleep(pDevice, 1);
2741 return pUmDevice->intr_test_result;
2747 #ifdef ETHTOOL_GSTRINGS
/*
 * ethtool -S support: name table and matching offset table.
 *
 * bcm5700_stats_str_arr[] supplies the ETH_NUM_STATS stat names returned
 * by ETHTOOL_GSTRINGS; bcm5700_stats_offset_arr[] maps each name to its
 * uint64 index within T3_STATS_BLOCK.  The two arrays MUST stay in the
 * same order.  RX_CRC_IDX and RX_MAC_ERR_IDX mark the two entries that
 * have no hardware counter and are computed in the ETHTOOL_GSTATS handler
 * via bcm5700_crc_count()/bcm5700_rx_err_count() instead (their offset
 * slots are left 0).  SWAP_DWORD_64 fixes the 32-bit-word ordering of the
 * 64-bit counters on platforms that need it.
 */
2749 #define ETH_NUM_STATS 30
2750 #define RX_CRC_IDX 5
2751 #define RX_MAC_ERR_IDX 14
2754 char string[ETH_GSTRING_LEN];
2755 } bcm5700_stats_str_arr[ETH_NUM_STATS] = {
2756 { "rx_unicast_packets" },
2757 { "rx_multicast_packets" },
2758 { "rx_broadcast_packets" },
2761 { "rx_crc_errors" }, /* this needs to be calculated */
2762 { "rx_align_errors" },
2763 { "rx_xon_frames" },
2764 { "rx_xoff_frames" },
2765 { "rx_long_frames" },
2766 { "rx_short_frames" },
2770 { "rx_mac_errors" }, /* this needs to be calculated */
2771 { "tx_unicast_packets" },
2772 { "tx_multicast_packets" },
2773 { "tx_broadcast_packets" },
2776 { "tx_single_collisions" },
2777 { "tx_multi_collisions" },
2778 { "tx_total_collisions" },
2779 { "tx_excess_collisions" },
2780 { "tx_late_collisions" },
2781 { "tx_xon_frames" },
2782 { "tx_xoff_frames" },
2783 { "tx_internal_mac_errors" },
2784 { "tx_carrier_errors" },
/* Byte offset in T3_STATS_BLOCK expressed as a uint64_t index. */
2788 #define STATS_OFFSET(offset_name) ((OFFSETOF(T3_STATS_BLOCK, offset_name)) / sizeof(uint64_t))
2791 #define SWAP_DWORD_64(x) (x)
2793 #define SWAP_DWORD_64(x) ((x << 32) | (x >> 32))
2796 unsigned long bcm5700_stats_offset_arr[ETH_NUM_STATS] = {
2797 STATS_OFFSET(ifHCInUcastPkts),
2798 STATS_OFFSET(ifHCInMulticastPkts),
2799 STATS_OFFSET(ifHCInBroadcastPkts),
2800 STATS_OFFSET(ifHCInOctets),
2801 STATS_OFFSET(etherStatsFragments),
2803 STATS_OFFSET(dot3StatsAlignmentErrors),
2804 STATS_OFFSET(xonPauseFramesReceived),
2805 STATS_OFFSET(xoffPauseFramesReceived),
2806 STATS_OFFSET(dot3StatsFramesTooLong),
2807 STATS_OFFSET(etherStatsUndersizePkts),
2808 STATS_OFFSET(etherStatsJabbers),
2809 STATS_OFFSET(ifInDiscards),
2810 STATS_OFFSET(ifInErrors),
2812 STATS_OFFSET(ifHCOutUcastPkts),
2813 STATS_OFFSET(ifHCOutMulticastPkts),
2814 STATS_OFFSET(ifHCOutBroadcastPkts),
2815 STATS_OFFSET(ifHCOutOctets),
2816 STATS_OFFSET(dot3StatsDeferredTransmissions),
2817 STATS_OFFSET(dot3StatsSingleCollisionFrames),
2818 STATS_OFFSET(dot3StatsMultipleCollisionFrames),
2819 STATS_OFFSET(etherStatsCollisions),
2820 STATS_OFFSET(dot3StatsExcessiveCollisions),
2821 STATS_OFFSET(dot3StatsLateCollisions),
2822 STATS_OFFSET(outXonSent),
2823 STATS_OFFSET(outXoffSent),
2824 STATS_OFFSET(dot3StatsInternalMacTransmitErrors),
2825 STATS_OFFSET(dot3StatsCarrierSenseErrors),
2826 STATS_OFFSET(ifOutErrors),
2829 #endif /* ETHTOOL_GSTRINGS */
/*
 * ethtool self-test support: names for the ETH_NUM_TESTS results returned
 * by ETHTOOL_TEST (order matches the test sequence in the ioctl handler),
 * plus prototypes for the test routines implemented elsewhere.
 */
2832 #define ETH_NUM_TESTS 6
2834 char string[ETH_GSTRING_LEN];
2835 } bcm5700_tests_str_arr[ETH_NUM_TESTS] = {
2836 { "register test (offline)" },
2837 { "memory test (offline)" },
2838 { "loopback test (offline)" },
2839 { "nvram test (online)" },
2840 { "interrupt test (online)" },
2841 { "link test (online)" },
2844 extern LM_STATUS b57_test_registers(UM_DEVICE_BLOCK *pUmDevice);
2845 extern LM_STATUS b57_test_memory(UM_DEVICE_BLOCK *pUmDevice);
2846 extern LM_STATUS b57_test_nvram(UM_DEVICE_BLOCK *pUmDevice);
2847 extern LM_STATUS b57_test_link(UM_DEVICE_BLOCK *pUmDevice);
2848 extern LM_STATUS b57_test_loopback(UM_DEVICE_BLOCK *pUmDevice, int looptype, int linespeed);
2851 #ifdef ETHTOOL_GREGS
2852 #if (LINUX_VERSION_CODE >= 0x02040f)
/*
 * bcm5700_get_reg_blk() - copy one register range into the ETHTOOL_GREGS
 * dump buffer, advancing *buf past it.
 *
 * The final (unnamed in this excerpt) parameter selects "reserved" mode:
 * when set, the range is zero-filled instead of read.  On 5705+ chips,
 * three additional register windows (0x3400-0x3bff, 0x5400-0x57ff,
 * 0x6400-0x67ff) are skipped during reads because they do not exist on
 * those ASICs.  NOTE(review): the branch that advances *buf when a word
 * is skipped is not visible here — lines are missing from the excerpt.
 */
2854 bcm5700_get_reg_blk(UM_DEVICE_BLOCK *pUmDevice, u32 **buf, u32 start, u32 end,
2858 LM_DEVICE_BLOCK *pDevice = &pUmDevice->lm_dev;
2861 memset(*buf, 0, end - start);
2862 *buf = *buf + (end - start)/4;
2865 for (offset = start; offset < end; offset+=4, *buf = *buf + 1) {
2866 if (T3_ASIC_IS_5705_BEYOND(pDevice->ChipRevId)){
2867 if (((offset >= 0x3400) && (offset < 0x3c00)) ||
2868 ((offset >= 0x5400) && (offset < 0x5800)) ||
2869 ((offset >= 0x6400) && (offset < 0x6800))) {
2874 **buf = REG_RD_OFFSET(pDevice, offset);
2880 static int netdev_ethtool_ioctl(struct net_device *dev, void *useraddr)
2882 struct ethtool_cmd ethcmd;
2883 PUM_DEVICE_BLOCK pUmDevice = (PUM_DEVICE_BLOCK)dev->priv;
2884 PLM_DEVICE_BLOCK pDevice = (PLM_DEVICE_BLOCK) pUmDevice;
2886 if (mm_copy_from_user(ðcmd, useraddr, sizeof(ethcmd)))
2889 switch (ethcmd.cmd) {
2890 #ifdef ETHTOOL_GDRVINFO
2891 case ETHTOOL_GDRVINFO: {
2892 struct ethtool_drvinfo info = {ETHTOOL_GDRVINFO};
2894 strcpy(info.driver, bcm5700_driver);
2895 #ifdef INCLUDE_5701_AX_FIX
2896 if(pDevice->ChipRevId == T3_CHIP_ID_5701_A0) {
2897 extern int t3FwReleaseMajor;
2898 extern int t3FwReleaseMinor;
2899 extern int t3FwReleaseFix;
2901 sprintf(info.fw_version, "%i.%i.%i",
2902 t3FwReleaseMajor, t3FwReleaseMinor,
2906 strcpy(info.fw_version, pDevice->BootCodeVer);
2907 strcpy(info.version, bcm5700_version);
2908 #if (LINUX_VERSION_CODE <= 0x020422)
2909 strcpy(info.bus_info, pUmDevice->pdev->slot_name);
2911 strcpy(info.bus_info, pci_name(pUmDevice->pdev));
2916 #ifdef ETHTOOL_GEEPROM
2917 BCM_EEDUMP_LEN(&info, pDevice->NvramSize);
2919 #ifdef ETHTOOL_GREGS
2920 /* dump everything, including holes in the register space */
2921 info.regdump_len = 0x6c00;
2923 #ifdef ETHTOOL_GSTATS
2924 info.n_stats = ETH_NUM_STATS;
2927 info.testinfo_len = ETH_NUM_TESTS;
2929 if (mm_copy_to_user(useraddr, &info, sizeof(info)))
2934 case ETHTOOL_GSET: {
2935 if ((pDevice->TbiFlags & ENABLE_TBI_FLAG)||
2936 (pDevice->PhyFlags & PHY_IS_FIBER)) {
2938 (SUPPORTED_1000baseT_Full |
2940 ethcmd.supported |= SUPPORTED_FIBRE;
2941 ethcmd.port = PORT_FIBRE;
2945 (SUPPORTED_10baseT_Half |
2946 SUPPORTED_10baseT_Full |
2947 SUPPORTED_100baseT_Half |
2948 SUPPORTED_100baseT_Full |
2949 SUPPORTED_1000baseT_Half |
2950 SUPPORTED_1000baseT_Full |
2952 ethcmd.supported |= SUPPORTED_TP;
2953 ethcmd.port = PORT_TP;
2956 ethcmd.transceiver = XCVR_INTERNAL;
2957 ethcmd.phy_address = 0;
2959 if (pDevice->LineSpeed == LM_LINE_SPEED_1000MBPS)
2960 ethcmd.speed = SPEED_1000;
2961 else if (pDevice->LineSpeed == LM_LINE_SPEED_100MBPS)
2962 ethcmd.speed = SPEED_100;
2963 else if (pDevice->LineSpeed == LM_LINE_SPEED_10MBPS)
2964 ethcmd.speed = SPEED_10;
2968 if (pDevice->DuplexMode == LM_DUPLEX_MODE_FULL)
2969 ethcmd.duplex = DUPLEX_FULL;
2971 ethcmd.duplex = DUPLEX_HALF;
2973 if (pDevice->DisableAutoNeg == FALSE) {
2974 ethcmd.autoneg = AUTONEG_ENABLE;
2975 ethcmd.advertising = ADVERTISED_Autoneg;
2976 if ((pDevice->TbiFlags & ENABLE_TBI_FLAG) ||
2977 (pDevice->PhyFlags & PHY_IS_FIBER)) {
2978 ethcmd.advertising |=
2979 ADVERTISED_1000baseT_Full |
2983 ethcmd.advertising |=
2985 if (pDevice->advertising &
2986 PHY_AN_AD_10BASET_HALF) {
2988 ethcmd.advertising |=
2989 ADVERTISED_10baseT_Half;
2991 if (pDevice->advertising &
2992 PHY_AN_AD_10BASET_FULL) {
2994 ethcmd.advertising |=
2995 ADVERTISED_10baseT_Full;
2997 if (pDevice->advertising &
2998 PHY_AN_AD_100BASETX_HALF) {
3000 ethcmd.advertising |=
3001 ADVERTISED_100baseT_Half;
3003 if (pDevice->advertising &
3004 PHY_AN_AD_100BASETX_FULL) {
3006 ethcmd.advertising |=
3007 ADVERTISED_100baseT_Full;
3009 if (pDevice->advertising1000 &
3010 BCM540X_AN_AD_1000BASET_HALF) {
3012 ethcmd.advertising |=
3013 ADVERTISED_1000baseT_Half;
3015 if (pDevice->advertising1000 &
3016 BCM540X_AN_AD_1000BASET_FULL) {
3018 ethcmd.advertising |=
3019 ADVERTISED_1000baseT_Full;
3024 ethcmd.autoneg = AUTONEG_DISABLE;
3025 ethcmd.advertising = 0;
3028 ethcmd.maxtxpkt = pDevice->TxMaxCoalescedFrames;
3029 ethcmd.maxrxpkt = pDevice->RxMaxCoalescedFrames;
3031 if(mm_copy_to_user(useraddr, ðcmd, sizeof(ethcmd)))
3035 case ETHTOOL_SSET: {
3036 unsigned long flags;
3038 if(!capable(CAP_NET_ADMIN))
3040 if (ethcmd.autoneg == AUTONEG_ENABLE) {
3041 pDevice->RequestedLineSpeed = LM_LINE_SPEED_AUTO;
3042 pDevice->RequestedDuplexMode = LM_DUPLEX_MODE_UNKNOWN;
3043 pDevice->DisableAutoNeg = FALSE;
3046 if (ethcmd.speed == SPEED_1000 &&
3047 pDevice->PhyFlags & PHY_NO_GIGABIT)
3050 if (ethcmd.speed == SPEED_1000 &&
3051 (pDevice->TbiFlags & ENABLE_TBI_FLAG ||
3052 pDevice->PhyFlags & PHY_IS_FIBER ) ) {
3054 pDevice->RequestedLineSpeed =
3055 LM_LINE_SPEED_1000MBPS;
3057 pDevice->RequestedDuplexMode =
3058 LM_DUPLEX_MODE_FULL;
3060 else if (ethcmd.speed == SPEED_100 &&
3061 !(pDevice->TbiFlags & ENABLE_TBI_FLAG) &&
3062 !(pDevice->PhyFlags & PHY_IS_FIBER)) {
3064 pDevice->RequestedLineSpeed =
3065 LM_LINE_SPEED_100MBPS;
3067 else if (ethcmd.speed == SPEED_10 &&
3068 !(pDevice->TbiFlags & ENABLE_TBI_FLAG) &&
3069 !(pDevice->PhyFlags & PHY_IS_FIBER)) {
3071 pDevice->RequestedLineSpeed =
3072 LM_LINE_SPEED_10MBPS;
3078 pDevice->DisableAutoNeg = TRUE;
3079 if (ethcmd.duplex == DUPLEX_FULL) {
3080 pDevice->RequestedDuplexMode =
3081 LM_DUPLEX_MODE_FULL;
3084 if (!(pDevice->TbiFlags & ENABLE_TBI_FLAG) &&
3085 !(pDevice->PhyFlags & PHY_IS_FIBER) ) {
3087 pDevice->RequestedDuplexMode =
3088 LM_DUPLEX_MODE_HALF;
3092 if (netif_running(dev)) {
3093 BCM5700_PHY_LOCK(pUmDevice, flags);
3094 LM_SetupPhy(pDevice);
3095 BCM5700_PHY_UNLOCK(pUmDevice, flags);
3101 case ETHTOOL_GWOL: {
3102 struct ethtool_wolinfo wol = {ETHTOOL_GWOL};
3104 if (((pDevice->TbiFlags & ENABLE_TBI_FLAG) &&
3105 !(pDevice->Flags & FIBER_WOL_CAPABLE_FLAG)) ||
3106 (pDevice->Flags & DISABLE_D3HOT_FLAG)) {
3111 wol.supported = WAKE_MAGIC;
3112 if (pDevice->WakeUpMode == LM_WAKE_UP_MODE_MAGIC_PACKET)
3114 wol.wolopts = WAKE_MAGIC;
3120 if (mm_copy_to_user(useraddr, &wol, sizeof(wol)))
3124 case ETHTOOL_SWOL: {
3125 struct ethtool_wolinfo wol;
3127 if(!capable(CAP_NET_ADMIN))
3129 if (mm_copy_from_user(&wol, useraddr, sizeof(wol)))
3131 if ((((pDevice->TbiFlags & ENABLE_TBI_FLAG) &&
3132 !(pDevice->Flags & FIBER_WOL_CAPABLE_FLAG)) ||
3133 (pDevice->Flags & DISABLE_D3HOT_FLAG)) &&
3138 if ((wol.wolopts & ~WAKE_MAGIC) != 0) {
3141 if (wol.wolopts & WAKE_MAGIC) {
3142 pDevice->WakeUpModeCap = LM_WAKE_UP_MODE_MAGIC_PACKET;
3143 pDevice->WakeUpMode = LM_WAKE_UP_MODE_MAGIC_PACKET;
3146 pDevice->WakeUpModeCap = LM_WAKE_UP_MODE_NONE;
3147 pDevice->WakeUpMode = LM_WAKE_UP_MODE_NONE;
3153 #ifdef ETHTOOL_GLINK
3154 case ETHTOOL_GLINK: {
3155 struct ethtool_value edata = {ETHTOOL_GLINK};
3157 /* workaround for DHCP using ifup script */
3158 /* ifup only waits for 5 seconds for link up */
3159 /* NIC may take more than 5 seconds to establish link */
3160 if ((pUmDevice->delayed_link_ind > 0) &&
3161 delay_link[pUmDevice->index])
3164 if (pDevice->LinkStatus == LM_STATUS_LINK_ACTIVE) {
3170 if (mm_copy_to_user(useraddr, &edata, sizeof(edata)))
3175 #ifdef ETHTOOL_NWAY_RST
3176 case ETHTOOL_NWAY_RST: {
3178 unsigned long flags;
3180 if(!capable(CAP_NET_ADMIN))
3182 if (pDevice->DisableAutoNeg) {
3185 if (!netif_running(dev))
3187 BCM5700_PHY_LOCK(pUmDevice, flags);
3188 if (pDevice->TbiFlags & ENABLE_TBI_FLAG) {
3189 pDevice->RequestedLineSpeed = LM_LINE_SPEED_1000MBPS;
3190 pDevice->DisableAutoNeg = TRUE;
3191 LM_SetupPhy(pDevice);
3193 pDevice->RequestedLineSpeed = LM_LINE_SPEED_AUTO;
3194 pDevice->DisableAutoNeg = FALSE;
3195 LM_SetupPhy(pDevice);
3198 if ((T3_ASIC_REV(pDevice->ChipRevId) ==
3199 T3_ASIC_REV_5703) ||
3200 (T3_ASIC_REV(pDevice->ChipRevId) ==
3201 T3_ASIC_REV_5704) ||
3202 (T3_ASIC_REV(pDevice->ChipRevId) ==
3205 LM_ResetPhy(pDevice);
3206 LM_SetupPhy(pDevice);
3208 pDevice->PhyFlags &= ~PHY_FIBER_FALLBACK;
3209 LM_ReadPhy(pDevice, PHY_CTRL_REG, &phyctrl);
3210 LM_WritePhy(pDevice, PHY_CTRL_REG, phyctrl |
3211 PHY_CTRL_AUTO_NEG_ENABLE |
3212 PHY_CTRL_RESTART_AUTO_NEG);
3214 BCM5700_PHY_UNLOCK(pUmDevice, flags);
3218 #ifdef ETHTOOL_GEEPROM
3219 case ETHTOOL_GEEPROM: {
3220 struct ethtool_eeprom eeprom;
3222 LM_UINT32 buf1[64/4];
3223 int i, j, offset, len;
3225 if (mm_copy_from_user(&eeprom, useraddr, sizeof(eeprom)))
3228 if (eeprom.offset >= pDevice->NvramSize)
3231 /* maximum data limited */
3232 /* to read more, call again with a different offset */
3233 if (eeprom.len > 0x800) {
3235 if (mm_copy_to_user(useraddr, &eeprom, sizeof(eeprom)))
3239 if (eeprom.len > 64) {
3240 buf = kmalloc(eeprom.len, GFP_KERNEL);
3247 useraddr += offsetof(struct ethtool_eeprom, data);
3249 offset = eeprom.offset;
3252 offset &= 0xfffffffc;
3253 len += (offset & 3);
3255 len = (len + 3) & 0xfffffffc;
3256 for (i = 0, j = 0; j < len; i++, j += 4) {
3257 if (LM_NvramRead(pDevice, offset + j, buf + i) !=
3258 LM_STATUS_SUCCESS) {
3263 buf += (eeprom.offset & 3);
3264 i = mm_copy_to_user(useraddr, buf, eeprom.len);
3266 if (eeprom.len > 64) {
3273 case ETHTOOL_SEEPROM: {
3274 struct ethtool_eeprom eeprom;
3275 LM_UINT32 buf[64/4];
3278 if(!capable(CAP_NET_ADMIN))
3280 if (mm_copy_from_user(&eeprom, useraddr, sizeof(eeprom)))
3283 if ((eeprom.offset & 3) || (eeprom.len & 3) ||
3284 (eeprom.offset >= pDevice->NvramSize)) {
3288 if ((eeprom.offset + eeprom.len) >= pDevice->NvramSize) {
3289 eeprom.len = pDevice->NvramSize - eeprom.offset;
3292 useraddr += offsetof(struct ethtool_eeprom, data);
3295 offset = eeprom.offset;
3301 if (mm_copy_from_user(&buf, useraddr, i))
3304 bcm5700_intr_off(pUmDevice);
3305 /* Prevent race condition on Grc.Mode register */
3306 bcm5700_poll_wait(pUmDevice);
3308 if (LM_NvramWriteBlock(pDevice, offset, buf, i/4) !=
3309 LM_STATUS_SUCCESS) {
3310 bcm5700_intr_on(pUmDevice);
3313 bcm5700_intr_on(pUmDevice);
3321 #ifdef ETHTOOL_GREGS
3322 #if (LINUX_VERSION_CODE >= 0x02040f)
3323 case ETHTOOL_GREGS: {
3324 struct ethtool_regs eregs;
3325 LM_UINT32 *buf, *buf1;
3328 if(!capable(CAP_NET_ADMIN))
3330 if (pDevice->Flags & UNDI_FIX_FLAG)
3332 if (mm_copy_from_user(&eregs, useraddr, sizeof(eregs)))
3334 if (eregs.len > 0x6c00)
3336 eregs.version = 0x0;
3337 if (mm_copy_to_user(useraddr, &eregs, sizeof(eregs)))
3339 buf = buf1 = kmalloc(eregs.len, GFP_KERNEL);
3342 bcm5700_get_reg_blk(pUmDevice, &buf, 0, 0xb0, 0);
3343 bcm5700_get_reg_blk(pUmDevice, &buf, 0xb0, 0x200, 1);
3344 bcm5700_get_reg_blk(pUmDevice, &buf, 0x200, 0x8f0, 0);
3345 bcm5700_get_reg_blk(pUmDevice, &buf, 0x8f0, 0xc00, 1);
3346 bcm5700_get_reg_blk(pUmDevice, &buf, 0xc00, 0xce0, 0);
3347 bcm5700_get_reg_blk(pUmDevice, &buf, 0xce0, 0x1000, 1);
3348 bcm5700_get_reg_blk(pUmDevice, &buf, 0x1000, 0x1004, 0);
3349 bcm5700_get_reg_blk(pUmDevice, &buf, 0x1004, 0x1400, 1);
3350 bcm5700_get_reg_blk(pUmDevice, &buf, 0x1400, 0x1480, 0);
3351 bcm5700_get_reg_blk(pUmDevice, &buf, 0x1480, 0x1800, 1);
3352 bcm5700_get_reg_blk(pUmDevice, &buf, 0x1800, 0x1848, 0);
3353 bcm5700_get_reg_blk(pUmDevice, &buf, 0x1848, 0x1c00, 1);
3354 bcm5700_get_reg_blk(pUmDevice, &buf, 0x1c00, 0x1c04, 0);
3355 bcm5700_get_reg_blk(pUmDevice, &buf, 0x1c04, 0x2000, 1);
3356 bcm5700_get_reg_blk(pUmDevice, &buf, 0x2000, 0x225c, 0);
3357 bcm5700_get_reg_blk(pUmDevice, &buf, 0x225c, 0x2400, 1);
3358 bcm5700_get_reg_blk(pUmDevice, &buf, 0x2400, 0x24c4, 0);
3359 bcm5700_get_reg_blk(pUmDevice, &buf, 0x24c4, 0x2800, 1);
3360 bcm5700_get_reg_blk(pUmDevice, &buf, 0x2800, 0x2804, 0);
3361 bcm5700_get_reg_blk(pUmDevice, &buf, 0x2804, 0x2c00, 1);
3362 bcm5700_get_reg_blk(pUmDevice, &buf, 0x2c00, 0x2c20, 0);
3363 bcm5700_get_reg_blk(pUmDevice, &buf, 0x2c20, 0x3000, 1);
3364 bcm5700_get_reg_blk(pUmDevice, &buf, 0x3000, 0x3014, 0);
3365 bcm5700_get_reg_blk(pUmDevice, &buf, 0x3014, 0x3400, 1);
3366 bcm5700_get_reg_blk(pUmDevice, &buf, 0x3400, 0x3408, 0);
3367 bcm5700_get_reg_blk(pUmDevice, &buf, 0x3408, 0x3800, 1);
3368 bcm5700_get_reg_blk(pUmDevice, &buf, 0x3800, 0x3808, 0);
3369 bcm5700_get_reg_blk(pUmDevice, &buf, 0x3808, 0x3c00, 1);
3370 bcm5700_get_reg_blk(pUmDevice, &buf, 0x3c00, 0x3d00, 0);
3371 bcm5700_get_reg_blk(pUmDevice, &buf, 0x3d00, 0x4000, 1);
3372 bcm5700_get_reg_blk(pUmDevice, &buf, 0x4000, 0x4010, 0);
3373 bcm5700_get_reg_blk(pUmDevice, &buf, 0x4010, 0x4400, 1);
3374 bcm5700_get_reg_blk(pUmDevice, &buf, 0x4400, 0x4458, 0);
3375 bcm5700_get_reg_blk(pUmDevice, &buf, 0x4458, 0x4800, 1);
3376 bcm5700_get_reg_blk(pUmDevice, &buf, 0x4800, 0x4808, 0);
3377 bcm5700_get_reg_blk(pUmDevice, &buf, 0x4808, 0x4c00, 1);
3378 bcm5700_get_reg_blk(pUmDevice, &buf, 0x4c00, 0x4c08, 0);
3379 bcm5700_get_reg_blk(pUmDevice, &buf, 0x4c08, 0x5000, 1);
3380 bcm5700_get_reg_blk(pUmDevice, &buf, 0x5000, 0x5050, 0);
3381 bcm5700_get_reg_blk(pUmDevice, &buf, 0x5050, 0x5400, 1);
3382 bcm5700_get_reg_blk(pUmDevice, &buf, 0x5400, 0x5450, 0);
3383 bcm5700_get_reg_blk(pUmDevice, &buf, 0x5450, 0x5800, 1);
3384 bcm5700_get_reg_blk(pUmDevice, &buf, 0x5800, 0x5a10, 0);
3385 bcm5700_get_reg_blk(pUmDevice, &buf, 0x5a10, 0x6000, 1);
3386 bcm5700_get_reg_blk(pUmDevice, &buf, 0x6000, 0x600c, 0);
3387 bcm5700_get_reg_blk(pUmDevice, &buf, 0x600c, 0x6400, 1);
3388 bcm5700_get_reg_blk(pUmDevice, &buf, 0x6400, 0x6404, 0);
3389 bcm5700_get_reg_blk(pUmDevice, &buf, 0x6404, 0x6800, 1);
3390 bcm5700_get_reg_blk(pUmDevice, &buf, 0x6800, 0x6848, 0);
3391 bcm5700_get_reg_blk(pUmDevice, &buf, 0x6848, 0x6c00, 1);
3393 i = mm_copy_to_user(useraddr + sizeof(eregs), buf1, eregs.len);
3401 #ifdef ETHTOOL_GPAUSEPARAM
3402 case ETHTOOL_GPAUSEPARAM: {
3403 struct ethtool_pauseparam epause = { ETHTOOL_GPAUSEPARAM };
3405 if (!pDevice->DisableAutoNeg) {
3406 epause.autoneg = (pDevice->FlowControlCap &
3407 LM_FLOW_CONTROL_AUTO_PAUSE) != 0;
3413 (pDevice->FlowControl &
3414 LM_FLOW_CONTROL_RECEIVE_PAUSE) != 0;
3416 (pDevice->FlowControl &
3417 LM_FLOW_CONTROL_TRANSMIT_PAUSE) != 0;
3418 if (mm_copy_to_user(useraddr, &epause, sizeof(epause)))
3423 case ETHTOOL_SPAUSEPARAM: {
3424 struct ethtool_pauseparam epause;
3425 unsigned long flags;
3427 if(!capable(CAP_NET_ADMIN))
3429 if (mm_copy_from_user(&epause, useraddr, sizeof(epause)))
3431 pDevice->FlowControlCap = 0;
3432 if (epause.autoneg && !pDevice->DisableAutoNeg) {
3433 pDevice->FlowControlCap |= LM_FLOW_CONTROL_AUTO_PAUSE;
3435 if (epause.rx_pause) {
3436 pDevice->FlowControlCap |=
3437 LM_FLOW_CONTROL_RECEIVE_PAUSE;
3439 if (epause.tx_pause) {
3440 pDevice->FlowControlCap |=
3441 LM_FLOW_CONTROL_TRANSMIT_PAUSE;
3443 if (netif_running(dev)) {
3444 BCM5700_PHY_LOCK(pUmDevice, flags);
3445 LM_SetupPhy(pDevice);
3446 BCM5700_PHY_UNLOCK(pUmDevice, flags);
3452 #ifdef ETHTOOL_GRXCSUM
3453 case ETHTOOL_GRXCSUM: {
3454 struct ethtool_value edata = { ETHTOOL_GRXCSUM };
3457 (pDevice->TaskToOffload &
3458 LM_TASK_OFFLOAD_RX_TCP_CHECKSUM) != 0;
3459 if (mm_copy_to_user(useraddr, &edata, sizeof(edata)))
3464 case ETHTOOL_SRXCSUM: {
3465 struct ethtool_value edata;
3467 if(!capable(CAP_NET_ADMIN))
3469 if (mm_copy_from_user(&edata, useraddr, sizeof(edata)))
3472 if (!(pDevice->TaskOffloadCap &
3473 LM_TASK_OFFLOAD_TX_TCP_CHECKSUM)) {
3477 pDevice->TaskToOffload |=
3478 LM_TASK_OFFLOAD_RX_TCP_CHECKSUM |
3479 LM_TASK_OFFLOAD_RX_UDP_CHECKSUM;
3482 pDevice->TaskToOffload &=
3483 ~(LM_TASK_OFFLOAD_RX_TCP_CHECKSUM |
3484 LM_TASK_OFFLOAD_RX_UDP_CHECKSUM);
3488 case ETHTOOL_GTXCSUM: {
3489 struct ethtool_value edata = { ETHTOOL_GTXCSUM };
3492 (dev->features & get_csum_flag( pDevice->ChipRevId)) != 0;
3493 if (mm_copy_to_user(useraddr, &edata, sizeof(edata)))
3498 case ETHTOOL_STXCSUM: {
3499 struct ethtool_value edata;
3501 if(!capable(CAP_NET_ADMIN))
3503 if (mm_copy_from_user(&edata, useraddr, sizeof(edata)))
3506 if (!(pDevice->TaskOffloadCap &
3507 LM_TASK_OFFLOAD_TX_TCP_CHECKSUM)) {
3511 dev->features |= get_csum_flag( pDevice->ChipRevId);
3512 pDevice->TaskToOffload |=
3513 LM_TASK_OFFLOAD_TX_TCP_CHECKSUM |
3514 LM_TASK_OFFLOAD_TX_UDP_CHECKSUM;
3517 dev->features &= ~get_csum_flag( pDevice->ChipRevId);
3518 pDevice->TaskToOffload &=
3519 ~(LM_TASK_OFFLOAD_TX_TCP_CHECKSUM |
3520 LM_TASK_OFFLOAD_TX_UDP_CHECKSUM);
3525 struct ethtool_value edata = { ETHTOOL_GSG };
3528 (dev->features & NETIF_F_SG) != 0;
3529 if (mm_copy_to_user(useraddr, &edata, sizeof(edata)))
3534 struct ethtool_value edata;
3536 if(!capable(CAP_NET_ADMIN))
3538 if (mm_copy_from_user(&edata, useraddr, sizeof(edata)))
3541 dev->features |= NETIF_F_SG;
3544 dev->features &= ~NETIF_F_SG;
3549 #ifdef ETHTOOL_GRINGPARAM
3550 case ETHTOOL_GRINGPARAM: {
3551 struct ethtool_ringparam ering = { ETHTOOL_GRINGPARAM };
3553 ering.rx_max_pending = T3_STD_RCV_RCB_ENTRY_COUNT - 1;
3554 ering.rx_pending = pDevice->RxStdDescCnt;
3555 ering.rx_mini_max_pending = 0;
3556 ering.rx_mini_pending = 0;
3557 #if T3_JUMBO_RCV_RCB_ENTRY_COUNT
3558 ering.rx_jumbo_max_pending = T3_JUMBO_RCV_RCB_ENTRY_COUNT - 1;
3559 ering.rx_jumbo_pending = pDevice->RxJumboDescCnt;
3561 ering.rx_jumbo_max_pending = 0;
3562 ering.rx_jumbo_pending = 0;
3564 ering.tx_max_pending = MAX_TX_PACKET_DESC_COUNT - 1;
3565 ering.tx_pending = pDevice->TxPacketDescCnt;
3566 if (mm_copy_to_user(useraddr, &ering, sizeof(ering)))
3571 #ifdef ETHTOOL_PHYS_ID
3572 case ETHTOOL_PHYS_ID: {
3573 struct ethtool_value edata;
3575 if(!capable(CAP_NET_ADMIN))
3577 if (mm_copy_from_user(&edata, useraddr, sizeof(edata)))
3579 if (LM_BlinkLED(pDevice, edata.data) == LM_STATUS_SUCCESS)
3584 #ifdef ETHTOOL_GSTRINGS
3585 case ETHTOOL_GSTRINGS: {
3586 struct ethtool_gstrings egstr = { ETHTOOL_GSTRINGS };
3588 if (mm_copy_from_user(&egstr, useraddr, sizeof(egstr)))
3590 switch(egstr.string_set) {
3591 #ifdef ETHTOOL_GSTATS
3593 egstr.len = ETH_NUM_STATS;
3594 if (mm_copy_to_user(useraddr, &egstr, sizeof(egstr)))
3596 if (mm_copy_to_user(useraddr + sizeof(egstr),
3597 bcm5700_stats_str_arr,
3598 sizeof(bcm5700_stats_str_arr)))
3604 egstr.len = ETH_NUM_TESTS;
3605 if (mm_copy_to_user(useraddr, &egstr, sizeof(egstr)))
3607 if (mm_copy_to_user(useraddr + sizeof(egstr),
3608 bcm5700_tests_str_arr,
3609 sizeof(bcm5700_tests_str_arr)))
3618 #ifdef ETHTOOL_GSTATS
3619 case ETHTOOL_GSTATS: {
3620 struct ethtool_stats estats = { ETHTOOL_GSTATS };
3621 uint64_t stats[ETH_NUM_STATS];
3624 (uint64_t *) pDevice->pStatsBlkVirt;
3626 estats.n_stats = ETH_NUM_STATS;
3628 memset(stats, 0, sizeof(stats));
3632 for (i = 0; i < ETH_NUM_STATS; i++) {
3633 if (bcm5700_stats_offset_arr[i] != 0) {
3634 stats[i] = SWAP_DWORD_64(*(pStats +
3635 bcm5700_stats_offset_arr[i]));
3637 else if (i == RX_CRC_IDX) {
3639 bcm5700_crc_count(pUmDevice);
3641 else if (i == RX_MAC_ERR_IDX) {
3643 bcm5700_rx_err_count(pUmDevice);
3647 if (mm_copy_to_user(useraddr, &estats, sizeof(estats))) {
3650 if (mm_copy_to_user(useraddr + sizeof(estats), &stats,
3658 case ETHTOOL_TEST: {
3659 struct ethtool_test etest;
3660 uint64_t tests[ETH_NUM_TESTS] = {0, 0, 0, 0, 0, 0};
3661 LM_POWER_STATE old_power_level;
3663 printk( KERN_ALERT "Performing ethtool test.\n"
3664 "This test will take a few seconds to complete.\n" );
3666 if (mm_copy_from_user(&etest, useraddr, sizeof(etest)))
3669 etest.len = ETH_NUM_TESTS;
3670 old_power_level = pDevice->PowerLevel;
3671 if (old_power_level != LM_POWER_STATE_D0) {
3672 LM_SetPowerState(pDevice, LM_POWER_STATE_D0);
3673 LM_SwitchClocks(pDevice);
3675 MM_Sleep(pDevice, 1000);
3676 if (etest.flags & ETH_TEST_FL_OFFLINE) {
3677 b57_suspend_chip(pUmDevice);
3678 MM_Sleep(pDevice, 1000);
3679 LM_HaltCpu(pDevice,T3_RX_CPU_ID | T3_TX_CPU_ID);
3680 MM_Sleep(pDevice, 1000);
3681 if (b57_test_registers(pUmDevice) == 0) {
3682 etest.flags |= ETH_TEST_FL_FAILED;
3685 MM_Sleep(pDevice, 1000);
3686 if (b57_test_memory(pUmDevice) == 0) {
3687 etest.flags |= ETH_TEST_FL_FAILED;
3690 MM_Sleep(pDevice, 1000);
3691 if (b57_test_loopback(pUmDevice, NICE_LOOPBACK_TESTTYPE_MAC, 0) == 0) {
3692 etest.flags |= ETH_TEST_FL_FAILED;
3695 MM_Sleep(pDevice, 1000);
3696 b57_resume_chip(pUmDevice);
3697 /* wait for link to come up for the link test */
3698 MM_Sleep(pDevice, 4000);
3699 if ((pDevice->LinkStatus != LM_STATUS_LINK_ACTIVE) &&
3700 !(pDevice->TbiFlags & ENABLE_TBI_FLAG)) {
3702 /* wait a little longer for linkup on copper */
3703 MM_Sleep(pDevice, 3000);
3706 if (b57_test_nvram(pUmDevice) == 0) {
3707 etest.flags |= ETH_TEST_FL_FAILED;
3710 MM_Sleep(pDevice, 1000);
3711 if (b57_test_intr(pUmDevice) == 0) {
3712 etest.flags |= ETH_TEST_FL_FAILED;
3715 MM_Sleep(pDevice, 1000);
3716 if (b57_test_link(pUmDevice) == 0) {
3717 etest.flags |= ETH_TEST_FL_FAILED;
3720 MM_Sleep(pDevice, 1000);
3721 if (old_power_level != LM_POWER_STATE_D0) {
3722 LM_SetPowerState(pDevice, old_power_level);
3724 if (mm_copy_to_user(useraddr, &etest, sizeof(etest))) {
3727 if (mm_copy_to_user(useraddr + sizeof(etest), tests,
3735 case ETHTOOL_GTSO: {
3736 struct ethtool_value edata = { ETHTOOL_GTSO };
3740 (dev->features & NETIF_F_TSO) != 0;
3744 if (mm_copy_to_user(useraddr, &edata, sizeof(edata)))
3750 case ETHTOOL_STSO: {
3752 struct ethtool_value edata;
3754 if (!capable(CAP_NET_ADMIN))
3757 if (mm_copy_from_user(&edata, useraddr, sizeof(edata)))
3760 if (!(pDevice->TaskToOffload &
3761 LM_TASK_OFFLOAD_TCP_SEGMENTATION)) {
3765 dev->features &= ~NETIF_F_TSO;
3768 if (T3_ASIC_5714_FAMILY(pDevice->ChipRevId) &&
3769 (dev->mtu > 1500)) {
3770 printk(KERN_ALERT "%s: Jumbo Frames and TSO cannot simultaneously be enabled. Jumbo Frames enabled. TSO disabled.\n", dev->name);
3773 dev->features |= NETIF_F_TSO;
3786 #endif /* #ifdef SIOCETHTOOL */
3788 #if (LINUX_VERSION_CODE >= 0x20400) && (LINUX_VERSION_CODE < 0x20600)
3789 #include <linux/iobuf.h>
3792 /* Provide ioctl() calls to examine the MII xcvr state. */
/*
 * bcm5700_ioctl() - private ioctl entry point for the driver.
 *
 * Handles the legacy SIOCDEVPRIVATE MII access ioctls, the NICE_SUPPORT
 * diagnostic command set (SIOCNICE), and falls through to the ethtool
 * handler for everything else.
 *
 * NOTE(review): this is a partial listing; original lines are elided
 * between the numbered statements (returns, braces, #ifdef arms), so
 * only the visible code is documented here.
 *
 * BUGFIX: the NICE_CMD_GET_SPEED branch reported SPEED_100 for a link
 * running at LM_LINE_SPEED_10MBPS; it now reports SPEED_10.
 */
3793 STATIC int bcm5700_ioctl(struct net_device *dev, struct ifreq *rq, int cmd)
3795 PUM_DEVICE_BLOCK pUmDevice = (PUM_DEVICE_BLOCK)dev->priv;
3796 PLM_DEVICE_BLOCK pDevice = (PLM_DEVICE_BLOCK) pUmDevice;
3797 u16 *data = (u16 *)&rq->ifr_data;
3799 unsigned long flags;
/* Legacy private MII ioctls (pre-SIOCGMIIPHY numbering). */
3805 case SIOCDEVPRIVATE: /* Get the address of the PHY in use. */
3806 data[0] = pDevice->PhyAddr;
3811 case SIOCDEVPRIVATE+1: /* Read the specified MII register. */
/* TBI (fiber) parts have no MII PHY to read -- presumably rejected here. */
3812 if (pDevice->TbiFlags & ENABLE_TBI_FLAG)
3815 /* workaround for DHCP using ifup script */
3816 /* ifup only waits for 5 seconds for link up */
3817 /* NIC may take more than 5 seconds to establish link */
3818 if ((pUmDevice->delayed_link_ind > 0) &&
3819 delay_link[pUmDevice->index]) {
/* PHY access is serialized with the PHY spinlock. */
3823 BCM5700_PHY_LOCK(pUmDevice, flags);
3824 LM_ReadPhy(pDevice, data[1] & 0x1f, (LM_UINT32 *) &value);
3825 BCM5700_PHY_UNLOCK(pUmDevice, flags);
3826 data[3] = value & 0xffff;
3832 case SIOCDEVPRIVATE+2: /* Write the specified MII register */
3833 if (!capable(CAP_NET_ADMIN))
3836 if (pDevice->TbiFlags & ENABLE_TBI_FLAG)
3839 BCM5700_PHY_LOCK(pUmDevice, flags);
3840 LM_WritePhy(pDevice, data[1] & 0x1f, data[2]);
3841 BCM5700_PHY_UNLOCK(pUmDevice, flags);
/* NICE_SUPPORT diagnostic ioctl (SIOCNICE); requires CAP_NET_ADMIN. */
3847 struct nice_req* nrq;
3849 if (!capable(CAP_NET_ADMIN))
3852 nrq = (struct nice_req*)&rq->ifr_ifru;
3853 if( nrq->cmd == NICE_CMD_QUERY_SUPPORT ) {
3854 nrq->nrq_magic = NICE_DEVICE_MAGIC;
3855 nrq->nrq_support_rx = 1;
3856 nrq->nrq_support_vlan = 1;
3857 nrq->nrq_support_get_speed = 1;
3858 #ifdef BCM_NAPI_RXPOLL
3859 nrq->nrq_support_rx_napi = 1;
3863 #ifdef BCM_NAPI_RXPOLL
3864 else if( nrq->cmd == NICE_CMD_SET_RX_NAPI )
3866 else if( nrq->cmd == NICE_CMD_SET_RX )
3869 pUmDevice->nice_rx = nrq->nrq_rx;
3870 pUmDevice->nice_ctx = nrq->nrq_ctx;
3871 bcm5700_set_vlan_mode(pUmDevice);
3874 #ifdef BCM_NAPI_RXPOLL
3875 else if( nrq->cmd == NICE_CMD_GET_RX_NAPI )
3877 else if( nrq->cmd == NICE_CMD_GET_RX )
3880 nrq->nrq_rx = pUmDevice->nice_rx;
3881 nrq->nrq_ctx = pUmDevice->nice_ctx;
3884 else if( nrq->cmd == NICE_CMD_GET_SPEED ) {
3885 if(pDevice->LinkStatus != LM_STATUS_LINK_ACTIVE){
3888 else if (pDevice->LineSpeed == LM_LINE_SPEED_1000MBPS) {
3889 nrq->nrq_speed = SPEED_1000;
3890 } else if (pDevice->LineSpeed == LM_LINE_SPEED_100MBPS) {
3891 nrq->nrq_speed = SPEED_100;
3892 } else if (pDevice->LineSpeed == LM_LINE_SPEED_10MBPS) {
/* BUGFIX: was SPEED_100; a 10 Mbps link must report SPEED_10. */
3893 nrq->nrq_speed = SPEED_10;
/* The remaining NICE commands require the device to be open. */
3900 if (!pUmDevice->opened)
3904 case NICE_CMD_BLINK_LED:
3905 if (LM_BlinkLED(pDevice, nrq->nrq_blink_time) ==
3906 LM_STATUS_SUCCESS) {
3911 case NICE_CMD_DIAG_SUSPEND:
3912 b57_suspend_chip(pUmDevice);
3915 case NICE_CMD_DIAG_RESUME:
3916 b57_resume_chip(pUmDevice);
/* Register access: offsets >= 0x10000 go through indirect access. */
3919 case NICE_CMD_REG_READ:
3920 if (nrq->nrq_offset >= 0x10000) {
3921 nrq->nrq_data = LM_RegRdInd(pDevice,
3925 nrq->nrq_data = LM_RegRd(pDevice,
3930 case NICE_CMD_REG_WRITE:
3931 if (nrq->nrq_offset >= 0x10000) {
3932 LM_RegWrInd(pDevice, nrq->nrq_offset,
3936 LM_RegWr(pDevice, nrq->nrq_offset,
3937 nrq->nrq_data, FALSE);
3941 case NICE_CMD_REG_READ_DIRECT:
3942 case NICE_CMD_REG_WRITE_DIRECT:
/* Direct access is invalid above 64K or with the UNDI workaround. */
3943 if ((nrq->nrq_offset >= 0x10000) ||
3944 (pDevice->Flags & UNDI_FIX_FLAG)) {
3948 if (nrq->cmd == NICE_CMD_REG_READ_DIRECT) {
3949 nrq->nrq_data = REG_RD_OFFSET(pDevice,
3953 REG_WR_OFFSET(pDevice, nrq->nrq_offset,
3958 case NICE_CMD_MEM_READ:
3959 nrq->nrq_data = LM_MemRdInd(pDevice,
3963 case NICE_CMD_MEM_WRITE:
3964 LM_MemWrInd(pDevice, nrq->nrq_offset,
/* PCI configuration space access in 32/16/8-bit widths. */
3968 case NICE_CMD_CFG_READ32:
3969 pci_read_config_dword(pUmDevice->pdev,
3970 nrq->nrq_offset, (u32 *)&nrq->nrq_data);
3973 case NICE_CMD_CFG_READ16:
3974 pci_read_config_word(pUmDevice->pdev,
3975 nrq->nrq_offset, (u16 *)&nrq->nrq_data);
3978 case NICE_CMD_CFG_READ8:
3979 pci_read_config_byte(pUmDevice->pdev,
3980 nrq->nrq_offset, (u8 *)&nrq->nrq_data);
3983 case NICE_CMD_CFG_WRITE32:
3984 pci_write_config_dword(pUmDevice->pdev,
3985 nrq->nrq_offset, (u32)nrq->nrq_data);
3988 case NICE_CMD_CFG_WRITE16:
3989 pci_write_config_word(pUmDevice->pdev,
3990 nrq->nrq_offset, (u16)nrq->nrq_data);
3993 case NICE_CMD_CFG_WRITE8:
3994 pci_write_config_byte(pUmDevice->pdev,
3995 nrq->nrq_offset, (u8)nrq->nrq_data);
3998 case NICE_CMD_RESET:
/* Loopback control: each enable checks LoopBackMode is idle first,
 * each disable checks the matching mode is currently active. */
4002 case NICE_CMD_ENABLE_MAC_LOOPBACK:
4003 if (pDevice->LoopBackMode != 0) {
4007 BCM5700_PHY_LOCK(pUmDevice, flags);
4008 LM_EnableMacLoopBack(pDevice);
4009 BCM5700_PHY_UNLOCK(pUmDevice, flags);
4012 case NICE_CMD_DISABLE_MAC_LOOPBACK:
4013 if (pDevice->LoopBackMode !=
4014 LM_MAC_LOOP_BACK_MODE) {
4018 BCM5700_PHY_LOCK(pUmDevice, flags);
4019 LM_DisableMacLoopBack(pDevice);
4020 BCM5700_PHY_UNLOCK(pUmDevice, flags);
4023 case NICE_CMD_ENABLE_PHY_LOOPBACK:
4024 if (pDevice->LoopBackMode != 0) {
4028 BCM5700_PHY_LOCK(pUmDevice, flags);
4029 LM_EnablePhyLoopBack(pDevice);
4030 BCM5700_PHY_UNLOCK(pUmDevice, flags);
4033 case NICE_CMD_DISABLE_PHY_LOOPBACK:
4034 if (pDevice->LoopBackMode !=
4035 LM_PHY_LOOP_BACK_MODE) {
4039 BCM5700_PHY_LOCK(pUmDevice, flags);
4040 LM_DisablePhyLoopBack(pDevice);
4041 BCM5700_PHY_UNLOCK(pUmDevice, flags);
4044 case NICE_CMD_ENABLE_EXT_LOOPBACK:
4045 if (pDevice->LoopBackMode != 0) {
/* Fiber (TBI) only supports 1000 Mbps external loopback. */
4049 if (pDevice->TbiFlags & ENABLE_TBI_FLAG) {
4050 if (nrq->nrq_speed != 1000)
4054 if ((nrq->nrq_speed != 1000) &&
4055 (nrq->nrq_speed != 100) &&
4056 (nrq->nrq_speed != 10)) {
4060 BCM5700_PHY_LOCK(pUmDevice, flags);
4061 LM_EnableExtLoopBack(pDevice, nrq->nrq_speed);
4062 BCM5700_PHY_UNLOCK(pUmDevice, flags);
4065 case NICE_CMD_DISABLE_EXT_LOOPBACK:
4066 if (pDevice->LoopBackMode !=
4067 LM_EXT_LOOP_BACK_MODE) {
4071 BCM5700_PHY_LOCK(pUmDevice, flags);
4072 LM_DisableExtLoopBack(pDevice);
4073 BCM5700_PHY_UNLOCK(pUmDevice, flags);
4076 case NICE_CMD_INTERRUPT_TEST:
4077 nrq->nrq_intr_test_result =
4078 b57_test_intr(pUmDevice);
4081 case NICE_CMD_LOOPBACK_TEST:
4083 switch (nrq->nrq_looptype) {
4084 case NICE_LOOPBACK_TESTTYPE_EXT:
/* Exactly one speed bit must be set within the speed mask. */
4085 if ((nrq->nrq_loopspeed & ~NICE_LOOPBACK_TEST_SPEEDMASK) ||
4086 !(nrq->nrq_loopspeed & NICE_LOOPBACK_TEST_SPEEDMASK))
4088 switch (nrq->nrq_loopspeed) {
4089 case NICE_LOOPBACK_TEST_10MBPS:
4090 value = LM_LINE_SPEED_10MBPS;
4092 case NICE_LOOPBACK_TEST_100MBPS:
4093 value = LM_LINE_SPEED_100MBPS;
4095 case NICE_LOOPBACK_TEST_1000MBPS:
4096 value = LM_LINE_SPEED_1000MBPS;
4101 case NICE_LOOPBACK_TESTTYPE_MAC:
4102 case NICE_LOOPBACK_TESTTYPE_PHY:
4103 b57_suspend_chip(pUmDevice);
4104 value = b57_test_loopback(pUmDevice,
4105 nrq->nrq_looptype, value);
4106 b57_resume_chip(pUmDevice);
4111 /* A '1' indicates success */
/* Allocate DMA-coherent memory for user diagnostics; the PCI bus
 * address is stored at the start of the buffer and the CPU physical
 * address is returned in the request. */
4119 case NICE_CMD_KMALLOC_PHYS: {
4120 #if (LINUX_VERSION_CODE >= 0x020400)
4125 struct page *pg, *last_pg;
/* Find a free slot in the per-device allocation table. */
4127 for (i = 0; i < MAX_MEM2; i++) {
4128 if (pUmDevice->mem_size_list2[i] == 0)
4133 ptr = pci_alloc_consistent(pUmDevice->pdev,
4134 nrq->nrq_size, &mapping);
4138 pUmDevice->mem_size_list2[i] = nrq->nrq_size;
4139 pUmDevice->mem_list2[i] = ptr;
4140 pUmDevice->dma_list2[i] = mapping;
4142 /* put pci mapping at the beginning of buffer */
4143 *((__u64 *) ptr) = (__u64) mapping;
4145 /* Probably won't work on some architectures */
4146 /* get CPU mapping */
4147 cpu_pa = (__u64) virt_to_phys(ptr);
4148 pUmDevice->cpu_pa_list2[i] = cpu_pa;
4149 nrq->nrq_phys_addr_lo = (__u32) cpu_pa;
4150 nrq->nrq_phys_addr_hi = (__u32) (cpu_pa >> 32);
/* Reserve the pages so user space can mmap them. */
4152 pg = virt_to_page(ptr);
4153 last_pg = virt_to_page(ptr + nrq->nrq_size - 1);
4155 #if (LINUX_VERSION_CODE > 0x020500)
4156 SetPageReserved(pg);
4158 mem_map_reserve(pg);
4169 case NICE_CMD_KFREE_PHYS: {
/* Look up the allocation by its CPU physical address and free it. */
4173 cpu_pa = (__u64) nrq->nrq_phys_addr_lo +
4174 ((__u64) nrq->nrq_phys_addr_hi << 32);
4175 for (i = 0; i < MAX_MEM2; i++) {
4176 if (pUmDevice->cpu_pa_list2[i] ==
4185 bcm5700_freemem2(pUmDevice, i);
4189 case NICE_CMD_SET_WRITE_PROTECT:
4190 if (nrq->nrq_write_protect)
4191 pDevice->Flags |= EEPROM_WP_FLAG;
4193 pDevice->Flags &= ~EEPROM_WP_FLAG;
4195 case NICE_CMD_GET_STATS_BLOCK: {
4196 PT3_STATS_BLOCK pStats =
4197 (PT3_STATS_BLOCK)pDevice->pStatsBlkVirt;
4198 if (mm_copy_to_user(nrq->nrq_stats_useraddr,
4199 pStats, nrq->nrq_stats_size)) {
4204 case NICE_CMD_CLR_STATS_BLOCK: {
4206 PT3_STATS_BLOCK pStats =
4207 (PT3_STATS_BLOCK)pDevice->pStatsBlkVirt;
4209 memset(pStats, 0, sizeof(T3_STATS_BLOCK));
/* 5705-class chips also keep stats in chip memory; clear that too. */
4210 if (T3_ASIC_REV(pDevice->ChipRevId) ==
4214 for(j = 0x0300; j < 0x0b00; j = j + 4) {
4215 MEM_WR_OFFSET(pDevice, j, 0);
4225 #endif /* NICE_SUPPORT */
/* Anything else (SIOCETHTOOL) goes to the ethtool handler. */
4228 return netdev_ethtool_ioctl(dev, (void *) rq->ifr_data);
/*
 * bcm5700_do_rx_mode() - program multicast/allmulti/promiscuous receive
 * filtering from dev->flags and dev->mc_list.
 * Unlocked worker; bcm5700_set_rx_mode() is the PHY-lock-taking wrapper
 * with the same body -- presumably callers here already hold the lock
 * (TODO confirm).
 */
4236 STATIC void bcm5700_do_rx_mode(struct net_device *dev)
4238 PUM_DEVICE_BLOCK pUmDevice = (PUM_DEVICE_BLOCK)dev->priv;
4239 PLM_DEVICE_BLOCK pDevice = (PLM_DEVICE_BLOCK) pUmDevice;
4241 struct dev_mc_list *mclist;
/* Rebuild the hardware multicast filter from the kernel's list. */
4243 LM_MulticastClear(pDevice);
4244 for (i = 0, mclist = dev->mc_list; mclist && i < dev->mc_count;
4245 i++, mclist = mclist->next) {
4246 LM_MulticastAdd(pDevice, (PLM_UINT8) &mclist->dmi_addr);
/* Toggle ALL_MULTICAST only when the current mask differs. */
4248 if (dev->flags & IFF_ALLMULTI) {
4249 if (!(pDevice->ReceiveMask & LM_ACCEPT_ALL_MULTICAST)) {
4250 LM_SetReceiveMask(pDevice,
4251 pDevice->ReceiveMask | LM_ACCEPT_ALL_MULTICAST);
4254 else if (pDevice->ReceiveMask & LM_ACCEPT_ALL_MULTICAST) {
4255 LM_SetReceiveMask(pDevice,
4256 pDevice->ReceiveMask & ~LM_ACCEPT_ALL_MULTICAST);
/* Likewise for promiscuous mode. */
4258 if (dev->flags & IFF_PROMISC) {
4259 if (!(pDevice->ReceiveMask & LM_PROMISCUOUS_MODE)) {
4260 LM_SetReceiveMask(pDevice,
4261 pDevice->ReceiveMask | LM_PROMISCUOUS_MODE);
4264 else if (pDevice->ReceiveMask & LM_PROMISCUOUS_MODE) {
4265 LM_SetReceiveMask(pDevice,
4266 pDevice->ReceiveMask & ~LM_PROMISCUOUS_MODE);
/*
 * bcm5700_set_rx_mode() - net_device->set_multicast_list entry point.
 * Identical logic to bcm5700_do_rx_mode() but bracketed by the PHY
 * spinlock, as required when called from the networking core.
 */
4271 STATIC void bcm5700_set_rx_mode(struct net_device *dev)
4273 PUM_DEVICE_BLOCK pUmDevice = (PUM_DEVICE_BLOCK)dev->priv;
4274 PLM_DEVICE_BLOCK pDevice = (PLM_DEVICE_BLOCK) pUmDevice;
4276 struct dev_mc_list *mclist;
4277 unsigned long flags;
4279 BCM5700_PHY_LOCK(pUmDevice, flags);
/* Rebuild the hardware multicast filter from the kernel's list. */
4281 LM_MulticastClear(pDevice);
4282 for (i = 0, mclist = dev->mc_list; mclist && i < dev->mc_count;
4283 i++, mclist = mclist->next) {
4284 LM_MulticastAdd(pDevice, (PLM_UINT8) &mclist->dmi_addr);
/* Update ALL_MULTICAST / promiscuous bits only when they change. */
4286 if (dev->flags & IFF_ALLMULTI) {
4287 if (!(pDevice->ReceiveMask & LM_ACCEPT_ALL_MULTICAST)) {
4288 LM_SetReceiveMask(pDevice,
4289 pDevice->ReceiveMask | LM_ACCEPT_ALL_MULTICAST);
4292 else if (pDevice->ReceiveMask & LM_ACCEPT_ALL_MULTICAST) {
4293 LM_SetReceiveMask(pDevice,
4294 pDevice->ReceiveMask & ~LM_ACCEPT_ALL_MULTICAST);
4296 if (dev->flags & IFF_PROMISC) {
4297 if (!(pDevice->ReceiveMask & LM_PROMISCUOUS_MODE)) {
4298 LM_SetReceiveMask(pDevice,
4299 pDevice->ReceiveMask | LM_PROMISCUOUS_MODE);
4302 else if (pDevice->ReceiveMask & LM_PROMISCUOUS_MODE) {
4303 LM_SetReceiveMask(pDevice,
4304 pDevice->ReceiveMask & ~LM_PROMISCUOUS_MODE);
4307 BCM5700_PHY_UNLOCK(pUmDevice, flags);
4311 * Set the hardware MAC address.
/*
 * bcm5700_set_mac_addr() - net_device->set_mac_address entry point.
 * Validates the new address, copies it into dev->dev_addr, and programs
 * the hardware only if the interface is open.  Return statements for
 * the success/invalid paths fall in elided lines.
 */
4313 STATIC int bcm5700_set_mac_addr(struct net_device *dev, void *p)
4315 struct sockaddr *addr=p;
4316 PLM_DEVICE_BLOCK pDevice = (PLM_DEVICE_BLOCK) dev->priv;
4317 UM_DEVICE_BLOCK *pUmDevice = (UM_DEVICE_BLOCK *) pDevice;
4319 if(is_valid_ether_addr(addr->sa_data)){
4321 memcpy(dev->dev_addr, addr->sa_data,dev->addr_len);
/* Hardware is only touched while the device is open. */
4322 if (pUmDevice->opened)
4323 LM_SetMacAddress(pDevice, dev->dev_addr);
4329 #if T3_JUMBO_RCV_RCB_ENTRY_COUNT
/*
 * bcm5700_change_mtu() - net_device->change_mtu entry point (compiled
 * only when jumbo receive rings exist).  Validates the requested size,
 * and if the device is running, tears down and re-initializes the
 * rings with recalculated standard/jumbo descriptor counts.
 */
4330 STATIC int bcm5700_change_mtu(struct net_device *dev, int new_mtu)
4332 int pkt_size = new_mtu + ETHERNET_PACKET_HEADER_SIZE;
4333 PUM_DEVICE_BLOCK pUmDevice = (PUM_DEVICE_BLOCK)dev->priv;
4334 PLM_DEVICE_BLOCK pDevice = &pUmDevice->lm_dev;
4335 unsigned long flags;
/* Reject frame sizes outside the chip's absolute limits. */
4338 if ((pkt_size < MIN_ETHERNET_PACKET_SIZE_NO_CRC) ||
4339 (pkt_size > MAX_ETHERNET_JUMBO_PACKET_SIZE_NO_CRC)) {
/* Jumbo sizes require a jumbo-capable chip. */
4343 if ( !(pDevice->Flags & JUMBO_CAPABLE_FLAG) &&
4344 (pkt_size > MAX_ETHERNET_PACKET_SIZE_NO_CRC) ) {
4348 if (pUmDevice->suspended)
/* Live reconfiguration: stop traffic, free rings, rebuild. */
4351 if (pUmDevice->opened && (new_mtu != dev->mtu) &&
4352 (pDevice->Flags & JUMBO_CAPABLE_FLAG)) {
4356 BCM5700_PHY_LOCK(pUmDevice, flags);
4358 netif_stop_queue(dev);
4359 bcm5700_shutdown(pUmDevice);
4360 bcm5700_freemem(dev);
/* Never program an RX/TX MTU below the standard frame size. */
4364 if (pkt_size < MAX_ETHERNET_PACKET_SIZE_NO_CRC) {
4365 pDevice->RxMtu = pDevice->TxMtu =
4366 MAX_ETHERNET_PACKET_SIZE_NO_CRC;
4369 pDevice->RxMtu = pDevice->TxMtu = pkt_size;
/* Jumbo ring is only populated for MTUs above 1514. */
4372 if (dev->mtu <= 1514) {
4373 pDevice->RxJumboDescCnt = 0;
4375 else if (pDevice->Flags & JUMBO_CAPABLE_FLAG){
4376 pDevice->RxJumboDescCnt =
4377 rx_jumbo_desc_cnt[pUmDevice->index];
4379 pDevice->RxPacketDescCnt = pDevice->RxJumboDescCnt +
4380 pDevice->RxStdDescCnt;
/* Round the jumbo buffer size up to a cache-line multiple. */
4382 pDevice->RxJumboBufferSize = (pDevice->RxMtu + 8 /* CRC + VLAN */ +
4383 COMMON_CACHE_LINE_SIZE-1) & ~COMMON_CACHE_LINE_MASK;
/* 5714-family hardware cannot do TSO with jumbo frames. */
4386 if (T3_ASIC_5714_FAMILY(pDevice->ChipRevId) &&
4387 (dev->mtu > 1514) ) {
4388 if (dev->features & NETIF_F_TSO) {
4389 dev->features &= ~NETIF_F_TSO;
4390 printk(KERN_ALERT "%s: TSO previously enabled. Jumbo Frames and TSO cannot simultaneously be enabled. Jumbo Frames enabled. TSO disabled.\n", dev->name);
/* Bring the adapter back up with the new settings. */
4396 LM_InitializeAdapter(pDevice);
4397 bcm5700_do_rx_mode(dev);
4398 bcm5700_set_vlan_mode(pUmDevice);
4399 bcm5700_init_counters(pUmDevice);
/* Re-program the MAC address if it differs from the chip's copy. */
4400 if (memcmp(dev->dev_addr, pDevice->NodeAddress, 6)) {
4401 LM_SetMacAddress(pDevice, dev->dev_addr);
4403 netif_start_queue(dev);
4404 bcm5700_intr_on(pUmDevice);
4406 BCM5700_PHY_UNLOCK(pUmDevice, flags);
4413 #if (LINUX_VERSION_CODE < 0x020300)
/*
 * bcm5700_probe() - legacy (pre-2.3 kernel) probe: walk all PCI
 * Ethernet-class devices, match them against bcm5700_pci_tbl by
 * vendor/device/subsystem IDs, and initialize each match.
 * Returns 0 if at least one card was found, -ENODEV otherwise.
 */
4415 bcm5700_probe(struct net_device *dev)
4417 int cards_found = 0;
4418 struct pci_dev *pdev = NULL;
4419 struct pci_device_id *pci_tbl;
4422 if ( ! pci_present())
4425 pci_tbl = bcm5700_pci_tbl;
4426 while ((pdev = pci_find_class(PCI_CLASS_NETWORK_ETHERNET << 8, pdev))) {
/* Read subsystem IDs for the table match below. */
4429 pci_read_config_word(pdev, PCI_SUBSYSTEM_VENDOR_ID, &ssvid);
4430 pci_read_config_word(pdev, PCI_SUBSYSTEM_ID, &ssid);
/* Table is terminated by a zero vendor entry. */
4431 for (idx = 0; pci_tbl[idx].vendor; idx++) {
4432 if ((pci_tbl[idx].vendor == PCI_ANY_ID ||
4433 pci_tbl[idx].vendor == pdev->vendor) &&
4434 (pci_tbl[idx].device == PCI_ANY_ID ||
4435 pci_tbl[idx].device == pdev->device) &&
4436 (pci_tbl[idx].subvendor == PCI_ANY_ID ||
4437 pci_tbl[idx].subvendor == ssvid) &&
4438 (pci_tbl[idx].subdevice == PCI_ANY_ID ||
4439 pci_tbl[idx].subdevice == ssid))
4445 if (pci_tbl[idx].vendor == 0)
4449 if (bcm5700_init_one(pdev, &pci_tbl[idx]) == 0)
4453 return cards_found ? 0 : -ENODEV;
/* Legacy module entry point: probe all matching adapters. */
4457 int init_module(void)
4459 return bcm5700_probe(NULL);
/*
 * Legacy module exit: walk the driver's device list, unregister each
 * netdev, unmap its registers, and free the net_device structure
 * (kfree() on pre-2.6 kernels, free_netdev() afterwards).
 */
4462 void cleanup_module(void)
4464 struct net_device *next_dev;
4465 PUM_DEVICE_BLOCK pUmDevice;
4468 bcm5700_proc_remove_notifier();
4470 /* No need to check MOD_IN_USE, as sys_delete_module() checks. */
4471 while (root_tigon3_dev) {
4472 pUmDevice = (PUM_DEVICE_BLOCK)root_tigon3_dev->priv;
4474 bcm5700_proc_remove_dev(root_tigon3_dev);
/* Save the next pointer before the current device is freed. */
4476 next_dev = pUmDevice->next_module;
4477 unregister_netdev(root_tigon3_dev);
4478 if (pUmDevice->lm_dev.pMappedMemBase)
4479 iounmap(pUmDevice->lm_dev.pMappedMemBase);
4480 #if (LINUX_VERSION_CODE < 0x020600)
4481 kfree(root_tigon3_dev);
4483 free_netdev(root_tigon3_dev);
4485 root_tigon3_dev = next_dev;
4488 unregister_ioctl32_conversion(SIOCNICE);
4493 #else /* LINUX_VERSION_CODE < 0x020300 */
/*
 * bcm5700_suspend() - PCI power-management suspend hook.  Prototype
 * differs by kernel version (int return with a state argument on
 * >= 2.4.6, void otherwise).  Detaches the netdev, shuts the chip
 * down and drops it to D3.
 */
4495 #if (LINUX_VERSION_CODE >= 0x020406)
4496 static int bcm5700_suspend (struct pci_dev *pdev, u32 state)
4498 static void bcm5700_suspend (struct pci_dev *pdev)
4501 struct net_device *dev = (struct net_device *) pci_get_drvdata(pdev);
4502 PUM_DEVICE_BLOCK pUmDevice = (PUM_DEVICE_BLOCK) dev->priv;
4503 PLM_DEVICE_BLOCK pDevice = &pUmDevice->lm_dev;
/* Nothing to do if the interface is down. */
4505 if (!netif_running(dev))
4506 #if (LINUX_VERSION_CODE >= 0x020406)
4512 netif_device_detach (dev);
4513 bcm5700_shutdown(pUmDevice);
4515 LM_SetPowerState(pDevice, LM_POWER_STATE_D3);
4517 /* pci_power_off(pdev, -1);*/
4518 #if (LINUX_VERSION_CODE >= 0x020406)
/*
 * bcm5700_resume() - PCI power-management resume hook (prototype is
 * version-conditional, mirroring bcm5700_suspend).  Re-attaches the
 * netdev, restores D0 power and replenishes receive buffers.
 */
4524 #if (LINUX_VERSION_CODE >= 0x020406)
4525 static int bcm5700_resume(struct pci_dev *pdev)
4527 static void bcm5700_resume(struct pci_dev *pdev)
4530 struct net_device *dev = (struct net_device *) pci_get_drvdata(pdev);
4531 PUM_DEVICE_BLOCK pUmDevice = (PUM_DEVICE_BLOCK) dev->priv;
4532 PLM_DEVICE_BLOCK pDevice = &pUmDevice->lm_dev;
/* Nothing to do if the interface is down. */
4534 if (!netif_running(dev))
4535 #if (LINUX_VERSION_CODE >= 0x020406)
4540 /* pci_power_on(pdev);*/
4541 netif_device_attach(dev);
4542 LM_SetPowerState(pDevice, LM_POWER_STATE_D0);
4543 MM_InitializeUmPackets(pDevice);
4545 #if (LINUX_VERSION_CODE >= 0x020406)
/* PCI driver registration table (GNU-style named initializers, as used
 * by older kernels). */
4551 static struct pci_driver bcm5700_pci_driver = {
4552 name: bcm5700_driver,
4553 id_table: bcm5700_pci_tbl,
4554 probe: bcm5700_init_one,
4555 remove: __devexit_p(bcm5700_remove_one),
4556 suspend: bcm5700_suspend,
4557 resume: bcm5700_resume,
/* Modern module entry point: register with the PCI subsystem. */
4561 static int __init bcm5700_init_module (void)
4563 return pci_module_init(&bcm5700_pci_driver);
/* Modern module exit: remove proc hooks and unregister the driver. */
4567 static void __exit bcm5700_cleanup_module (void)
4570 bcm5700_proc_remove_notifier();
4572 pci_unregister_driver(&bcm5700_pci_driver);
4576 module_init(bcm5700_init_module);
4577 module_exit(bcm5700_cleanup_module);
4586 #ifdef BCM_NAPI_RXPOLL
/*
 * MM_ScheduleRxPoll() - schedule the NAPI receive poll for this device.
 * Returns LM_STATUS_SUCCESS if the poll was scheduled, or
 * LM_STATUS_FAILURE if it was already pending.
 */
4588 MM_ScheduleRxPoll(LM_DEVICE_BLOCK *pDevice)
4590 struct net_device *dev = ((UM_DEVICE_BLOCK *) pDevice)->dev;
4592 if (netif_rx_schedule_prep(dev)) {
4593 __netif_rx_schedule(dev);
4594 return LM_STATUS_SUCCESS;
4596 return LM_STATUS_FAILURE;
/* OS-abstraction: read a 16-bit PCI config register into *pValue16. */
4601 MM_ReadConfig16(PLM_DEVICE_BLOCK pDevice, LM_UINT32 Offset,
4602 LM_UINT16 *pValue16)
4604 UM_DEVICE_BLOCK *pUmDevice;
4606 pUmDevice = (UM_DEVICE_BLOCK *) pDevice;
4607 pci_read_config_word(pUmDevice->pdev, Offset, (u16 *) pValue16);
4608 return LM_STATUS_SUCCESS;
/* OS-abstraction: read a 32-bit PCI config register into *pValue32. */
4612 MM_ReadConfig32(PLM_DEVICE_BLOCK pDevice, LM_UINT32 Offset,
4613 LM_UINT32 *pValue32)
4615 UM_DEVICE_BLOCK *pUmDevice;
4617 pUmDevice = (UM_DEVICE_BLOCK *) pDevice;
4618 pci_read_config_dword(pUmDevice->pdev, Offset, (u32 *) pValue32);
4619 return LM_STATUS_SUCCESS;
/* OS-abstraction: write a 16-bit PCI config register. */
4623 MM_WriteConfig16(PLM_DEVICE_BLOCK pDevice, LM_UINT32 Offset,
4626 UM_DEVICE_BLOCK *pUmDevice;
4628 pUmDevice = (UM_DEVICE_BLOCK *) pDevice;
4629 pci_write_config_word(pUmDevice->pdev, Offset, Value16);
4630 return LM_STATUS_SUCCESS;
/* OS-abstraction: write a 32-bit PCI config register. */
4634 MM_WriteConfig32(PLM_DEVICE_BLOCK pDevice, LM_UINT32 Offset,
4637 UM_DEVICE_BLOCK *pUmDevice;
4639 pUmDevice = (UM_DEVICE_BLOCK *) pDevice;
4640 pci_write_config_dword(pUmDevice->pdev, Offset, Value32);
4641 return LM_STATUS_SUCCESS;
/*
 * MM_AllocateSharedMemory() - allocate a zeroed DMA-coherent block and
 * return both its virtual address and bus address.  The allocation is
 * tracked in the per-device mem/dma/size lists so it can be freed on
 * teardown (owner: the device block).
 */
4645 MM_AllocateSharedMemory(PLM_DEVICE_BLOCK pDevice, LM_UINT32 BlockSize,
4646 PLM_VOID *pMemoryBlockVirt, PLM_PHYSICAL_ADDRESS pMemoryBlockPhy,
4650 PUM_DEVICE_BLOCK pUmDevice = (PUM_DEVICE_BLOCK) pDevice;
4653 pvirt = pci_alloc_consistent(pUmDevice->pdev, BlockSize,
4656 return LM_STATUS_FAILURE;
/* Record the allocation for later pci_free_consistent(). */
4658 pUmDevice->mem_list[pUmDevice->mem_list_num] = pvirt;
4659 pUmDevice->dma_list[pUmDevice->mem_list_num] = mapping;
4660 pUmDevice->mem_size_list[pUmDevice->mem_list_num++] = BlockSize;
4661 memset(pvirt, 0, BlockSize);
4662 *pMemoryBlockVirt = (PLM_VOID) pvirt;
4663 MM_SetAddr(pMemoryBlockPhy, mapping);
4664 return LM_STATUS_SUCCESS;
/*
 * MM_AllocateMemory() - allocate a zeroed, non-DMA block via kmalloc
 * (GFP_ATOMIC, so callable from interrupt context).  Tracked in the
 * same per-device lists as shared memory; a recorded size of 0 marks
 * it as a kfree()-style allocation.
 */
4668 MM_AllocateMemory(PLM_DEVICE_BLOCK pDevice, LM_UINT32 BlockSize,
4669 PLM_VOID *pMemoryBlockVirt)
4672 PUM_DEVICE_BLOCK pUmDevice = (PUM_DEVICE_BLOCK) pDevice;
4675 /* Maximum in slab.c */
4676 if (BlockSize > 131072) {
4677 goto MM_Alloc_error;
4680 pvirt = kmalloc(BlockSize,GFP_ATOMIC);
4682 goto MM_Alloc_error;
4684 pUmDevice->mem_list[pUmDevice->mem_list_num] = pvirt;
4685 pUmDevice->dma_list[pUmDevice->mem_list_num] = 0;
4686 pUmDevice->mem_size_list[pUmDevice->mem_list_num++] = 0;
4687 /* mem_size_list[i] == 0 indicates that the memory should be freed */
4689 memset(pvirt, 0, BlockSize);
4690 *pMemoryBlockVirt = pvirt;
4691 return LM_STATUS_SUCCESS;
/* Shared error exit (label falls in an elided line). */
4694 printk(KERN_WARNING "%s: Memory allocation failed - buffer parameters may be set too high\n", pUmDevice->dev->name);
4695 return LM_STATUS_FAILURE;
/*
 * MM_MapMemBase() - ioremap the chip's register window (PCI BAR 0,
 * uncached) into kernel virtual address space.
 */
4699 MM_MapMemBase(PLM_DEVICE_BLOCK pDevice)
4701 PUM_DEVICE_BLOCK pUmDevice = (PUM_DEVICE_BLOCK) pDevice;
4703 pDevice->pMappedMemBase = ioremap_nocache(
4704 pci_resource_start(pUmDevice->pdev, 0), sizeof(T3_STD_MEM_MAP));
4705 if (pDevice->pMappedMemBase == 0)
4706 return LM_STATUS_FAILURE;
4708 return LM_STATUS_SUCCESS;
/*
 * MM_InitializeUmPackets() - attach an sk_buff to every receive packet
 * descriptor on the free queue, then compute the ISR buffer-replenish
 * thresholds used while the receiver is running.
 */
4712 MM_InitializeUmPackets(PLM_DEVICE_BLOCK pDevice)
4715 struct sk_buff *skb;
4716 PUM_DEVICE_BLOCK pUmDevice = (PUM_DEVICE_BLOCK) pDevice;
4717 PUM_PACKET pUmPacket;
4720 for (i = 0; i < pDevice->RxPacketDescCnt; i++) {
4721 pPacket = QQ_PopHead(&pDevice->RxPacketFreeQ.Container);
4722 pUmPacket = (PUM_PACKET) pPacket;
4724 printk(KERN_DEBUG "Bad RxPacketFreeQ\n");
4726 if (pUmPacket->skbuff == 0) {
/* +2 leaves room for the alignment reserve below. */
4727 skb = dev_alloc_skb(pPacket->u.Rx.RxBufferSize + 2);
/* Allocation failure: park the descriptor on the out-of-buffer
 * queue so it can be retried later. */
4729 pUmPacket->skbuff = 0;
4731 &pUmDevice->rx_out_of_buf_q.Container,
4735 pUmPacket->skbuff = skb;
4736 skb->dev = pUmDevice->dev;
4737 skb_reserve(skb, pUmDevice->rx_buf_align);
4739 QQ_PushTail(&pDevice->RxPacketFreeQ.Container, pPacket);
/* 5700 ASIC: replenish in the ISR; thresholds disabled. */
4741 if (T3_ASIC_REV(pUmDevice->lm_dev.ChipRevId) == T3_ASIC_REV_5700) {
4742 /* reallocate buffers in the ISR */
4743 pUmDevice->rx_buf_repl_thresh = 0;
4744 pUmDevice->rx_buf_repl_panic_thresh = 0;
4745 pUmDevice->rx_buf_repl_isr_limit = 0;
/* Others: replenish at 1/8 used, panic at 7/8 used. */
4748 pUmDevice->rx_buf_repl_thresh = pDevice->RxPacketDescCnt / 8;
4749 pUmDevice->rx_buf_repl_panic_thresh =
4750 pDevice->RxPacketDescCnt * 7 / 8;
4752 /* This limits the time spent in the ISR when the receiver */
4753 /* is in a steady state of being overrun. */
4754 pUmDevice->rx_buf_repl_isr_limit = pDevice->RxPacketDescCnt / 8;
/* Clamp the thresholds so they never exceed the smaller ring. */
4756 #if T3_JUMBO_RCV_RCB_ENTRY_COUNT
4757 if (pDevice->RxJumboDescCnt != 0) {
4758 if (pUmDevice->rx_buf_repl_thresh >=
4759 pDevice->RxJumboDescCnt) {
4761 pUmDevice->rx_buf_repl_thresh =
4762 pUmDevice->rx_buf_repl_panic_thresh =
4763 pDevice->RxJumboDescCnt - 1;
4765 if (pUmDevice->rx_buf_repl_thresh >=
4766 pDevice->RxStdDescCnt) {
4768 pUmDevice->rx_buf_repl_thresh =
4769 pUmDevice->rx_buf_repl_panic_thresh =
4770 pDevice->RxStdDescCnt - 1;
4775 return LM_STATUS_SUCCESS;
4779 MM_GetConfig(PLM_DEVICE_BLOCK pDevice)
4781 PUM_DEVICE_BLOCK pUmDevice = (PUM_DEVICE_BLOCK) pDevice;
4782 int index = pUmDevice->index;
4783 struct net_device *dev = pUmDevice->dev;
4785 if (index >= MAX_UNITS)
4786 return LM_STATUS_SUCCESS;
4788 #if LINUX_KERNEL_VERSION < 0x0020609
4790 bcm5700_validate_param_range(pUmDevice, &auto_speed[index], "auto_speed",
4792 if (auto_speed[index] == 0)
4793 pDevice->DisableAutoNeg = TRUE;
4795 pDevice->DisableAutoNeg = FALSE;
4797 if (line_speed[index] == 0) {
4798 pDevice->RequestedLineSpeed = LM_LINE_SPEED_AUTO;
4799 pDevice->DisableAutoNeg = FALSE;
4802 bcm5700_validate_param_range(pUmDevice, &full_duplex[index],
4803 "full_duplex", 0, 1, 1);
4804 if (full_duplex[index]) {
4805 pDevice->RequestedDuplexMode = LM_DUPLEX_MODE_FULL;
4808 pDevice->RequestedDuplexMode = LM_DUPLEX_MODE_HALF;
4811 if (line_speed[index] == 1000) {
4812 pDevice->RequestedLineSpeed = LM_LINE_SPEED_1000MBPS;
4813 if (pDevice->PhyFlags & PHY_NO_GIGABIT) {
4814 pDevice->RequestedLineSpeed =
4815 LM_LINE_SPEED_100MBPS;
4816 printk(KERN_WARNING "%s-%d: Invalid line_speed parameter (1000), using 100\n", bcm5700_driver, index);
4819 if ((pDevice->TbiFlags & ENABLE_TBI_FLAG) &&
4820 !full_duplex[index]) {
4821 printk(KERN_WARNING "%s-%d: Invalid full_duplex parameter (0) for fiber, using 1\n", bcm5700_driver, index);
4822 pDevice->RequestedDuplexMode =
4823 LM_DUPLEX_MODE_FULL;
4826 if (!(pDevice->TbiFlags & ENABLE_TBI_FLAG) &&
4827 !auto_speed[index] && !(pDevice->PhyFlags & PHY_IS_FIBER) ) {
4828 printk(KERN_WARNING "%s-%d: Invalid auto_speed parameter (0) for copper, using 1\n", bcm5700_driver, index);
4829 pDevice->DisableAutoNeg = FALSE;
4833 else if ((pDevice->TbiFlags & ENABLE_TBI_FLAG) ||
4834 (pDevice->PhyFlags & PHY_IS_FIBER)){
4835 pDevice->RequestedLineSpeed = LM_LINE_SPEED_AUTO;
4836 pDevice->RequestedDuplexMode = LM_DUPLEX_MODE_FULL;
4837 pDevice->DisableAutoNeg = FALSE;
4838 printk(KERN_WARNING "%s-%d: Invalid line_speed parameter (%d), using auto\n", bcm5700_driver, index, line_speed[index]);
4840 else if (line_speed[index] == 100) {
4842 pDevice->RequestedLineSpeed = LM_LINE_SPEED_100MBPS;
4844 else if (line_speed[index] == 10) {
4846 pDevice->RequestedLineSpeed = LM_LINE_SPEED_10MBPS;
4849 pDevice->RequestedLineSpeed = LM_LINE_SPEED_AUTO;
4850 pDevice->DisableAutoNeg = FALSE;
4851 printk(KERN_WARNING "%s-%d: Invalid line_speed parameter (%d), using 0\n", bcm5700_driver, index, line_speed[index]);
4856 #endif /* LINUX_KERNEL_VERSION */
4858 /* This is an unmanageable switch nic and will have link problems if
4861 if(pDevice->SubsystemVendorId==0x103c && pDevice->SubsystemId==0x3226)
4863 if(pDevice->RequestedLineSpeed != LM_LINE_SPEED_AUTO)
4865 printk(KERN_WARNING "%s-%d: Invalid line_speed parameter (%d), using 0\n",
4866 bcm5700_driver, index, line_speed[index]);
4868 pDevice->RequestedLineSpeed = LM_LINE_SPEED_AUTO;
4869 pDevice->DisableAutoNeg = FALSE;
4872 #if LINUX_KERNEL_VERSION < 0x0020609
4874 pDevice->FlowControlCap = 0;
4875 bcm5700_validate_param_range(pUmDevice, &rx_flow_control[index],
4876 "rx_flow_control", 0, 1, 0);
4877 if (rx_flow_control[index] != 0) {
4878 pDevice->FlowControlCap |= LM_FLOW_CONTROL_RECEIVE_PAUSE;
4880 bcm5700_validate_param_range(pUmDevice, &tx_flow_control[index],
4881 "tx_flow_control", 0, 1, 0);
4882 if (tx_flow_control[index] != 0) {
4883 pDevice->FlowControlCap |= LM_FLOW_CONTROL_TRANSMIT_PAUSE;
4885 bcm5700_validate_param_range(pUmDevice, &auto_flow_control[index],
4886 "auto_flow_control", 0, 1, 0);
4887 if (auto_flow_control[index] != 0) {
4888 if (pDevice->DisableAutoNeg == FALSE) {
4890 pDevice->FlowControlCap |= LM_FLOW_CONTROL_AUTO_PAUSE;
4891 if ((tx_flow_control[index] == 0) &&
4892 (rx_flow_control[index] == 0)) {
4894 pDevice->FlowControlCap |=
4895 LM_FLOW_CONTROL_TRANSMIT_PAUSE |
4896 LM_FLOW_CONTROL_RECEIVE_PAUSE;
4901 if (dev->mtu > 1500) {
4903 if (T3_ASIC_5714_FAMILY(pDevice->ChipRevId) &&
4904 (dev->features & NETIF_F_TSO)) {
4905 dev->features &= ~NETIF_F_TSO;
4906 printk(KERN_ALERT "%s: TSO previously enabled. Jumbo Frames and TSO cannot simultaneously be enabled. Jumbo Frames enabled. TSO disabled.\n", dev->name);
4909 pDevice->RxMtu = dev->mtu + 14;
4912 if ((T3_ASIC_REV(pDevice->ChipRevId) != T3_ASIC_REV_5700) &&
4913 !(pDevice->Flags & BCM5788_FLAG)) {
4914 pDevice->Flags |= USE_TAGGED_STATUS_FLAG;
4915 pUmDevice->timer_interval = HZ;
4916 if ((T3_ASIC_REV(pDevice->ChipRevId) == T3_ASIC_REV_5703) &&
4917 (pDevice->TbiFlags & ENABLE_TBI_FLAG)) {
4918 pUmDevice->timer_interval = HZ/4;
4922 pUmDevice->timer_interval = HZ/10;
4925 bcm5700_validate_param_range(pUmDevice, &tx_pkt_desc_cnt[index],
4926 "tx_pkt_desc_cnt", 1, MAX_TX_PACKET_DESC_COUNT-1, TX_DESC_CNT);
4927 pDevice->TxPacketDescCnt = tx_pkt_desc_cnt[index];
4928 bcm5700_validate_param_range(pUmDevice, &rx_std_desc_cnt[index],
4929 "rx_std_desc_cnt", 1, T3_STD_RCV_RCB_ENTRY_COUNT-1,
4931 pDevice->RxStdDescCnt = rx_std_desc_cnt[index];
4933 #if T3_JUMBO_RCV_RCB_ENTRY_COUNT
4934 bcm5700_validate_param_range(pUmDevice, &rx_jumbo_desc_cnt[index],
4935 "rx_jumbo_desc_cnt", 1, T3_JUMBO_RCV_RCB_ENTRY_COUNT-1,
4938 if (mtu[index] <= 1514)
4939 pDevice->RxJumboDescCnt = 0;
4940 else if(!T3_ASIC_IS_5705_BEYOND(pDevice->ChipRevId)){
4941 pDevice->RxJumboDescCnt = rx_jumbo_desc_cnt[index];
4946 bcm5700_validate_param_range(pUmDevice, &adaptive_coalesce[index],
4947 "adaptive_coalesce", 0, 1, 1);
4948 #ifdef BCM_NAPI_RXPOLL
4949 if (adaptive_coalesce[index]) {
4950 printk(KERN_WARNING "%s-%d: adaptive_coalesce not used in NAPI mode\n", bcm5700_driver, index);
4951 adaptive_coalesce[index] = 0;
4955 pUmDevice->adaptive_coalesce = adaptive_coalesce[index];
4956 if (!pUmDevice->adaptive_coalesce) {
4957 bcm5700_validate_param_range(pUmDevice,
4958 &rx_coalesce_ticks[index], "rx_coalesce_ticks", 0,
4959 MAX_RX_COALESCING_TICKS, RX_COAL_TK);
4960 if ((rx_coalesce_ticks[index] == 0) &&
4961 (rx_max_coalesce_frames[index] == 0)) {
4963 printk(KERN_WARNING "%s-%d: Conflicting rx_coalesce_ticks (0) and rx_max_coalesce_frames (0) parameters, using %d and %d respectively\n",
4964 bcm5700_driver, index, RX_COAL_TK, RX_COAL_FM);
4966 rx_coalesce_ticks[index] = RX_COAL_TK;
4967 rx_max_coalesce_frames[index] = RX_COAL_FM;
4969 pDevice->RxCoalescingTicks = pUmDevice->rx_curr_coalesce_ticks =
4970 rx_coalesce_ticks[index];
4971 #ifdef BCM_NAPI_RXPOLL
4972 pDevice->RxCoalescingTicksDuringInt = rx_coalesce_ticks[index];
4975 bcm5700_validate_param_range(pUmDevice,
4976 &rx_max_coalesce_frames[index],
4977 "rx_max_coalesce_frames", 0,
4978 MAX_RX_MAX_COALESCED_FRAMES, RX_COAL_FM);
4980 pDevice->RxMaxCoalescedFrames =
4981 pUmDevice->rx_curr_coalesce_frames =
4982 rx_max_coalesce_frames[index];
4983 #ifdef BCM_NAPI_RXPOLL
4984 pDevice->RxMaxCoalescedFramesDuringInt =
4985 rx_max_coalesce_frames[index];
4988 bcm5700_validate_param_range(pUmDevice,
4989 &tx_coalesce_ticks[index], "tx_coalesce_ticks", 0,
4990 MAX_TX_COALESCING_TICKS, TX_COAL_TK);
4991 if ((tx_coalesce_ticks[index] == 0) &&
4992 (tx_max_coalesce_frames[index] == 0)) {
4994 printk(KERN_WARNING "%s-%d: Conflicting tx_coalesce_ticks (0) and tx_max_coalesce_frames (0) parameters, using %d and %d respectively\n",
4995 bcm5700_driver, index, TX_COAL_TK, TX_COAL_FM);
4997 tx_coalesce_ticks[index] = TX_COAL_TK;
4998 tx_max_coalesce_frames[index] = TX_COAL_FM;
5000 pDevice->TxCoalescingTicks = tx_coalesce_ticks[index];
5001 bcm5700_validate_param_range(pUmDevice,
5002 &tx_max_coalesce_frames[index],
5003 "tx_max_coalesce_frames", 0,
5004 MAX_TX_MAX_COALESCED_FRAMES, TX_COAL_FM);
5005 pDevice->TxMaxCoalescedFrames = tx_max_coalesce_frames[index];
5006 pUmDevice->tx_curr_coalesce_frames =
5007 pDevice->TxMaxCoalescedFrames;
5009 bcm5700_validate_param_range(pUmDevice,
5010 &stats_coalesce_ticks[index], "stats_coalesce_ticks",
5011 0, MAX_STATS_COALESCING_TICKS, ST_COAL_TK);
5012 if (adaptive_coalesce[index]) {
5013 printk(KERN_WARNING "%s-%d: Invalid stats_coalesce_ticks parameter set with with adaptive_coalesce parameter. Using adaptive_coalesce.\n", bcm5700_driver, index);
5015 if ((stats_coalesce_ticks[index] > 0) &&
5016 (stats_coalesce_ticks[index] < 100)) {
5017 printk(KERN_WARNING "%s-%d: Invalid stats_coalesce_ticks parameter (%u), using 100\n", bcm5700_driver, index, (unsigned int) stats_coalesce_ticks[index]);
5018 stats_coalesce_ticks[index] = 100;
5019 pDevice->StatsCoalescingTicks = stats_coalesce_ticks[index];
5020 pDevice->StatsCoalescingTicks = stats_coalesce_ticks[index];
5025 pUmDevice->rx_curr_coalesce_frames = RX_COAL_FM;
5026 pUmDevice->rx_curr_coalesce_ticks = RX_COAL_TK;
5027 pUmDevice->tx_curr_coalesce_frames = TX_COAL_FM;
5031 if (T3_ASIC_IS_5705_BEYOND(pDevice->ChipRevId)) {
5032 unsigned int tmpvar;
5034 tmpvar = pDevice->StatsCoalescingTicks / BCM_TIMER_GRANULARITY;
5037 * If the result is zero, the request is too demanding.
5043 pDevice->StatsCoalescingTicks = tmpvar * BCM_TIMER_GRANULARITY;
5045 pUmDevice->statstimer_interval = tmpvar;
5049 bcm5700_validate_param_range(pUmDevice, &enable_wol[index],
5050 "enable_wol", 0, 1, 0);
5051 if (enable_wol[index]) {
5052 pDevice->WakeUpModeCap = LM_WAKE_UP_MODE_MAGIC_PACKET;
5053 pDevice->WakeUpMode = LM_WAKE_UP_MODE_MAGIC_PACKET;
5056 #ifdef INCLUDE_TBI_SUPPORT
5057 if (pDevice->TbiFlags & ENABLE_TBI_FLAG) {
5058 if ((T3_ASIC_REV(pDevice->ChipRevId) == T3_ASIC_REV_5704) ||
5059 (T3_ASIC_REV(pDevice->ChipRevId) == T3_ASIC_REV_5703)) {
5060 /* just poll since we have hardware autoneg. in 5704 */
5061 pDevice->TbiFlags |= TBI_PURE_POLLING_FLAG;
5064 pDevice->TbiFlags |= TBI_POLLING_INTR_FLAG;
5068 bcm5700_validate_param_range(pUmDevice, &scatter_gather[index],
5069 "scatter_gather", 0, 1, 1);
5070 bcm5700_validate_param_range(pUmDevice, &tx_checksum[index],
5071 "tx_checksum", 0, 1, 1);
5072 bcm5700_validate_param_range(pUmDevice, &rx_checksum[index],
5073 "rx_checksum", 0, 1, 1);
5074 if (!(pDevice->TaskOffloadCap & LM_TASK_OFFLOAD_TX_TCP_CHECKSUM)) {
5075 if (tx_checksum[index] || rx_checksum[index]) {
5077 pDevice->TaskToOffload = LM_TASK_OFFLOAD_NONE;
5078 printk(KERN_WARNING "%s-%d: Checksum offload not available on this NIC\n", bcm5700_driver, index);
5082 if (rx_checksum[index]) {
5083 pDevice->TaskToOffload |=
5084 LM_TASK_OFFLOAD_RX_TCP_CHECKSUM |
5085 LM_TASK_OFFLOAD_RX_UDP_CHECKSUM;
5087 if (tx_checksum[index]) {
5088 pDevice->TaskToOffload |=
5089 LM_TASK_OFFLOAD_TX_TCP_CHECKSUM |
5090 LM_TASK_OFFLOAD_TX_UDP_CHECKSUM;
5091 pDevice->Flags |= NO_TX_PSEUDO_HDR_CSUM_FLAG;
5095 bcm5700_validate_param_range(pUmDevice, &enable_tso[index],
5096 "enable_tso", 0, 1, 1);
5098 /* Always enable TSO firmware if supported */
5099 /* This way we can turn it on or off on the fly */
5100 if (pDevice->TaskOffloadCap & LM_TASK_OFFLOAD_TCP_SEGMENTATION)
5102 pDevice->TaskToOffload |=
5103 LM_TASK_OFFLOAD_TCP_SEGMENTATION;
5105 if (enable_tso[index] &&
5106 !(pDevice->TaskToOffload & LM_TASK_OFFLOAD_TCP_SEGMENTATION))
5108 printk(KERN_WARNING "%s-%d: TSO not available on this NIC\n", bcm5700_driver, index);
5112 bcm5700_validate_param_range(pUmDevice, &vlan_tag_mode[index],
5113 "vlan_strip_mode", 0, 2, 0);
5114 pUmDevice->vlan_tag_mode = vlan_tag_mode[index];
5116 pUmDevice->vlan_tag_mode = VLAN_TAG_MODE_NORMAL_STRIP;
5119 #endif /* LINUX_KERNEL_VERSION */
5121 #ifdef BCM_NIC_SEND_BD
5122 bcm5700_validate_param_range(pUmDevice, &nic_tx_bd[index], "nic_tx_bd",
5124 if (nic_tx_bd[index])
5125 pDevice->Flags |= NIC_SEND_BD_FLAG;
5126 if ((pDevice->Flags & ENABLE_PCIX_FIX_FLAG) ||
5127 (T3_ASIC_REV(pDevice->ChipRevId) == T3_ASIC_REV_5705)) {
5128 if (pDevice->Flags & NIC_SEND_BD_FLAG) {
5129 pDevice->Flags &= ~NIC_SEND_BD_FLAG;
5130 printk(KERN_WARNING "%s-%d: Nic Send BDs not available on this NIC or not possible on this system\n", bcm5700_driver, index);
5134 #if defined(CONFIG_PCI_MSI) || defined(CONFIG_PCI_USE_VECTOR)
5135 bcm5700_validate_param_range(pUmDevice, &disable_msi[pUmDevice->index],
5136 "disable_msi", 0, 1, 0);
5139 bcm5700_validate_param_range(pUmDevice, &delay_link[index],
5140 "delay_link", 0, 1, 0);
5142 bcm5700_validate_param_range(pUmDevice, &disable_d3hot[index],
5143 "disable_d3hot", 0, 1, 0);
5144 if (disable_d3hot[index]) {
5147 if (enable_wol[index]) {
5148 pDevice->WakeUpModeCap = LM_WAKE_UP_MODE_NONE;
5149 pDevice->WakeUpMode = LM_WAKE_UP_MODE_NONE;
5150 printk(KERN_WARNING "%s-%d: Wake-On-Lan disabled because D3Hot is disabled\n", bcm5700_driver, index);
5153 pDevice->Flags |= DISABLE_D3HOT_FLAG;
5156 return LM_STATUS_SUCCESS;
/*
 * MM_IndicateRxPackets() - hand completed receive packets to the Linux stack.
 *
 * Drains the shared RxPacketReceivedQ: for each LM_PACKET it unmaps the
 * DMA buffer, drops errored/oversized frames, records hardware checksum
 * status in the skb, and delivers the skb via the NICE hook, VLAN
 * hw-accel path, or netif_receive_skb (NAPI).  The LM_PACKET is then
 * recycled with a fresh skb onto RxPacketFreeQ, or parked on
 * rx_out_of_buf_q when skb allocation fails.
 *
 * NOTE(review): this listing elides several lines (loop header, closing
 * braces, #else/#endif arms); comments describe only the visible code.
 */
5160 MM_IndicateRxPackets(PLM_DEVICE_BLOCK pDevice)
5162 	PUM_DEVICE_BLOCK pUmDevice = (PUM_DEVICE_BLOCK) pDevice;
5164 	PUM_PACKET pUmPacket;
5165 	struct sk_buff *skb;
5167 	int vlan_tag_size = 0;
/* When the hardware keeps the VLAN tag in the frame, allow 4 extra bytes
   over RxMtu (the adjustment itself is on an elided line). */
5169 	if (pDevice->ReceiveMask & LM_KEEP_VLAN_TAG)
5173 		pPacket = (PLM_PACKET)
5174 			QQ_PopHead(&pDevice->RxPacketReceivedQ.Container);
5177 		pUmPacket = (PUM_PACKET) pPacket;
5178 #if ! defined(NO_PCI_UNMAP)
/* Release the streaming DMA mapping before the CPU touches the buffer. */
5179 		pci_unmap_single(pUmDevice->pdev,
5180 			pci_unmap_addr(pUmPacket, map[0]),
5181 			pPacket->u.Rx.RxBufferSize,
5182 			PCI_DMA_FROMDEVICE);
/* Discard packets the hardware flagged bad, or that exceed MTU (+tag). */
5184 		if ((pPacket->PacketStatus != LM_STATUS_SUCCESS) ||
5185 			((size = pPacket->PacketSize) >
5186 			(pDevice->RxMtu + vlan_tag_size))) {
5190 				QQ_PushTail(&pUmDevice->rx_out_of_buf_q.Container, pPacket);
5192 				QQ_PushTail(&pDevice->RxPacketFreeQ.Container, pPacket);
5194 			pUmDevice->rx_misc_errors++;
5197 		skb = pUmPacket->skbuff;
5200 		skb->protocol = eth_type_trans(skb, skb->dev);
5201 		if (size > pDevice->RxMtu) {
5202 			/* Make sure we have a valid VLAN tag */
5203 			if (htons(skb->protocol) != 0x8100) {
5204 				dev_kfree_skb_irq(skb);
5205 				pUmDevice->rx_misc_errors++;
/* Propagate hardware RX checksum result when offload is enabled:
   0xffff (ones-complement sum over pseudo-header + payload) means OK. */
5209 		if ((pPacket->Flags & RCV_BD_FLAG_TCP_UDP_CHKSUM_FIELD) &&
5210 			(pDevice->TaskToOffload &
5211 				LM_TASK_OFFLOAD_RX_TCP_CHECKSUM)) {
5212 			if (pPacket->u.Rx.TcpUdpChecksum == 0xffff) {
5214 				skb->ip_summed = CHECKSUM_UNNECESSARY;
5216 				pUmDevice->rx_good_chksum_count++;
5220 				skb->ip_summed = CHECKSUM_NONE;
5221 				pUmDevice->rx_bad_chksum_count++;
5225 			skb->ip_summed = CHECKSUM_NONE;
/* NICE intercept: a registered consumer takes the skb directly; the
   VLAN tag is smuggled through skb->cb[] with a 0x7777 signature. */
5228 		if( pUmDevice->nice_rx ) {
5229 			vlan_tag_t *vlan_tag;
5231 			vlan_tag = (vlan_tag_t *) &skb->cb[0];
5232 			if (pPacket->Flags & RCV_BD_FLAG_VLAN_TAG) {
5233 				vlan_tag->signature = 0x7777;
5234 				vlan_tag->tag = pPacket->VlanTag;
5237 				vlan_tag->signature = 0;
5239 			pUmDevice->nice_rx(skb, pUmDevice->nice_ctx);
/* Normal delivery: VLAN hw-accel when a vlan group is registered and the
   BD carried a tag, otherwise plain receive (NAPI or legacy). */
5245 			if (pUmDevice->vlgrp &&
5246 				(pPacket->Flags & RCV_BD_FLAG_VLAN_TAG)) {
5248 #ifdef BCM_NAPI_RXPOLL
5249 				vlan_hwaccel_receive_skb(skb, pUmDevice->vlgrp,
5252 				vlan_hwaccel_rx(skb, pUmDevice->vlgrp,
5259 #ifdef BCM_NAPI_RXPOLL
5260 				netif_receive_skb(skb);
5266 		pUmDevice->dev->last_rx = jiffies;
/* Re-arm the descriptor: allocate a replacement skb; on failure the
   packet waits on rx_out_of_buf_q for replenish_rx_buffers(). */
5270 			pUmPacket->skbuff = 0;
5271 			QQ_PushTail(&pUmDevice->rx_out_of_buf_q.Container, pPacket);
5273 		skb = dev_alloc_skb(pPacket->u.Rx.RxBufferSize + 2);
5275 			pUmPacket->skbuff = 0;
5276 			QQ_PushTail(&pUmDevice->rx_out_of_buf_q.Container, pPacket);
5279 		pUmPacket->skbuff = skb;
5280 		skb->dev = pUmDevice->dev;
/* Align the IP header; rx_buf_align is typically 2 for this reason. */
5281 		skb_reserve(skb, pUmDevice->rx_buf_align);
5282 		QQ_PushTail(&pDevice->RxPacketFreeQ.Container, pPacket);
5286 	return LM_STATUS_SUCCESS;
/*
 * MM_CoalesceTxBuffer() - linearize a fragmented TX skb into one buffer.
 *
 * Called when the hardware cannot handle the packet's fragment layout.
 * Unmaps the existing head and page-fragment DMA mappings, then
 * skb_copy()s the whole packet into a single new skb (FragCount = 1).
 * Returns LM_STATUS_SUCCESS on success; on allocation failure clears
 * pUmPacket->skbuff and returns LM_STATUS_FAILURE.  The original skb's
 * disposition on both paths is on elided lines — presumably freed on
 * success; verify against the full source.
 */
5290 MM_CoalesceTxBuffer(PLM_DEVICE_BLOCK pDevice, PLM_PACKET pPacket)
5292 	PUM_PACKET pUmPacket = (PUM_PACKET) pPacket;
5293 	struct sk_buff *skb = pUmPacket->skbuff;
5294 	struct sk_buff *nskb;
5295 #if ! defined(NO_PCI_UNMAP)
5296 	PUM_DEVICE_BLOCK pUmDevice = (PUM_DEVICE_BLOCK) pDevice;
/* Drop the old mappings before the buffer is replaced: head first ... */
5298 	pci_unmap_single(pUmDevice->pdev,
5299 		pci_unmap_addr(pUmPacket, map[0]),
5300 		pci_unmap_len(pUmPacket, map_len[0]),
/* ... then one mapping per page fragment (stored at map[i + 1]). */
5306 	for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) {
5307 		pci_unmap_page(pUmDevice->pdev,
5308 			pci_unmap_addr(pUmPacket, map[i + 1]),
5309 			pci_unmap_len(pUmPacket, map_len[i + 1]),
/* GFP_ATOMIC: we may be in the transmit path / interrupt context. */
5315 	if ((nskb = skb_copy(skb, GFP_ATOMIC))) {
5316 		pUmPacket->lm_packet.u.Tx.FragCount = 1;
5318 		pUmPacket->skbuff = nskb;
5319 		return LM_STATUS_SUCCESS;
5322 	pUmPacket->skbuff = 0;
5323 	return LM_STATUS_FAILURE;
5326 /* Returns 1 if not all buffers are allocated */
/*
 * replenish_rx_buffers() - refill the receive ring from rx_out_of_buf_q.
 *
 * Pops packets that are waiting for buffers; a packet that still owns an
 * skb is re-queued directly, otherwise a new skb is allocated (+2 bytes
 * for IP-header alignment via skb_reserve).  On allocation failure the
 * packet is pushed back to the head of the queue and the loop presumably
 * stops (break is on an elided line).  @max, when reached, bounds the
 * number of fresh allocations per call.  Finishes by queuing the
 * replenished descriptors to the hardware via LM_QueueRxPackets().
 */
5328 replenish_rx_buffers(PUM_DEVICE_BLOCK pUmDevice, int max)
5331 	PUM_PACKET pUmPacket;
5332 	PLM_DEVICE_BLOCK pDevice = (PLM_DEVICE_BLOCK) pUmDevice;
5333 	struct sk_buff *skb;
5338 	while ((pUmPacket = (PUM_PACKET)
5339 		QQ_PopHead(&pUmDevice->rx_out_of_buf_q.Container)) != 0) {
5340 		pPacket = (PLM_PACKET) pUmPacket;
5341 		if (pUmPacket->skbuff) {
5342 			/* reuse an old skb */
5343 			QQ_PushTail(&pDevice->RxPacketFreeQ.Container, pPacket);
5347 		if ((skb = dev_alloc_skb(pPacket->u.Rx.RxBufferSize + 2)) == 0) {
/* Out of memory: keep the packet at the queue head for the next pass. */
5348 			QQ_PushHead(&pUmDevice->rx_out_of_buf_q.Container,
5353 		pUmPacket->skbuff = skb;
5354 		skb->dev = pUmDevice->dev;
5355 		skb_reserve(skb, pUmDevice->rx_buf_align);
5356 		QQ_PushTail(&pDevice->RxPacketFreeQ.Container, pPacket);
5360 		if (alloc_cnt >= max)
/* Hand any replenished descriptors to the chip (or retry a deferred
   queueing attempt recorded in QueueAgain). */
5364 	if (queue_rx || pDevice->QueueAgain) {
5365 		LM_QueueRxPackets(pDevice);
/*
 * MM_IndicateTxPackets() - reclaim completed transmit packets.
 *
 * Drains TxPacketXmittedQ: unmaps the head and fragment DMA mappings,
 * frees the skb (dev_kfree_skb_irq — safe in interrupt context), and
 * returns the LM_PACKET to TxPacketFreeQ.  If the queue had been
 * stopped for lack of descriptors (tx_full), the netdev queue is woken
 * once at least half of the TX descriptors are free again, providing
 * hysteresis against rapid stop/start thrash.
 */
5371 MM_IndicateTxPackets(PLM_DEVICE_BLOCK pDevice)
5373 	PUM_DEVICE_BLOCK pUmDevice = (PUM_DEVICE_BLOCK) pDevice;
5375 	PUM_PACKET pUmPacket;
5376 	struct sk_buff *skb;
5377 #if ! defined(NO_PCI_UNMAP) && MAX_SKB_FRAGS
5382 		pPacket = (PLM_PACKET)
5383 			QQ_PopHead(&pDevice->TxPacketXmittedQ.Container);
5386 		pUmPacket = (PUM_PACKET) pPacket;
5387 		skb = pUmPacket->skbuff;
5388 #if ! defined(NO_PCI_UNMAP)
5389 		pci_unmap_single(pUmDevice->pdev,
5390 			pci_unmap_addr(pUmPacket, map[0]),
5391 			pci_unmap_len(pUmPacket, map_len[0]),
/* One pci_unmap_page() per page fragment, mappings stored at map[i+1]. */
5394 		for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) {
5395 			pci_unmap_page(pUmDevice->pdev,
5396 				pci_unmap_addr(pUmPacket, map[i + 1]),
5397 				pci_unmap_len(pUmPacket, map_len[i + 1]),
5402 		dev_kfree_skb_irq(skb);
5403 		pUmPacket->skbuff = 0;
5404 		QQ_PushTail(&pDevice->TxPacketFreeQ.Container, pPacket);
/* Wake the stack only after >= half the descriptors are free. */
5406 	if (pUmDevice->tx_full) {
5407 		if (QQ_GetEntryCnt(&pDevice->TxPacketFreeQ.Container) >=
5408 			(pDevice->TxPacketDescCnt >> 1)) {
5410 			pUmDevice->tx_full = 0;
5411 			netif_wake_queue(pUmDevice->dev);
5414 	return LM_STATUS_SUCCESS;
/*
 * MM_IndicateStatus() - report a link status change to the stack and log.
 *
 * Updates the netdev carrier state (unless suspended) and prints a
 * link up/down message.  On link-up it also reports the negotiated
 * speed, duplex, and RX/TX flow-control state.  No-op before the
 * device is opened.  Delayed link indications (delayed_link_ind) use
 * slightly different message wording ("DOWN"/"UP" vs "Down"/"Up") —
 * both branches are preserved verbatim.
 */
5418 MM_IndicateStatus(PLM_DEVICE_BLOCK pDevice, LM_STATUS Status)
5420 	PUM_DEVICE_BLOCK pUmDevice = (PUM_DEVICE_BLOCK) pDevice;
5421 	struct net_device *dev = pUmDevice->dev;
5422 	LM_FLOW_CONTROL flow_control;
5425 	if (!pUmDevice->opened)
5426 		return LM_STATUS_SUCCESS;
/* Carrier state is only touched while the device is running. */
5428 	if (!pUmDevice->suspended) {
5429 		if (Status == LM_STATUS_LINK_DOWN) {
5430 			netif_carrier_off(dev);
5432 		else if (Status == LM_STATUS_LINK_ACTIVE) {
5433 			netif_carrier_on(dev);
5437 	if (pUmDevice->delayed_link_ind > 0) {
5438 		pUmDevice->delayed_link_ind = 0;
5439 		if (Status == LM_STATUS_LINK_DOWN) {
5440 			printk(KERN_ERR "%s: %s NIC Link is DOWN\n", bcm5700_driver, dev->name);
5442 		else if (Status == LM_STATUS_LINK_ACTIVE) {
5443 			printk(KERN_INFO "%s: %s NIC Link is UP, ", bcm5700_driver, dev->name);
5447 		if (Status == LM_STATUS_LINK_DOWN) {
5448 			printk(KERN_ERR "%s: %s NIC Link is Down\n", bcm5700_driver, dev->name);
5450 		else if (Status == LM_STATUS_LINK_ACTIVE) {
5451 			printk(KERN_INFO "%s: %s NIC Link is Up, ", bcm5700_driver, dev->name);
/* Link-up detail: speed (the assignments to `speed` are on elided
   lines), duplex, and negotiated pause directions. */
5455 	if (Status == LM_STATUS_LINK_ACTIVE) {
5456 		if (pDevice->LineSpeed == LM_LINE_SPEED_1000MBPS)
5458 		else if (pDevice->LineSpeed == LM_LINE_SPEED_100MBPS)
5460 		else if (pDevice->LineSpeed == LM_LINE_SPEED_10MBPS)
5463 		printk("%d Mbps ", speed);
5465 		if (pDevice->DuplexMode == LM_DUPLEX_MODE_FULL)
5466 			printk("full duplex");
5468 			printk("half duplex");
5470 		flow_control = pDevice->FlowControl &
5471 			(LM_FLOW_CONTROL_RECEIVE_PAUSE |
5472 			LM_FLOW_CONTROL_TRANSMIT_PAUSE);
5474 			if (flow_control & LM_FLOW_CONTROL_RECEIVE_PAUSE) {
5475 				printk(", receive ");
5476 				if (flow_control & LM_FLOW_CONTROL_TRANSMIT_PAUSE)
5477 					printk("& transmit ");
5480 				printk(", transmit ");
5482 			printk("flow control ON");
5486 	return LM_STATUS_SUCCESS;
/*
 * MM_UnmapRxDma() - release the DMA mapping of an RX packet's buffer.
 *
 * No-op when the packet has no skb attached, or when the platform does
 * not require unmapping (NO_PCI_UNMAP).  Does not free the skb itself —
 * that is MM_FreeRxBuffer()'s job.
 */
5490 MM_UnmapRxDma(LM_DEVICE_BLOCK *pDevice, LM_PACKET *pPacket)
5492 #if ! defined(NO_PCI_UNMAP)
5493 	UM_DEVICE_BLOCK *pUmDevice = (UM_DEVICE_BLOCK *) pDevice;
5494 	UM_PACKET *pUmPacket = (UM_PACKET *) pPacket;
5496 	if (!pUmPacket->skbuff)
5499 	pci_unmap_single(pUmDevice->pdev,
5500 		pci_unmap_addr(pUmPacket, map[0]),
5501 		pPacket->u.Rx.RxBufferSize,
5502 		PCI_DMA_FROMDEVICE);
/*
 * MM_FreeRxBuffer() - free the skb attached to an RX packet.
 *
 * Caller must have unmapped the DMA address first (see MM_UnmapRxDma).
 * The actual free call sits on an elided line inside the if-body;
 * afterwards the skbuff pointer is cleared so the packet can be
 * re-armed.  Always returns LM_STATUS_SUCCESS.
 */
5507 MM_FreeRxBuffer(PLM_DEVICE_BLOCK pDevice, PLM_PACKET pPacket)
5509 	PUM_PACKET pUmPacket;
5510 	struct sk_buff *skb;
5513 		return LM_STATUS_SUCCESS;
5514 	pUmPacket = (PUM_PACKET) pPacket;
5515 	if ((skb = pUmPacket->skbuff)) {
5516 		/* DMA address already unmapped */
5519 	pUmPacket->skbuff = 0;
5520 	return LM_STATUS_SUCCESS;
/*
 * MM_Sleep() - sleep for approximately @msec milliseconds.
 *
 * Returns LM_STATUS_FAILURE if the sleep was cut short (nonzero jiffies
 * remaining from schedule_timeout) or a signal is pending; otherwise
 * LM_STATUS_SUCCESS.  Must be called from process context.
 *
 * NOTE(review): direct `current->state = ...` assignment predates
 * set_current_state() and lacks its memory barrier — acceptable for the
 * kernels this driver targets, but worth confirming.  Also note
 * `HZ * msec` could overflow for very large msec values on 32-bit.
 */
5524 MM_Sleep(LM_DEVICE_BLOCK *pDevice, LM_UINT32 msec)
5526 	current->state = TASK_INTERRUPTIBLE;
5527 	if (schedule_timeout(HZ * msec / 1000) != 0) {
5528 		return LM_STATUS_FAILURE;
5530 	if (signal_pending(current))
5531 		return LM_STATUS_FAILURE;
5533 	return LM_STATUS_SUCCESS;
/*
 * bcm5700_shutdown() - quiesce the device.
 *
 * Ordering matters: disable interrupts, drop carrier, kill the deferred
 * tasklet, wait for any in-progress poll to finish, then mark the
 * device uninitialized and release the buffered RX packets.  The
 * hardware halt call (if any) sits on elided lines between these steps.
 */
5537 bcm5700_shutdown(UM_DEVICE_BLOCK *pUmDevice)
5539 	LM_DEVICE_BLOCK *pDevice = (LM_DEVICE_BLOCK *) pUmDevice;
5541 	bcm5700_intr_off(pUmDevice);
5542 	netif_carrier_off(pUmDevice->dev);
5544 	tasklet_kill(&pUmDevice->tasklet);
5546 	bcm5700_poll_wait(pUmDevice);
5550 	pDevice->InitDone = 0;
5551 	bcm5700_free_remaining_rx_bufs(pUmDevice);
/*
 * bcm5700_free_remaining_rx_bufs() - drain rx_out_of_buf_q on shutdown.
 *
 * Pops every packet parked on the out-of-buffer queue, unmaps and frees
 * its skb, and returns the bare LM_PACKET descriptor to RxPacketFreeQ.
 * The entry count is snapshotted up front, so this processes exactly
 * the packets queued at call time.
 */
5555 bcm5700_free_remaining_rx_bufs(UM_DEVICE_BLOCK *pUmDevice)
5557 	LM_DEVICE_BLOCK *pDevice = &pUmDevice->lm_dev;
5558 	UM_PACKET *pUmPacket;
5561 	cnt = QQ_GetEntryCnt(&pUmDevice->rx_out_of_buf_q.Container);
5562 	for (i = 0; i < cnt; i++) {
5564 			QQ_PopHead(&pUmDevice->rx_out_of_buf_q.Container))
5567 			MM_UnmapRxDma(pDevice, (LM_PACKET *) pUmPacket);
5568 			MM_FreeRxBuffer(pDevice, &pUmPacket->lm_packet);
5569 			QQ_PushTail(&pDevice->RxPacketFreeQ.Container,
/*
 * bcm5700_validate_param_range() - clamp a module parameter to [min, max].
 *
 * If *param falls outside the range, a warning naming @param_name is
 * printed and — per the message text; the assignment itself is on an
 * elided line — *param is reset to @deflt.
 *
 * NOTE(review): the unsigned casts make negative *param values compare
 * as very large, so negatives are rejected even when min is 0 — this
 * appears intentional.
 */
5576 bcm5700_validate_param_range(UM_DEVICE_BLOCK *pUmDevice, int *param,
5577 	char *param_name, int min, int max, int deflt)
5579 	if (((unsigned int) *param < (unsigned int) min) ||
5580 		((unsigned int) *param > (unsigned int) max)) {
5582 		printk(KERN_WARNING "%s-%d: Invalid %s parameter (%u), using %u\n", bcm5700_driver, pUmDevice->index, param_name, (unsigned int) *param, (unsigned int) deflt);
/*
 * bcm5700_find_peer() - locate the sibling port of a dual-port 5704.
 *
 * Walks the driver's global device list (root_tigon3_dev, chained via
 * next_module) looking for a different net_device whose PCI bus number
 * and slot match @dev — i.e. the other function of the same dual-port
 * adapter.  Only applies to the 5704 ASIC; for other chips the walk is
 * skipped.  The found device is returned via elided lines; NULL-return
 * handling is presumed in the caller.
 */
5588 bcm5700_find_peer(struct net_device *dev)
5590 	struct net_device *tmp_dev;
5591 	UM_DEVICE_BLOCK *pUmDevice, *pUmTmp;
5592 	LM_DEVICE_BLOCK *pDevice;
5595 	pUmDevice = (UM_DEVICE_BLOCK *) dev->priv;
5596 	pDevice = &pUmDevice->lm_dev;
5597 	if (T3_ASIC_REV(pDevice->ChipRevId) == T3_ASIC_REV_5704) {
5598 		tmp_dev = root_tigon3_dev;
5600 			pUmTmp = (PUM_DEVICE_BLOCK) tmp_dev->priv;
/* Same bus + same slot but a different netdev => the peer function. */
5601 			if ((tmp_dev != dev) &&
5602 				(pUmDevice->pdev->bus->number ==
5603 				pUmTmp->pdev->bus->number) &&
5604 				PCI_SLOT(pUmDevice->pdev->devfn) ==
5605 				PCI_SLOT(pUmTmp->pdev->devfn)) {
5609 			tmp_dev = pUmTmp->next_module;
/*
 * MM_FindPeerDev() - LM-layer wrapper around bcm5700_find_peer().
 *
 * Translates the LM device to its net_device, finds the peer port, and
 * returns the peer's LM_DEVICE_BLOCK (the not-found path is on elided
 * lines — presumably returns NULL; confirm against the full source).
 */
5616 MM_FindPeerDev(LM_DEVICE_BLOCK *pDevice)
5618 	UM_DEVICE_BLOCK *pUmDevice = (UM_DEVICE_BLOCK *) pDevice;
5619 	struct net_device *dev = pUmDevice->dev;
5620 	struct net_device *peer_dev;
5622 	peer_dev = bcm5700_find_peer(dev);
5625 	return ((LM_DEVICE_BLOCK *) peer_dev->priv);
/*
 * MM_FindCapability() - look up a PCI capability offset for the device.
 * Thin wrapper over pci_find_capability(); returns the config-space
 * offset of @capability, or 0 when the capability is absent.
 */
5628 int MM_FindCapability(LM_DEVICE_BLOCK *pDevice, int capability)
5630 	UM_DEVICE_BLOCK *pUmDevice = (UM_DEVICE_BLOCK *) pDevice;
5631 	return (pci_find_capability(pUmDevice->pdev, capability));
5634 #if defined(HAVE_POLL_CONTROLLER)||defined(CONFIG_NET_POLL_CONTROLLER)
5636 poll_bcm5700(struct net_device *dev)
5638 UM_DEVICE_BLOCK *pUmDevice = dev->priv;
5640 #if defined(RED_HAT_LINUX_KERNEL) && (LINUX_VERSION_CODE < 0x020605)
5642 bcm5700_interrupt(pUmDevice->pdev->irq, dev, NULL);
5643 #ifdef BCM_NAPI_RXPOLL
5644 if (dev->poll_list.prev) {
5647 bcm5700_poll(dev, &budget);
5654 disable_irq(pUmDevice->pdev->irq);
5655 bcm5700_interrupt(pUmDevice->pdev->irq, dev, NULL);
5656 enable_irq(pUmDevice->pdev->irq);