1 /******************************************************************************/
3 /* Broadcom BCM5700 Linux Network Driver, Copyright (c) 2000 - 2005 Broadcom */
5 /* All rights reserved. */
7 /* This program is free software; you can redistribute it and/or modify */
8 /* it under the terms of the GNU General Public License as published by */
9 /* the Free Software Foundation, located in the file LICENSE. */
11 /******************************************************************************/
/* Driver identification strings: module name, release version, and release
 * date.  Printed in probe/log messages and exposed for diagnostics. */
char bcm5700_driver[] = "bcm5700";
char bcm5700_version[] = "8.3.14a";
char bcm5700_date[] = "(11/2/05)";
21 /* A few user-configurable values. */
24 /* Used to pass the full-duplex flag, etc. */
/* Per-unit link configuration, one slot per supported NIC (MAX_UNITS,
 * 16 entries here).  Overridable as module parameters, see MODULE_PARM /
 * module_param_array below.
 *   line_speed        - forced link speed, 0 = autonegotiate
 *                       (presumably in Mb/s; confirm against validation code)
 *   auto_speed        - 1 = enable speed autonegotiation
 *   full_duplex       - 1 = full duplex when speed is forced
 *   rx_flow_control   - 1 = honor received pause frames
 *   tx_flow_control   - 1 = generate pause frames
 *   auto_flow_control - 1 = autonegotiate flow control */
static int line_speed[MAX_UNITS] = {0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0};
static int auto_speed[MAX_UNITS] = {1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1};
static int full_duplex[MAX_UNITS] = {1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1};
static int rx_flow_control[MAX_UNITS] = {1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1};
static int tx_flow_control[MAX_UNITS] = {1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1};
static int auto_flow_control[MAX_UNITS] = {1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1};
31 #if T3_JUMBO_RCV_RCB_ENTRY_COUNT
32 static int mtu[MAX_UNITS] = {1500,1500,1500,1500,1500,1500,1500,1500,1500,1500,1500,1500,1500,1500,1500,1500}; /* Jumbo MTU for interfaces. */
34 static int tx_checksum[MAX_UNITS] = {1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1};
35 static int rx_checksum[MAX_UNITS] = {1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1};
36 static int scatter_gather[MAX_UNITS] = {1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1};
38 #define TX_DESC_CNT DEFAULT_TX_PACKET_DESC_COUNT
39 static unsigned int tx_pkt_desc_cnt[MAX_UNITS] =
40 {TX_DESC_CNT,TX_DESC_CNT,TX_DESC_CNT,TX_DESC_CNT,TX_DESC_CNT,
41 TX_DESC_CNT,TX_DESC_CNT,TX_DESC_CNT,TX_DESC_CNT,TX_DESC_CNT,
42 TX_DESC_CNT,TX_DESC_CNT,TX_DESC_CNT,TX_DESC_CNT,TX_DESC_CNT,
45 #define RX_DESC_CNT DEFAULT_STD_RCV_DESC_COUNT
46 static unsigned int rx_std_desc_cnt[MAX_UNITS] =
47 {RX_DESC_CNT,RX_DESC_CNT,RX_DESC_CNT,RX_DESC_CNT,RX_DESC_CNT,
48 RX_DESC_CNT,RX_DESC_CNT,RX_DESC_CNT,RX_DESC_CNT,RX_DESC_CNT,
49 RX_DESC_CNT,RX_DESC_CNT,RX_DESC_CNT,RX_DESC_CNT,RX_DESC_CNT,
52 #if T3_JUMBO_RCV_RCB_ENTRY_COUNT
53 #define JBO_DESC_CNT DEFAULT_JUMBO_RCV_DESC_COUNT
54 static unsigned int rx_jumbo_desc_cnt[MAX_UNITS] =
55 {JBO_DESC_CNT,JBO_DESC_CNT,JBO_DESC_CNT,JBO_DESC_CNT,JBO_DESC_CNT,
56 JBO_DESC_CNT,JBO_DESC_CNT,JBO_DESC_CNT,JBO_DESC_CNT,JBO_DESC_CNT,
57 JBO_DESC_CNT,JBO_DESC_CNT,JBO_DESC_CNT,JBO_DESC_CNT,JBO_DESC_CNT,
62 #ifdef BCM_NAPI_RXPOLL
63 static unsigned int adaptive_coalesce[MAX_UNITS] =
64 {0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0};
66 static unsigned int adaptive_coalesce[MAX_UNITS] =
67 {1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1};
70 #define RX_COAL_TK DEFAULT_RX_COALESCING_TICKS
71 static unsigned int rx_coalesce_ticks[MAX_UNITS] =
72 {RX_COAL_TK,RX_COAL_TK,RX_COAL_TK,RX_COAL_TK,RX_COAL_TK,
73 RX_COAL_TK, RX_COAL_TK,RX_COAL_TK,RX_COAL_TK,RX_COAL_TK,
74 RX_COAL_TK,RX_COAL_TK, RX_COAL_TK,RX_COAL_TK,RX_COAL_TK,
77 #define RX_COAL_FM DEFAULT_RX_MAX_COALESCED_FRAMES
78 static unsigned int rx_max_coalesce_frames[MAX_UNITS] =
79 {RX_COAL_FM,RX_COAL_FM,RX_COAL_FM,RX_COAL_FM,RX_COAL_FM,
80 RX_COAL_FM,RX_COAL_FM,RX_COAL_FM,RX_COAL_FM,RX_COAL_FM,
81 RX_COAL_FM,RX_COAL_FM,RX_COAL_FM,RX_COAL_FM,RX_COAL_FM,
84 #define TX_COAL_TK DEFAULT_TX_COALESCING_TICKS
85 static unsigned int tx_coalesce_ticks[MAX_UNITS] =
86 {TX_COAL_TK,TX_COAL_TK,TX_COAL_TK,TX_COAL_TK,TX_COAL_TK,
87 TX_COAL_TK, TX_COAL_TK,TX_COAL_TK,TX_COAL_TK,TX_COAL_TK,
88 TX_COAL_TK,TX_COAL_TK, TX_COAL_TK,TX_COAL_TK,TX_COAL_TK,
91 #define TX_COAL_FM DEFAULT_TX_MAX_COALESCED_FRAMES
92 static unsigned int tx_max_coalesce_frames[MAX_UNITS] =
93 {TX_COAL_FM,TX_COAL_FM,TX_COAL_FM,TX_COAL_FM,TX_COAL_FM,
94 TX_COAL_FM,TX_COAL_FM,TX_COAL_FM,TX_COAL_FM,TX_COAL_FM,
95 TX_COAL_FM,TX_COAL_FM,TX_COAL_FM,TX_COAL_FM,TX_COAL_FM,
98 #define ST_COAL_TK DEFAULT_STATS_COALESCING_TICKS
99 static unsigned int stats_coalesce_ticks[MAX_UNITS] =
100 {ST_COAL_TK,ST_COAL_TK,ST_COAL_TK,ST_COAL_TK,ST_COAL_TK,
101 ST_COAL_TK,ST_COAL_TK,ST_COAL_TK,ST_COAL_TK,ST_COAL_TK,
102 ST_COAL_TK,ST_COAL_TK,ST_COAL_TK,ST_COAL_TK,ST_COAL_TK,
107 static int enable_wol[MAX_UNITS] = {0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0};
110 static int enable_tso[MAX_UNITS] = {1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1};
112 #ifdef BCM_NIC_SEND_BD
113 static int nic_tx_bd[MAX_UNITS] = {0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0};
116 static int vlan_tag_mode[MAX_UNITS] = {0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0};
118 static int delay_link[MAX_UNITS] = {0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0};
119 static int disable_d3hot[MAX_UNITS] = {0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0};
121 #if defined(CONFIG_PCI_MSI) || defined(CONFIG_PCI_USE_VECTOR)
122 static int disable_msi[MAX_UNITS] = {0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0};
123 static int bcm_msi_chipset_bug = 0;
126 #define BCM_TIMER_GRANULARITY (1000000 / HZ)
128 /* Operational parameters that usually are not changed. */
129 /* Time in jiffies before concluding the transmitter is hung. */
130 #define TX_TIMEOUT (2*HZ)
132 #if (LINUX_VERSION_CODE < 0x02030d)
133 #define pci_resource_start(dev, bar) (dev->base_address[bar] & PCI_BASE_ADDRESS_MEM_MASK)
134 #elif (LINUX_VERSION_CODE < 0x02032b)
135 #define pci_resource_start(dev, bar) (dev->resource[bar] & PCI_BASE_ADDRESS_MEM_MASK)
138 #if (LINUX_VERSION_CODE < 0x02032b)
139 #define dev_kfree_skb_irq(skb) dev_kfree_skb(skb)
140 #define netif_wake_queue(dev) clear_bit(0, &dev->tbusy); mark_bh(NET_BH)
141 #define netif_stop_queue(dev) set_bit(0, &dev->tbusy)
143 static inline void netif_start_queue(struct net_device *dev)
150 #define netif_queue_stopped(dev) dev->tbusy
151 #define netif_running(dev) dev->start
153 static inline void tasklet_schedule(struct tasklet_struct *tasklet)
155 queue_task(tasklet, &tq_immediate);
156 mark_bh(IMMEDIATE_BH);
159 static inline void tasklet_init(struct tasklet_struct *tasklet,
160 void (*func)(unsigned long),
163 tasklet->next = NULL;
165 tasklet->routine = (void (*)(void *))func;
166 tasklet->data = (void *)data;
169 #define tasklet_kill(tasklet)
173 #if (LINUX_VERSION_CODE < 0x020300)
174 struct pci_device_id {
175 unsigned int vendor, device; /* Vendor and device ID or PCI_ANY_ID */
176 unsigned int subvendor, subdevice; /* Subsystem ID's or PCI_ANY_ID */
177 unsigned int class, class_mask; /* (class,subclass,prog-if) triplet */
178 unsigned long driver_data; /* Data private to the driver */
183 #define pci_set_drvdata(pdev, dev)
184 #define pci_get_drvdata(pdev) 0
186 #define pci_enable_device(pdev) 0
188 #define __devinit __init
189 #define __devinitdata __initdata
192 #define SET_MODULE_OWNER(dev)
193 #define MODULE_DEVICE_TABLE(pci, pci_tbl)
197 #if (LINUX_VERSION_CODE < 0x020411)
199 #define __devexit_p(x) x
203 #ifndef MODULE_LICENSE
204 #define MODULE_LICENSE(license)
208 typedef void irqreturn_t;
209 #define IRQ_RETVAL(x)
212 #if (LINUX_VERSION_CODE < 0x02032a)
213 static inline void *pci_alloc_consistent(struct pci_dev *pdev, size_t size,
214 dma_addr_t *dma_handle)
218 /* Maximum in slab.c */
222 virt_ptr = kmalloc(size, GFP_KERNEL);
223 *dma_handle = virt_to_bus(virt_ptr);
226 #define pci_free_consistent(dev, size, ptr, dma_ptr) kfree(ptr)
228 #endif /*#if (LINUX_VERSION_CODE < 0x02032a) */
231 #if (LINUX_VERSION_CODE < 0x02040d)
233 #if (LINUX_VERSION_CODE >= 0x020409) && defined(RED_HAT_LINUX_KERNEL)
235 #define BCM_32BIT_DMA_MASK ((u64) 0x00000000ffffffffULL)
236 #define BCM_64BIT_DMA_MASK ((u64) 0xffffffffffffffffULL)
239 /* pci_set_dma_mask is using dma_addr_t */
241 #define BCM_32BIT_DMA_MASK ((dma_addr_t) 0xffffffff)
242 #define BCM_64BIT_DMA_MASK ((dma_addr_t) 0xffffffff)
246 #else /* (LINUX_VERSION_CODE < 0x02040d) */
248 #define BCM_32BIT_DMA_MASK ((u64) 0x00000000ffffffffULL)
249 #define BCM_64BIT_DMA_MASK ((u64) 0xffffffffffffffffULL)
252 #if (LINUX_VERSION_CODE < 0x020329)
253 #define pci_set_dma_mask(pdev, mask) (0)
255 #if (LINUX_VERSION_CODE < 0x020403)
257 pci_set_dma_mask(struct pci_dev *dev, dma_addr_t mask)
259 if(! pci_dma_supported(dev, mask))
262 dev->dma_mask = mask;
269 #if (LINUX_VERSION_CODE < 0x020547)
270 #define pci_set_consistent_dma_mask(pdev, mask) (0)
273 #if (LINUX_VERSION_CODE < 0x020402)
274 #define pci_request_regions(pdev, name) (0)
275 #define pci_release_regions(pdev)
278 #if ! defined(spin_is_locked)
279 #define spin_is_locked(lock) (test_bit(0,(lock)))
282 #define BCM5700_LOCK(pUmDevice, flags) \
283 if ((pUmDevice)->do_global_lock) { \
284 spin_lock_irqsave(&(pUmDevice)->global_lock, flags); \
287 #define BCM5700_UNLOCK(pUmDevice, flags) \
288 if ((pUmDevice)->do_global_lock) { \
289 spin_unlock_irqrestore(&(pUmDevice)->global_lock, flags);\
293 bcm5700_intr_lock(PUM_DEVICE_BLOCK pUmDevice)
295 if (pUmDevice->do_global_lock) {
296 spin_lock(&pUmDevice->global_lock);
301 bcm5700_intr_unlock(PUM_DEVICE_BLOCK pUmDevice)
303 if (pUmDevice->do_global_lock) {
304 spin_unlock(&pUmDevice->global_lock);
309 bcm5700_intr_off(PUM_DEVICE_BLOCK pUmDevice)
311 atomic_inc(&pUmDevice->intr_sem);
312 LM_DisableInterrupt(&pUmDevice->lm_dev);
313 #if (LINUX_VERSION_CODE >= 0x2051c)
314 synchronize_irq(pUmDevice->dev->irq);
318 LM_DisableInterrupt(&pUmDevice->lm_dev);
322 bcm5700_intr_on(PUM_DEVICE_BLOCK pUmDevice)
324 if (atomic_dec_and_test(&pUmDevice->intr_sem)) {
325 LM_EnableInterrupt(&pUmDevice->lm_dev);
330 * Broadcom NIC Extension support
341 #endif /* NICE_SUPPORT */
343 int MM_Packet_Desc_Size = sizeof(UM_PACKET);
/* Standard module metadata reported by modinfo; GPL license declaration
 * keeps the kernel from being marked tainted when this module loads. */
MODULE_AUTHOR("Michael Chan <mchan at broadcom dot com> and Gary Zambrano <zambrano at broadcom dot com>");
MODULE_DESCRIPTION("BCM5700 Driver");
MODULE_LICENSE("GPL");
350 #if (LINUX_VERSION_CODE < 0x020605)
352 MODULE_PARM(debug, "i");
/* Legacy (pre-2.6.5) module parameter declarations.  The format string
 * "1-<MAX_UNITS>i" accepts a comma-separated list of 1..MAX_UNITS ints,
 * filling the per-unit arrays defined above. */
MODULE_PARM(line_speed, "1-" __MODULE_STRING(MAX_UNITS) "i");
MODULE_PARM(auto_speed, "1-" __MODULE_STRING(MAX_UNITS) "i");
MODULE_PARM(full_duplex, "1-" __MODULE_STRING(MAX_UNITS) "i");
MODULE_PARM(rx_flow_control, "1-" __MODULE_STRING(MAX_UNITS) "i");
MODULE_PARM(tx_flow_control, "1-" __MODULE_STRING(MAX_UNITS) "i");
MODULE_PARM(auto_flow_control, "1-" __MODULE_STRING(MAX_UNITS) "i");
359 #if T3_JUMBO_RCV_RCB_ENTRY_COUNT
360 MODULE_PARM(mtu, "1-" __MODULE_STRING(MAX_UNITS) "i");
362 MODULE_PARM(tx_checksum, "1-" __MODULE_STRING(MAX_UNITS) "i");
363 MODULE_PARM(rx_checksum, "1-" __MODULE_STRING(MAX_UNITS) "i");
364 MODULE_PARM(scatter_gather, "1-" __MODULE_STRING(MAX_UNITS) "i");
365 MODULE_PARM(tx_pkt_desc_cnt, "1-" __MODULE_STRING(MAX_UNITS) "i");
366 MODULE_PARM(rx_std_desc_cnt, "1-" __MODULE_STRING(MAX_UNITS) "i");
367 #if T3_JUMBO_RCV_RCB_ENTRY_COUNT
368 MODULE_PARM(rx_jumbo_desc_cnt, "1-" __MODULE_STRING(MAX_UNITS) "i");
371 MODULE_PARM(adaptive_coalesce, "1-" __MODULE_STRING(MAX_UNITS) "i");
372 MODULE_PARM(rx_coalesce_ticks, "1-" __MODULE_STRING(MAX_UNITS) "i");
373 MODULE_PARM(rx_max_coalesce_frames, "1-" __MODULE_STRING(MAX_UNITS) "i");
374 MODULE_PARM(tx_coalesce_ticks, "1-" __MODULE_STRING(MAX_UNITS) "i");
375 MODULE_PARM(tx_max_coalesce_frames, "1-" __MODULE_STRING(MAX_UNITS) "i");
376 MODULE_PARM(stats_coalesce_ticks, "1-" __MODULE_STRING(MAX_UNITS) "i");
379 MODULE_PARM(enable_wol, "1-" __MODULE_STRING(MAX_UNITS) "i");
382 MODULE_PARM(enable_tso, "1-" __MODULE_STRING(MAX_UNITS) "i");
384 #ifdef BCM_NIC_SEND_BD
385 MODULE_PARM(nic_tx_bd, "1-" __MODULE_STRING(MAX_UNITS) "i");
388 MODULE_PARM(vlan_tag_mode, "1-" __MODULE_STRING(MAX_UNITS) "i");
390 MODULE_PARM(delay_link, "1-" __MODULE_STRING(MAX_UNITS) "i");
391 MODULE_PARM(disable_d3hot, "1-" __MODULE_STRING(MAX_UNITS) "i");
393 #if defined(CONFIG_PCI_MSI) || defined(CONFIG_PCI_USE_VECTOR)
394 MODULE_PARM(disable_msi, "1-" __MODULE_STRING(MAX_UNITS) "i");
399 #if (LINUX_VERSION_CODE >= 0x020605) && (LINUX_VERSION_CODE < 0x02060a)
407 #if (LINUX_VERSION_CODE >= 0x2060a)
/* Modern (2.6.10+) module parameter declarations for the same per-unit
 * arrays.  `numvar' receives the number of values actually supplied;
 * permission bits 0 keep the parameters out of sysfs. */
module_param_array(line_speed, int, numvar, 0);
module_param_array(auto_speed, int, numvar, 0);
module_param_array(full_duplex, int, numvar, 0);
module_param_array(rx_flow_control, int, numvar, 0);
module_param_array(tx_flow_control, int, numvar, 0);
module_param_array(auto_flow_control, int, numvar, 0);
419 #if T3_JUMBO_RCV_RCB_ENTRY_COUNT
420 module_param_array(mtu, int, numvar, 0);
422 module_param_array(tx_checksum, int, numvar, 0);
423 module_param_array(rx_checksum, int, numvar, 0);
424 module_param_array(scatter_gather, int, numvar, 0);
425 module_param_array(tx_pkt_desc_cnt, int, numvar, 0);
426 module_param_array(rx_std_desc_cnt, int, numvar, 0);
427 #if T3_JUMBO_RCV_RCB_ENTRY_COUNT
428 module_param_array(rx_jumbo_desc_cnt, int, numvar, 0);
431 module_param_array(adaptive_coalesce, int, numvar, 0);
432 module_param_array(rx_coalesce_ticks, int, numvar, 0);
433 module_param_array(rx_max_coalesce_frames, int, numvar, 0);
434 module_param_array(tx_coalesce_ticks, int, numvar, 0);
435 module_param_array(tx_max_coalesce_frames, int, numvar, 0);
436 module_param_array(stats_coalesce_ticks, int, numvar, 0);
439 module_param_array(enable_wol, int, numvar, 0);
442 module_param_array(enable_tso, int, numvar, 0);
444 #ifdef BCM_NIC_SEND_BD
445 module_param_array(nic_tx_bd, int, numvar, 0);
448 module_param_array(vlan_tag_mode, int, numvar, 0);
450 module_param_array(delay_link, int, numvar, 0);
451 module_param_array(disable_d3hot, int, numvar, 0);
453 #if defined(CONFIG_PCI_MSI) || defined(CONFIG_PCI_USE_VECTOR)
454 module_param_array(disable_msi, int, numvar, 0);
463 #define RUN_AT(x) (jiffies + (x))
465 char kernel_version[] = UTS_RELEASE;
467 #define PCI_SUPPORT_VER2
469 #if ! defined(CAP_NET_ADMIN)
470 #define capable(CAP_XXX) (suser())
473 #define tigon3_debug debug
475 static int tigon3_debug = TIGON3_DEBUG;
477 static int tigon3_debug = 0;
/* Forward declarations: core netdev entry points and timers.
 * bcm5700_open       - net_device open handler
 * bcm5700_timer      - periodic link/housekeeping timer callback
 * bcm5700_stats_timer- periodic statistics collection timer callback
 * bcm5700_reset      - reinitialize the adapter after error/timeout
 * bcm5700_start_xmit - hard_start_xmit transmit handler
 * bcm5700_interrupt  - IRQ handler */
int bcm5700_open(struct net_device *dev);
STATIC void bcm5700_timer(unsigned long data);
STATIC void bcm5700_stats_timer(unsigned long data);
STATIC void bcm5700_reset(struct net_device *dev);
STATIC int bcm5700_start_xmit(struct sk_buff *skb, struct net_device *dev);
STATIC irqreturn_t bcm5700_interrupt(int irq, void *dev_instance, struct pt_regs *regs);
488 STATIC void bcm5700_tasklet(unsigned long data);
/* Forward declarations: shutdown, statistics, ioctl, and address/filter
 * management netdev handlers. */
STATIC int bcm5700_close(struct net_device *dev);
STATIC struct net_device_stats *bcm5700_get_stats(struct net_device *dev);
STATIC int bcm5700_ioctl(struct net_device *dev, struct ifreq *rq, int cmd);
STATIC void bcm5700_do_rx_mode(struct net_device *dev);
STATIC void bcm5700_set_rx_mode(struct net_device *dev);
STATIC int bcm5700_set_mac_addr(struct net_device *dev, void *p);
496 #if T3_JUMBO_RCV_RCB_ENTRY_COUNT
497 STATIC int bcm5700_change_mtu(struct net_device *dev, int new_mtu);
499 #ifdef BCM_NAPI_RXPOLL
500 STATIC int bcm5700_poll(struct net_device *dev, int *budget);
502 STATIC int replenish_rx_buffers(PUM_DEVICE_BLOCK pUmDevice, int max);
503 STATIC int bcm5700_freemem(struct net_device *dev);
505 STATIC int bcm5700_freemem2(UM_DEVICE_BLOCK *pUmDevice, int index);
508 #ifndef BCM_NAPI_RXPOLL
509 STATIC int bcm5700_adapt_coalesce(PUM_DEVICE_BLOCK pUmDevice);
512 STATIC void bcm5700_set_vlan_mode(UM_DEVICE_BLOCK *pUmDevice);
513 STATIC int bcm5700_init_counters(PUM_DEVICE_BLOCK pUmDevice);
515 STATIC void bcm5700_vlan_rx_register(struct net_device *dev, struct vlan_group *vlgrp);
516 STATIC void bcm5700_vlan_rx_kill_vid(struct net_device *dev, uint16_t vid);
/* Forward declarations: teardown helpers and module-parameter range
 * validation (clamps an out-of-range *param to deflt and reports it). */
void bcm5700_shutdown(UM_DEVICE_BLOCK *pUmDevice);
void bcm5700_free_remaining_rx_bufs(UM_DEVICE_BLOCK *pUmDevice);
void bcm5700_validate_param_range(UM_DEVICE_BLOCK *pUmDevice, int *param,
	char *param_name, int min, int max, int deflt);
523 #if defined(HAVE_POLL_CONTROLLER) || defined(CONFIG_NET_POLL_CONTROLLER)
524 STATIC void poll_bcm5700(struct net_device *dev);
527 /* A list of all installed bcm5700 devices. */
528 static struct net_device *root_tigon3_dev = NULL;
530 #if defined(CONFIG_SPARC64) || defined(CONFIG_X86_64) ||defined(CONFIG_PPC64)
533 #if (LINUX_VERSION_CODE < 0x20500)
534 extern int register_ioctl32_conversion(unsigned int cmd,
535 int (*handler)(unsigned int, unsigned int, unsigned long,
537 int unregister_ioctl32_conversion(unsigned int cmd);
539 #include <linux/ioctl32.h>
542 #define BCM_IOCTL32 1
544 atomic_t bcm5700_load_count = ATOMIC_INIT(0);
547 bcm5700_ioctl32(unsigned int fd, unsigned int cmd, unsigned long arg,
551 struct net_device *tmp_dev = root_tigon3_dev;
553 struct nice_req* nrq;
554 struct ifreq_nice32 {
562 if (!capable(CAP_NET_ADMIN))
565 if (mm_copy_from_user(&nrq32, (char *) arg, 32))
568 memcpy(rq.ifr_name, nrq32.ifnr_name, 16);
570 nrq = (struct nice_req*) &rq.ifr_ifru;
571 nrq->cmd = nrq32.cmd;
572 if (nrq->cmd == NICE_CMD_GET_STATS_BLOCK) {
573 nrq->nrq_stats_useraddr = (void *) ((__u64) nrq32.nrq1);
574 nrq->nrq_stats_size = nrq32.nrq2;
577 memcpy(&nrq->nrq_speed, &nrq32.nrq1, 12);
580 if (strcmp(rq.ifr_name, tmp_dev->name) == 0) {
581 ret = bcm5700_ioctl(tmp_dev, &rq, cmd);
583 if (nrq->cmd == NICE_CMD_GET_STATS_BLOCK)
586 memcpy(&nrq32.nrq1, &nrq->nrq_speed, 12);
587 if (mm_copy_to_user((char *) arg, &nrq32, 32))
592 tmp_dev = ((UM_DEVICE_BLOCK *)(tmp_dev->priv))->next_module;
596 #endif /* NICE_SUPPORT */
600 #ifdef MEFHACK_NOTFORPLANETLAB
649 #ifdef MEFHACK_NOTFORPLANETLAB
682 /* indexed by board_t, above */
685 } board_info[] __devinitdata = {
686 #ifdef MEFHACK_NOTFORPLANETLAB
687 { "Broadcom BCM5700 1000Base-T" },
688 { "Broadcom BCM5700 1000Base-SX" },
689 { "Broadcom BCM5700 1000Base-SX" },
690 { "Broadcom BCM5700 1000Base-T" },
691 { "Broadcom BCM5700" },
692 { "Broadcom BCM5701 1000Base-T" },
693 { "Broadcom BCM5701 1000Base-T" },
694 { "Broadcom BCM5701 1000Base-T" },
695 { "Broadcom BCM5701 1000Base-SX" },
696 { "Broadcom BCM5701 1000Base-T" },
697 { "Broadcom BCM5701 1000Base-T" },
698 { "Broadcom BCM5701" },
699 { "Broadcom BCM5702 1000Base-T" },
700 { "Broadcom BCM5703 1000Base-T" },
701 { "Broadcom BCM5703 1000Base-SX" },
702 { "Broadcom B5703 1000Base-SX" },
703 { "3Com 3C996 10/100/1000 Server NIC" },
704 { "3Com 3C996 10/100/1000 Server NIC" },
705 { "3Com 3C996 Gigabit Fiber-SX Server NIC" },
706 { "3Com 3C996 Gigabit Fiber-SX Server NIC" },
707 { "3Com 3C996B Gigabit Server NIC" },
708 { "3Com 3C997 Gigabit Server NIC" },
709 { "3Com 3C997 Gigabit Fiber-SX Server NIC" },
710 { "3Com 3C1000 Gigabit NIC" },
711 { "3Com 3C1000B-T 10/100/1000 PCI" },
712 { "3Com 3C940 Gigabit LOM (21X21)" },
713 { "3Com 3C942 Gigabit LOM (31X31)" },
714 { "3Com 3C998-T Dual Port 10/100/1000 PCI-X Server NIC" },
715 { "3Com 3C998-SX Dual Port 1000-SX PCI-X Server NIC" },
716 { "3Com 3C999-T Quad Port 10/100/1000 PCI-X Server NIC" },
718 { "HP NC6770 Gigabit Server Adapter" },
719 { "NC1020 HP ProLiant Gigabit Server Adapter 32 PCI" },
720 { "HP ProLiant NC 150T PCI 4-port Gigabit Combo Switch Adapter" },
721 { "HP NC7760 Gigabit Server Adapter" },
722 { "HP NC7761 Gigabit Server Adapter" },
723 { "HP NC7770 Gigabit Server Adapter" },
724 { "HP NC7771 Gigabit Server Adapter" },
725 { "HP NC7780 Gigabit Server Adapter" },
726 { "HP NC7781 Gigabit Server Adapter" },
727 { "HP NC7772 Gigabit Server Adapter" },
728 { "HP NC7782 Gigabit Server Adapter" },
729 { "HP NC7783 Gigabit Server Adapter" },
730 { "HP ProLiant NC 320T PCI Express Gigabit Server Adapter" },
731 { "HP ProLiant NC 320i PCI Express Gigabit Server Adapter" },
732 { "HP NC325i Integrated Dual Port PCI Express Gigabit Server Adapter" },
733 { "HP NC324i Integrated Dual Port PCI Express Gigabit Server Adapter" },
734 { "HP NC326i Integrated Dual Port PCI Express Gigabit Server Adapter" },
735 #ifdef MEFHACK_NOTFORPLANETLAB
736 { "Broadcom BCM5704 CIOB-E 1000Base-T" },
737 { "Broadcom BCM5704 1000Base-T" },
738 { "Broadcom BCM5704 1000Base-SX" },
739 { "Broadcom BCM5705 1000Base-T" },
740 { "Broadcom BCM5705M 1000Base-T" },
741 { "Broadcom 570x 10/100 Integrated Controller" },
742 { "Broadcom BCM5901 100Base-TX" },
743 { "Broadcom NetXtreme Gigabit Ethernet for hp" },
744 { "Broadcom BCM5788 NetLink 1000Base-T" },
745 { "Broadcom BCM5789 NetLink 1000Base-T PCI Express" },
746 { "Broadcom BCM5750 1000Base-T PCI" },
747 { "Broadcom BCM5750M 1000Base-T PCI" },
748 { "Broadcom BCM5720 1000Base-T PCI" },
749 { "Broadcom BCM5751 1000Base-T PCI Express" },
750 { "Broadcom BCM5751M 1000Base-T PCI Express" },
751 { "Broadcom BCM5751F 100Base-TX PCI Express" },
752 { "Broadcom BCM5721 1000Base-T PCI Express" },
753 { "Broadcom BCM5753 1000Base-T PCI Express" },
754 { "Broadcom BCM5753M 1000Base-T PCI Express" },
755 { "Broadcom BCM5753F 100Base-TX PCI Express" },
756 { "Broadcom BCM5781 NetLink 1000Base-T PCI Express" },
757 { "Broadcom BCM5752 1000Base-T PCI Express" },
758 { "Broadcom BCM5752M 1000Base-T PCI Express" },
759 { "Broadcom BCM5714 1000Base-T " },
760 { "Broadcom BCM5780 1000Base-T" },
761 { "Broadcom BCM5780S 1000Base-SX" },
762 { "Broadcom BCM5715 1000Base-T " },
763 { "Broadcom BCM5903M Gigabit Ethernet " },
768 static struct pci_device_id bcm5700_pci_tbl[] __devinitdata = {
769 #ifdef MEFHACK_NOTFORPLANETLAB
770 {0x14e4, 0x1644, 0x14e4, 0x1644, 0, 0, BCM5700A6 },
771 {0x14e4, 0x1644, 0x14e4, 0x2, 0, 0, BCM5700T6 },
772 {0x14e4, 0x1644, 0x14e4, 0x3, 0, 0, BCM5700A9 },
773 {0x14e4, 0x1644, 0x14e4, 0x4, 0, 0, BCM5700T9 },
774 {0x14e4, 0x1644, 0x1028, 0xd1, 0, 0, BCM5700 },
775 {0x14e4, 0x1644, 0x1028, 0x0106, 0, 0, BCM5700 },
776 {0x14e4, 0x1644, 0x1028, 0x0109, 0, 0, BCM5700 },
777 {0x14e4, 0x1644, 0x1028, 0x010a, 0, 0, BCM5700 },
778 {0x14e4, 0x1644, 0x10b7, 0x1000, 0, 0, TC996T },
779 {0x14e4, 0x1644, 0x10b7, 0x1001, 0, 0, TC996ST },
780 {0x14e4, 0x1644, 0x10b7, 0x1002, 0, 0, TC996SSX },
781 {0x14e4, 0x1644, 0x10b7, 0x1003, 0, 0, TC997T },
782 {0x14e4, 0x1644, 0x10b7, 0x1005, 0, 0, TC997SX },
783 {0x14e4, 0x1644, 0x10b7, 0x1008, 0, 0, TC942BR01 },
784 {0x14e4, 0x1644, PCI_ANY_ID, PCI_ANY_ID, 0, 0, BCM5700 },
785 {0x14e4, 0x1645, 0x14e4, 1, 0, 0, BCM5701A5 },
786 {0x14e4, 0x1645, 0x14e4, 5, 0, 0, BCM5701T1 },
787 {0x14e4, 0x1645, 0x14e4, 6, 0, 0, BCM5701T8 },
788 {0x14e4, 0x1645, 0x14e4, 7, 0, 0, BCM5701A7 },
789 {0x14e4, 0x1645, 0x14e4, 8, 0, 0, BCM5701A10 },
790 {0x14e4, 0x1645, 0x14e4, 0x8008, 0, 0, BCM5701A12 },
792 {0x14e4, 0x1645, 0x0e11, 0xc1, 0, 0, NC6770 },
793 {0x14e4, 0x1645, 0x0e11, 0x7c, 0, 0, NC7770 },
794 {0x14e4, 0x1645, 0x0e11, 0x85, 0, 0, NC7780 },
795 #ifdef MEFHACK_NOTFORPLANETLAB
796 {0x14e4, 0x1645, 0x1028, 0x0121, 0, 0, BCM5701 },
797 {0x14e4, 0x1645, 0x10b7, 0x1004, 0, 0, TC996SX },
798 {0x14e4, 0x1645, 0x10b7, 0x1006, 0, 0, TC996BT },
799 {0x14e4, 0x1645, 0x10b7, 0x1007, 0, 0, TC1000T },
800 {0x14e4, 0x1645, 0x10b7, 0x1008, 0, 0, TC940BR01 },
801 {0x14e4, 0x1645, PCI_ANY_ID, PCI_ANY_ID, 0, 0, BCM5701 },
802 {0x14e4, 0x1646, 0x14e4, 0x8009, 0, 0, BCM5702 },
803 {0x14e4, 0x1646, PCI_ANY_ID, PCI_ANY_ID, 0, 0, BCM5702 },
804 {0x14e4, 0x16a6, 0x14e4, 0x8009, 0, 0, BCM5702 },
805 {0x14e4, 0x16a6, 0x14e4, 0x000c, 0, 0, BCM5702 },
806 {0x14e4, 0x16a6, 0x0e11, 0xbb, 0, 0, NC7760 },
807 {0x14e4, 0x16a6, PCI_ANY_ID, PCI_ANY_ID, 0, 0, BCM5702 },
808 {0x14e4, 0x16c6, 0x10b7, 0x1100, 0, 0, TC1000BT },
809 {0x14e4, 0x16c6, PCI_ANY_ID, PCI_ANY_ID, 0, 0, BCM5702 },
810 {0x14e4, 0x1647, 0x14e4, 0x0009, 0, 0, BCM5703 },
811 {0x14e4, 0x1647, 0x14e4, 0x000a, 0, 0, BCM5703A31 },
812 {0x14e4, 0x1647, 0x14e4, 0x000b, 0, 0, BCM5703 },
813 {0x14e4, 0x1647, 0x14e4, 0x800a, 0, 0, BCM5703 },
814 {0x14e4, 0x1647, PCI_ANY_ID, PCI_ANY_ID, 0, 0, BCM5703 },
815 {0x14e4, 0x16a7, 0x14e4, 0x0009, 0, 0, BCM5703 },
816 {0x14e4, 0x16a7, 0x14e4, 0x000a, 0, 0, BCM5703A31 },
817 {0x14e4, 0x16a7, 0x14e4, 0x000b, 0, 0, BCM5703 },
818 {0x14e4, 0x16a7, 0x14e4, 0x800a, 0, 0, BCM5703 },
820 {0x14e4, 0x16a7, 0x0e11, 0xca, 0, 0, NC7771 },
821 {0x14e4, 0x16a7, 0x0e11, 0xcb, 0, 0, NC7781 },
822 #ifdef MEFHACK_NOTFORPLANETLAB
823 {0x14e4, 0x16a7, 0x1014, 0x0281, 0, 0, BCM5703ARBUCKLE },
824 {0x14e4, 0x16a7, PCI_ANY_ID, PCI_ANY_ID, 0, 0, BCM5703 },
825 {0x14e4, 0x16c7, 0x14e4, 0x000a, 0, 0, BCM5703A31 },
827 {0x14e4, 0x16c7, 0x0e11, 0xca, 0, 0, NC7771 },
828 {0x14e4, 0x16c7, 0x0e11, 0xcb, 0, 0, NC7781 },
829 #ifdef MEFHACK_NOTFORPLANETLAB
830 {0x14e4, 0x16c7, PCI_ANY_ID, PCI_ANY_ID, 0, 0, BCM5703 },
832 {0x14e4, 0x1648, 0x0e11, 0xcf, 0, 0, NC7772 },
833 {0x14e4, 0x1648, 0x0e11, 0xd0, 0, 0, NC7782 },
834 {0x14e4, 0x1648, 0x0e11, 0xd1, 0, 0, NC7783 },
835 #ifdef MEFHACK_NOTFORPLANETLAB
836 {0x14e4, 0x1648, 0x10b7, 0x2000, 0, 0, TC998T },
837 {0x14e4, 0x1648, 0x10b7, 0x3000, 0, 0, TC999T },
838 {0x14e4, 0x1648, 0x1166, 0x1648, 0, 0, BCM5704CIOBE },
839 {0x14e4, 0x1648, PCI_ANY_ID, PCI_ANY_ID, 0, 0, BCM5704 },
840 {0x14e4, 0x1649, PCI_ANY_ID, PCI_ANY_ID, 0, 0, BCM5704S },
841 {0x14e4, 0x16a8, 0x14e4, 0x16a8, 0, 0, BCM5704S },
842 {0x14e4, 0x16a8, 0x10b7, 0x2001, 0, 0, TC998SX },
843 {0x14e4, 0x16a8, PCI_ANY_ID, PCI_ANY_ID, 0, 0, BCM5704S },
845 {0x14e4, 0x1653, 0x0e11, 0x00e3, 0, 0, NC7761 },
846 #ifdef MEFHACK_NOTFORPLANETLAB
847 {0x14e4, 0x1653, PCI_ANY_ID, PCI_ANY_ID, 0, 0, BCM5705 },
849 {0x14e4, 0x1654, 0x0e11, 0x00e3, 0, 0, NC7761 },
850 {0x14e4, 0x1654, 0x103c, 0x3100, 0, 0, NC1020 },
851 {0x14e4, 0x1654, 0x103c, 0x3226, 0, 0, NC150T },
852 #ifdef MEFHACK_NOTFORPLANETLAB
853 {0x14e4, 0x1654, PCI_ANY_ID, PCI_ANY_ID, 0, 0, BCM5705 },
854 {0x14e4, 0x165d, PCI_ANY_ID, PCI_ANY_ID, 0, 0, BCM5705M },
855 {0x14e4, 0x165e, PCI_ANY_ID, PCI_ANY_ID, 0, 0, BCM5705M },
856 {0x14e4, 0x166e, PCI_ANY_ID, PCI_ANY_ID, 0, 0, BCM5705F },
857 {0x14e4, 0x1696, PCI_ANY_ID, PCI_ANY_ID, 0, 0, BCM5782 },
858 {0x14e4, 0x169c, PCI_ANY_ID, PCI_ANY_ID, 0, 0, BCM5788 },
859 {0x14e4, 0x169d, PCI_ANY_ID, PCI_ANY_ID, 0, 0, BCM5789 },
860 {0x14e4, 0x170d, PCI_ANY_ID, PCI_ANY_ID, 0, 0, BCM5901 },
861 {0x14e4, 0x170e, PCI_ANY_ID, PCI_ANY_ID, 0, 0, BCM5901 },
862 {0x14e4, 0x1676, PCI_ANY_ID, PCI_ANY_ID, 0, 0, BCM5750 },
863 {0x14e4, 0x167c, PCI_ANY_ID, PCI_ANY_ID, 0, 0, BCM5750M },
864 {0x14e4, 0x1677, PCI_ANY_ID, PCI_ANY_ID, 0, 0, BCM5751 },
865 {0x14e4, 0x167d, PCI_ANY_ID, PCI_ANY_ID, 0, 0, BCM5751M },
866 {0x14e4, 0x167e, PCI_ANY_ID, PCI_ANY_ID, 0, 0, BCM5751F },
867 {0x14e4, 0x1658, PCI_ANY_ID, PCI_ANY_ID, 0, 0, BCM5720 },
869 {0x14e4, 0x1659, 0x103c, 0x7031, 0, 0, NC320T },
870 {0x14e4, 0x1659, 0x103c, 0x7032, 0, 0, NC320T },
871 {0x14e4, 0x166a, 0x103c, 0x7035, 0, 0, NC325I },
872 {0x14e4, 0x166b, 0x103c, 0x7036, 0, 0, NC325I },
873 {0x14e4, 0x1668, 0x103c, 0x7039, 0, 0, NC324I },
874 {0x14e4, 0x1669, 0x103c, 0x703a, 0, 0, NC324I },
875 {0x14e4, 0x1678, 0x103c, 0x703e, 0, 0, NC326I },
876 {0x14e4, 0x1679, 0x103c, 0x703c, 0, 0, NC326I },
877 #ifdef MEFHACK_NOTFORPLANETLAB
878 {0x14e4, 0x1659, PCI_ANY_ID, PCI_ANY_ID, 0, 0, BCM5721 },
879 {0x14e4, 0x16f7, PCI_ANY_ID, PCI_ANY_ID, 0, 0, BCM5753 },
880 {0x14e4, 0x16fd, PCI_ANY_ID, PCI_ANY_ID, 0, 0, BCM5753M },
881 {0x14e4, 0x16fe, PCI_ANY_ID, PCI_ANY_ID, 0, 0, BCM5753F },
882 {0x14e4, 0x16dd, PCI_ANY_ID, PCI_ANY_ID, 0, 0, BCM5781 },
883 {0x14e4, 0x1600, PCI_ANY_ID, PCI_ANY_ID, 0, 0, BCM5752 },
884 {0x14e4, 0x1601, PCI_ANY_ID, PCI_ANY_ID, 0, 0, BCM5752M },
885 {0x14e4, 0x1668, PCI_ANY_ID, PCI_ANY_ID, 0, 0, BCM5714 },
886 {0x14e4, 0x166a, PCI_ANY_ID, PCI_ANY_ID, 0, 0, BCM5780 },
887 {0x14e4, 0x166b, PCI_ANY_ID, PCI_ANY_ID, 0, 0, BCM5780S },
888 {0x14e4, 0x1678, PCI_ANY_ID, PCI_ANY_ID, 0, 0, BCM5715 },
889 {0x14e4, 0x16ff, PCI_ANY_ID, PCI_ANY_ID, 0, 0, BCM5903M },
894 MODULE_DEVICE_TABLE(pci, bcm5700_pci_tbl);
897 extern int bcm5700_proc_create(void);
898 extern int bcm5700_proc_create_dev(struct net_device *dev);
899 extern int bcm5700_proc_remove_dev(struct net_device *dev);
900 extern int bcm5700_proc_remove_notifier(void);
903 #if (LINUX_VERSION_CODE >= 0x2060a)
904 static struct pci_device_id pci_AMD762id[]={
905 { PCI_DEVICE(PCI_VENDOR_ID_AMD,
906 PCI_DEVICE_ID_AMD_FE_GATE_700C) },
911 /*******************************************************************************
912 *******************************************************************************
915 int get_csum_flag(LM_UINT32 ChipRevId)
917 return NETIF_F_IP_CSUM;
920 /*******************************************************************************
921 *******************************************************************************
923 This function returns true if the device passed to it is attached to an
924 ICH-ICH4. If the chip is not attached to an ICH, or is attached to an ICH5
925 or newer, it returns false.
927 This function determines which bridge it is attached to by scaning the pci
928 bus looking for bridge chips (hdr_type=1). When a bridge chip is detected,
929 the bridge's subordinate's secondary bus number is compared with this
930 devices bus number. If they match, then the device is attached to this
931 bridge. The bridge's device id is compared to a list of known device ids for
932 ICH-ICH4. Since many older ICH's (ICH2-ICH7) share the same device id, the
933 chip revision must also be checked to determine if the chip is older than an
936 To scan the bus, one of two functions is used depending on the kernel
937 version. For 2.4 kernels, the pci_find_device function is used. This
938 function has been depricated in the 2.6 kernel and replaced with the
939 fucntion pci_get_device. The macro walk_pci_bus determines which function to
940 use when the driver is built.
943 #if (LINUX_VERSION_CODE >= 0x2060a)
944 #define walk_pci_bus(d) while ((d = pci_get_device( \
945 PCI_ANY_ID, PCI_ANY_ID, d)) != NULL)
947 #define unwalk_pci_bus(d) pci_dev_put(d)
950 #define walk_pci_bus(d) while ((d = pci_find_device( \
951 PCI_ANY_ID, PCI_ANY_ID, d)) != NULL)
952 #define unwalk_pci_bus(d)
956 #define ICH5_CHIP_VERSION 0xc0
958 static struct pci_device_id pci_ICHtable[] = {
959 {0x8086, 0x2418}, /* PCI_DEVICE_ID_INTEL_82801AA_8 */
960 {0x8086, 0x2428}, /* PCI_DEVICE_ID_INTEL_82801AB_8 */
961 {0x8086, 0x244e}, /* PCI_DEVICE_ID_INTEL_82801BA_6 */
962 {0x8086, 0x2448}, /* PCI_DEVICE_ID_INTEL_82801BA_11 */
966 int attached_to_ICH4_or_older( struct pci_dev *pdev)
968 struct pci_dev *tmp_pdev = NULL;
969 struct pci_device_id *ich_table;
972 walk_pci_bus (tmp_pdev) {
973 if ((tmp_pdev->hdr_type == 1) &&
974 (tmp_pdev->subordinate != NULL) &&
975 (tmp_pdev->subordinate->secondary == pdev->bus->number)) {
977 ich_table = pci_ICHtable;
979 while (ich_table->vendor) {
980 if ((ich_table->vendor == tmp_pdev->vendor) &&
981 (ich_table->device == tmp_pdev->device)) {
983 pci_read_config_byte( tmp_pdev,
984 PCI_REVISION_ID, &chip_rev);
986 if (chip_rev < ICH5_CHIP_VERSION) {
987 unwalk_pci_bus( tmp_pdev);
998 static int __devinit bcm5700_init_board(struct pci_dev *pdev,
999 struct net_device **dev_out,
1002 struct net_device *dev;
1003 PUM_DEVICE_BLOCK pUmDevice;
1004 PLM_DEVICE_BLOCK pDevice;
1009 /* dev zeroed in init_etherdev */
1010 #if (LINUX_VERSION_CODE >= 0x20600)
1011 dev = alloc_etherdev(sizeof(*pUmDevice));
1013 dev = init_etherdev(NULL, sizeof(*pUmDevice));
1016 printk (KERN_ERR "%s: unable to alloc new ethernet\n",
1020 SET_MODULE_OWNER(dev);
1021 #if (LINUX_VERSION_CODE >= 0x20600)
1022 SET_NETDEV_DEV(dev, &pdev->dev);
1024 pUmDevice = (PUM_DEVICE_BLOCK) dev->priv;
1026 /* enable device (incl. PCI PM wakeup), and bus-mastering */
1027 rc = pci_enable_device (pdev);
1031 rc = pci_request_regions(pdev, bcm5700_driver);
1035 pci_set_master(pdev);
1037 if (pci_set_dma_mask(pdev, BCM_64BIT_DMA_MASK) == 0) {
1038 pUmDevice->using_dac = 1;
1039 if (pci_set_consistent_dma_mask(pdev, BCM_64BIT_DMA_MASK) != 0)
1041 printk(KERN_ERR "pci_set_consistent_dma_mask failed\n");
1042 pci_release_regions(pdev);
1046 else if (pci_set_dma_mask(pdev, BCM_32BIT_DMA_MASK) == 0) {
1047 pUmDevice->using_dac = 0;
1050 printk(KERN_ERR "System does not support DMA\n");
1051 pci_release_regions(pdev);
1055 pUmDevice->dev = dev;
1056 pUmDevice->pdev = pdev;
1057 pUmDevice->mem_list_num = 0;
1058 pUmDevice->next_module = root_tigon3_dev;
1059 pUmDevice->index = board_idx;
1060 root_tigon3_dev = dev;
1062 spin_lock_init(&pUmDevice->global_lock);
1064 spin_lock_init(&pUmDevice->undi_lock);
1066 spin_lock_init(&pUmDevice->phy_lock);
1068 pDevice = (PLM_DEVICE_BLOCK) pUmDevice;
1070 pDevice->FunctNum = PCI_FUNC(pUmDevice->pdev->devfn);
1072 #if T3_JUMBO_RCV_RCB_ENTRY_COUNT
1073 if (board_idx < MAX_UNITS) {
1074 bcm5700_validate_param_range(pUmDevice, &mtu[board_idx], "mtu", 1500, 9000, 1500);
1075 dev->mtu = mtu[board_idx];
1078 /* If we're attached to an ICH4 or older, we may need to implement a
1079 workaround for special cycles described in the BCM5704/357 Errata.
1080 This workaround is only need on 5703-A1/2 or 5704-A0 chips that
1081 are attached to a PCI-X bus. The NIC chip type and bus are checked
1082 later in the driver and the flag cleared if the workaround is not
1083 needed. The workaround is enabled by setting the flag UNDI_FIX_FLAG
1084 which casues the driver to use indirect pci-config cycles when
1085 accessing the low-priority mailboxes (MB_REG_WR/RD).
1088 if (attached_to_ICH4_or_older( pdev)) {
1089 pDevice->Flags |= UNDI_FIX_FLAG;
1092 #if (LINUX_VERSION_CODE >= 0x2060a)
1093 if(pci_dev_present(pci_AMD762id)){
1094 pDevice->Flags |= FLUSH_POSTED_WRITE_FLAG;
1095 pDevice->Flags &= ~NIC_SEND_BD_FLAG;
1098 if (pci_find_device(0x1022, 0x700c, NULL)) {
1099 /* AMD762 writes I/O out of order */
1100 /* Setting bit 1 in 762's register 0x4C still doesn't work */
1102 pDevice->Flags |= FLUSH_POSTED_WRITE_FLAG;
1103 pDevice->Flags &= ~NIC_SEND_BD_FLAG;
1106 if (LM_GetAdapterInfo(pDevice) != LM_STATUS_SUCCESS) {
1111 if ( (pDevice->Flags & JUMBO_CAPABLE_FLAG) == 0 ) {
1112 if (dev->mtu > 1500) {
1114 printk(KERN_WARNING "%s-%d: Jumbo mtu sizes not supported, using mtu=1500\n", bcm5700_driver, pUmDevice->index);
1118 pUmDevice->do_global_lock = 0;
1119 if (T3_ASIC_REV(pUmDevice->lm_dev.ChipRevId) == T3_ASIC_REV_5700) {
1120 /* The 5700 chip works best without interleaved register */
1121 /* accesses on certain machines. */
1122 pUmDevice->do_global_lock = 1;
1125 if ((T3_ASIC_REV(pUmDevice->lm_dev.ChipRevId) == T3_ASIC_REV_5701) &&
1126 ((pDevice->PciState & T3_PCI_STATE_NOT_PCI_X_BUS) == 0)) {
1128 pUmDevice->rx_buf_align = 0;
1131 pUmDevice->rx_buf_align = 2;
1133 dev->mem_start = pci_resource_start(pdev, 0);
1134 dev->mem_end = dev->mem_start + sizeof(T3_STD_MEM_MAP);
1135 dev->irq = pdev->irq;
1141 pci_release_regions(pdev);
1142 bcm5700_freemem(dev);
1145 #if (LINUX_VERSION_CODE < 0x020600)
1146 unregister_netdev(dev);
/*
 * bcm5700_print_ver() - print the driver banner (name, NICE support note,
 * version and build date) once at module load.
 */
1154 static int __devinit
1155 bcm5700_print_ver(void)
1157 printk(KERN_INFO "Broadcom Gigabit Ethernet Driver %s ",
1160 printk("with Broadcom NIC Extension (NICE) ");
1162 printk("ver. %s %s\n", bcm5700_version, bcm5700_date);
/*
 * bcm5700_init_one() - PCI probe entry point (one call per matched device).
 * Prints the banner once, delegates board bring-up to bcm5700_init_board(),
 * wires up the net_device method pointers, registers the netdev, then logs
 * the MAC address, PHY/transceiver type and enabled offload features.
 * Also applies the AMD762 PCI-ordering fix and detects an MSI-broken
 * chipset (vendor 0x1066, device 0x0017).
 * NOTE(review): sampled listing — error returns and some braces between the
 * visible lines are not shown.
 */
1166 static int __devinit
1167 bcm5700_init_one(struct pci_dev *pdev,
1168 const struct pci_device_id *ent)
1170 struct net_device *dev = NULL;
1171 PUM_DEVICE_BLOCK pUmDevice;
1172 PLM_DEVICE_BLOCK pDevice;
/* board_idx persists across probe calls: one slot per discovered unit */
1174 static int board_idx = -1;
1175 static int printed_version = 0;
1176 struct pci_dev *pci_dev;
1180 if (!printed_version) {
1181 bcm5700_print_ver();
1183 bcm5700_proc_create();
1185 printed_version = 1;
1188 i = bcm5700_init_board(pdev, &dev, board_idx);
/* Register the 32-bit ioctl translation only for the first loaded unit. */
1197 if (atomic_read(&bcm5700_load_count) == 0) {
1198 register_ioctl32_conversion(SIOCNICE, bcm5700_ioctl32);
1200 atomic_inc(&bcm5700_load_count);
/* Pre-2.6 style net_device method assignment. */
1202 dev->open = bcm5700_open;
1203 dev->hard_start_xmit = bcm5700_start_xmit;
1204 dev->stop = bcm5700_close;
1205 dev->get_stats = bcm5700_get_stats;
1206 dev->set_multicast_list = bcm5700_set_rx_mode;
1207 dev->do_ioctl = bcm5700_ioctl;
1208 dev->set_mac_address = &bcm5700_set_mac_addr;
1209 #if T3_JUMBO_RCV_RCB_ENTRY_COUNT
1210 dev->change_mtu = &bcm5700_change_mtu;
1212 #if (LINUX_VERSION_CODE >= 0x20400)
/* tx watchdog: a hung queue triggers a full adapter reset */
1213 dev->tx_timeout = bcm5700_reset;
1214 dev->watchdog_timeo = TX_TIMEOUT;
1217 dev->vlan_rx_register = &bcm5700_vlan_rx_register;
1218 dev->vlan_rx_kill_vid = &bcm5700_vlan_rx_kill_vid;
1220 #ifdef BCM_NAPI_RXPOLL
1221 dev->poll = bcm5700_poll;
1225 pUmDevice = (PUM_DEVICE_BLOCK) dev->priv;
1226 pDevice = (PLM_DEVICE_BLOCK) pUmDevice;
1228 dev->base_addr = pci_resource_start(pdev, 0);
1229 dev->irq = pdev->irq;
1230 #if defined(HAVE_POLL_CONTROLLER) || defined(CONFIG_NET_POLL_CONTROLLER)
1231 dev->poll_controller = poll_bcm5700;
1234 #if (LINUX_VERSION_CODE >= 0x20600)
1235 if ((i = register_netdev(dev))) {
1236 printk(KERN_ERR "%s: Cannot register net device\n",
1238 if (pUmDevice->lm_dev.pMappedMemBase)
1239 iounmap(pUmDevice->lm_dev.pMappedMemBase);
1240 pci_release_regions(pdev);
1241 bcm5700_freemem(dev);
1248 pci_set_drvdata(pdev, dev);
1250 memcpy(dev->dev_addr, pDevice->NodeAddress, 6);
/* note: trailing comma operator here is intentional in the original */
1251 pUmDevice->name = board_info[ent->driver_data].name,
1252 printk(KERN_INFO "%s: %s found at mem %lx, IRQ %d, ",
1253 dev->name, pUmDevice->name, dev->base_addr,
1255 printk("node addr ");
1256 for (i = 0; i < 6; i++) {
1257 printk("%2.2x", dev->dev_addr[i]);
/* Identify the attached PHY/SerDes from PhyId / TbiFlags / PhyFlags. */
1261 printk(KERN_INFO "%s: ", dev->name);
1262 if ((pDevice->PhyId & PHY_ID_MASK) == PHY_BCM5400_PHY_ID)
1263 printk("Broadcom BCM5400 Copper ");
1264 else if ((pDevice->PhyId & PHY_ID_MASK) == PHY_BCM5401_PHY_ID)
1265 printk("Broadcom BCM5401 Copper ");
1266 else if ((pDevice->PhyId & PHY_ID_MASK) == PHY_BCM5411_PHY_ID)
1267 printk("Broadcom BCM5411 Copper ");
1268 else if (((pDevice->PhyId & PHY_ID_MASK) == PHY_BCM5701_PHY_ID) &&
1269 !(pDevice->TbiFlags & ENABLE_TBI_FLAG)) {
1270 printk("Broadcom BCM5701 Integrated Copper ");
1272 else if ((pDevice->PhyId & PHY_ID_MASK) == PHY_BCM5703_PHY_ID) {
1273 printk("Broadcom BCM5703 Integrated ");
1274 if (pDevice->TbiFlags & ENABLE_TBI_FLAG)
1279 else if ((pDevice->PhyId & PHY_ID_MASK) == PHY_BCM5704_PHY_ID) {
1280 printk("Broadcom BCM5704 Integrated ");
1281 if (pDevice->TbiFlags & ENABLE_TBI_FLAG)
1286 else if (pDevice->PhyFlags & PHY_IS_FIBER){
1287 if(( pDevice->PhyId & PHY_ID_MASK ) == PHY_BCM5780_PHY_ID)
1288 printk("Broadcom BCM5780S Integrated Serdes ");
1291 else if ((pDevice->PhyId & PHY_ID_MASK) == PHY_BCM5705_PHY_ID)
1292 printk("Broadcom BCM5705 Integrated Copper ");
1293 else if ((pDevice->PhyId & PHY_ID_MASK) == PHY_BCM5750_PHY_ID)
1294 printk("Broadcom BCM5750 Integrated Copper ");
1296 else if ((pDevice->PhyId & PHY_ID_MASK) == PHY_BCM5714_PHY_ID)
1297 printk("Broadcom BCM5714 Integrated Copper ");
1298 else if ((pDevice->PhyId & PHY_ID_MASK) == PHY_BCM5780_PHY_ID)
1299 printk("Broadcom BCM5780 Integrated Copper ");
1301 else if ((pDevice->PhyId & PHY_ID_MASK) == PHY_BCM5752_PHY_ID)
1302 printk("Broadcom BCM5752 Integrated Copper ");
1303 else if ((pDevice->PhyId & PHY_ID_MASK) == PHY_BCM8002_PHY_ID)
1304 printk("Broadcom BCM8002 SerDes ");
1305 else if (pDevice->TbiFlags & ENABLE_TBI_FLAG) {
1306 if (T3_ASIC_REV(pDevice->ChipRevId) == T3_ASIC_REV_5703) {
1307 printk("Broadcom BCM5703 Integrated SerDes ");
1309 else if (T3_ASIC_REV(pDevice->ChipRevId) == T3_ASIC_REV_5704) {
1310 printk("Broadcom BCM5704 Integrated SerDes ");
1313 printk("Agilent HDMP-1636 SerDes ");
1319 printk("transceiver found\n");
/* Enable offload features per module parameters and hardware capability. */
1321 #if (LINUX_VERSION_CODE >= 0x20400)
1322 if (scatter_gather[board_idx]) {
1323 dev->features |= NETIF_F_SG;
1324 if (pUmDevice->using_dac && !(pDevice->Flags & BCM5788_FLAG))
1325 dev->features |= NETIF_F_HIGHDMA;
1327 if ((pDevice->TaskOffloadCap & LM_TASK_OFFLOAD_TX_TCP_CHECKSUM) &&
1328 tx_checksum[board_idx]) {
1330 dev->features |= get_csum_flag( pDevice->ChipRevId);
1333 dev->features |= NETIF_F_HW_VLAN_TX | NETIF_F_HW_VLAN_RX;
1336 /* On 5714/15/80 chips, Jumbo Frames and TSO cannot both be enabled at
1337 the same time. Since only one of these features can be enable at a
1338 time, we'll enable only Jumbo Frames and disable TSO when the user
1339 tries to enable both.
1341 dev->features &= ~NETIF_F_TSO;
1343 if ((pDevice->TaskToOffload & LM_TASK_OFFLOAD_TCP_SEGMENTATION) &&
1344 (enable_tso[board_idx])) {
1345 if (T3_ASIC_5714_FAMILY(pDevice->ChipRevId) &&
1346 (dev->mtu > 1500)) {
1347 printk(KERN_ALERT "%s: Jumbo Frames and TSO cannot simultaneously be enabled. Jumbo Frames enabled. TSO disabled.\n", dev->name);
1349 dev->features |= NETIF_F_TSO;
1353 printk(KERN_INFO "%s: Scatter-gather %s, 64-bit DMA %s, Tx Checksum %s, ",
1355 (char *) ((dev->features & NETIF_F_SG) ? "ON" : "OFF"),
1356 (char *) ((dev->features & NETIF_F_HIGHDMA) ? "ON" : "OFF"),
1357 (char *) ((dev->features & get_csum_flag( pDevice->ChipRevId)) ? "ON" : "OFF"));
/* Rx checksum is not usable on 5700 B0 silicon. */
1359 if ((pDevice->ChipRevId != T3_CHIP_ID_5700_B0) &&
1360 rx_checksum[board_idx])
1361 printk("Rx Checksum ON");
1363 printk("Rx Checksum OFF");
1365 printk(", 802.1Q VLAN ON");
1368 if (dev->features & NETIF_F_TSO) {
1373 #ifdef BCM_NAPI_RXPOLL
1374 printk(", NAPI ON");
1379 bcm5700_proc_create_dev(dev);
/* Tasklet used to replenish rx buffers outside hard-irq context. */
1382 tasklet_init(&pUmDevice->tasklet, bcm5700_tasklet,
1383 (unsigned long) pUmDevice);
1385 if (T3_ASIC_REV(pDevice->ChipRevId) == T3_ASIC_REV_5704) {
1386 if ((REG_RD(pDevice, PciCfg.DualMacCtrl) &
1387 T3_DUAL_MAC_CH_CTRL_MASK) == 3) {
1389 printk(KERN_WARNING "%s: Device is configured for Hardware Based Teaming which is not supported with this operating system. Please consult the user diagnostic guide to disable Turbo Teaming.\n", dev->name);
1393 #if (LINUX_VERSION_CODE > 0x20605)
1395 if ((pci_dev = pci_get_device(0x1022, 0x700c, NULL))) {
1397 if ((pci_dev = pci_find_device(0x1022, 0x700c, NULL))) {
1401 /* Found AMD 762 North bridge */
1402 pci_read_config_dword(pci_dev, 0x4c, &val);
1403 if ((val & 0x02) == 0) {
1404 pci_write_config_dword(pci_dev, 0x4c, val | 0x02);
1405 printk(KERN_INFO "%s: Setting AMD762 Northbridge to enable PCI ordering compliance\n", bcm5700_driver);
1409 #if (LINUX_VERSION_CODE > 0x20605)
/* pci_get_device() took a reference; drop it. */
1411 pci_dev_put(pci_dev);
1413 #if defined(CONFIG_PCI_MSI) || defined(CONFIG_PCI_USE_VECTOR)
/* Presence of this bridge marks the platform's MSI as broken. */
1415 if ((pci_dev = pci_get_device(0x1066, 0x0017, NULL))) {
1416 bcm_msi_chipset_bug = 1;
1418 pci_dev_put(pci_dev);
/*
 * bcm5700_remove_one() - PCI remove entry point; undoes bcm5700_init_one().
 * Removes the /proc entry, drops the load count (unregistering the ioctl32
 * handler when the last unit goes away), unregisters the netdev, unmaps the
 * BAR and releases PCI regions.
 */
1426 static void __devexit
1427 bcm5700_remove_one (struct pci_dev *pdev)
1429 struct net_device *dev = pci_get_drvdata (pdev);
1430 PUM_DEVICE_BLOCK pUmDevice = (PUM_DEVICE_BLOCK)dev->priv;
1433 bcm5700_proc_remove_dev(dev);
1436 atomic_dec(&bcm5700_load_count);
1437 if (atomic_read(&bcm5700_load_count) == 0)
1438 unregister_ioctl32_conversion(SIOCNICE);
1440 unregister_netdev(dev);
1442 if (pUmDevice->lm_dev.pMappedMemBase)
1443 iounmap(pUmDevice->lm_dev.pMappedMemBase);
1445 pci_release_regions(pdev);
1447 #if (LINUX_VERSION_CODE < 0x020600)
1453 pci_set_drvdata(pdev, NULL);
1457 int b57_test_intr(UM_DEVICE_BLOCK *pUmDevice);
/*
 * bcm5700_open() - net_device open handler.
 * Sets up timer intervals (delayed link indication, adaptive coalescing,
 * TBI polling, ASF heartbeat), optionally enables MSI on capable silicon,
 * requests the IRQ, initializes the adapter via LM_InitializeAdapter(),
 * programs the MAC address, verifies MSI delivery with a self-test (falling
 * back to INTx on failure), starts the periodic timers and the tx queue.
 * NOTE(review): sampled listing — return statements and some braces between
 * visible lines are not shown.
 */
1460 bcm5700_open(struct net_device *dev)
1462 PUM_DEVICE_BLOCK pUmDevice = (PUM_DEVICE_BLOCK)dev->priv;
1463 PLM_DEVICE_BLOCK pDevice = (PLM_DEVICE_BLOCK) pUmDevice;
1466 if (pUmDevice->suspended){
1469 /* delay for 6 seconds */
1470 pUmDevice->delayed_link_ind = (6 * HZ) / pUmDevice->timer_interval;
1473 #ifndef BCM_NAPI_RXPOLL
1474 pUmDevice->adaptive_expiry = HZ / pUmDevice->timer_interval;
1478 #ifdef INCLUDE_TBI_SUPPORT
/* Poll TBI link state from the timer; 5703 polls 4x as often. */
1479 if ((pDevice->TbiFlags & ENABLE_TBI_FLAG) &&
1480 (pDevice->TbiFlags & TBI_POLLING_FLAGS)) {
1481 pUmDevice->poll_tbi_interval = HZ / pUmDevice->timer_interval;
1482 if (T3_ASIC_REV(pDevice->ChipRevId) == T3_ASIC_REV_5703) {
1483 pUmDevice->poll_tbi_interval /= 4;
1485 pUmDevice->poll_tbi_expiry = pUmDevice->poll_tbi_interval;
1488 /* set this timer for 2 seconds */
1489 pUmDevice->asf_heartbeat = (2 * HZ) / pUmDevice->timer_interval;
1491 #if defined(CONFIG_PCI_MSI) || defined(CONFIG_PCI_USE_VECTOR)
/* MSI only on 575X+ silicon, excluding revs with known MSI errata,
 * and only if no MSI-broken chipset was detected at probe time. */
1494 if ( ( (T3_ASIC_IS_575X_PLUS(pDevice->ChipRevId) ) &&
1495 (T3_ASIC_REV(pDevice->ChipRevId) != T3_ASIC_REV_5714_A0 ) &&
1496 (T3_CHIP_REV(pDevice->ChipRevId) != T3_CHIP_REV_5750_AX ) &&
1497 (T3_CHIP_REV(pDevice->ChipRevId) != T3_CHIP_REV_5750_BX ) ) &&
1498 !bcm_msi_chipset_bug ){
1500 if (disable_msi[pUmDevice->index]==1){
1501 /* do nothing-it's not turned on */
1503 pDevice->Flags |= USING_MSI_FLAG;
1505 REG_WR(pDevice, Msi.Mode, 2 );
1507 rc = pci_enable_msi(pUmDevice->pdev);
/* pci_enable_msi() failed: revert to INTx mode. */
1510 pDevice->Flags &= ~ USING_MSI_FLAG;
1511 REG_WR(pDevice, Msi.Mode, 1 );
1519 if ((rc= request_irq(pUmDevice->pdev->irq, &bcm5700_interrupt, SA_SHIRQ, dev->name, dev)))
1522 #if defined(CONFIG_PCI_MSI) || defined(CONFIG_PCI_USE_VECTOR)
/* request_irq() failed while in MSI mode: tear MSI back down. */
1524 if(pDevice->Flags & USING_MSI_FLAG) {
1526 pci_disable_msi(pUmDevice->pdev);
1527 pDevice->Flags &= ~USING_MSI_FLAG;
1528 REG_WR(pDevice, Msi.Mode, 1 );
1535 pUmDevice->opened = 1;
1536 if (LM_InitializeAdapter(pDevice) != LM_STATUS_SUCCESS) {
1537 pUmDevice->opened = 0;
1538 free_irq(dev->irq, dev);
1539 bcm5700_freemem(dev);
1543 bcm5700_set_vlan_mode(pUmDevice);
1544 bcm5700_init_counters(pUmDevice);
1546 if (pDevice->Flags & UNDI_FIX_FLAG) {
1547 printk(KERN_INFO "%s: Using indirect register access\n", dev->name);
/* Honor an administratively-set MAC address, if valid. */
1550 if (memcmp(dev->dev_addr, pDevice->NodeAddress, 6))
1552 /* Do not use invalid eth addrs: any multicast & all zeros */
1553 if( is_valid_ether_addr(dev->dev_addr) ){
1554 LM_SetMacAddress(pDevice, dev->dev_addr);
1558 printk(KERN_INFO "%s: Invalid administered node address\n",dev->name);
1559 memcpy(dev->dev_addr, pDevice->NodeAddress, 6);
1563 if (tigon3_debug > 1)
1564 printk(KERN_DEBUG "%s: tigon3_open() irq %d.\n", dev->name, dev->irq);
1566 QQ_InitQueue(&pUmDevice->rx_out_of_buf_q.Container,
1567 MAX_RX_PACKET_DESC_COUNT);
1570 #if (LINUX_VERSION_CODE < 0x020300)
1574 atomic_set(&pUmDevice->intr_sem, 0);
1576 LM_EnableInterrupt(pDevice);
1578 #if defined(CONFIG_PCI_MSI) || defined(CONFIG_PCI_USE_VECTOR)
1580 if (pDevice->Flags & USING_MSI_FLAG){
1582 /* int test to check support on older machines */
1583 if (b57_test_intr(pUmDevice) != 1) {
/* MSI self-test failed: fall back to INTx and re-init the adapter. */
1585 LM_DisableInterrupt(pDevice);
1586 free_irq(pUmDevice->pdev->irq, dev);
1587 pci_disable_msi(pUmDevice->pdev);
1588 REG_WR(pDevice, Msi.Mode, 1 );
1589 pDevice->Flags &= ~USING_MSI_FLAG;
1591 rc = LM_ResetAdapter(pDevice);
1592 printk(KERN_ALERT " The MSI support in this system is not functional.\n");
1594 if (rc == LM_STATUS_SUCCESS)
1600 rc = request_irq(pUmDevice->pdev->irq, &bcm5700_interrupt,
1601 SA_SHIRQ, dev->name, dev);
1606 bcm5700_freemem(dev);
1607 pUmDevice->opened = 0;
1612 pDevice->InitDone = TRUE;
1613 atomic_set(&pUmDevice->intr_sem, 0);
1614 LM_EnableInterrupt(pDevice);
/* Main housekeeping timer (link, coalescing, ASF heartbeat, tx hang). */
1619 init_timer(&pUmDevice->timer);
1620 pUmDevice->timer.expires = RUN_AT(pUmDevice->timer_interval);
1621 pUmDevice->timer.data = (unsigned long)dev;
1622 pUmDevice->timer.function = &bcm5700_timer;
1623 add_timer(&pUmDevice->timer);
/* 5705+ chips need a separate timer to harvest hardware statistics. */
1625 if (T3_ASIC_IS_5705_BEYOND(pDevice->ChipRevId)) {
1626 init_timer(&pUmDevice->statstimer);
1627 pUmDevice->statstimer.expires = RUN_AT(pUmDevice->statstimer_interval);
1628 pUmDevice->statstimer.data = (unsigned long)dev;
1629 pUmDevice->statstimer.function = &bcm5700_stats_timer;
1630 add_timer(&pUmDevice->statstimer);
1633 if(pDevice->Flags & USING_MSI_FLAG)
1634 printk(KERN_INFO "%s: Using Message Signaled Interrupt (MSI) \n", dev->name);
1636 printk(KERN_INFO "%s: Using PCI INTX interrupt \n", dev->name);
1638 netif_start_queue(dev);
/*
 * bcm5700_stats_timer() - periodic statistics harvester (5705+ chips).
 * While the device is open, not suspended, interrupts are not masked
 * (intr_sem == 0) and the link is up, pulls hardware counters via
 * LM_GetStats() under the global lock, then rearms itself.
 */
1645 bcm5700_stats_timer(unsigned long data)
1647 struct net_device *dev = (struct net_device *)data;
1648 PUM_DEVICE_BLOCK pUmDevice = (PUM_DEVICE_BLOCK)dev->priv;
1649 PLM_DEVICE_BLOCK pDevice = (PLM_DEVICE_BLOCK) pUmDevice;
1650 unsigned long flags = 0;
1652 if (!pUmDevice->opened)
1655 if (!atomic_read(&pUmDevice->intr_sem) &&
1656 !pUmDevice->suspended &&
1657 (pDevice->LinkStatus == LM_STATUS_LINK_ACTIVE)) {
1658 BCM5700_LOCK(pUmDevice, flags);
1659 LM_GetStats(pDevice);
1660 BCM5700_UNLOCK(pUmDevice, flags);
1663 pUmDevice->statstimer.expires = RUN_AT(pUmDevice->statstimer_interval);
1665 add_timer(&pUmDevice->statstimer);
/*
 * bcm5700_timer() - main periodic housekeeping timer.
 * Per tick: polls TBI link state when configured, delivers delayed link
 * indications, nudges the chip to raise a missed interrupt when a status
 * block sits unserviced, detects tx hangs (pre-2.4 kernels), runs adaptive
 * interrupt coalescing, forces rx-buffer replenish when the out-of-buffer
 * queue is deep, sends the ASF "driver alive" heartbeat, and checks fiber
 * link on 5714-family parts. Always rearms itself at the end.
 * NOTE(review): sampled listing — some braces/else-arms are not shown.
 */
1670 bcm5700_timer(unsigned long data)
1672 struct net_device *dev = (struct net_device *)data;
1673 PUM_DEVICE_BLOCK pUmDevice = (PUM_DEVICE_BLOCK)dev->priv;
1674 PLM_DEVICE_BLOCK pDevice = (PLM_DEVICE_BLOCK) pUmDevice;
1675 unsigned long flags = 0;
1678 if (!pUmDevice->opened)
/* If interrupts are masked or device suspended, just rearm and exit. */
1681 if (atomic_read(&pUmDevice->intr_sem) || pUmDevice->suspended) {
1682 pUmDevice->timer.expires = RUN_AT(pUmDevice->timer_interval);
1683 add_timer(&pUmDevice->timer);
1687 #ifdef INCLUDE_TBI_SUPPORT
/* Software TBI link polling: re-run PHY setup on state change or loss
 * of PCS sync, under the PHY lock. */
1688 if ((pDevice->TbiFlags & TBI_POLLING_FLAGS) &&
1689 (--pUmDevice->poll_tbi_expiry <= 0)) {
1691 BCM5700_PHY_LOCK(pUmDevice, flags);
1692 value32 = REG_RD(pDevice, MacCtrl.Status);
1693 if (((pDevice->LinkStatus == LM_STATUS_LINK_ACTIVE) &&
1694 ((value32 & (MAC_STATUS_LINK_STATE_CHANGED |
1695 MAC_STATUS_CFG_CHANGED)) ||
1696 !(value32 & MAC_STATUS_PCS_SYNCED)))
1698 ((pDevice->LinkStatus != LM_STATUS_LINK_ACTIVE) &&
1699 (value32 & (MAC_STATUS_PCS_SYNCED |
1700 MAC_STATUS_SIGNAL_DETECTED))))
1702 LM_SetupPhy(pDevice);
1704 BCM5700_PHY_UNLOCK(pUmDevice, flags);
1705 pUmDevice->poll_tbi_expiry = pUmDevice->poll_tbi_interval;
/* Countdown for the link indication delayed at open time. */
1710 if (pUmDevice->delayed_link_ind > 0) {
1711 if (pUmDevice->delayed_link_ind == 1)
1712 MM_IndicateStatus(pDevice, pDevice->LinkStatus);
1714 pUmDevice->delayed_link_ind--;
1717 if (pUmDevice->crc_counter_expiry > 0)
1718 pUmDevice->crc_counter_expiry--;
/* Not currently in the ISR: kick the chip if a status block update
 * was missed, so an interrupt gets (re)generated. */
1720 if (!pUmDevice->interrupt) {
1721 if (!(pDevice->Flags & USE_TAGGED_STATUS_FLAG)) {
1722 BCM5700_LOCK(pUmDevice, flags);
1723 if (pDevice->pStatusBlkVirt->Status & STATUS_BLOCK_UPDATED) {
1724 /* This will generate an interrupt */
1725 REG_WR(pDevice, Grc.LocalCtrl,
1726 pDevice->GrcLocalCtrl |
1727 GRC_MISC_LOCAL_CTRL_SET_INT);
1730 REG_WR(pDevice, HostCoalesce.Mode,
1731 pDevice->CoalesceMode |
1732 HOST_COALESCE_ENABLE |
1735 if (!(REG_RD(pDevice, DmaWrite.Mode) &
1736 DMA_WRITE_MODE_ENABLE)) {
1737 BCM5700_UNLOCK(pUmDevice, flags);
1741 BCM5700_UNLOCK(pUmDevice, flags);
1743 if (pUmDevice->tx_queued) {
1744 pUmDevice->tx_queued = 0;
1745 netif_wake_queue(dev);
1748 #if (LINUX_VERSION_CODE < 0x02032b)
/* Pre-2.4 kernels have no netdev watchdog: detect tx hang here. */
1749 if ((QQ_GetEntryCnt(&pDevice->TxPacketFreeQ.Container) !=
1750 pDevice->TxPacketDescCnt) &&
1751 ((jiffies - dev->trans_start) > TX_TIMEOUT)) {
1753 printk(KERN_WARNING "%s: Tx hung\n", dev->name);
1759 #ifndef BCM_NAPI_RXPOLL
/* Re-evaluate adaptive coalescing roughly once per second. */
1760 if (pUmDevice->adaptive_coalesce) {
1761 pUmDevice->adaptive_expiry--;
1762 if (pUmDevice->adaptive_expiry == 0) {
1763 pUmDevice->adaptive_expiry = HZ /
1764 pUmDevice->timer_interval;
1765 bcm5700_adapt_coalesce(pUmDevice);
/* Too many rx descriptors waiting for buffers: force an interrupt so
 * the ISR replenishes them. */
1770 if (QQ_GetEntryCnt(&pUmDevice->rx_out_of_buf_q.Container) >
1771 (unsigned int) pUmDevice->rx_buf_repl_panic_thresh) {
1772 /* Generate interrupt and let isr allocate buffers */
1773 REG_WR(pDevice, HostCoalesce.Mode, pDevice->CoalesceMode |
1774 HOST_COALESCE_ENABLE | HOST_COALESCE_NOW);
/* ASF heartbeat: tell firmware the driver is alive every ~2 seconds. */
1778 if (pDevice->AsfFlags & ASF_ENABLED) {
1779 pUmDevice->asf_heartbeat--;
1780 if (pUmDevice->asf_heartbeat == 0) {
1781 if( (pDevice->Flags & UNDI_FIX_FLAG) ||
1782 (pDevice->Flags & ENABLE_PCIX_FIX_FLAG)) {
1783 MEM_WR_OFFSET(pDevice, T3_CMD_MAILBOX,
1784 T3_CMD_NICDRV_ALIVE2);
1785 MEM_WR_OFFSET(pDevice, T3_CMD_LENGTH_MAILBOX,
1787 MEM_WR_OFFSET(pDevice, T3_CMD_DATA_MAILBOX, 5);
1790 (T3_NIC_MBUF_POOL_ADDR +
1792 T3_CMD_NICDRV_ALIVE2, 1);
1794 (T3_NIC_MBUF_POOL_ADDR +
1795 T3_CMD_LENGTH_MAILBOX),4,1);
1797 (T3_NIC_MBUF_POOL_ADDR +
1798 T3_CMD_DATA_MAILBOX),5,1);
/* Ring the rx-CPU event doorbell to notify firmware of the mailbox. */
1801 value32 = REG_RD(pDevice, Grc.RxCpuEvent);
1802 REG_WR(pDevice, Grc.RxCpuEvent, value32 | BIT_14);
1803 pUmDevice->asf_heartbeat = (2 * HZ) /
1804 pUmDevice->timer_interval;
1809 if (pDevice->PhyFlags & PHY_IS_FIBER){
1810 BCM5700_PHY_LOCK(pUmDevice, flags);
1811 LM_5714_FamFiberCheckLink(pDevice);
1812 BCM5700_PHY_UNLOCK(pUmDevice, flags);
1815 pUmDevice->timer.expires = RUN_AT(pUmDevice->timer_interval);
1816 add_timer(&pUmDevice->timer);
/*
 * bcm5700_init_counters() - reset the per-device software counters.
 * Seeds the adaptive-coalescing state from the device's current coalescing
 * settings (non-NAPI builds) and zeroes the tx/rx bookkeeping counters.
 */
1820 bcm5700_init_counters(PUM_DEVICE_BLOCK pUmDevice)
1823 #ifndef BCM_NAPI_RXPOLL
1824 LM_DEVICE_BLOCK *pDevice = &pUmDevice->lm_dev;
1826 pUmDevice->rx_curr_coalesce_frames = pDevice->RxMaxCoalescedFrames;
1827 pUmDevice->rx_curr_coalesce_ticks = pDevice->RxCoalescingTicks;
1828 pUmDevice->tx_curr_coalesce_frames = pDevice->TxMaxCoalescedFrames;
1829 pUmDevice->rx_last_cnt = 0;
1830 pUmDevice->tx_last_cnt = 0;
1833 pUmDevice->phy_crc_count = 0;
1835 pUmDevice->tx_zc_count = 0;
1836 pUmDevice->tx_chksum_count = 0;
1837 pUmDevice->tx_himem_count = 0;
1838 pUmDevice->rx_good_chksum_count = 0;
1839 pUmDevice->rx_bad_chksum_count = 0;
1842 pUmDevice->tso_pkt_count = 0;
1848 #ifndef BCM_NAPI_RXPOLL
/*
 * bcm5700_do_adapt_coalesce() - apply a new set of interrupt-coalescing
 * parameters (rx/tx max frames, rx ticks, rx frames-during-interrupt) to
 * both the software mirror fields and the HostCoalesce registers.
 * Skips the update entirely if the global lock is already held, to avoid
 * contending with the ISR.
 */
1850 bcm5700_do_adapt_coalesce(PUM_DEVICE_BLOCK pUmDevice,
1851 int rx_frames, int rx_ticks, int tx_frames, int rx_frames_intr)
1853 unsigned long flags = 0;
1854 LM_DEVICE_BLOCK *pDevice = &pUmDevice->lm_dev;
1856 if (pUmDevice->do_global_lock) {
/* bail out rather than spin if someone else holds the lock */
1857 if (spin_is_locked(&pUmDevice->global_lock))
1859 spin_lock_irqsave(&pUmDevice->global_lock, flags);
1861 pUmDevice->rx_curr_coalesce_frames = rx_frames;
1862 pUmDevice->rx_curr_coalesce_ticks = rx_ticks;
1863 pUmDevice->tx_curr_coalesce_frames = tx_frames;
1864 pUmDevice->rx_curr_coalesce_frames_intr = rx_frames_intr;
1865 REG_WR(pDevice, HostCoalesce.RxMaxCoalescedFrames, rx_frames);
1867 REG_WR(pDevice, HostCoalesce.RxCoalescingTicks, rx_ticks);
1869 REG_WR(pDevice, HostCoalesce.TxMaxCoalescedFrames, tx_frames);
1871 REG_WR(pDevice, HostCoalesce.RxMaxCoalescedFramesDuringInt,
1874 BCM5700_UNLOCK(pUmDevice, flags);
/*
 * bcm5700_adapt_coalesce() - adaptive interrupt coalescing (non-NAPI).
 * Estimates the packet rate from the delta of the hardware unicast rx/tx
 * counters since the last tick (rx weighted 2x over tx) and selects one of
 * three coalescing profiles (LO / DEFAULT / HI) accordingly, applying it
 * only when the profile actually changes.
 */
1879 bcm5700_adapt_coalesce(PUM_DEVICE_BLOCK pUmDevice)
1881 PLM_DEVICE_BLOCK pDevice = &pUmDevice->lm_dev;
1882 uint rx_curr_cnt, tx_curr_cnt, rx_delta, tx_delta, total_delta;
1884 rx_curr_cnt = pDevice->pStatsBlkVirt->ifHCInUcastPkts.Low;
1885 tx_curr_cnt = pDevice->pStatsBlkVirt->ifHCOutUcastPkts.Low;
1886 if ((rx_curr_cnt <= pUmDevice->rx_last_cnt) ||
1887 (tx_curr_cnt < pUmDevice->tx_last_cnt)) {
1889 /* skip if there is counter rollover */
1890 pUmDevice->rx_last_cnt = rx_curr_cnt;
1891 pUmDevice->tx_last_cnt = tx_curr_cnt;
1895 rx_delta = rx_curr_cnt - pUmDevice->rx_last_cnt;
1896 tx_delta = tx_curr_cnt - pUmDevice->tx_last_cnt;
/* weighted rate estimate: (2*rx + tx) / 3, then doubled */
1897 total_delta = (((rx_delta + rx_delta) + tx_delta) / 3) << 1;
1899 pUmDevice->rx_last_cnt = rx_curr_cnt;
1900 pUmDevice->tx_last_cnt = tx_curr_cnt;
1902 if (total_delta < ADAPTIVE_LO_PKT_THRESH) {
1903 if (pUmDevice->rx_curr_coalesce_frames !=
1904 ADAPTIVE_LO_RX_MAX_COALESCED_FRAMES) {
1906 bcm5700_do_adapt_coalesce(pUmDevice,
1907 ADAPTIVE_LO_RX_MAX_COALESCED_FRAMES,
1908 ADAPTIVE_LO_RX_COALESCING_TICKS,
1909 ADAPTIVE_LO_TX_MAX_COALESCED_FRAMES,
1910 ADAPTIVE_LO_RX_MAX_COALESCED_FRAMES_DURING_INT);
1913 else if (total_delta < ADAPTIVE_HI_PKT_THRESH) {
1914 if (pUmDevice->rx_curr_coalesce_frames !=
1915 DEFAULT_RX_MAX_COALESCED_FRAMES) {
1917 bcm5700_do_adapt_coalesce(pUmDevice,
1918 DEFAULT_RX_MAX_COALESCED_FRAMES,
1919 DEFAULT_RX_COALESCING_TICKS,
1920 DEFAULT_TX_MAX_COALESCED_FRAMES,
1921 DEFAULT_RX_MAX_COALESCED_FRAMES_DURING_INT);
1925 if (pUmDevice->rx_curr_coalesce_frames !=
1926 ADAPTIVE_HI_RX_MAX_COALESCED_FRAMES) {
1928 bcm5700_do_adapt_coalesce(pUmDevice,
1929 ADAPTIVE_HI_RX_MAX_COALESCED_FRAMES,
1930 ADAPTIVE_HI_RX_COALESCING_TICKS,
1931 ADAPTIVE_HI_TX_MAX_COALESCED_FRAMES,
1932 ADAPTIVE_HI_RX_MAX_COALESCED_FRAMES_DURING_INT);
/*
 * bcm5700_reset() - tx-timeout handler; full adapter reset.
 * If the queue stalled while TSO was on, permanently disables TSO (known
 * hang workaround). Stops the queue, masks interrupts, resets and
 * re-initializes the adapter under the PHY lock, restores rx mode / VLAN
 * mode / counters / MAC address, then re-enables interrupts and wakes the
 * queue.
 */
1941 bcm5700_reset(struct net_device *dev)
1943 PUM_DEVICE_BLOCK pUmDevice = (PUM_DEVICE_BLOCK)dev->priv;
1944 PLM_DEVICE_BLOCK pDevice = (PLM_DEVICE_BLOCK) pUmDevice;
1945 unsigned long flags;
/* tx hang with TSO enabled: drop TSO as a workaround */
1949 if( (dev->features & NETIF_F_TSO) &&
1950 (pUmDevice->tx_full) ) {
1952 dev->features &= ~NETIF_F_TSO;
1956 netif_stop_queue(dev);
1957 bcm5700_intr_off(pUmDevice);
1958 BCM5700_PHY_LOCK(pUmDevice, flags);
1959 LM_ResetAdapter(pDevice);
1960 pDevice->InitDone = TRUE;
1961 bcm5700_do_rx_mode(dev);
1962 bcm5700_set_vlan_mode(pUmDevice);
1963 bcm5700_init_counters(pUmDevice);
/* reprogram an administratively-overridden MAC address */
1964 if (memcmp(dev->dev_addr, pDevice->NodeAddress, 6)) {
1965 LM_SetMacAddress(pDevice, dev->dev_addr);
1967 BCM5700_PHY_UNLOCK(pUmDevice, flags);
1968 atomic_set(&pUmDevice->intr_sem, 1);
1969 bcm5700_intr_on(pUmDevice);
1970 netif_wake_queue(dev);
/*
 * bcm5700_set_vlan_mode() - decide whether the NIC strips VLAN tags on rx.
 * AUTO mode resolves to FORCED_STRIP when ASF firmware is active (firmware
 * needs untagged frames), otherwise NORMAL_STRIP. In NORMAL_STRIP the tag
 * is kept unless a VLAN group (or NICE rx hook) is registered to consume
 * it. Only touches hardware when the receive mask actually changes.
 */
1974 bcm5700_set_vlan_mode(UM_DEVICE_BLOCK *pUmDevice)
1976 LM_DEVICE_BLOCK *pDevice = &pUmDevice->lm_dev;
1977 LM_UINT32 ReceiveMask = pDevice->ReceiveMask;
1978 int vlan_tag_mode = pUmDevice->vlan_tag_mode;
1980 if (vlan_tag_mode == VLAN_TAG_MODE_AUTO_STRIP) {
1981 if (pDevice->AsfFlags & ASF_ENABLED) {
1982 vlan_tag_mode = VLAN_TAG_MODE_FORCED_STRIP;
1985 vlan_tag_mode = VLAN_TAG_MODE_NORMAL_STRIP;
1988 if (vlan_tag_mode == VLAN_TAG_MODE_NORMAL_STRIP) {
1989 ReceiveMask |= LM_KEEP_VLAN_TAG;
1991 if (pUmDevice->vlgrp)
1992 ReceiveMask &= ~LM_KEEP_VLAN_TAG;
1995 if (pUmDevice->nice_rx)
1996 ReceiveMask &= ~LM_KEEP_VLAN_TAG;
1999 else if (vlan_tag_mode == VLAN_TAG_MODE_FORCED_STRIP) {
2000 ReceiveMask &= ~LM_KEEP_VLAN_TAG;
2002 if (ReceiveMask != pDevice->ReceiveMask)
2004 LM_SetReceiveMask(pDevice, ReceiveMask);
/*
 * bcm5700_poll_wait() - block (sleeping 1 jiffy at a time) until any
 * in-flight NAPI poll on this device has finished. No-op in non-NAPI
 * builds.
 */
2009 bcm5700_poll_wait(UM_DEVICE_BLOCK *pUmDevice)
2011 #ifdef BCM_NAPI_RXPOLL
2012 while (pUmDevice->lm_dev.RxPoll) {
2013 current->state = TASK_INTERRUPTIBLE;
2014 schedule_timeout(1);
/*
 * bcm5700_vlan_rx_register() - 802.1Q hook: attach/detach a vlan_group.
 * Quiesces interrupts and any running NAPI poll, records the group, and
 * reprograms the hardware tag-stripping mode accordingly.
 */
2022 bcm5700_vlan_rx_register(struct net_device *dev, struct vlan_group *vlgrp)
2024 PUM_DEVICE_BLOCK pUmDevice = (PUM_DEVICE_BLOCK) dev->priv;
2026 bcm5700_intr_off(pUmDevice);
2027 bcm5700_poll_wait(pUmDevice);
2028 pUmDevice->vlgrp = vlgrp;
2029 bcm5700_set_vlan_mode(pUmDevice);
2030 bcm5700_intr_on(pUmDevice);
/*
 * bcm5700_vlan_rx_kill_vid() - 802.1Q hook: remove one VLAN id.
 * Quiesces interrupts and NAPI, clears the vlan_devices slot for vid,
 * then re-enables interrupts.
 */
2034 bcm5700_vlan_rx_kill_vid(struct net_device *dev, uint16_t vid)
2036 PUM_DEVICE_BLOCK pUmDevice = (PUM_DEVICE_BLOCK) dev->priv;
2038 bcm5700_intr_off(pUmDevice);
2039 bcm5700_poll_wait(pUmDevice);
2040 if (pUmDevice->vlgrp) {
2041 pUmDevice->vlgrp->vlan_devices[vid] = NULL;
2043 bcm5700_intr_on(pUmDevice);
/*
 * bcm5700_start_xmit() - hard_start_xmit handler.
 * Drops the frame if the link is down / device not initialized, defers
 * transmission when called while the ISR holds the global lock, pops a
 * packet descriptor (stopping the queue when none or not enough send BDs
 * remain), fills in checksum-offload, fragment, VLAN-tag and TSO fields,
 * and hands the packet to LM_SendPacket() under the global lock.
 * NOTE(review): sampled listing — return statements and some braces between
 * visible lines are not shown.
 */
2048 bcm5700_start_xmit(struct sk_buff *skb, struct net_device *dev)
2050 PUM_DEVICE_BLOCK pUmDevice = (PUM_DEVICE_BLOCK)dev->priv;
2051 PLM_DEVICE_BLOCK pDevice = (PLM_DEVICE_BLOCK) pUmDevice;
2053 PUM_PACKET pUmPacket;
2054 unsigned long flags = 0;
2057 vlan_tag_t *vlan_tag;
2061 uint16_t ip_tcp_len, tcp_opt_len, tcp_seg_flags;
2064 if ((pDevice->LinkStatus == LM_STATUS_LINK_DOWN) ||
2065 !pDevice->InitDone || pUmDevice->suspended)
2071 #if (LINUX_VERSION_CODE < 0x02032b)
2072 if (test_and_set_bit(0, &dev->tbusy)) {
/* ISR active: queue is stopped and the timer will re-wake it. */
2077 if (pUmDevice->do_global_lock && pUmDevice->interrupt) {
2078 netif_stop_queue(dev);
2079 pUmDevice->tx_queued = 1;
2080 if (!pUmDevice->interrupt) {
2081 netif_wake_queue(dev);
2082 pUmDevice->tx_queued = 0;
2087 pPacket = (PLM_PACKET)
2088 QQ_PopHead(&pDevice->TxPacketFreeQ.Container);
/* No free descriptor: stop queue; re-check to close the race with
 * the ISR freeing descriptors concurrently. */
2090 netif_stop_queue(dev);
2091 pUmDevice->tx_full = 1;
2092 if (QQ_GetEntryCnt(&pDevice->TxPacketFreeQ.Container)) {
2093 netif_wake_queue(dev);
2094 pUmDevice->tx_full = 0;
2098 pUmPacket = (PUM_PACKET) pPacket;
2099 pUmPacket->skbuff = skb;
2101 if (skb->ip_summed == CHECKSUM_HW) {
2102 pPacket->Flags = SND_BD_FLAG_TCP_UDP_CKSUM;
2104 pUmDevice->tx_chksum_count++;
2111 frag_no = skb_shinfo(skb)->nr_frags;
/* Need one send BD per fragment plus one for the head. */
2115 if (atomic_read(&pDevice->SendBdLeft) < (frag_no + 1)) {
2116 netif_stop_queue(dev);
2117 pUmDevice->tx_full = 1;
2118 QQ_PushHead(&pDevice->TxPacketFreeQ.Container, pPacket);
2119 if (atomic_read(&pDevice->SendBdLeft) >= (frag_no + 1)) {
2120 netif_wake_queue(dev);
2121 pUmDevice->tx_full = 0;
2126 pPacket->u.Tx.FragCount = frag_no + 1;
2128 if (pPacket->u.Tx.FragCount > 1)
2129 pUmDevice->tx_zc_count++;
/* VLAN tag from the 802.1Q layer, or (legacy path) from a NICE tag
 * stashed in skb->cb. */
2133 if (pUmDevice->vlgrp && vlan_tx_tag_present(skb)) {
2134 pPacket->VlanTag = vlan_tx_tag_get(skb);
2135 pPacket->Flags |= SND_BD_FLAG_VLAN_TAG;
2139 vlan_tag = (vlan_tag_t *) &skb->cb[0];
2140 if (vlan_tag->signature == 0x5555) {
2141 pPacket->VlanTag = vlan_tag->tag;
2142 pPacket->Flags |= SND_BD_FLAG_VLAN_TAG;
2143 vlan_tag->signature = 0;
/* TSO path: fix up IP/TCP headers and encode mss + header-length bits
 * for the hardware segmentation engine. */
2148 if ((mss = (LM_UINT32) skb_shinfo(skb)->tso_size) &&
2149 (skb->len > pDevice->TxMtu)) {
2151 #if (LINUX_VERSION_CODE >= 0x02060c)
2153 if (skb_header_cloned(skb) &&
2154 pskb_expand_head(skb, 0, 0, GFP_ATOMIC)) {
2160 pUmDevice->tso_pkt_count++;
2162 pPacket->Flags |= SND_BD_FLAG_CPU_PRE_DMA |
2163 SND_BD_FLAG_CPU_POST_DMA;
2166 if (skb->h.th->doff > 5) {
2167 tcp_opt_len = (skb->h.th->doff - 5) << 2;
2169 ip_tcp_len = (skb->nh.iph->ihl << 2) + sizeof(struct tcphdr);
2170 skb->nh.iph->check = 0;
/* 575X+ hardware computes the full TCP checksum itself. */
2172 if ( T3_ASIC_IS_575X_PLUS(pDevice->ChipRevId) ){
2173 skb->h.th->check = 0;
2174 pPacket->Flags &= ~SND_BD_FLAG_TCP_UDP_CKSUM;
2177 skb->h.th->check = ~csum_tcpudp_magic(
2178 skb->nh.iph->saddr, skb->nh.iph->daddr,
2182 skb->nh.iph->tot_len = htons(mss + ip_tcp_len + tcp_opt_len);
/* Encode IP/TCP option lengths into the mss field; the bit position
 * differs between 5705+ and older chips. */
2185 if (tcp_opt_len || (skb->nh.iph->ihl > 5)) {
2186 if ( T3_ASIC_IS_5705_BEYOND(pDevice->ChipRevId) ){
2188 ((skb->nh.iph->ihl - 5) +
2189 (tcp_opt_len >> 2)) << 11;
2193 ((skb->nh.iph->ihl - 5) +
2194 (tcp_opt_len >> 2)) << 12;
2197 pPacket->u.Tx.MaxSegmentSize = mss | tcp_seg_flags;
2201 pPacket->u.Tx.MaxSegmentSize = 0;
2204 BCM5700_LOCK(pUmDevice, flags);
2205 LM_SendPacket(pDevice, pPacket);
2206 BCM5700_UNLOCK(pUmDevice, flags);
2208 #if (LINUX_VERSION_CODE < 0x02032b)
2209 netif_wake_queue(dev);
2211 dev->trans_start = jiffies;
2217 #ifdef BCM_NAPI_RXPOLL
/*
 * bcm5700_poll() - NAPI poll handler.
 * Services up to min(*budget, dev->quota) rx packets via LM_ServiceRxPoll(),
 * replenishes rx buffers, indicates the packets up the stack, and when work
 * is exhausted completes the poll, re-enables rx interrupts, and kicks the
 * coalescing engine / interrupt mailbox to recover any rx or tx events that
 * raced with interrupt re-enable.
 * NOTE(review): sampled listing — returns and some braces between visible
 * lines are not shown (e.g. the RxPoll = FALSE / if (pDevice->RxPoll)
 * sequence at 2252-2253 has intervening lines missing).
 */
2219 bcm5700_poll(struct net_device *dev, int *budget)
2221 int orig_budget = *budget;
2223 UM_DEVICE_BLOCK *pUmDevice = (UM_DEVICE_BLOCK *) dev->priv;
2224 LM_DEVICE_BLOCK *pDevice = &pUmDevice->lm_dev;
2225 unsigned long flags = 0;
2228 if (orig_budget > dev->quota)
2229 orig_budget = dev->quota;
2231 BCM5700_LOCK(pUmDevice, flags);
2232 work_done = LM_ServiceRxPoll(pDevice, orig_budget);
2233 *budget -= work_done;
2234 dev->quota -= work_done;
2236 if (QQ_GetEntryCnt(&pUmDevice->rx_out_of_buf_q.Container)) {
2237 replenish_rx_buffers(pUmDevice, 0);
2239 BCM5700_UNLOCK(pUmDevice, flags);
2241 MM_IndicateRxPackets(pDevice);
2242 BCM5700_LOCK(pUmDevice, flags);
2243 LM_QueueRxPackets(pDevice);
2244 BCM5700_UNLOCK(pUmDevice, flags);
/* Done (or interrupts masked / suspended): leave polling mode. */
2246 if ((work_done < orig_budget) || atomic_read(&pUmDevice->intr_sem) ||
2247 pUmDevice->suspended) {
2249 netif_rx_complete(dev);
2250 BCM5700_LOCK(pUmDevice, flags);
2251 REG_WR(pDevice, Grc.Mode, pDevice->GrcMode);
2252 pDevice->RxPoll = FALSE;
2253 if (pDevice->RxPoll) {
2254 BCM5700_UNLOCK(pUmDevice, flags);
2257 /* Take care of possible missed rx interrupts */
2258 REG_RD_BACK(pDevice, Grc.Mode); /* flush the register write */
2259 tag = pDevice->pStatusBlkVirt->StatusTag;
2260 if ((pDevice->pStatusBlkVirt->Status & STATUS_BLOCK_UPDATED) ||
2261 (pDevice->pStatusBlkVirt->Idx[0].RcvProdIdx !=
2262 pDevice->RcvRetConIdx)) {
2264 REG_WR(pDevice, HostCoalesce.Mode,
2265 pDevice->CoalesceMode | HOST_COALESCE_ENABLE |
2268 /* If a new status block is pending in the WDMA state machine */
2269 /* before the register write to enable the rx interrupt, */
2270 /* the new status block may DMA with no interrupt. In this */
2271 /* scenario, the tag read above will be older than the tag in */
2272 /* the pending status block and writing the older tag will */
2273 /* cause interrupt to be generated. */
2274 else if (pDevice->Flags & USE_TAGGED_STATUS_FLAG) {
2275 MB_REG_WR(pDevice, Mailbox.Interrupt[0].Low,
2277 /* Make sure we service tx in case some tx interrupts */
2279 if (atomic_read(&pDevice->SendBdLeft) <
2280 (T3_SEND_RCB_ENTRY_COUNT / 2)) {
2281 REG_WR(pDevice, HostCoalesce.Mode,
2282 pDevice->CoalesceMode |
2283 HOST_COALESCE_ENABLE |
2287 BCM5700_UNLOCK(pUmDevice, flags);
/*
 * bcm5700_interrupt() - top-half interrupt handler for the NIC.
 *
 * NOTE(review): extraction has dropped many lines here (the embedded
 * original line numbers jump, e.g. 2301->2305, 2361->2374); the code below
 * is preserved verbatim and is NOT complete. Do not edit without the full
 * source.
 *
 * Visible flow: bail out while the chip is uninitialized or interrupts are
 * soft-masked (intr_sem); reject re-entry via the `interrupt` bit; decide
 * whether the interrupt is ours (MSI, status-block UPDATED bit, or the PCI
 * INTA-not-active bit); then service the status block either in
 * tagged-status mode (loop until the tag stops advancing, then ack with the
 * last-seen tag) or in legacy mode (mask/service/unmask loop). Afterwards
 * RX buffers are replenished inline past the panic threshold or deferred to
 * the tasklet, and a stopped TX queue is re-woken.
 */
2295 bcm5700_interrupt(int irq, void *dev_instance, struct pt_regs *regs)
2297 struct net_device *dev = (struct net_device *)dev_instance;
2298 PUM_DEVICE_BLOCK pUmDevice = (PUM_DEVICE_BLOCK)dev->priv;
2299 PLM_DEVICE_BLOCK pDevice = (PLM_DEVICE_BLOCK) pUmDevice;
2300 LM_UINT32 oldtag, newtag;
2301 int i, max_intr_loop;
2305 unsigned int handled = 1;
2307 if (!pDevice->InitDone) {
2309 return IRQ_RETVAL(handled);
/* Interrupts soft-disabled: ack with 1 (keeps HW masked) and leave. */
2312 bcm5700_intr_lock(pUmDevice);
2313 if (atomic_read(&pUmDevice->intr_sem)) {
2314 MB_REG_WR(pDevice, Mailbox.Interrupt[0].Low, 1);
2315 bcm5700_intr_unlock(pUmDevice);
2317 return IRQ_RETVAL(handled);
/* Re-entrancy guard: the handler must not run twice concurrently. */
2320 if (test_and_set_bit(0, (void*)&pUmDevice->interrupt)) {
2321 printk(KERN_ERR "%s: Duplicate entry of the interrupt handler\n",
2323 bcm5700_intr_unlock(pUmDevice);
2325 return IRQ_RETVAL(handled);
/* Claim the interrupt if MSI is in use, the status block says UPDATED,
 * or the PCI state register shows our INTA line asserted. */
2328 if ((pDevice->Flags & USING_MSI_FLAG) ||
2329 (pDevice->pStatusBlkVirt->Status & STATUS_BLOCK_UPDATED) ||
2330 !(REG_RD(pDevice,PciCfg.PciState) & T3_PCI_STATE_INTERRUPT_NOT_ACTIVE) )
2333 if (pUmDevice->intr_test) {
2334 if (!(REG_RD(pDevice, PciCfg.PciState) &
2335 T3_PCI_STATE_INTERRUPT_NOT_ACTIVE) ||
2336 pDevice->Flags & USING_MSI_FLAG ) {
2337 pUmDevice->intr_test_result = 1;
2339 pUmDevice->intr_test = 0;
2342 #ifdef BCM_NAPI_RXPOLL
/* Tagged-status mode: mask, then loop servicing until the status tag
 * stops changing or max_intr_loop iterations elapse; ack with oldtag<<24. */
2347 if (pDevice->Flags & USE_TAGGED_STATUS_FLAG) {
2348 MB_REG_WR(pDevice, Mailbox.Interrupt[0].Low, 1);
2349 oldtag = pDevice->pStatusBlkVirt->StatusTag;
2351 for (i = 0; ; i++) {
2352 pDevice->pStatusBlkVirt->Status &= ~STATUS_BLOCK_UPDATED;
2354 LM_ServiceInterrupts(pDevice);
2355 newtag = pDevice->pStatusBlkVirt->StatusTag;
2356 if ((newtag == oldtag) || (i > max_intr_loop)) {
2357 MB_REG_WR(pDevice, Mailbox.Interrupt[0].Low, oldtag << 24);
2358 pDevice->LastTag = oldtag;
2359 if (pDevice->Flags & UNDI_FIX_FLAG) {
2360 REG_WR(pDevice, Grc.LocalCtrl,
2361 pDevice->GrcLocalCtrl | 0x2);
/* Legacy (non-tagged) mode: mask, clear UPDATED, service, unmask, and
 * flush the unmask with a read-back; repeat while more work is posted. */
2374 MB_REG_WR(pDevice, Mailbox.Interrupt[0].Low, 1);
2375 pDevice->pStatusBlkVirt->Status &= ~STATUS_BLOCK_UPDATED;
2376 LM_ServiceInterrupts(pDevice);
2377 MB_REG_WR(pDevice, Mailbox.Interrupt[0].Low, 0);
2378 dummy = MB_REG_RD(pDevice, Mailbox.Interrupt[0].Low);
2381 while ((pDevice->pStatusBlkVirt->Status & STATUS_BLOCK_UPDATED) &&
2382 (i < max_intr_loop));
2384 if (pDevice->Flags & UNDI_FIX_FLAG) {
2385 REG_WR(pDevice, Grc.LocalCtrl,
2386 pDevice->GrcLocalCtrl | 0x2);
2392 /* not my interrupt */
/* RX buffer replenish: inline when past the panic threshold (or the LM
 * layer asked to re-queue), otherwise defer to the tasklet. */
2397 repl_buf_count = QQ_GetEntryCnt(&pUmDevice->rx_out_of_buf_q.Container);
2398 if (((repl_buf_count > pUmDevice->rx_buf_repl_panic_thresh) ||
2399 pDevice->QueueAgain) &&
2400 (!test_and_set_bit(0, &pUmDevice->tasklet_busy))) {
2402 replenish_rx_buffers(pUmDevice, pUmDevice->rx_buf_repl_isr_limit);
2403 clear_bit(0, (void*)&pUmDevice->tasklet_busy);
2405 else if ((repl_buf_count > pUmDevice->rx_buf_repl_thresh) &&
2406 !pUmDevice->tasklet_pending) {
2408 pUmDevice->tasklet_pending = 1;
2409 tasklet_schedule(&pUmDevice->tasklet);
2412 #ifdef BCM_NAPI_RXPOLL
2413 if (!pDevice->RxPoll &&
2414 QQ_GetEntryCnt(&pUmDevice->rx_out_of_buf_q.Container)) {
2415 pDevice->RxPoll = 1;
2416 MM_ScheduleRxPoll(pDevice);
2419 if (QQ_GetEntryCnt(&pUmDevice->rx_out_of_buf_q.Container)) {
2420 replenish_rx_buffers(pUmDevice, 0);
2423 if (QQ_GetEntryCnt(&pDevice->RxPacketFreeQ.Container) ||
2424 pDevice->QueueAgain) {
2426 LM_QueueRxPackets(pDevice);
/* Done: drop the re-entrancy bit, release the lock, and wake a TX queue
 * that xmit stopped while descriptors were exhausted. */
2431 clear_bit(0, (void*)&pUmDevice->interrupt);
2432 bcm5700_intr_unlock(pUmDevice);
2433 if (pUmDevice->tx_queued) {
2434 pUmDevice->tx_queued = 0;
2435 netif_wake_queue(dev);
2437 return IRQ_RETVAL(handled);
/*
 * bcm5700_tasklet() - deferred RX-buffer replenish, run outside IRQ context.
 * `data` is the UM_DEVICE_BLOCK pointer packed by tasklet_init (presumably;
 * only the cast is visible here). Guards against re-entry with the
 * tasklet_busy bit because RH 7.2 beta tasklets could re-enter.
 * NOTE(review): extraction dropped lines here (e.g. 2451-2453 with the early
 * return path); code preserved verbatim.
 */
2443 bcm5700_tasklet(unsigned long data)
2445 PUM_DEVICE_BLOCK pUmDevice = (PUM_DEVICE_BLOCK)data;
2446 unsigned long flags = 0;
2448 /* RH 7.2 Beta 3 tasklets are reentrant */
2449 if (test_and_set_bit(0, &pUmDevice->tasklet_busy)) {
2450 pUmDevice->tasklet_pending = 0;
2454 pUmDevice->tasklet_pending = 0;
/* Only touch the hardware while the interface is up and not suspended. */
2455 if (pUmDevice->opened && !pUmDevice->suspended) {
2456 BCM5700_LOCK(pUmDevice, flags);
2457 replenish_rx_buffers(pUmDevice, 0);
2458 BCM5700_UNLOCK(pUmDevice, flags);
2461 clear_bit(0, &pUmDevice->tasklet_busy);
/*
 * bcm5700_close() - net_device ->stop handler.
 * Stops the TX queue, shuts the chip down (LM_MulticastClear +
 * bcm5700_shutdown), tears down timers and the IRQ, disables MSI if it was
 * in use, drops to D3 power state, frees driver memory, and re-initializes
 * the RX free-packet queue for the next open.
 * NOTE(review): extraction dropped lines here (e.g. the bodies between
 * 2479/2482/2484 that decide the WOL path); code preserved verbatim.
 */
2466 bcm5700_close(struct net_device *dev)
2469 PUM_DEVICE_BLOCK pUmDevice = (PUM_DEVICE_BLOCK)dev->priv;
2470 PLM_DEVICE_BLOCK pDevice = (PLM_DEVICE_BLOCK) pUmDevice;
2472 #if (LINUX_VERSION_CODE < 0x02032b)
2475 netif_stop_queue(dev);
2476 pUmDevice->opened = 0;
/* WOL/ASF decide how far we power down (bodies lost in extraction). */
2479 if( !(pDevice->AsfFlags & ASF_ENABLED) )
2482 if( enable_wol[pUmDevice->index] == 0 )
2484 printk(KERN_INFO "%s: %s NIC Link is DOWN\n", bcm5700_driver, dev->name);
2486 if (tigon3_debug > 1)
2487 printk(KERN_DEBUG "%s: Shutting down Tigon3\n",
2490 LM_MulticastClear(pDevice);
2491 bcm5700_shutdown(pUmDevice);
/* 5705-and-later chips keep stats with a dedicated software timer. */
2493 if (T3_ASIC_IS_5705_BEYOND(pDevice->ChipRevId)) {
2494 del_timer_sync(&pUmDevice->statstimer);
2497 del_timer_sync(&pUmDevice->timer);
2499 free_irq(pUmDevice->pdev->irq, dev);
2501 #if defined(CONFIG_PCI_MSI) || defined(CONFIG_PCI_USE_VECTOR)
2503 if(pDevice->Flags & USING_MSI_FLAG) {
2504 pci_disable_msi(pUmDevice->pdev);
2505 REG_WR(pDevice, Msi.Mode, 1 );
2506 pDevice->Flags &= ~USING_MSI_FLAG;
2512 #if (LINUX_VERSION_CODE < 0x020300)
2517 LM_SetPowerState(pDevice, LM_POWER_STATE_D3);
2520 bcm5700_freemem(dev);
/* Reset the free queue so a subsequent open starts from a clean slate. */
2522 QQ_InitQueue(&pDevice->RxPacketFreeQ.Container,
2523 MAX_RX_PACKET_DESC_COUNT);
/*
 * bcm5700_freemem() - release all per-device memory tracked in mem_list[].
 * An entry whose recorded size is 0 was kmalloc'd (freed with kfree);
 * any other entry is PCI-consistent DMA memory (freed with
 * pci_free_consistent using the recorded size and bus address). Also clears
 * the status/stats block pointers and, when the device is closed, releases
 * the ioctl-allocated mem_list2[] entries via bcm5700_freemem2().
 */
2529 bcm5700_freemem(struct net_device *dev)
2532 PUM_DEVICE_BLOCK pUmDevice = (PUM_DEVICE_BLOCK)dev->priv;
2533 LM_DEVICE_BLOCK *pDevice = &pUmDevice->lm_dev;
2535 for (i = 0; i < pUmDevice->mem_list_num; i++) {
2536 if (pUmDevice->mem_size_list[i] == 0) {
/* size 0 marks a kmalloc'd buffer */
2537 kfree(pUmDevice->mem_list[i]);
2540 pci_free_consistent(pUmDevice->pdev,
2541 (size_t) pUmDevice->mem_size_list[i],
2542 pUmDevice->mem_list[i],
2543 pUmDevice->dma_list[i]);
2547 pDevice->pStatusBlkVirt = 0;
2548 pDevice->pStatsBlkVirt = 0;
2549 pUmDevice->mem_list_num = 0;
/* Only after close: free consistent memory handed out through ioctl. */
2552 if (!pUmDevice->opened) {
2553 for (i = 0; i < MAX_MEM2; i++) {
2554 if (pUmDevice->mem_size_list2[i]) {
2555 bcm5700_freemem2(pUmDevice, i);
2564 /* Frees consistent memory allocated through ioctl */
2565 /* The memory to be freed is in mem_list2[index] */
/*
 * bcm5700_freemem2() - un-reserve the pages backing mem_list2[index] and
 * return the buffer to the PCI DMA pool; the recorded size is zeroed so the
 * slot can be reused. Page un-reservation walks virt_to_page() over the
 * buffer, which the original author already flagged as non-portable.
 * NOTE(review): the loop over pg..last_pg is truncated by extraction; code
 * preserved verbatim.
 */
2567 bcm5700_freemem2(UM_DEVICE_BLOCK *pUmDevice, int index)
2569 #if (LINUX_VERSION_CODE >= 0x020400)
2571 struct page *pg, *last_pg;
2573 /* Probably won't work on some architectures */
2574 ptr = pUmDevice->mem_list2[index],
2575 pg = virt_to_page(ptr);
2576 last_pg = virt_to_page(ptr + pUmDevice->mem_size_list2[index] - 1);
2578 #if (LINUX_VERSION_CODE > 0x020500)
2579 ClearPageReserved(pg);
2581 mem_map_unreserve(pg);
2586 pci_free_consistent(pUmDevice->pdev,
2587 (size_t) pUmDevice->mem_size_list2[index],
2588 pUmDevice->mem_list2[index],
2589 pUmDevice->dma_list2[index]);
2590 pUmDevice->mem_size_list2[index] = 0;
/*
 * bcm5700_crc_count() - return the accumulated RX CRC error count.
 * On 5700/5701 copper parts the MAC stats block under-reports CRC errors,
 * so the count is read from PHY register 0x14 (after enabling shadow access
 * via bit 15 of reg 0x1e) and accumulated in software; MDIO reads are
 * throttled to once per ~5 seconds via crc_counter_expiry. All other chips
 * fall through to the dot3StatsFCSErrors counter in the stats block.
 * NOTE(review): extraction dropped lines (e.g. the `return 0` bodies after
 * 2608/2632); code preserved verbatim.
 */
2597 bcm5700_crc_count(PUM_DEVICE_BLOCK pUmDevice)
2599 PLM_DEVICE_BLOCK pDevice = &pUmDevice->lm_dev;
2601 PT3_STATS_BLOCK pStats = (PT3_STATS_BLOCK) pDevice->pStatsBlkVirt;
2602 unsigned long flags;
2604 if ((T3_ASIC_REV(pDevice->ChipRevId) == T3_ASIC_REV_5700 ||
2605 T3_ASIC_REV(pDevice->ChipRevId) == T3_ASIC_REV_5701) &&
2606 !(pDevice->TbiFlags & ENABLE_TBI_FLAG)) {
2608 if (!pUmDevice->opened || !pDevice->InitDone)
2614 /* regulate MDIO access during run time */
2615 if (pUmDevice->crc_counter_expiry > 0)
2616 return pUmDevice->phy_crc_count;
2618 pUmDevice->crc_counter_expiry = (5 * HZ) /
2619 pUmDevice->timer_interval;
2621 BCM5700_PHY_LOCK(pUmDevice, flags);
/* Reg 0x1e bit 15 enables access to the CRC counter in reg 0x14. */
2622 LM_ReadPhy(pDevice, 0x1e, &Value32);
2623 if ((Value32 & 0x8000) == 0)
2624 LM_WritePhy(pDevice, 0x1e, Value32 | 0x8000);
2625 LM_ReadPhy(pDevice, 0x14, &Value32);
2626 BCM5700_PHY_UNLOCK(pUmDevice, flags);
2627 /* Sometimes data on the MDIO bus can be corrupted */
2628 if (Value32 != 0xffff)
2629 pUmDevice->phy_crc_count += Value32;
2630 return pUmDevice->phy_crc_count;
2632 else if (pStats == 0) {
2636 return (MM_GETSTATS64(pStats->dot3StatsFCSErrors));
/*
 * bcm5700_rx_err_count() - aggregate RX error count for ethtool/get_stats:
 * CRC errors (via bcm5700_crc_count) plus alignment errors, undersized
 * frames, fragments, oversized frames, and jabbers from the stats block.
 * NOTE(review): the pStats == NULL guard line is missing from this
 * extraction; code preserved verbatim.
 */
2641 bcm5700_rx_err_count(UM_DEVICE_BLOCK *pUmDevice)
2643 LM_DEVICE_BLOCK *pDevice = &pUmDevice->lm_dev;
2644 T3_STATS_BLOCK *pStats = (T3_STATS_BLOCK *) pDevice->pStatsBlkVirt;
2648 return (bcm5700_crc_count(pUmDevice) +
2649 MM_GETSTATS64(pStats->dot3StatsAlignmentErrors) +
2650 MM_GETSTATS64(pStats->etherStatsUndersizePkts) +
2651 MM_GETSTATS64(pStats->etherStatsFragments) +
2652 MM_GETSTATS64(pStats->dot3StatsFramesTooLong) +
2653 MM_GETSTATS64(pStats->etherStatsJabbers));
/*
 * bcm5700_get_stats() - net_device ->get_stats handler.
 * Translates the hardware T3 statistics block into the kernel's
 * net_device_stats structure (packets/bytes in and out, multicast,
 * collisions, and the various RX/TX error categories). CRC and aggregate RX
 * error counts come from the helpers above rather than directly from the
 * stats block, because of the 5700/5701 PHY workaround.
 * NOTE(review): extraction dropped lines (e.g. the early-return when pStats
 * is NULL, and the final return); code preserved verbatim.
 */
2656 STATIC struct net_device_stats *
2657 bcm5700_get_stats(struct net_device *dev)
2659 PUM_DEVICE_BLOCK pUmDevice = (PUM_DEVICE_BLOCK)dev->priv;
2660 PLM_DEVICE_BLOCK pDevice = (PLM_DEVICE_BLOCK) pUmDevice;
2661 PT3_STATS_BLOCK pStats = (PT3_STATS_BLOCK) pDevice->pStatsBlkVirt;
2662 struct net_device_stats *p_netstats = &pUmDevice->stats;
2667 /* Get stats from LM */
2668 p_netstats->rx_packets =
2669 MM_GETSTATS(pStats->ifHCInUcastPkts) +
2670 MM_GETSTATS(pStats->ifHCInMulticastPkts) +
2671 MM_GETSTATS(pStats->ifHCInBroadcastPkts);
2672 p_netstats->tx_packets =
2673 MM_GETSTATS(pStats->ifHCOutUcastPkts) +
2674 MM_GETSTATS(pStats->ifHCOutMulticastPkts) +
2675 MM_GETSTATS(pStats->ifHCOutBroadcastPkts);
2676 p_netstats->rx_bytes = MM_GETSTATS(pStats->ifHCInOctets);
2677 p_netstats->tx_bytes = MM_GETSTATS(pStats->ifHCOutOctets);
2678 p_netstats->tx_errors =
2679 MM_GETSTATS(pStats->dot3StatsInternalMacTransmitErrors) +
2680 MM_GETSTATS(pStats->dot3StatsCarrierSenseErrors) +
2681 MM_GETSTATS(pStats->ifOutDiscards) +
2682 MM_GETSTATS(pStats->ifOutErrors);
2683 p_netstats->multicast = MM_GETSTATS(pStats->ifHCInMulticastPkts);
2684 p_netstats->collisions = MM_GETSTATS(pStats->etherStatsCollisions);
2685 p_netstats->rx_length_errors =
2686 MM_GETSTATS(pStats->dot3StatsFramesTooLong) +
2687 MM_GETSTATS(pStats->etherStatsUndersizePkts);
2688 p_netstats->rx_over_errors = MM_GETSTATS(pStats->nicNoMoreRxBDs);
2689 p_netstats->rx_frame_errors =
2690 MM_GETSTATS(pStats->dot3StatsAlignmentErrors);
/* CRC / aggregate RX errors use the software-accumulated PHY counters. */
2691 p_netstats->rx_crc_errors = (unsigned long)
2692 bcm5700_crc_count(pUmDevice);
2693 p_netstats->rx_errors = (unsigned long)
2694 bcm5700_rx_err_count(pUmDevice);
2696 p_netstats->tx_aborted_errors = MM_GETSTATS(pStats->ifOutDiscards);
2697 p_netstats->tx_carrier_errors =
2698 MM_GETSTATS(pStats->dot3StatsCarrierSenseErrors);
/*
 * b57_suspend_chip() - quiesce the device for diagnostics/suspend:
 * mask interrupts, mark the carrier down, stop the TX queue, kill the
 * replenish tasklet, wait for any in-flight poll to finish, then flag the
 * device suspended and reset the chip via LM_ShutdownChip(SUSPEND_RESET).
 */
2704 b57_suspend_chip(UM_DEVICE_BLOCK *pUmDevice)
2706 LM_DEVICE_BLOCK *pDevice = &pUmDevice->lm_dev;
2708 if (pUmDevice->opened) {
2709 bcm5700_intr_off(pUmDevice);
2710 netif_carrier_off(pUmDevice->dev);
2711 netif_stop_queue(pUmDevice->dev);
2713 tasklet_kill(&pUmDevice->tasklet);
2715 bcm5700_poll_wait(pUmDevice);
/* suspended must be set before the reset so the timer/tasklet paths
 * skip hardware access. */
2717 pUmDevice->suspended = 1;
2718 LM_ShutdownChip(pDevice, LM_SUSPEND_RESET);
/*
 * b57_resume_chip() - undo b57_suspend_chip(): clear the suspended flag and
 * either fully re-initialize through bcm5700_reset() when the interface is
 * open, or (presumably the else branch — the `else` line itself was lost in
 * extraction) just issue a shutdown reset when it is closed.
 */
2722 b57_resume_chip(UM_DEVICE_BLOCK *pUmDevice)
2724 LM_DEVICE_BLOCK *pDevice = &pUmDevice->lm_dev;
2726 if (pUmDevice->suspended) {
2727 pUmDevice->suspended = 0;
2728 if (pUmDevice->opened) {
2729 bcm5700_reset(pUmDevice->dev);
2732 LM_ShutdownChip(pDevice, LM_SHUTDOWN_RESET);
2737 /* Returns 0 on failure, 1 on success */
/*
 * b57_test_intr() - ethtool interrupt self-test.
 * Arms intr_test, forces an interrupt through the host-coalescing NOW bits
 * (presumably HOST_COALESCE_NOW — the flag name is on a line lost to
 * extraction), then polls up to 10 times (1 ms apart, re-forcing each
 * iteration) for the ISR to set intr_test_result.
 */
2739 b57_test_intr(UM_DEVICE_BLOCK *pUmDevice)
2741 LM_DEVICE_BLOCK *pDevice = &pUmDevice->lm_dev;
2744 if (!pUmDevice->opened)
2746 pUmDevice->intr_test_result = 0;
2747 pUmDevice->intr_test = 1;
2749 REG_WR(pDevice, HostCoalesce.Mode,
2750 pDevice->CoalesceMode | HOST_COALESCE_ENABLE |
2753 for (j = 0; j < 10; j++) {
2754 if (pUmDevice->intr_test_result){
2758 REG_WR(pDevice, HostCoalesce.Mode,
2759 pDevice->CoalesceMode | HOST_COALESCE_ENABLE |
2762 MM_Sleep(pDevice, 1);
2765 return pUmDevice->intr_test_result;
2771 #ifdef ETHTOOL_GSTRINGS
/*
 * ethtool ETHTOOL_GSTATS support tables.
 * bcm5700_stats_str_arr[] holds the user-visible statistic names;
 * bcm5700_stats_offset_arr[] holds, for each name, the 64-bit-word offset of
 * the corresponding counter in T3_STATS_BLOCK. The two arrays are parallel:
 * index RX_CRC_IDX (5, "rx_crc_errors") and RX_MAC_ERR_IDX (14,
 * "rx_mac_errors") have no hardware counter and are computed in the GSTATS
 * handler (their offset entries were presumably 0 — those lines are missing
 * from this extraction). SWAP_DWORD_64 fixes counter word order on
 * big-endian hosts.
 */
2773 #define ETH_NUM_STATS 30
2774 #define RX_CRC_IDX 5
2775 #define RX_MAC_ERR_IDX 14
2778 char string[ETH_GSTRING_LEN];
2779 } bcm5700_stats_str_arr[ETH_NUM_STATS] = {
2780 { "rx_unicast_packets" },
2781 { "rx_multicast_packets" },
2782 { "rx_broadcast_packets" },
2785 { "rx_crc_errors" }, /* this needs to be calculated */
2786 { "rx_align_errors" },
2787 { "rx_xon_frames" },
2788 { "rx_xoff_frames" },
2789 { "rx_long_frames" },
2790 { "rx_short_frames" },
2794 { "rx_mac_errors" }, /* this needs to be calculated */
2795 { "tx_unicast_packets" },
2796 { "tx_multicast_packets" },
2797 { "tx_broadcast_packets" },
2800 { "tx_single_collisions" },
2801 { "tx_multi_collisions" },
2802 { "tx_total_collisions" },
2803 { "tx_excess_collisions" },
2804 { "tx_late_collisions" },
2805 { "tx_xon_frames" },
2806 { "tx_xoff_frames" },
2807 { "tx_internal_mac_errors" },
2808 { "tx_carrier_errors" },
/* Offset of a stats-block counter expressed in 64-bit words. */
2812 #define STATS_OFFSET(offset_name) ((OFFSETOF(T3_STATS_BLOCK, offset_name)) / sizeof(uint64_t))
2815 #define SWAP_DWORD_64(x) (x)
2817 #define SWAP_DWORD_64(x) ((x << 32) | (x >> 32))
2820 unsigned long bcm5700_stats_offset_arr[ETH_NUM_STATS] = {
2821 STATS_OFFSET(ifHCInUcastPkts),
2822 STATS_OFFSET(ifHCInMulticastPkts),
2823 STATS_OFFSET(ifHCInBroadcastPkts),
2824 STATS_OFFSET(ifHCInOctets),
2825 STATS_OFFSET(etherStatsFragments),
2827 STATS_OFFSET(dot3StatsAlignmentErrors),
2828 STATS_OFFSET(xonPauseFramesReceived),
2829 STATS_OFFSET(xoffPauseFramesReceived),
2830 STATS_OFFSET(dot3StatsFramesTooLong),
2831 STATS_OFFSET(etherStatsUndersizePkts),
2832 STATS_OFFSET(etherStatsJabbers),
2833 STATS_OFFSET(ifInDiscards),
2834 STATS_OFFSET(ifInErrors),
2836 STATS_OFFSET(ifHCOutUcastPkts),
2837 STATS_OFFSET(ifHCOutMulticastPkts),
2838 STATS_OFFSET(ifHCOutBroadcastPkts),
2839 STATS_OFFSET(ifHCOutOctets),
2840 STATS_OFFSET(dot3StatsDeferredTransmissions),
2841 STATS_OFFSET(dot3StatsSingleCollisionFrames),
2842 STATS_OFFSET(dot3StatsMultipleCollisionFrames),
2843 STATS_OFFSET(etherStatsCollisions),
2844 STATS_OFFSET(dot3StatsExcessiveCollisions),
2845 STATS_OFFSET(dot3StatsLateCollisions),
2846 STATS_OFFSET(outXonSent),
2847 STATS_OFFSET(outXoffSent),
2848 STATS_OFFSET(dot3StatsInternalMacTransmitErrors),
2849 STATS_OFFSET(dot3StatsCarrierSenseErrors),
2850 STATS_OFFSET(ifOutErrors),
2853 #endif /* ETHTOOL_GSTRINGS */
/*
 * ethtool ETHTOOL_TEST support: names of the six self-tests (in the order
 * the ETHTOOL_TEST handler runs them) plus the extern declarations of the
 * diagnostic routines implemented elsewhere in the driver.
 */
2856 #define ETH_NUM_TESTS 6
2858 char string[ETH_GSTRING_LEN];
2859 } bcm5700_tests_str_arr[ETH_NUM_TESTS] = {
2860 { "register test (offline)" },
2861 { "memory test (offline)" },
2862 { "loopback test (offline)" },
2863 { "nvram test (online)" },
2864 { "interrupt test (online)" },
2865 { "link test (online)" },
2868 extern LM_STATUS b57_test_registers(UM_DEVICE_BLOCK *pUmDevice);
2869 extern LM_STATUS b57_test_memory(UM_DEVICE_BLOCK *pUmDevice);
2870 extern LM_STATUS b57_test_nvram(UM_DEVICE_BLOCK *pUmDevice);
2871 extern LM_STATUS b57_test_link(UM_DEVICE_BLOCK *pUmDevice);
2872 extern LM_STATUS b57_test_loopback(UM_DEVICE_BLOCK *pUmDevice, int looptype, int linespeed);
2875 #ifdef ETHTOOL_GREGS
2876 #if (LINUX_VERSION_CODE >= 0x02040f)
/*
 * bcm5700_get_reg_blk() - copy the register window [start, end) into *buf
 * for ETHTOOL_GREGS, advancing *buf past the copied words. The (lost in
 * extraction) trailing parameter appears to select "reserved hole" mode:
 * that path zero-fills the span instead of reading hardware. On
 * 5705-and-later chips three sub-ranges (0x3400-0x3c00, 0x5400-0x5800,
 * 0x6400-0x6800) are skipped/zeroed rather than read.
 */
2878 bcm5700_get_reg_blk(UM_DEVICE_BLOCK *pUmDevice, u32 **buf, u32 start, u32 end,
2882 LM_DEVICE_BLOCK *pDevice = &pUmDevice->lm_dev;
/* Hole mode: zero the span and advance the cursor by (end-start)/4 words. */
2885 memset(*buf, 0, end - start);
2886 *buf = *buf + (end - start)/4;
2889 for (offset = start; offset < end; offset+=4, *buf = *buf + 1) {
2890 if (T3_ASIC_IS_5705_BEYOND(pDevice->ChipRevId)){
2891 if (((offset >= 0x3400) && (offset < 0x3c00)) ||
2892 ((offset >= 0x5400) && (offset < 0x5800)) ||
2893 ((offset >= 0x6400) && (offset < 0x6800))) {
2898 **buf = REG_RD_OFFSET(pDevice, offset);
2904 static int netdev_ethtool_ioctl(struct net_device *dev, void *useraddr)
2906 struct ethtool_cmd ethcmd;
2907 PUM_DEVICE_BLOCK pUmDevice = (PUM_DEVICE_BLOCK)dev->priv;
2908 PLM_DEVICE_BLOCK pDevice = (PLM_DEVICE_BLOCK) pUmDevice;
2910 if (mm_copy_from_user(ðcmd, useraddr, sizeof(ethcmd)))
2913 switch (ethcmd.cmd) {
2914 #ifdef ETHTOOL_GDRVINFO
2915 case ETHTOOL_GDRVINFO: {
2916 struct ethtool_drvinfo info = {ETHTOOL_GDRVINFO};
2918 strcpy(info.driver, bcm5700_driver);
2919 #ifdef INCLUDE_5701_AX_FIX
2920 if(pDevice->ChipRevId == T3_CHIP_ID_5701_A0) {
2921 extern int t3FwReleaseMajor;
2922 extern int t3FwReleaseMinor;
2923 extern int t3FwReleaseFix;
2925 sprintf(info.fw_version, "%i.%i.%i",
2926 t3FwReleaseMajor, t3FwReleaseMinor,
2930 strcpy(info.fw_version, pDevice->BootCodeVer);
2931 strcpy(info.version, bcm5700_version);
2932 #if (LINUX_VERSION_CODE <= 0x020422)
2933 strcpy(info.bus_info, pUmDevice->pdev->slot_name);
2935 strcpy(info.bus_info, pci_name(pUmDevice->pdev));
2940 #ifdef ETHTOOL_GEEPROM
2941 BCM_EEDUMP_LEN(&info, pDevice->NvramSize);
2943 #ifdef ETHTOOL_GREGS
2944 /* dump everything, including holes in the register space */
2945 info.regdump_len = 0x6c00;
2947 #ifdef ETHTOOL_GSTATS
2948 info.n_stats = ETH_NUM_STATS;
2951 info.testinfo_len = ETH_NUM_TESTS;
2953 if (mm_copy_to_user(useraddr, &info, sizeof(info)))
2958 case ETHTOOL_GSET: {
2959 if ((pDevice->TbiFlags & ENABLE_TBI_FLAG)||
2960 (pDevice->PhyFlags & PHY_IS_FIBER)) {
2962 (SUPPORTED_1000baseT_Full |
2964 ethcmd.supported |= SUPPORTED_FIBRE;
2965 ethcmd.port = PORT_FIBRE;
2969 (SUPPORTED_10baseT_Half |
2970 SUPPORTED_10baseT_Full |
2971 SUPPORTED_100baseT_Half |
2972 SUPPORTED_100baseT_Full |
2973 SUPPORTED_1000baseT_Half |
2974 SUPPORTED_1000baseT_Full |
2976 ethcmd.supported |= SUPPORTED_TP;
2977 ethcmd.port = PORT_TP;
2980 ethcmd.transceiver = XCVR_INTERNAL;
2981 ethcmd.phy_address = 0;
2983 if (pDevice->LineSpeed == LM_LINE_SPEED_1000MBPS)
2984 ethcmd.speed = SPEED_1000;
2985 else if (pDevice->LineSpeed == LM_LINE_SPEED_100MBPS)
2986 ethcmd.speed = SPEED_100;
2987 else if (pDevice->LineSpeed == LM_LINE_SPEED_10MBPS)
2988 ethcmd.speed = SPEED_10;
2992 if (pDevice->DuplexMode == LM_DUPLEX_MODE_FULL)
2993 ethcmd.duplex = DUPLEX_FULL;
2995 ethcmd.duplex = DUPLEX_HALF;
2997 if (pDevice->DisableAutoNeg == FALSE) {
2998 ethcmd.autoneg = AUTONEG_ENABLE;
2999 ethcmd.advertising = ADVERTISED_Autoneg;
3000 if ((pDevice->TbiFlags & ENABLE_TBI_FLAG) ||
3001 (pDevice->PhyFlags & PHY_IS_FIBER)) {
3002 ethcmd.advertising |=
3003 ADVERTISED_1000baseT_Full |
3007 ethcmd.advertising |=
3009 if (pDevice->advertising &
3010 PHY_AN_AD_10BASET_HALF) {
3012 ethcmd.advertising |=
3013 ADVERTISED_10baseT_Half;
3015 if (pDevice->advertising &
3016 PHY_AN_AD_10BASET_FULL) {
3018 ethcmd.advertising |=
3019 ADVERTISED_10baseT_Full;
3021 if (pDevice->advertising &
3022 PHY_AN_AD_100BASETX_HALF) {
3024 ethcmd.advertising |=
3025 ADVERTISED_100baseT_Half;
3027 if (pDevice->advertising &
3028 PHY_AN_AD_100BASETX_FULL) {
3030 ethcmd.advertising |=
3031 ADVERTISED_100baseT_Full;
3033 if (pDevice->advertising1000 &
3034 BCM540X_AN_AD_1000BASET_HALF) {
3036 ethcmd.advertising |=
3037 ADVERTISED_1000baseT_Half;
3039 if (pDevice->advertising1000 &
3040 BCM540X_AN_AD_1000BASET_FULL) {
3042 ethcmd.advertising |=
3043 ADVERTISED_1000baseT_Full;
3048 ethcmd.autoneg = AUTONEG_DISABLE;
3049 ethcmd.advertising = 0;
3052 ethcmd.maxtxpkt = pDevice->TxMaxCoalescedFrames;
3053 ethcmd.maxrxpkt = pDevice->RxMaxCoalescedFrames;
3055 if(mm_copy_to_user(useraddr, ðcmd, sizeof(ethcmd)))
3059 case ETHTOOL_SSET: {
3060 unsigned long flags;
3062 if(!capable(CAP_NET_ADMIN))
3064 if (ethcmd.autoneg == AUTONEG_ENABLE) {
3065 pDevice->RequestedLineSpeed = LM_LINE_SPEED_AUTO;
3066 pDevice->RequestedDuplexMode = LM_DUPLEX_MODE_UNKNOWN;
3067 pDevice->DisableAutoNeg = FALSE;
3070 if (ethcmd.speed == SPEED_1000 &&
3071 pDevice->PhyFlags & PHY_NO_GIGABIT)
3074 if (ethcmd.speed == SPEED_1000 &&
3075 (pDevice->TbiFlags & ENABLE_TBI_FLAG ||
3076 pDevice->PhyFlags & PHY_IS_FIBER ) ) {
3078 pDevice->RequestedLineSpeed =
3079 LM_LINE_SPEED_1000MBPS;
3081 pDevice->RequestedDuplexMode =
3082 LM_DUPLEX_MODE_FULL;
3084 else if (ethcmd.speed == SPEED_100 &&
3085 !(pDevice->TbiFlags & ENABLE_TBI_FLAG) &&
3086 !(pDevice->PhyFlags & PHY_IS_FIBER)) {
3088 pDevice->RequestedLineSpeed =
3089 LM_LINE_SPEED_100MBPS;
3091 else if (ethcmd.speed == SPEED_10 &&
3092 !(pDevice->TbiFlags & ENABLE_TBI_FLAG) &&
3093 !(pDevice->PhyFlags & PHY_IS_FIBER)) {
3095 pDevice->RequestedLineSpeed =
3096 LM_LINE_SPEED_10MBPS;
3102 pDevice->DisableAutoNeg = TRUE;
3103 if (ethcmd.duplex == DUPLEX_FULL) {
3104 pDevice->RequestedDuplexMode =
3105 LM_DUPLEX_MODE_FULL;
3108 if (!(pDevice->TbiFlags & ENABLE_TBI_FLAG) &&
3109 !(pDevice->PhyFlags & PHY_IS_FIBER) ) {
3111 pDevice->RequestedDuplexMode =
3112 LM_DUPLEX_MODE_HALF;
3116 if (netif_running(dev)) {
3117 BCM5700_PHY_LOCK(pUmDevice, flags);
3118 LM_SetupPhy(pDevice);
3119 BCM5700_PHY_UNLOCK(pUmDevice, flags);
3125 case ETHTOOL_GWOL: {
3126 struct ethtool_wolinfo wol = {ETHTOOL_GWOL};
3128 if (((pDevice->TbiFlags & ENABLE_TBI_FLAG) &&
3129 !(pDevice->Flags & FIBER_WOL_CAPABLE_FLAG)) ||
3130 (pDevice->Flags & DISABLE_D3HOT_FLAG)) {
3135 wol.supported = WAKE_MAGIC;
3136 if (pDevice->WakeUpMode == LM_WAKE_UP_MODE_MAGIC_PACKET)
3138 wol.wolopts = WAKE_MAGIC;
3144 if (mm_copy_to_user(useraddr, &wol, sizeof(wol)))
3148 case ETHTOOL_SWOL: {
3149 struct ethtool_wolinfo wol;
3151 if(!capable(CAP_NET_ADMIN))
3153 if (mm_copy_from_user(&wol, useraddr, sizeof(wol)))
3155 if ((((pDevice->TbiFlags & ENABLE_TBI_FLAG) &&
3156 !(pDevice->Flags & FIBER_WOL_CAPABLE_FLAG)) ||
3157 (pDevice->Flags & DISABLE_D3HOT_FLAG)) &&
3162 if ((wol.wolopts & ~WAKE_MAGIC) != 0) {
3165 if (wol.wolopts & WAKE_MAGIC) {
3166 pDevice->WakeUpModeCap = LM_WAKE_UP_MODE_MAGIC_PACKET;
3167 pDevice->WakeUpMode = LM_WAKE_UP_MODE_MAGIC_PACKET;
3170 pDevice->WakeUpModeCap = LM_WAKE_UP_MODE_NONE;
3171 pDevice->WakeUpMode = LM_WAKE_UP_MODE_NONE;
3177 #ifdef ETHTOOL_GLINK
3178 case ETHTOOL_GLINK: {
3179 struct ethtool_value edata = {ETHTOOL_GLINK};
3181 /* workaround for DHCP using ifup script */
3182 /* ifup only waits for 5 seconds for link up */
3183 /* NIC may take more than 5 seconds to establish link */
3184 if ((pUmDevice->delayed_link_ind > 0) &&
3185 delay_link[pUmDevice->index])
3188 if (pDevice->LinkStatus == LM_STATUS_LINK_ACTIVE) {
3194 if (mm_copy_to_user(useraddr, &edata, sizeof(edata)))
3199 #ifdef ETHTOOL_NWAY_RST
3200 case ETHTOOL_NWAY_RST: {
3202 unsigned long flags;
3204 if(!capable(CAP_NET_ADMIN))
3206 if (pDevice->DisableAutoNeg) {
3209 if (!netif_running(dev))
3211 BCM5700_PHY_LOCK(pUmDevice, flags);
3212 if (pDevice->TbiFlags & ENABLE_TBI_FLAG) {
3213 pDevice->RequestedLineSpeed = LM_LINE_SPEED_1000MBPS;
3214 pDevice->DisableAutoNeg = TRUE;
3215 LM_SetupPhy(pDevice);
3217 pDevice->RequestedLineSpeed = LM_LINE_SPEED_AUTO;
3218 pDevice->DisableAutoNeg = FALSE;
3219 LM_SetupPhy(pDevice);
3222 if ((T3_ASIC_REV(pDevice->ChipRevId) ==
3223 T3_ASIC_REV_5703) ||
3224 (T3_ASIC_REV(pDevice->ChipRevId) ==
3225 T3_ASIC_REV_5704) ||
3226 (T3_ASIC_REV(pDevice->ChipRevId) ==
3229 LM_ResetPhy(pDevice);
3230 LM_SetupPhy(pDevice);
3232 pDevice->PhyFlags &= ~PHY_FIBER_FALLBACK;
3233 LM_ReadPhy(pDevice, PHY_CTRL_REG, &phyctrl);
3234 LM_WritePhy(pDevice, PHY_CTRL_REG, phyctrl |
3235 PHY_CTRL_AUTO_NEG_ENABLE |
3236 PHY_CTRL_RESTART_AUTO_NEG);
3238 BCM5700_PHY_UNLOCK(pUmDevice, flags);
3242 #ifdef ETHTOOL_GEEPROM
3243 case ETHTOOL_GEEPROM: {
3244 struct ethtool_eeprom eeprom;
3246 LM_UINT32 buf1[64/4];
3247 int i, j, offset, len;
3249 if (mm_copy_from_user(&eeprom, useraddr, sizeof(eeprom)))
3252 if (eeprom.offset >= pDevice->NvramSize)
3255 /* maximum data limited */
3256 /* to read more, call again with a different offset */
3257 if (eeprom.len > 0x800) {
3259 if (mm_copy_to_user(useraddr, &eeprom, sizeof(eeprom)))
3263 if (eeprom.len > 64) {
3264 buf = kmalloc(eeprom.len, GFP_KERNEL);
3271 useraddr += offsetof(struct ethtool_eeprom, data);
3273 offset = eeprom.offset;
3276 offset &= 0xfffffffc;
3277 len += (offset & 3);
3279 len = (len + 3) & 0xfffffffc;
3280 for (i = 0, j = 0; j < len; i++, j += 4) {
3281 if (LM_NvramRead(pDevice, offset + j, buf + i) !=
3282 LM_STATUS_SUCCESS) {
3287 buf += (eeprom.offset & 3);
3288 i = mm_copy_to_user(useraddr, buf, eeprom.len);
3290 if (eeprom.len > 64) {
3297 case ETHTOOL_SEEPROM: {
3298 struct ethtool_eeprom eeprom;
3299 LM_UINT32 buf[64/4];
3302 if(!capable(CAP_NET_ADMIN))
3304 if (mm_copy_from_user(&eeprom, useraddr, sizeof(eeprom)))
3307 if ((eeprom.offset & 3) || (eeprom.len & 3) ||
3308 (eeprom.offset >= pDevice->NvramSize)) {
3312 if ((eeprom.offset + eeprom.len) >= pDevice->NvramSize) {
3313 eeprom.len = pDevice->NvramSize - eeprom.offset;
3316 useraddr += offsetof(struct ethtool_eeprom, data);
3319 offset = eeprom.offset;
3325 if (mm_copy_from_user(&buf, useraddr, i))
3328 bcm5700_intr_off(pUmDevice);
3329 /* Prevent race condition on Grc.Mode register */
3330 bcm5700_poll_wait(pUmDevice);
3332 if (LM_NvramWriteBlock(pDevice, offset, buf, i/4) !=
3333 LM_STATUS_SUCCESS) {
3334 bcm5700_intr_on(pUmDevice);
3337 bcm5700_intr_on(pUmDevice);
3345 #ifdef ETHTOOL_GREGS
3346 #if (LINUX_VERSION_CODE >= 0x02040f)
3347 case ETHTOOL_GREGS: {
3348 struct ethtool_regs eregs;
3349 LM_UINT32 *buf, *buf1;
3352 if(!capable(CAP_NET_ADMIN))
3354 if (pDevice->Flags & UNDI_FIX_FLAG)
3356 if (mm_copy_from_user(&eregs, useraddr, sizeof(eregs)))
3358 if (eregs.len > 0x6c00)
3360 eregs.version = 0x0;
3361 if (mm_copy_to_user(useraddr, &eregs, sizeof(eregs)))
3363 buf = buf1 = kmalloc(eregs.len, GFP_KERNEL);
3366 bcm5700_get_reg_blk(pUmDevice, &buf, 0, 0xb0, 0);
3367 bcm5700_get_reg_blk(pUmDevice, &buf, 0xb0, 0x200, 1);
3368 bcm5700_get_reg_blk(pUmDevice, &buf, 0x200, 0x8f0, 0);
3369 bcm5700_get_reg_blk(pUmDevice, &buf, 0x8f0, 0xc00, 1);
3370 bcm5700_get_reg_blk(pUmDevice, &buf, 0xc00, 0xce0, 0);
3371 bcm5700_get_reg_blk(pUmDevice, &buf, 0xce0, 0x1000, 1);
3372 bcm5700_get_reg_blk(pUmDevice, &buf, 0x1000, 0x1004, 0);
3373 bcm5700_get_reg_blk(pUmDevice, &buf, 0x1004, 0x1400, 1);
3374 bcm5700_get_reg_blk(pUmDevice, &buf, 0x1400, 0x1480, 0);
3375 bcm5700_get_reg_blk(pUmDevice, &buf, 0x1480, 0x1800, 1);
3376 bcm5700_get_reg_blk(pUmDevice, &buf, 0x1800, 0x1848, 0);
3377 bcm5700_get_reg_blk(pUmDevice, &buf, 0x1848, 0x1c00, 1);
3378 bcm5700_get_reg_blk(pUmDevice, &buf, 0x1c00, 0x1c04, 0);
3379 bcm5700_get_reg_blk(pUmDevice, &buf, 0x1c04, 0x2000, 1);
3380 bcm5700_get_reg_blk(pUmDevice, &buf, 0x2000, 0x225c, 0);
3381 bcm5700_get_reg_blk(pUmDevice, &buf, 0x225c, 0x2400, 1);
3382 bcm5700_get_reg_blk(pUmDevice, &buf, 0x2400, 0x24c4, 0);
3383 bcm5700_get_reg_blk(pUmDevice, &buf, 0x24c4, 0x2800, 1);
3384 bcm5700_get_reg_blk(pUmDevice, &buf, 0x2800, 0x2804, 0);
3385 bcm5700_get_reg_blk(pUmDevice, &buf, 0x2804, 0x2c00, 1);
3386 bcm5700_get_reg_blk(pUmDevice, &buf, 0x2c00, 0x2c20, 0);
3387 bcm5700_get_reg_blk(pUmDevice, &buf, 0x2c20, 0x3000, 1);
3388 bcm5700_get_reg_blk(pUmDevice, &buf, 0x3000, 0x3014, 0);
3389 bcm5700_get_reg_blk(pUmDevice, &buf, 0x3014, 0x3400, 1);
3390 bcm5700_get_reg_blk(pUmDevice, &buf, 0x3400, 0x3408, 0);
3391 bcm5700_get_reg_blk(pUmDevice, &buf, 0x3408, 0x3800, 1);
3392 bcm5700_get_reg_blk(pUmDevice, &buf, 0x3800, 0x3808, 0);
3393 bcm5700_get_reg_blk(pUmDevice, &buf, 0x3808, 0x3c00, 1);
3394 bcm5700_get_reg_blk(pUmDevice, &buf, 0x3c00, 0x3d00, 0);
3395 bcm5700_get_reg_blk(pUmDevice, &buf, 0x3d00, 0x4000, 1);
3396 bcm5700_get_reg_blk(pUmDevice, &buf, 0x4000, 0x4010, 0);
3397 bcm5700_get_reg_blk(pUmDevice, &buf, 0x4010, 0x4400, 1);
3398 bcm5700_get_reg_blk(pUmDevice, &buf, 0x4400, 0x4458, 0);
3399 bcm5700_get_reg_blk(pUmDevice, &buf, 0x4458, 0x4800, 1);
3400 bcm5700_get_reg_blk(pUmDevice, &buf, 0x4800, 0x4808, 0);
3401 bcm5700_get_reg_blk(pUmDevice, &buf, 0x4808, 0x4c00, 1);
3402 bcm5700_get_reg_blk(pUmDevice, &buf, 0x4c00, 0x4c08, 0);
3403 bcm5700_get_reg_blk(pUmDevice, &buf, 0x4c08, 0x5000, 1);
3404 bcm5700_get_reg_blk(pUmDevice, &buf, 0x5000, 0x5050, 0);
3405 bcm5700_get_reg_blk(pUmDevice, &buf, 0x5050, 0x5400, 1);
3406 bcm5700_get_reg_blk(pUmDevice, &buf, 0x5400, 0x5450, 0);
3407 bcm5700_get_reg_blk(pUmDevice, &buf, 0x5450, 0x5800, 1);
3408 bcm5700_get_reg_blk(pUmDevice, &buf, 0x5800, 0x5a10, 0);
3409 bcm5700_get_reg_blk(pUmDevice, &buf, 0x5a10, 0x6000, 1);
3410 bcm5700_get_reg_blk(pUmDevice, &buf, 0x6000, 0x600c, 0);
3411 bcm5700_get_reg_blk(pUmDevice, &buf, 0x600c, 0x6400, 1);
3412 bcm5700_get_reg_blk(pUmDevice, &buf, 0x6400, 0x6404, 0);
3413 bcm5700_get_reg_blk(pUmDevice, &buf, 0x6404, 0x6800, 1);
3414 bcm5700_get_reg_blk(pUmDevice, &buf, 0x6800, 0x6848, 0);
3415 bcm5700_get_reg_blk(pUmDevice, &buf, 0x6848, 0x6c00, 1);
3417 i = mm_copy_to_user(useraddr + sizeof(eregs), buf1, eregs.len);
3425 #ifdef ETHTOOL_GPAUSEPARAM
3426 case ETHTOOL_GPAUSEPARAM: {
3427 struct ethtool_pauseparam epause = { ETHTOOL_GPAUSEPARAM };
3429 if (!pDevice->DisableAutoNeg) {
3430 epause.autoneg = (pDevice->FlowControlCap &
3431 LM_FLOW_CONTROL_AUTO_PAUSE) != 0;
3437 (pDevice->FlowControl &
3438 LM_FLOW_CONTROL_RECEIVE_PAUSE) != 0;
3440 (pDevice->FlowControl &
3441 LM_FLOW_CONTROL_TRANSMIT_PAUSE) != 0;
3442 if (mm_copy_to_user(useraddr, &epause, sizeof(epause)))
3447 case ETHTOOL_SPAUSEPARAM: {
3448 struct ethtool_pauseparam epause;
3449 unsigned long flags;
3451 if(!capable(CAP_NET_ADMIN))
3453 if (mm_copy_from_user(&epause, useraddr, sizeof(epause)))
3455 pDevice->FlowControlCap = 0;
3456 if (epause.autoneg && !pDevice->DisableAutoNeg) {
3457 pDevice->FlowControlCap |= LM_FLOW_CONTROL_AUTO_PAUSE;
3459 if (epause.rx_pause) {
3460 pDevice->FlowControlCap |=
3461 LM_FLOW_CONTROL_RECEIVE_PAUSE;
3463 if (epause.tx_pause) {
3464 pDevice->FlowControlCap |=
3465 LM_FLOW_CONTROL_TRANSMIT_PAUSE;
3467 if (netif_running(dev)) {
3468 BCM5700_PHY_LOCK(pUmDevice, flags);
3469 LM_SetupPhy(pDevice);
3470 BCM5700_PHY_UNLOCK(pUmDevice, flags);
3476 #ifdef ETHTOOL_GRXCSUM
3477 case ETHTOOL_GRXCSUM: {
3478 struct ethtool_value edata = { ETHTOOL_GRXCSUM };
3481 (pDevice->TaskToOffload &
3482 LM_TASK_OFFLOAD_RX_TCP_CHECKSUM) != 0;
3483 if (mm_copy_to_user(useraddr, &edata, sizeof(edata)))
3488 case ETHTOOL_SRXCSUM: {
3489 struct ethtool_value edata;
3491 if(!capable(CAP_NET_ADMIN))
3493 if (mm_copy_from_user(&edata, useraddr, sizeof(edata)))
3496 if (!(pDevice->TaskOffloadCap &
3497 LM_TASK_OFFLOAD_TX_TCP_CHECKSUM)) {
3501 pDevice->TaskToOffload |=
3502 LM_TASK_OFFLOAD_RX_TCP_CHECKSUM |
3503 LM_TASK_OFFLOAD_RX_UDP_CHECKSUM;
3506 pDevice->TaskToOffload &=
3507 ~(LM_TASK_OFFLOAD_RX_TCP_CHECKSUM |
3508 LM_TASK_OFFLOAD_RX_UDP_CHECKSUM);
3512 case ETHTOOL_GTXCSUM: {
3513 struct ethtool_value edata = { ETHTOOL_GTXCSUM };
3516 (dev->features & get_csum_flag( pDevice->ChipRevId)) != 0;
3517 if (mm_copy_to_user(useraddr, &edata, sizeof(edata)))
3522 case ETHTOOL_STXCSUM: {
3523 struct ethtool_value edata;
3525 if(!capable(CAP_NET_ADMIN))
3527 if (mm_copy_from_user(&edata, useraddr, sizeof(edata)))
3530 if (!(pDevice->TaskOffloadCap &
3531 LM_TASK_OFFLOAD_TX_TCP_CHECKSUM)) {
3535 dev->features |= get_csum_flag( pDevice->ChipRevId);
3536 pDevice->TaskToOffload |=
3537 LM_TASK_OFFLOAD_TX_TCP_CHECKSUM |
3538 LM_TASK_OFFLOAD_TX_UDP_CHECKSUM;
3541 dev->features &= ~get_csum_flag( pDevice->ChipRevId);
3542 pDevice->TaskToOffload &=
3543 ~(LM_TASK_OFFLOAD_TX_TCP_CHECKSUM |
3544 LM_TASK_OFFLOAD_TX_UDP_CHECKSUM);
3549 struct ethtool_value edata = { ETHTOOL_GSG };
3552 (dev->features & NETIF_F_SG) != 0;
3553 if (mm_copy_to_user(useraddr, &edata, sizeof(edata)))
3558 struct ethtool_value edata;
3560 if(!capable(CAP_NET_ADMIN))
3562 if (mm_copy_from_user(&edata, useraddr, sizeof(edata)))
3565 dev->features |= NETIF_F_SG;
3568 dev->features &= ~NETIF_F_SG;
3573 #ifdef ETHTOOL_GRINGPARAM
3574 case ETHTOOL_GRINGPARAM: {
3575 struct ethtool_ringparam ering = { ETHTOOL_GRINGPARAM };
3577 ering.rx_max_pending = T3_STD_RCV_RCB_ENTRY_COUNT - 1;
3578 ering.rx_pending = pDevice->RxStdDescCnt;
3579 ering.rx_mini_max_pending = 0;
3580 ering.rx_mini_pending = 0;
3581 #if T3_JUMBO_RCV_RCB_ENTRY_COUNT
3582 ering.rx_jumbo_max_pending = T3_JUMBO_RCV_RCB_ENTRY_COUNT - 1;
3583 ering.rx_jumbo_pending = pDevice->RxJumboDescCnt;
3585 ering.rx_jumbo_max_pending = 0;
3586 ering.rx_jumbo_pending = 0;
3588 ering.tx_max_pending = MAX_TX_PACKET_DESC_COUNT - 1;
3589 ering.tx_pending = pDevice->TxPacketDescCnt;
3590 if (mm_copy_to_user(useraddr, &ering, sizeof(ering)))
3595 #ifdef ETHTOOL_PHYS_ID
3596 case ETHTOOL_PHYS_ID: {
3597 struct ethtool_value edata;
3599 if(!capable(CAP_NET_ADMIN))
3601 if (mm_copy_from_user(&edata, useraddr, sizeof(edata)))
3603 if (LM_BlinkLED(pDevice, edata.data) == LM_STATUS_SUCCESS)
3608 #ifdef ETHTOOL_GSTRINGS
3609 case ETHTOOL_GSTRINGS: {
3610 struct ethtool_gstrings egstr = { ETHTOOL_GSTRINGS };
3612 if (mm_copy_from_user(&egstr, useraddr, sizeof(egstr)))
3614 switch(egstr.string_set) {
3615 #ifdef ETHTOOL_GSTATS
3617 egstr.len = ETH_NUM_STATS;
3618 if (mm_copy_to_user(useraddr, &egstr, sizeof(egstr)))
3620 if (mm_copy_to_user(useraddr + sizeof(egstr),
3621 bcm5700_stats_str_arr,
3622 sizeof(bcm5700_stats_str_arr)))
3628 egstr.len = ETH_NUM_TESTS;
3629 if (mm_copy_to_user(useraddr, &egstr, sizeof(egstr)))
3631 if (mm_copy_to_user(useraddr + sizeof(egstr),
3632 bcm5700_tests_str_arr,
3633 sizeof(bcm5700_tests_str_arr)))
3642 #ifdef ETHTOOL_GSTATS
3643 case ETHTOOL_GSTATS: {
3644 struct ethtool_stats estats = { ETHTOOL_GSTATS };
3645 uint64_t stats[ETH_NUM_STATS];
3648 (uint64_t *) pDevice->pStatsBlkVirt;
3650 estats.n_stats = ETH_NUM_STATS;
3652 memset(stats, 0, sizeof(stats));
3656 for (i = 0; i < ETH_NUM_STATS; i++) {
3657 if (bcm5700_stats_offset_arr[i] != 0) {
3658 stats[i] = SWAP_DWORD_64(*(pStats +
3659 bcm5700_stats_offset_arr[i]));
3661 else if (i == RX_CRC_IDX) {
3663 bcm5700_crc_count(pUmDevice);
3665 else if (i == RX_MAC_ERR_IDX) {
3667 bcm5700_rx_err_count(pUmDevice);
3671 if (mm_copy_to_user(useraddr, &estats, sizeof(estats))) {
3674 if (mm_copy_to_user(useraddr + sizeof(estats), &stats,
3682 case ETHTOOL_TEST: {
3683 struct ethtool_test etest;
3684 uint64_t tests[ETH_NUM_TESTS] = {0, 0, 0, 0, 0, 0};
3685 LM_POWER_STATE old_power_level;
3687 printk( KERN_ALERT "Performing ethtool test.\n"
3688 "This test will take a few seconds to complete.\n" );
3690 if (mm_copy_from_user(&etest, useraddr, sizeof(etest)))
3693 etest.len = ETH_NUM_TESTS;
3694 old_power_level = pDevice->PowerLevel;
3695 if (old_power_level != LM_POWER_STATE_D0) {
3696 LM_SetPowerState(pDevice, LM_POWER_STATE_D0);
3697 LM_SwitchClocks(pDevice);
3699 MM_Sleep(pDevice, 1000);
3700 if (etest.flags & ETH_TEST_FL_OFFLINE) {
3701 b57_suspend_chip(pUmDevice);
3702 MM_Sleep(pDevice, 1000);
3703 LM_HaltCpu(pDevice,T3_RX_CPU_ID | T3_TX_CPU_ID);
3704 MM_Sleep(pDevice, 1000);
3705 if (b57_test_registers(pUmDevice) == 0) {
3706 etest.flags |= ETH_TEST_FL_FAILED;
3709 MM_Sleep(pDevice, 1000);
3710 if (b57_test_memory(pUmDevice) == 0) {
3711 etest.flags |= ETH_TEST_FL_FAILED;
3714 MM_Sleep(pDevice, 1000);
3715 if (b57_test_loopback(pUmDevice, NICE_LOOPBACK_TESTTYPE_MAC, 0) == 0) {
3716 etest.flags |= ETH_TEST_FL_FAILED;
3719 MM_Sleep(pDevice, 1000);
3720 b57_resume_chip(pUmDevice);
3721 /* wait for link to come up for the link test */
3722 MM_Sleep(pDevice, 4000);
3723 if ((pDevice->LinkStatus != LM_STATUS_LINK_ACTIVE) &&
3724 !(pDevice->TbiFlags & ENABLE_TBI_FLAG)) {
3726 /* wait a little longer for linkup on copper */
3727 MM_Sleep(pDevice, 3000);
3730 if (b57_test_nvram(pUmDevice) == 0) {
3731 etest.flags |= ETH_TEST_FL_FAILED;
3734 MM_Sleep(pDevice, 1000);
3735 if (b57_test_intr(pUmDevice) == 0) {
3736 etest.flags |= ETH_TEST_FL_FAILED;
3739 MM_Sleep(pDevice, 1000);
3740 if (b57_test_link(pUmDevice) == 0) {
3741 etest.flags |= ETH_TEST_FL_FAILED;
3744 MM_Sleep(pDevice, 1000);
3745 if (old_power_level != LM_POWER_STATE_D0) {
3746 LM_SetPowerState(pDevice, old_power_level);
3748 if (mm_copy_to_user(useraddr, &etest, sizeof(etest))) {
3751 if (mm_copy_to_user(useraddr + sizeof(etest), tests,
3759 case ETHTOOL_GTSO: {
3760 struct ethtool_value edata = { ETHTOOL_GTSO };
3764 (dev->features & NETIF_F_TSO) != 0;
3768 if (mm_copy_to_user(useraddr, &edata, sizeof(edata)))
3774 case ETHTOOL_STSO: {
3776 struct ethtool_value edata;
3778 if (!capable(CAP_NET_ADMIN))
3781 if (mm_copy_from_user(&edata, useraddr, sizeof(edata)))
3784 if (!(pDevice->TaskToOffload &
3785 LM_TASK_OFFLOAD_TCP_SEGMENTATION)) {
3789 dev->features &= ~NETIF_F_TSO;
3792 if (T3_ASIC_5714_FAMILY(pDevice->ChipRevId) &&
3793 (dev->mtu > 1500)) {
3794 printk(KERN_ALERT "%s: Jumbo Frames and TSO cannot simultaneously be enabled. Jumbo Frames enabled. TSO disabled.\n", dev->name);
3797 dev->features |= NETIF_F_TSO;
3810 #endif /* #ifdef SIOCETHTOOL */
3812 #if (LINUX_VERSION_CODE >= 0x20400) && (LINUX_VERSION_CODE < 0x20600)
3813 #include <linux/iobuf.h>
3816 /* Provide ioctl() calls to examine the MII xcvr state. */
/*
 * bcm5700_ioctl() - private ioctl entry point for the net_device.
 * Handles MII register access via SIOCDEVPRIVATE(+1/+2), the Broadcom
 * "NICE" diagnostic command set, and falls through to the ethtool
 * handler for everything else.
 * NOTE(review): this chunk is a sampled excerpt; interleaved lines
 * (break statements, closing braces, #else arms) are elided in SOURCE.
 */
3817 STATIC int bcm5700_ioctl(struct net_device *dev, struct ifreq *rq, int cmd)
3819 PUM_DEVICE_BLOCK pUmDevice = (PUM_DEVICE_BLOCK)dev->priv;
3820 PLM_DEVICE_BLOCK pDevice = (PLM_DEVICE_BLOCK) pUmDevice;
3821 u16 *data = (u16 *)&rq->ifr_data;
3823 unsigned long flags;
3829 case SIOCDEVPRIVATE: /* Get the address of the PHY in use. */
3830 data[0] = pDevice->PhyAddr;
3835 case SIOCDEVPRIVATE+1: /* Read the specified MII register. */
3836 if (pDevice->TbiFlags & ENABLE_TBI_FLAG)
3839 /* workaround for DHCP using ifup script */
3840 /* ifup only waits for 5 seconds for link up */
3841 /* NIC may take more than 5 seconds to establish link */
3842 if ((pUmDevice->delayed_link_ind > 0) &&
3843 delay_link[pUmDevice->index]) {
3847 BCM5700_PHY_LOCK(pUmDevice, flags);
3848 LM_ReadPhy(pDevice, data[1] & 0x1f, (LM_UINT32 *) &value);
3849 BCM5700_PHY_UNLOCK(pUmDevice, flags);
3850 data[3] = value & 0xffff;
3856 case SIOCDEVPRIVATE+2: /* Write the specified MII register */
3857 if (!capable(CAP_NET_ADMIN))
3860 if (pDevice->TbiFlags & ENABLE_TBI_FLAG)
3863 BCM5700_PHY_LOCK(pUmDevice, flags);
3864 LM_WritePhy(pDevice, data[1] & 0x1f, data[2]);
3865 BCM5700_PHY_UNLOCK(pUmDevice, flags);
3871 struct nice_req* nrq;
3873 if (!capable(CAP_NET_ADMIN))
3876 nrq = (struct nice_req*)&rq->ifr_ifru;
3877 if( nrq->cmd == NICE_CMD_QUERY_SUPPORT ) {
3878 nrq->nrq_magic = NICE_DEVICE_MAGIC;
3879 nrq->nrq_support_rx = 1;
3880 nrq->nrq_support_vlan = 1;
3881 nrq->nrq_support_get_speed = 1;
3882 #ifdef BCM_NAPI_RXPOLL
3883 nrq->nrq_support_rx_napi = 1;
3887 #ifdef BCM_NAPI_RXPOLL
3888 else if( nrq->cmd == NICE_CMD_SET_RX_NAPI )
3890 else if( nrq->cmd == NICE_CMD_SET_RX )
3893 pUmDevice->nice_rx = nrq->nrq_rx;
3894 pUmDevice->nice_ctx = nrq->nrq_ctx;
3895 bcm5700_set_vlan_mode(pUmDevice);
3898 #ifdef BCM_NAPI_RXPOLL
3899 else if( nrq->cmd == NICE_CMD_GET_RX_NAPI )
3901 else if( nrq->cmd == NICE_CMD_GET_RX )
3904 nrq->nrq_rx = pUmDevice->nice_rx;
3905 nrq->nrq_ctx = pUmDevice->nice_ctx;
3908 else if( nrq->cmd == NICE_CMD_GET_SPEED ) {
3909 if(pDevice->LinkStatus != LM_STATUS_LINK_ACTIVE){
3912 else if (pDevice->LineSpeed == LM_LINE_SPEED_1000MBPS) {
3913 nrq->nrq_speed = SPEED_1000;
3914 } else if (pDevice->LineSpeed == LM_LINE_SPEED_100MBPS) {
3915 nrq->nrq_speed = SPEED_100;
3916 } else if (pDevice->LineSpeed == LM_LINE_SPEED_10MBPS) {
/* FIX: the 10 Mbps branch previously reported SPEED_100 (copy-paste
 * from the branch above); a 10 Mbps link must report SPEED_10. */
3917 nrq->nrq_speed = SPEED_10;
3924 if (!pUmDevice->opened)
3928 case NICE_CMD_BLINK_LED:
3929 if (LM_BlinkLED(pDevice, nrq->nrq_blink_time) ==
3930 LM_STATUS_SUCCESS) {
3935 case NICE_CMD_DIAG_SUSPEND:
3936 b57_suspend_chip(pUmDevice);
3939 case NICE_CMD_DIAG_RESUME:
3940 b57_resume_chip(pUmDevice);
3943 case NICE_CMD_REG_READ:
/* Offsets >= 64K are reached through the indirect access window. */
3944 if (nrq->nrq_offset >= 0x10000) {
3945 nrq->nrq_data = LM_RegRdInd(pDevice,
3949 nrq->nrq_data = LM_RegRd(pDevice,
3954 case NICE_CMD_REG_WRITE:
3955 if (nrq->nrq_offset >= 0x10000) {
3956 LM_RegWrInd(pDevice, nrq->nrq_offset,
3960 LM_RegWr(pDevice, nrq->nrq_offset,
3961 nrq->nrq_data, FALSE);
3965 case NICE_CMD_REG_READ_DIRECT:
3966 case NICE_CMD_REG_WRITE_DIRECT:
3967 if ((nrq->nrq_offset >= 0x10000) ||
3968 (pDevice->Flags & UNDI_FIX_FLAG)) {
3972 if (nrq->cmd == NICE_CMD_REG_READ_DIRECT) {
3973 nrq->nrq_data = REG_RD_OFFSET(pDevice,
3977 REG_WR_OFFSET(pDevice, nrq->nrq_offset,
3982 case NICE_CMD_MEM_READ:
3983 nrq->nrq_data = LM_MemRdInd(pDevice,
3987 case NICE_CMD_MEM_WRITE:
3988 LM_MemWrInd(pDevice, nrq->nrq_offset,
3992 case NICE_CMD_CFG_READ32:
3993 pci_read_config_dword(pUmDevice->pdev,
3994 nrq->nrq_offset, (u32 *)&nrq->nrq_data);
3997 case NICE_CMD_CFG_READ16:
3998 pci_read_config_word(pUmDevice->pdev,
3999 nrq->nrq_offset, (u16 *)&nrq->nrq_data);
4002 case NICE_CMD_CFG_READ8:
4003 pci_read_config_byte(pUmDevice->pdev,
4004 nrq->nrq_offset, (u8 *)&nrq->nrq_data);
4007 case NICE_CMD_CFG_WRITE32:
4008 pci_write_config_dword(pUmDevice->pdev,
4009 nrq->nrq_offset, (u32)nrq->nrq_data);
4012 case NICE_CMD_CFG_WRITE16:
4013 pci_write_config_word(pUmDevice->pdev,
4014 nrq->nrq_offset, (u16)nrq->nrq_data);
4017 case NICE_CMD_CFG_WRITE8:
4018 pci_write_config_byte(pUmDevice->pdev,
4019 nrq->nrq_offset, (u8)nrq->nrq_data);
4022 case NICE_CMD_RESET:
4026 case NICE_CMD_ENABLE_MAC_LOOPBACK:
4027 if (pDevice->LoopBackMode != 0) {
4031 BCM5700_PHY_LOCK(pUmDevice, flags);
4032 LM_EnableMacLoopBack(pDevice);
4033 BCM5700_PHY_UNLOCK(pUmDevice, flags);
4036 case NICE_CMD_DISABLE_MAC_LOOPBACK:
4037 if (pDevice->LoopBackMode !=
4038 LM_MAC_LOOP_BACK_MODE) {
4042 BCM5700_PHY_LOCK(pUmDevice, flags);
4043 LM_DisableMacLoopBack(pDevice);
4044 BCM5700_PHY_UNLOCK(pUmDevice, flags);
4047 case NICE_CMD_ENABLE_PHY_LOOPBACK:
4048 if (pDevice->LoopBackMode != 0) {
4052 BCM5700_PHY_LOCK(pUmDevice, flags);
4053 LM_EnablePhyLoopBack(pDevice);
4054 BCM5700_PHY_UNLOCK(pUmDevice, flags);
4057 case NICE_CMD_DISABLE_PHY_LOOPBACK:
4058 if (pDevice->LoopBackMode !=
4059 LM_PHY_LOOP_BACK_MODE) {
4063 BCM5700_PHY_LOCK(pUmDevice, flags);
4064 LM_DisablePhyLoopBack(pDevice);
4065 BCM5700_PHY_UNLOCK(pUmDevice, flags);
4068 case NICE_CMD_ENABLE_EXT_LOOPBACK:
4069 if (pDevice->LoopBackMode != 0) {
4073 if (pDevice->TbiFlags & ENABLE_TBI_FLAG) {
/* Fiber (TBI) only supports 1000 Mbps external loopback. */
4074 if (nrq->nrq_speed != 1000)
4078 if ((nrq->nrq_speed != 1000) &&
4079 (nrq->nrq_speed != 100) &&
4080 (nrq->nrq_speed != 10)) {
4084 BCM5700_PHY_LOCK(pUmDevice, flags);
4085 LM_EnableExtLoopBack(pDevice, nrq->nrq_speed);
4086 BCM5700_PHY_UNLOCK(pUmDevice, flags);
4089 case NICE_CMD_DISABLE_EXT_LOOPBACK:
4090 if (pDevice->LoopBackMode !=
4091 LM_EXT_LOOP_BACK_MODE) {
4095 BCM5700_PHY_LOCK(pUmDevice, flags);
4096 LM_DisableExtLoopBack(pDevice);
4097 BCM5700_PHY_UNLOCK(pUmDevice, flags);
4100 case NICE_CMD_INTERRUPT_TEST:
4101 nrq->nrq_intr_test_result =
4102 b57_test_intr(pUmDevice);
4105 case NICE_CMD_LOOPBACK_TEST:
4107 switch (nrq->nrq_looptype) {
4108 case NICE_LOOPBACK_TESTTYPE_EXT:
4109 if ((nrq->nrq_loopspeed & ~NICE_LOOPBACK_TEST_SPEEDMASK) ||
4110 !(nrq->nrq_loopspeed & NICE_LOOPBACK_TEST_SPEEDMASK))
4112 switch (nrq->nrq_loopspeed) {
4113 case NICE_LOOPBACK_TEST_10MBPS:
4114 value = LM_LINE_SPEED_10MBPS;
4116 case NICE_LOOPBACK_TEST_100MBPS:
4117 value = LM_LINE_SPEED_100MBPS;
4119 case NICE_LOOPBACK_TEST_1000MBPS:
4120 value = LM_LINE_SPEED_1000MBPS;
4125 case NICE_LOOPBACK_TESTTYPE_MAC:
4126 case NICE_LOOPBACK_TESTTYPE_PHY:
/* Loopback test requires the chip quiesced; restore afterwards. */
4127 b57_suspend_chip(pUmDevice);
4128 value = b57_test_loopback(pUmDevice,
4129 nrq->nrq_looptype, value);
4130 b57_resume_chip(pUmDevice);
4135 /* A '1' indicates success */
4143 case NICE_CMD_KMALLOC_PHYS: {
4144 #if (LINUX_VERSION_CODE >= 0x020400)
4149 struct page *pg, *last_pg;
/* Find a free slot in the diagnostic DMA buffer table. */
4151 for (i = 0; i < MAX_MEM2; i++) {
4152 if (pUmDevice->mem_size_list2[i] == 0)
4157 ptr = pci_alloc_consistent(pUmDevice->pdev,
4158 nrq->nrq_size, &mapping);
4162 pUmDevice->mem_size_list2[i] = nrq->nrq_size;
4163 pUmDevice->mem_list2[i] = ptr;
4164 pUmDevice->dma_list2[i] = mapping;
4166 /* put pci mapping at the beginning of buffer */
4167 *((__u64 *) ptr) = (__u64) mapping;
4169 /* Probably won't work on some architectures */
4170 /* get CPU mapping */
4171 cpu_pa = (__u64) virt_to_phys(ptr);
4172 pUmDevice->cpu_pa_list2[i] = cpu_pa;
4173 nrq->nrq_phys_addr_lo = (__u32) cpu_pa;
4174 nrq->nrq_phys_addr_hi = (__u32) (cpu_pa >> 32);
/* Reserve the backing pages so user space may mmap them. */
4176 pg = virt_to_page(ptr);
4177 last_pg = virt_to_page(ptr + nrq->nrq_size - 1);
4179 #if (LINUX_VERSION_CODE > 0x020500)
4180 SetPageReserved(pg);
4182 mem_map_reserve(pg);
4193 case NICE_CMD_KFREE_PHYS: {
4197 cpu_pa = (__u64) nrq->nrq_phys_addr_lo +
4198 ((__u64) nrq->nrq_phys_addr_hi << 32);
4199 for (i = 0; i < MAX_MEM2; i++) {
4200 if (pUmDevice->cpu_pa_list2[i] ==
4209 bcm5700_freemem2(pUmDevice, i);
4213 case NICE_CMD_SET_WRITE_PROTECT:
4214 if (nrq->nrq_write_protect)
4215 pDevice->Flags |= EEPROM_WP_FLAG;
4217 pDevice->Flags &= ~EEPROM_WP_FLAG;
4219 case NICE_CMD_GET_STATS_BLOCK: {
4220 PT3_STATS_BLOCK pStats =
4221 (PT3_STATS_BLOCK)pDevice->pStatsBlkVirt;
4222 if (mm_copy_to_user(nrq->nrq_stats_useraddr,
4223 pStats, nrq->nrq_stats_size)) {
4228 case NICE_CMD_CLR_STATS_BLOCK: {
4230 PT3_STATS_BLOCK pStats =
4231 (PT3_STATS_BLOCK)pDevice->pStatsBlkVirt;
4233 memset(pStats, 0, sizeof(T3_STATS_BLOCK));
4234 if (T3_ASIC_REV(pDevice->ChipRevId) ==
/* Also clear the on-chip statistics area word by word. */
4238 for(j = 0x0300; j < 0x0b00; j = j + 4) {
4239 MEM_WR_OFFSET(pDevice, j, 0);
4249 #endif /* NICE_SUPPORT */
4252 return netdev_ethtool_ioctl(dev, (void *) rq->ifr_data);
/*
 * bcm5700_do_rx_mode() - reprogram the hardware receive filter from the
 * net_device state: rebuild the multicast list, then sync the
 * ALLMULTI and PROMISC bits of pDevice->ReceiveMask with dev->flags.
 * Unlocked variant; bcm5700_set_rx_mode() wraps the same logic in
 * BCM5700_PHY_LOCK/UNLOCK. NOTE(review): presumably the caller already
 * holds the PHY lock here - confirm at call sites.
 */
4260 STATIC void bcm5700_do_rx_mode(struct net_device *dev)
4262 PUM_DEVICE_BLOCK pUmDevice = (PUM_DEVICE_BLOCK)dev->priv;
4263 PLM_DEVICE_BLOCK pDevice = (PLM_DEVICE_BLOCK) pUmDevice;
4265 struct dev_mc_list *mclist;
/* Rebuild the hardware multicast address list from dev->mc_list. */
4267 LM_MulticastClear(pDevice);
4268 for (i = 0, mclist = dev->mc_list; mclist && i < dev->mc_count;
4269 i++, mclist = mclist->next) {
4270 LM_MulticastAdd(pDevice, (PLM_UINT8) &mclist->dmi_addr);
/* Only touch the receive mask when the bit actually changes. */
4272 if (dev->flags & IFF_ALLMULTI) {
4273 if (!(pDevice->ReceiveMask & LM_ACCEPT_ALL_MULTICAST)) {
4274 LM_SetReceiveMask(pDevice,
4275 pDevice->ReceiveMask | LM_ACCEPT_ALL_MULTICAST);
4278 else if (pDevice->ReceiveMask & LM_ACCEPT_ALL_MULTICAST) {
4279 LM_SetReceiveMask(pDevice,
4280 pDevice->ReceiveMask & ~LM_ACCEPT_ALL_MULTICAST);
4282 if (dev->flags & IFF_PROMISC) {
4283 if (!(pDevice->ReceiveMask & LM_PROMISCUOUS_MODE)) {
4284 LM_SetReceiveMask(pDevice,
4285 pDevice->ReceiveMask | LM_PROMISCUOUS_MODE);
4288 else if (pDevice->ReceiveMask & LM_PROMISCUOUS_MODE) {
4289 LM_SetReceiveMask(pDevice,
4290 pDevice->ReceiveMask & ~LM_PROMISCUOUS_MODE);
/*
 * bcm5700_set_rx_mode() - net_device set_multicast_list entry point.
 * Same filter programming as bcm5700_do_rx_mode(), but takes the PHY
 * lock around the whole update because it can be invoked from the
 * networking core asynchronously to the driver's own paths.
 */
4295 STATIC void bcm5700_set_rx_mode(struct net_device *dev)
4297 PUM_DEVICE_BLOCK pUmDevice = (PUM_DEVICE_BLOCK)dev->priv;
4298 PLM_DEVICE_BLOCK pDevice = (PLM_DEVICE_BLOCK) pUmDevice;
4300 struct dev_mc_list *mclist;
4301 unsigned long flags;
4303 BCM5700_PHY_LOCK(pUmDevice, flags);
/* Rebuild the hardware multicast address list from dev->mc_list. */
4305 LM_MulticastClear(pDevice);
4306 for (i = 0, mclist = dev->mc_list; mclist && i < dev->mc_count;
4307 i++, mclist = mclist->next) {
4308 LM_MulticastAdd(pDevice, (PLM_UINT8) &mclist->dmi_addr);
/* Only touch the receive mask when the bit actually changes. */
4310 if (dev->flags & IFF_ALLMULTI) {
4311 if (!(pDevice->ReceiveMask & LM_ACCEPT_ALL_MULTICAST)) {
4312 LM_SetReceiveMask(pDevice,
4313 pDevice->ReceiveMask | LM_ACCEPT_ALL_MULTICAST);
4316 else if (pDevice->ReceiveMask & LM_ACCEPT_ALL_MULTICAST) {
4317 LM_SetReceiveMask(pDevice,
4318 pDevice->ReceiveMask & ~LM_ACCEPT_ALL_MULTICAST);
4320 if (dev->flags & IFF_PROMISC) {
4321 if (!(pDevice->ReceiveMask & LM_PROMISCUOUS_MODE)) {
4322 LM_SetReceiveMask(pDevice,
4323 pDevice->ReceiveMask | LM_PROMISCUOUS_MODE);
4326 else if (pDevice->ReceiveMask & LM_PROMISCUOUS_MODE) {
4327 LM_SetReceiveMask(pDevice,
4328 pDevice->ReceiveMask & ~LM_PROMISCUOUS_MODE);
4331 BCM5700_PHY_UNLOCK(pUmDevice, flags);
4335 * Set the hardware MAC address.
/*
 * bcm5700_set_mac_addr() - net_device set_mac_address hook.
 * Validates the requested address, copies it into dev->dev_addr, and
 * pushes it to the hardware only if the interface is currently open.
 */
4337 STATIC int bcm5700_set_mac_addr(struct net_device *dev, void *p)
4339 struct sockaddr *addr=p;
4340 PLM_DEVICE_BLOCK pDevice = (PLM_DEVICE_BLOCK) dev->priv;
4341 UM_DEVICE_BLOCK *pUmDevice = (UM_DEVICE_BLOCK *) pDevice;
/* Reject multicast / all-zero addresses before touching state. */
4343 if(is_valid_ether_addr(addr->sa_data)){
4345 memcpy(dev->dev_addr, addr->sa_data,dev->addr_len);
/* Hardware is only programmed while the device is up. */
4346 if (pUmDevice->opened)
4347 LM_SetMacAddress(pDevice, dev->dev_addr);
4353 #if T3_JUMBO_RCV_RCB_ENTRY_COUNT
/*
 * bcm5700_change_mtu() - net_device change_mtu hook (jumbo-capable build).
 * Validates the requested size, then if the interface is open performs a
 * full reinit: stop queue, shut down and free rings, recompute RxMtu/TxMtu
 * and jumbo descriptor counts, reinitialize the adapter and restart.
 * NOTE(review): sampled excerpt - error-return lines and some braces are
 * elided in SOURCE.
 */
4354 STATIC int bcm5700_change_mtu(struct net_device *dev, int new_mtu)
4356 int pkt_size = new_mtu + ETHERNET_PACKET_HEADER_SIZE;
4357 PUM_DEVICE_BLOCK pUmDevice = (PUM_DEVICE_BLOCK)dev->priv;
4358 PLM_DEVICE_BLOCK pDevice = &pUmDevice->lm_dev;
4359 unsigned long flags;
/* Bounds check: below minimum frame or above jumbo maximum. */
4362 if ((pkt_size < MIN_ETHERNET_PACKET_SIZE_NO_CRC) ||
4363 (pkt_size > MAX_ETHERNET_JUMBO_PACKET_SIZE_NO_CRC)) {
/* Jumbo sizes are only allowed on jumbo-capable chips. */
4367 if ( !(pDevice->Flags & JUMBO_CAPABLE_FLAG) &&
4368 (pkt_size > MAX_ETHERNET_PACKET_SIZE_NO_CRC) ) {
4372 if (pUmDevice->suspended)
4375 if (pUmDevice->opened && (new_mtu != dev->mtu) &&
4376 (pDevice->Flags & JUMBO_CAPABLE_FLAG)) {
4380 BCM5700_PHY_LOCK(pUmDevice, flags);
/* Quiesce the device before resizing the rings. */
4382 netif_stop_queue(dev);
4383 bcm5700_shutdown(pUmDevice);
4384 bcm5700_freemem(dev);
4388 if (pkt_size < MAX_ETHERNET_PACKET_SIZE_NO_CRC) {
4389 pDevice->RxMtu = pDevice->TxMtu =
4390 MAX_ETHERNET_PACKET_SIZE_NO_CRC;
4393 pDevice->RxMtu = pDevice->TxMtu = pkt_size;
/* Standard MTU: no jumbo descriptors needed. */
4396 if (dev->mtu <= 1514) {
4397 pDevice->RxJumboDescCnt = 0;
4399 else if (pDevice->Flags & JUMBO_CAPABLE_FLAG){
4400 pDevice->RxJumboDescCnt =
4401 rx_jumbo_desc_cnt[pUmDevice->index];
4403 pDevice->RxPacketDescCnt = pDevice->RxJumboDescCnt +
4404 pDevice->RxStdDescCnt;
/* Jumbo buffer size: MTU + CRC/VLAN, rounded up to cache line. */
4406 pDevice->RxJumboBufferSize = (pDevice->RxMtu + 8 /* CRC + VLAN */ +
4407 COMMON_CACHE_LINE_SIZE-1) & ~COMMON_CACHE_LINE_MASK;
/* 5714 family cannot do TSO together with jumbo frames. */
4410 if (T3_ASIC_5714_FAMILY(pDevice->ChipRevId) &&
4411 (dev->mtu > 1514) ) {
4412 if (dev->features & NETIF_F_TSO) {
4413 dev->features &= ~NETIF_F_TSO;
4414 printk(KERN_ALERT "%s: TSO previously enabled. Jumbo Frames and TSO cannot simultaneously be enabled. Jumbo Frames enabled. TSO disabled.\n", dev->name);
/* Bring the adapter back up with the new sizes. */
4420 LM_InitializeAdapter(pDevice);
4421 bcm5700_do_rx_mode(dev);
4422 bcm5700_set_vlan_mode(pUmDevice);
4423 bcm5700_init_counters(pUmDevice);
4424 if (memcmp(dev->dev_addr, pDevice->NodeAddress, 6)) {
4425 LM_SetMacAddress(pDevice, dev->dev_addr);
4427 netif_start_queue(dev);
4428 bcm5700_intr_on(pUmDevice);
4430 BCM5700_PHY_UNLOCK(pUmDevice, flags);
4437 #if (LINUX_VERSION_CODE < 0x020300)
/*
 * bcm5700_probe() - legacy (pre-2.4 hotplug) probe path.
 * Walks all PCI Ethernet-class devices, matches vendor/device and
 * subsystem IDs against bcm5700_pci_tbl, and initializes each match via
 * bcm5700_init_one().  Returns 0 if at least one card was found,
 * -ENODEV otherwise.
 */
4439 bcm5700_probe(struct net_device *dev)
4441 int cards_found = 0;
4442 struct pci_dev *pdev = NULL;
4443 struct pci_device_id *pci_tbl;
4446 if ( ! pci_present())
4449 pci_tbl = bcm5700_pci_tbl;
/* Iterate every PCI device with the Ethernet class code. */
4450 while ((pdev = pci_find_class(PCI_CLASS_NETWORK_ETHERNET << 8, pdev))) {
4453 pci_read_config_word(pdev, PCI_SUBSYSTEM_VENDOR_ID, &ssvid);
4454 pci_read_config_word(pdev, PCI_SUBSYSTEM_ID, &ssid);
/* Match against the ID table; PCI_ANY_ID wildcards each field. */
4455 for (idx = 0; pci_tbl[idx].vendor; idx++) {
4456 if ((pci_tbl[idx].vendor == PCI_ANY_ID ||
4457 pci_tbl[idx].vendor == pdev->vendor) &&
4458 (pci_tbl[idx].device == PCI_ANY_ID ||
4459 pci_tbl[idx].device == pdev->device) &&
4460 (pci_tbl[idx].subvendor == PCI_ANY_ID ||
4461 pci_tbl[idx].subvendor == ssvid) &&
4462 (pci_tbl[idx].subdevice == PCI_ANY_ID ||
4463 pci_tbl[idx].subdevice == ssid))
/* vendor == 0 terminates the table: no match for this device. */
4469 if (pci_tbl[idx].vendor == 0)
4473 if (bcm5700_init_one(pdev, &pci_tbl[idx]) == 0)
4477 return cards_found ? 0 : -ENODEV;
/* Legacy (kernel < 2.3) module entry point: probe all matching NICs. */
4481 int init_module(void)
4483 return bcm5700_probe(NULL);
/*
 * Legacy module exit: walk the driver's device list rooted at
 * root_tigon3_dev, unregistering each netdev, unmapping its BAR and
 * freeing the net_device before advancing to the next one.
 */
4486 void cleanup_module(void)
4488 struct net_device *next_dev;
4489 PUM_DEVICE_BLOCK pUmDevice;
4492 bcm5700_proc_remove_notifier();
4494 /* No need to check MOD_IN_USE, as sys_delete_module() checks. */
4495 while (root_tigon3_dev) {
4496 pUmDevice = (PUM_DEVICE_BLOCK)root_tigon3_dev->priv;
4498 bcm5700_proc_remove_dev(root_tigon3_dev);
/* Save the link before the current node is freed below. */
4500 next_dev = pUmDevice->next_module;
4501 unregister_netdev(root_tigon3_dev);
4502 if (pUmDevice->lm_dev.pMappedMemBase)
4503 iounmap(pUmDevice->lm_dev.pMappedMemBase);
/* kfree() on old kernels, free_netdev() once it exists (>= 2.6). */
4504 #if (LINUX_VERSION_CODE < 0x020600)
4505 kfree(root_tigon3_dev);
4507 free_netdev(root_tigon3_dev);
4509 root_tigon3_dev = next_dev;
4512 unregister_ioctl32_conversion(SIOCNICE);
4517 #else /* LINUX_VERSION_CODE < 0x020300 */
/*
 * bcm5700_suspend() - PCI power-management suspend hook.
 * Detaches the netdev, shuts the chip down and drops it to D3.
 * Returns int on kernels >= 2.4.6, void before (dual prototype below).
 */
4519 #if (LINUX_VERSION_CODE >= 0x020406)
4520 static int bcm5700_suspend (struct pci_dev *pdev, u32 state)
4522 static void bcm5700_suspend (struct pci_dev *pdev)
4525 struct net_device *dev = (struct net_device *) pci_get_drvdata(pdev);
4526 PUM_DEVICE_BLOCK pUmDevice = (PUM_DEVICE_BLOCK) dev->priv;
4527 PLM_DEVICE_BLOCK pDevice = &pUmDevice->lm_dev;
/* Nothing to do if the interface was never brought up. */
4529 if (!netif_running(dev))
4530 #if (LINUX_VERSION_CODE >= 0x020406)
4536 netif_device_detach (dev);
4537 bcm5700_shutdown(pUmDevice);
4539 LM_SetPowerState(pDevice, LM_POWER_STATE_D3);
4541 /* pci_power_off(pdev, -1);*/
4542 #if (LINUX_VERSION_CODE >= 0x020406)
/*
 * bcm5700_resume() - PCI power-management resume hook.
 * Restores D0, reattaches the netdev and replenishes RX packet buffers.
 * Returns int on kernels >= 2.4.6, void before (dual prototype below).
 */
4548 #if (LINUX_VERSION_CODE >= 0x020406)
4549 static int bcm5700_resume(struct pci_dev *pdev)
4551 static void bcm5700_resume(struct pci_dev *pdev)
4554 struct net_device *dev = (struct net_device *) pci_get_drvdata(pdev);
4555 PUM_DEVICE_BLOCK pUmDevice = (PUM_DEVICE_BLOCK) dev->priv;
4556 PLM_DEVICE_BLOCK pDevice = &pUmDevice->lm_dev;
/* Nothing to restore if the interface was not running at suspend. */
4558 if (!netif_running(dev))
4559 #if (LINUX_VERSION_CODE >= 0x020406)
4564 /* pci_power_on(pdev);*/
4565 netif_device_attach(dev);
4566 LM_SetPowerState(pDevice, LM_POWER_STATE_D0);
4567 MM_InitializeUmPackets(pDevice);
/* PCI driver descriptor (old GNU designated-initializer syntax):
 * binds probe/remove and power-management callbacks to the ID table. */
4575 static struct pci_driver bcm5700_pci_driver = {
4576 name: bcm5700_driver,
4577 id_table: bcm5700_pci_tbl,
4578 probe: bcm5700_init_one,
4579 remove: __devexit_p(bcm5700_remove_one),
4580 suspend: bcm5700_suspend,
4581 resume: bcm5700_resume,
/* Modern (>= 2.4 hotplug) module init: register the PCI driver. */
4585 static int __init bcm5700_init_module (void)
4587 return pci_module_init(&bcm5700_pci_driver);
/* Module exit: remove the /proc notifier, then unregister the driver. */
4591 static void __exit bcm5700_cleanup_module (void)
4594 bcm5700_proc_remove_notifier();
4596 pci_unregister_driver(&bcm5700_pci_driver);
4600 module_init(bcm5700_init_module);
4601 module_exit(bcm5700_cleanup_module);
4610 #ifdef BCM_NAPI_RXPOLL
/*
 * MM_ScheduleRxPoll() - request a NAPI RX poll for this device.
 * Returns LM_STATUS_SUCCESS if the poll was scheduled,
 * LM_STATUS_FAILURE if one is already pending (prep failed).
 */
4612 MM_ScheduleRxPoll(LM_DEVICE_BLOCK *pDevice)
4614 struct net_device *dev = ((UM_DEVICE_BLOCK *) pDevice)->dev;
4616 if (netif_rx_schedule_prep(dev)) {
4617 __netif_rx_schedule(dev);
4618 return LM_STATUS_SUCCESS;
4620 return LM_STATUS_FAILURE;
/* OS-abstraction shim: read a 16-bit PCI config register at Offset
 * into *pValue16.  Always reports LM_STATUS_SUCCESS. */
4625 MM_ReadConfig16(PLM_DEVICE_BLOCK pDevice, LM_UINT32 Offset,
4626 LM_UINT16 *pValue16)
4628 UM_DEVICE_BLOCK *pUmDevice;
4630 pUmDevice = (UM_DEVICE_BLOCK *) pDevice;
4631 pci_read_config_word(pUmDevice->pdev, Offset, (u16 *) pValue16);
4632 return LM_STATUS_SUCCESS;
/* OS-abstraction shim: read a 32-bit PCI config register at Offset
 * into *pValue32.  Always reports LM_STATUS_SUCCESS. */
4636 MM_ReadConfig32(PLM_DEVICE_BLOCK pDevice, LM_UINT32 Offset,
4637 LM_UINT32 *pValue32)
4639 UM_DEVICE_BLOCK *pUmDevice;
4641 pUmDevice = (UM_DEVICE_BLOCK *) pDevice;
4642 pci_read_config_dword(pUmDevice->pdev, Offset, (u32 *) pValue32);
4643 return LM_STATUS_SUCCESS;
/* OS-abstraction shim: write 16-bit Value16 to PCI config register
 * at Offset.  Always reports LM_STATUS_SUCCESS. */
4647 MM_WriteConfig16(PLM_DEVICE_BLOCK pDevice, LM_UINT32 Offset,
4650 UM_DEVICE_BLOCK *pUmDevice;
4652 pUmDevice = (UM_DEVICE_BLOCK *) pDevice;
4653 pci_write_config_word(pUmDevice->pdev, Offset, Value16);
4654 return LM_STATUS_SUCCESS;
/* OS-abstraction shim: write 32-bit Value32 to PCI config register
 * at Offset.  Always reports LM_STATUS_SUCCESS. */
4658 MM_WriteConfig32(PLM_DEVICE_BLOCK pDevice, LM_UINT32 Offset,
4661 UM_DEVICE_BLOCK *pUmDevice;
4663 pUmDevice = (UM_DEVICE_BLOCK *) pDevice;
4664 pci_write_config_dword(pUmDevice->pdev, Offset, Value32);
4665 return LM_STATUS_SUCCESS;
/*
 * MM_AllocateSharedMemory() - allocate a zeroed, DMA-coherent block of
 * BlockSize bytes for device/host shared structures.  Records the
 * virtual address, DMA handle and size in the per-device tracking lists
 * (freed later by index), returns the virtual pointer through
 * *pMemoryBlockVirt and the bus address via MM_SetAddr().
 */
4669 MM_AllocateSharedMemory(PLM_DEVICE_BLOCK pDevice, LM_UINT32 BlockSize,
4670 PLM_VOID *pMemoryBlockVirt, PLM_PHYSICAL_ADDRESS pMemoryBlockPhy,
4674 PUM_DEVICE_BLOCK pUmDevice = (PUM_DEVICE_BLOCK) pDevice;
4677 pvirt = pci_alloc_consistent(pUmDevice->pdev, BlockSize,
4680 return LM_STATUS_FAILURE;
/* Track the allocation so the driver can free everything on teardown. */
4682 pUmDevice->mem_list[pUmDevice->mem_list_num] = pvirt;
4683 pUmDevice->dma_list[pUmDevice->mem_list_num] = mapping;
4684 pUmDevice->mem_size_list[pUmDevice->mem_list_num++] = BlockSize;
4685 memset(pvirt, 0, BlockSize);
4686 *pMemoryBlockVirt = (PLM_VOID) pvirt;
4687 MM_SetAddr(pMemoryBlockPhy, mapping);
4688 return LM_STATUS_SUCCESS;
/*
 * MM_AllocateMemory() - allocate a zeroed, non-DMA block of BlockSize
 * bytes via kmalloc(GFP_ATOMIC).  Tracked in the same per-device lists
 * as shared memory; a size entry of 0 marks it as a kmalloc'd (kfree)
 * block rather than a pci_alloc_consistent one.
 */
4692 MM_AllocateMemory(PLM_DEVICE_BLOCK pDevice, LM_UINT32 BlockSize,
4693 PLM_VOID *pMemoryBlockVirt)
4696 PUM_DEVICE_BLOCK pUmDevice = (PUM_DEVICE_BLOCK) pDevice;
4699 /* Maximum in slab.c */
4700 if (BlockSize > 131072) {
4701 goto MM_Alloc_error;
4704 pvirt = kmalloc(BlockSize,GFP_ATOMIC);
4706 goto MM_Alloc_error;
4708 pUmDevice->mem_list[pUmDevice->mem_list_num] = pvirt;
4709 pUmDevice->dma_list[pUmDevice->mem_list_num] = 0;
4710 pUmDevice->mem_size_list[pUmDevice->mem_list_num++] = 0;
4711 /* mem_size_list[i] == 0 indicates that the memory should be freed */
4713 memset(pvirt, 0, BlockSize);
4714 *pMemoryBlockVirt = pvirt;
4715 return LM_STATUS_SUCCESS;
4718 printk(KERN_WARNING "%s: Memory allocation failed - buffer parameters may be set too high\n", pUmDevice->dev->name);
4719 return LM_STATUS_FAILURE;
/*
 * MM_MapMemBase() - ioremap the device register window (PCI BAR 0,
 * sized to the standard memory map) into kernel virtual space.
 */
4723 MM_MapMemBase(PLM_DEVICE_BLOCK pDevice)
4725 PUM_DEVICE_BLOCK pUmDevice = (PUM_DEVICE_BLOCK) pDevice;
4727 pDevice->pMappedMemBase = ioremap_nocache(
4728 pci_resource_start(pUmDevice->pdev, 0), sizeof(T3_STD_MEM_MAP));
4729 if (pDevice->pMappedMemBase == 0)
4730 return LM_STATUS_FAILURE;
4732 return LM_STATUS_SUCCESS;
/*
 * MM_InitializeUmPackets() - attach an skb to every RX packet descriptor
 * in the free queue, then compute the buffer-replenish thresholds that
 * govern when/how many RX buffers are refilled from the ISR.
 * Descriptors whose skb allocation fails are parked on
 * rx_out_of_buf_q to be retried later.
 */
4736 MM_InitializeUmPackets(PLM_DEVICE_BLOCK pDevice)
4739 struct sk_buff *skb;
4740 PUM_DEVICE_BLOCK pUmDevice = (PUM_DEVICE_BLOCK) pDevice;
4741 PUM_PACKET pUmPacket;
4744 for (i = 0; i < pDevice->RxPacketDescCnt; i++) {
4745 pPacket = QQ_PopHead(&pDevice->RxPacketFreeQ.Container);
4746 pUmPacket = (PUM_PACKET) pPacket;
4748 printk(KERN_DEBUG "Bad RxPacketFreeQ\n");
4750 if (pUmPacket->skbuff == 0) {
/* +2 gives room for the alignment reserve below. */
4751 skb = dev_alloc_skb(pPacket->u.Rx.RxBufferSize + 2);
4753 pUmPacket->skbuff = 0;
4755 &pUmDevice->rx_out_of_buf_q.Container,
4759 pUmPacket->skbuff = skb;
4760 skb->dev = pUmDevice->dev;
4761 skb_reserve(skb, pUmDevice->rx_buf_align);
4763 QQ_PushTail(&pDevice->RxPacketFreeQ.Container, pPacket);
4765 if (T3_ASIC_REV(pUmDevice->lm_dev.ChipRevId) == T3_ASIC_REV_5700) {
4766 /* reallocate buffers in the ISR */
4767 pUmDevice->rx_buf_repl_thresh = 0;
4768 pUmDevice->rx_buf_repl_panic_thresh = 0;
4769 pUmDevice->rx_buf_repl_isr_limit = 0;
/* Non-5700: replenish at 1/8 used, panic at 7/8 used. */
4772 pUmDevice->rx_buf_repl_thresh = pDevice->RxPacketDescCnt / 8;
4773 pUmDevice->rx_buf_repl_panic_thresh =
4774 pDevice->RxPacketDescCnt * 7 / 8;
4776 /* This limits the time spent in the ISR when the receiver */
4777 /* is in a steady state of being overrun. */
4778 pUmDevice->rx_buf_repl_isr_limit = pDevice->RxPacketDescCnt / 8;
4780 #if T3_JUMBO_RCV_RCB_ENTRY_COUNT
/* Clamp thresholds so they never exceed either ring's capacity. */
4781 if (pDevice->RxJumboDescCnt != 0) {
4782 if (pUmDevice->rx_buf_repl_thresh >=
4783 pDevice->RxJumboDescCnt) {
4785 pUmDevice->rx_buf_repl_thresh =
4786 pUmDevice->rx_buf_repl_panic_thresh =
4787 pDevice->RxJumboDescCnt - 1;
4789 if (pUmDevice->rx_buf_repl_thresh >=
4790 pDevice->RxStdDescCnt) {
4792 pUmDevice->rx_buf_repl_thresh =
4793 pUmDevice->rx_buf_repl_panic_thresh =
4794 pDevice->RxStdDescCnt - 1;
4799 return LM_STATUS_SUCCESS;
4803 MM_GetConfig(PLM_DEVICE_BLOCK pDevice)
4805 PUM_DEVICE_BLOCK pUmDevice = (PUM_DEVICE_BLOCK) pDevice;
4806 int index = pUmDevice->index;
4807 struct net_device *dev = pUmDevice->dev;
4809 if (index >= MAX_UNITS)
4810 return LM_STATUS_SUCCESS;
4812 #if LINUX_KERNEL_VERSION < 0x0020609
4814 bcm5700_validate_param_range(pUmDevice, &auto_speed[index], "auto_speed",
4816 if (auto_speed[index] == 0)
4817 pDevice->DisableAutoNeg = TRUE;
4819 pDevice->DisableAutoNeg = FALSE;
4821 if (line_speed[index] == 0) {
4822 pDevice->RequestedLineSpeed = LM_LINE_SPEED_AUTO;
4823 pDevice->DisableAutoNeg = FALSE;
4826 bcm5700_validate_param_range(pUmDevice, &full_duplex[index],
4827 "full_duplex", 0, 1, 1);
4828 if (full_duplex[index]) {
4829 pDevice->RequestedDuplexMode = LM_DUPLEX_MODE_FULL;
4832 pDevice->RequestedDuplexMode = LM_DUPLEX_MODE_HALF;
4835 if (line_speed[index] == 1000) {
4836 pDevice->RequestedLineSpeed = LM_LINE_SPEED_1000MBPS;
4837 if (pDevice->PhyFlags & PHY_NO_GIGABIT) {
4838 pDevice->RequestedLineSpeed =
4839 LM_LINE_SPEED_100MBPS;
4840 printk(KERN_WARNING "%s-%d: Invalid line_speed parameter (1000), using 100\n", bcm5700_driver, index);
4843 if ((pDevice->TbiFlags & ENABLE_TBI_FLAG) &&
4844 !full_duplex[index]) {
4845 printk(KERN_WARNING "%s-%d: Invalid full_duplex parameter (0) for fiber, using 1\n", bcm5700_driver, index);
4846 pDevice->RequestedDuplexMode =
4847 LM_DUPLEX_MODE_FULL;
4850 if (!(pDevice->TbiFlags & ENABLE_TBI_FLAG) &&
4851 !auto_speed[index] && !(pDevice->PhyFlags & PHY_IS_FIBER) ) {
4852 printk(KERN_WARNING "%s-%d: Invalid auto_speed parameter (0) for copper, using 1\n", bcm5700_driver, index);
4853 pDevice->DisableAutoNeg = FALSE;
4857 else if ((pDevice->TbiFlags & ENABLE_TBI_FLAG) ||
4858 (pDevice->PhyFlags & PHY_IS_FIBER)){
4859 pDevice->RequestedLineSpeed = LM_LINE_SPEED_AUTO;
4860 pDevice->RequestedDuplexMode = LM_DUPLEX_MODE_FULL;
4861 pDevice->DisableAutoNeg = FALSE;
4862 printk(KERN_WARNING "%s-%d: Invalid line_speed parameter (%d), using auto\n", bcm5700_driver, index, line_speed[index]);
4864 else if (line_speed[index] == 100) {
4866 pDevice->RequestedLineSpeed = LM_LINE_SPEED_100MBPS;
4868 else if (line_speed[index] == 10) {
4870 pDevice->RequestedLineSpeed = LM_LINE_SPEED_10MBPS;
4873 pDevice->RequestedLineSpeed = LM_LINE_SPEED_AUTO;
4874 pDevice->DisableAutoNeg = FALSE;
4875 printk(KERN_WARNING "%s-%d: Invalid line_speed parameter (%d), using 0\n", bcm5700_driver, index, line_speed[index]);
4880 #endif /* LINUX_KERNEL_VERSION */
4882 /* This is an unmanageable switch nic and will have link problems if
4885 if(pDevice->SubsystemVendorId==0x103c && pDevice->SubsystemId==0x3226)
4887 if(pDevice->RequestedLineSpeed != LM_LINE_SPEED_AUTO)
4889 printk(KERN_WARNING "%s-%d: Invalid line_speed parameter (%d), using 0\n",
4890 bcm5700_driver, index, line_speed[index]);
4892 pDevice->RequestedLineSpeed = LM_LINE_SPEED_AUTO;
4893 pDevice->DisableAutoNeg = FALSE;
4896 #if LINUX_KERNEL_VERSION < 0x0020609
4898 pDevice->FlowControlCap = 0;
4899 bcm5700_validate_param_range(pUmDevice, &rx_flow_control[index],
4900 "rx_flow_control", 0, 1, 0);
4901 if (rx_flow_control[index] != 0) {
4902 pDevice->FlowControlCap |= LM_FLOW_CONTROL_RECEIVE_PAUSE;
4904 bcm5700_validate_param_range(pUmDevice, &tx_flow_control[index],
4905 "tx_flow_control", 0, 1, 0);
4906 if (tx_flow_control[index] != 0) {
4907 pDevice->FlowControlCap |= LM_FLOW_CONTROL_TRANSMIT_PAUSE;
4909 bcm5700_validate_param_range(pUmDevice, &auto_flow_control[index],
4910 "auto_flow_control", 0, 1, 0);
4911 if (auto_flow_control[index] != 0) {
4912 if (pDevice->DisableAutoNeg == FALSE) {
4914 pDevice->FlowControlCap |= LM_FLOW_CONTROL_AUTO_PAUSE;
4915 if ((tx_flow_control[index] == 0) &&
4916 (rx_flow_control[index] == 0)) {
4918 pDevice->FlowControlCap |=
4919 LM_FLOW_CONTROL_TRANSMIT_PAUSE |
4920 LM_FLOW_CONTROL_RECEIVE_PAUSE;
4925 if (dev->mtu > 1500) {
4927 if (T3_ASIC_5714_FAMILY(pDevice->ChipRevId) &&
4928 (dev->features & NETIF_F_TSO)) {
4929 dev->features &= ~NETIF_F_TSO;
4930 printk(KERN_ALERT "%s: TSO previously enabled. Jumbo Frames and TSO cannot simultaneously be enabled. Jumbo Frames enabled. TSO disabled.\n", dev->name);
4933 pDevice->RxMtu = dev->mtu + 14;
4936 if ((T3_ASIC_REV(pDevice->ChipRevId) != T3_ASIC_REV_5700) &&
4937 !(pDevice->Flags & BCM5788_FLAG)) {
4938 pDevice->Flags |= USE_TAGGED_STATUS_FLAG;
4939 pUmDevice->timer_interval = HZ;
4940 if ((T3_ASIC_REV(pDevice->ChipRevId) == T3_ASIC_REV_5703) &&
4941 (pDevice->TbiFlags & ENABLE_TBI_FLAG)) {
4942 pUmDevice->timer_interval = HZ/4;
4946 pUmDevice->timer_interval = HZ/10;
4949 bcm5700_validate_param_range(pUmDevice, &tx_pkt_desc_cnt[index],
4950 "tx_pkt_desc_cnt", 1, MAX_TX_PACKET_DESC_COUNT-1, TX_DESC_CNT);
4951 pDevice->TxPacketDescCnt = tx_pkt_desc_cnt[index];
4952 bcm5700_validate_param_range(pUmDevice, &rx_std_desc_cnt[index],
4953 "rx_std_desc_cnt", 1, T3_STD_RCV_RCB_ENTRY_COUNT-1,
4955 pDevice->RxStdDescCnt = rx_std_desc_cnt[index];
4957 #if T3_JUMBO_RCV_RCB_ENTRY_COUNT
4958 bcm5700_validate_param_range(pUmDevice, &rx_jumbo_desc_cnt[index],
4959 "rx_jumbo_desc_cnt", 1, T3_JUMBO_RCV_RCB_ENTRY_COUNT-1,
4962 if (mtu[index] <= 1514)
4963 pDevice->RxJumboDescCnt = 0;
4964 else if(!T3_ASIC_IS_5705_BEYOND(pDevice->ChipRevId)){
4965 pDevice->RxJumboDescCnt = rx_jumbo_desc_cnt[index];
4970 bcm5700_validate_param_range(pUmDevice, &adaptive_coalesce[index],
4971 "adaptive_coalesce", 0, 1, 1);
4972 #ifdef BCM_NAPI_RXPOLL
4973 if (adaptive_coalesce[index]) {
4974 printk(KERN_WARNING "%s-%d: adaptive_coalesce not used in NAPI mode\n", bcm5700_driver, index);
4975 adaptive_coalesce[index] = 0;
4979 pUmDevice->adaptive_coalesce = adaptive_coalesce[index];
4980 if (!pUmDevice->adaptive_coalesce) {
4981 bcm5700_validate_param_range(pUmDevice,
4982 &rx_coalesce_ticks[index], "rx_coalesce_ticks", 0,
4983 MAX_RX_COALESCING_TICKS, RX_COAL_TK);
4984 if ((rx_coalesce_ticks[index] == 0) &&
4985 (rx_max_coalesce_frames[index] == 0)) {
4987 printk(KERN_WARNING "%s-%d: Conflicting rx_coalesce_ticks (0) and rx_max_coalesce_frames (0) parameters, using %d and %d respectively\n",
4988 bcm5700_driver, index, RX_COAL_TK, RX_COAL_FM);
4990 rx_coalesce_ticks[index] = RX_COAL_TK;
4991 rx_max_coalesce_frames[index] = RX_COAL_FM;
4993 pDevice->RxCoalescingTicks = pUmDevice->rx_curr_coalesce_ticks =
4994 rx_coalesce_ticks[index];
4995 #ifdef BCM_NAPI_RXPOLL
4996 pDevice->RxCoalescingTicksDuringInt = rx_coalesce_ticks[index];
4999 bcm5700_validate_param_range(pUmDevice,
5000 &rx_max_coalesce_frames[index],
5001 "rx_max_coalesce_frames", 0,
5002 MAX_RX_MAX_COALESCED_FRAMES, RX_COAL_FM);
5004 pDevice->RxMaxCoalescedFrames =
5005 pUmDevice->rx_curr_coalesce_frames =
5006 rx_max_coalesce_frames[index];
5007 #ifdef BCM_NAPI_RXPOLL
5008 pDevice->RxMaxCoalescedFramesDuringInt =
5009 rx_max_coalesce_frames[index];
5012 bcm5700_validate_param_range(pUmDevice,
5013 &tx_coalesce_ticks[index], "tx_coalesce_ticks", 0,
5014 MAX_TX_COALESCING_TICKS, TX_COAL_TK);
5015 if ((tx_coalesce_ticks[index] == 0) &&
5016 (tx_max_coalesce_frames[index] == 0)) {
5018 printk(KERN_WARNING "%s-%d: Conflicting tx_coalesce_ticks (0) and tx_max_coalesce_frames (0) parameters, using %d and %d respectively\n",
5019 bcm5700_driver, index, TX_COAL_TK, TX_COAL_FM);
5021 tx_coalesce_ticks[index] = TX_COAL_TK;
5022 tx_max_coalesce_frames[index] = TX_COAL_FM;
5024 pDevice->TxCoalescingTicks = tx_coalesce_ticks[index];
5025 bcm5700_validate_param_range(pUmDevice,
5026 &tx_max_coalesce_frames[index],
5027 "tx_max_coalesce_frames", 0,
5028 MAX_TX_MAX_COALESCED_FRAMES, TX_COAL_FM);
5029 pDevice->TxMaxCoalescedFrames = tx_max_coalesce_frames[index];
5030 pUmDevice->tx_curr_coalesce_frames =
5031 pDevice->TxMaxCoalescedFrames;
5033 bcm5700_validate_param_range(pUmDevice,
5034 &stats_coalesce_ticks[index], "stats_coalesce_ticks",
5035 0, MAX_STATS_COALESCING_TICKS, ST_COAL_TK);
5036 if (adaptive_coalesce[index]) {
5037 printk(KERN_WARNING "%s-%d: Invalid stats_coalesce_ticks parameter set with with adaptive_coalesce parameter. Using adaptive_coalesce.\n", bcm5700_driver, index);
5039 if ((stats_coalesce_ticks[index] > 0) &&
5040 (stats_coalesce_ticks[index] < 100)) {
5041 printk(KERN_WARNING "%s-%d: Invalid stats_coalesce_ticks parameter (%u), using 100\n", bcm5700_driver, index, (unsigned int) stats_coalesce_ticks[index]);
5042 stats_coalesce_ticks[index] = 100;
5043 pDevice->StatsCoalescingTicks = stats_coalesce_ticks[index];
5044 pDevice->StatsCoalescingTicks = stats_coalesce_ticks[index];
5049 pUmDevice->rx_curr_coalesce_frames = RX_COAL_FM;
5050 pUmDevice->rx_curr_coalesce_ticks = RX_COAL_TK;
5051 pUmDevice->tx_curr_coalesce_frames = TX_COAL_FM;
5055 if (T3_ASIC_IS_5705_BEYOND(pDevice->ChipRevId)) {
5056 unsigned int tmpvar;
5058 tmpvar = pDevice->StatsCoalescingTicks / BCM_TIMER_GRANULARITY;
5061 * If the result is zero, the request is too demanding.
5067 pDevice->StatsCoalescingTicks = tmpvar * BCM_TIMER_GRANULARITY;
5069 pUmDevice->statstimer_interval = tmpvar;
5073 bcm5700_validate_param_range(pUmDevice, &enable_wol[index],
5074 "enable_wol", 0, 1, 0);
5075 if (enable_wol[index]) {
5076 pDevice->WakeUpModeCap = LM_WAKE_UP_MODE_MAGIC_PACKET;
5077 pDevice->WakeUpMode = LM_WAKE_UP_MODE_MAGIC_PACKET;
5080 #ifdef INCLUDE_TBI_SUPPORT
5081 if (pDevice->TbiFlags & ENABLE_TBI_FLAG) {
5082 if ((T3_ASIC_REV(pDevice->ChipRevId) == T3_ASIC_REV_5704) ||
5083 (T3_ASIC_REV(pDevice->ChipRevId) == T3_ASIC_REV_5703)) {
5084 /* just poll since we have hardware autoneg. in 5704 */
5085 pDevice->TbiFlags |= TBI_PURE_POLLING_FLAG;
5088 pDevice->TbiFlags |= TBI_POLLING_INTR_FLAG;
5092 bcm5700_validate_param_range(pUmDevice, &scatter_gather[index],
5093 "scatter_gather", 0, 1, 1);
5094 bcm5700_validate_param_range(pUmDevice, &tx_checksum[index],
5095 "tx_checksum", 0, 1, 1);
5096 bcm5700_validate_param_range(pUmDevice, &rx_checksum[index],
5097 "rx_checksum", 0, 1, 1);
5098 if (!(pDevice->TaskOffloadCap & LM_TASK_OFFLOAD_TX_TCP_CHECKSUM)) {
5099 if (tx_checksum[index] || rx_checksum[index]) {
5101 pDevice->TaskToOffload = LM_TASK_OFFLOAD_NONE;
5102 printk(KERN_WARNING "%s-%d: Checksum offload not available on this NIC\n", bcm5700_driver, index);
5106 if (rx_checksum[index]) {
5107 pDevice->TaskToOffload |=
5108 LM_TASK_OFFLOAD_RX_TCP_CHECKSUM |
5109 LM_TASK_OFFLOAD_RX_UDP_CHECKSUM;
5111 if (tx_checksum[index]) {
5112 pDevice->TaskToOffload |=
5113 LM_TASK_OFFLOAD_TX_TCP_CHECKSUM |
5114 LM_TASK_OFFLOAD_TX_UDP_CHECKSUM;
5115 pDevice->Flags |= NO_TX_PSEUDO_HDR_CSUM_FLAG;
5119 bcm5700_validate_param_range(pUmDevice, &enable_tso[index],
5120 "enable_tso", 0, 1, 1);
5122 /* Always enable TSO firmware if supported */
5123 /* This way we can turn it on or off on the fly */
5124 if (pDevice->TaskOffloadCap & LM_TASK_OFFLOAD_TCP_SEGMENTATION)
5126 pDevice->TaskToOffload |=
5127 LM_TASK_OFFLOAD_TCP_SEGMENTATION;
5129 if (enable_tso[index] &&
5130 !(pDevice->TaskToOffload & LM_TASK_OFFLOAD_TCP_SEGMENTATION))
5132 printk(KERN_WARNING "%s-%d: TSO not available on this NIC\n", bcm5700_driver, index);
5136 bcm5700_validate_param_range(pUmDevice, &vlan_tag_mode[index],
5137 "vlan_strip_mode", 0, 2, 0);
5138 pUmDevice->vlan_tag_mode = vlan_tag_mode[index];
5140 pUmDevice->vlan_tag_mode = VLAN_TAG_MODE_NORMAL_STRIP;
5143 #endif /* LINUX_KERNEL_VERSION */
5145 #ifdef BCM_NIC_SEND_BD
5146 bcm5700_validate_param_range(pUmDevice, &nic_tx_bd[index], "nic_tx_bd",
5148 if (nic_tx_bd[index])
5149 pDevice->Flags |= NIC_SEND_BD_FLAG;
5150 if ((pDevice->Flags & ENABLE_PCIX_FIX_FLAG) ||
5151 (T3_ASIC_REV(pDevice->ChipRevId) == T3_ASIC_REV_5705)) {
5152 if (pDevice->Flags & NIC_SEND_BD_FLAG) {
5153 pDevice->Flags &= ~NIC_SEND_BD_FLAG;
5154 printk(KERN_WARNING "%s-%d: Nic Send BDs not available on this NIC or not possible on this system\n", bcm5700_driver, index);
5158 #if defined(CONFIG_PCI_MSI) || defined(CONFIG_PCI_USE_VECTOR)
5159 bcm5700_validate_param_range(pUmDevice, &disable_msi[pUmDevice->index],
5160 "disable_msi", 0, 1, 0);
5163 bcm5700_validate_param_range(pUmDevice, &delay_link[index],
5164 "delay_link", 0, 1, 0);
5166 bcm5700_validate_param_range(pUmDevice, &disable_d3hot[index],
5167 "disable_d3hot", 0, 1, 0);
5168 if (disable_d3hot[index]) {
5171 if (enable_wol[index]) {
5172 pDevice->WakeUpModeCap = LM_WAKE_UP_MODE_NONE;
5173 pDevice->WakeUpMode = LM_WAKE_UP_MODE_NONE;
5174 printk(KERN_WARNING "%s-%d: Wake-On-Lan disabled because D3Hot is disabled\n", bcm5700_driver, index);
5177 pDevice->Flags |= DISABLE_D3HOT_FLAG;
5180 return LM_STATUS_SUCCESS;
/*
 * MM_IndicateRxPackets - drain the LM core's RxPacketReceivedQ and hand
 * each received frame up to the Linux network stack.
 *
 * For every packet: unmap its RX DMA buffer, drop it (recycling the
 * descriptor) on hardware error or oversize, apply hardware RX checksum
 * results, then deliver via the NICE hook, VLAN acceleration, or
 * netif_receive_skb/netif_rx.  Finally a fresh skb is allocated so the
 * descriptor can be requeued to RxPacketFreeQ; on allocation failure the
 * descriptor is parked on rx_out_of_buf_q for later replenishment.
 *
 * Returns LM_STATUS_SUCCESS.
 *
 * NOTE(review): this listing has lost lines in extraction (closing braces,
 * else branches, #endif pairs); comments below describe only what is
 * visible here.
 */
5184 MM_IndicateRxPackets(PLM_DEVICE_BLOCK pDevice)
5186 PUM_DEVICE_BLOCK pUmDevice = (PUM_DEVICE_BLOCK) pDevice;
5188 PUM_PACKET pUmPacket;
5189 struct sk_buff *skb;
5191 int vlan_tag_size = 0;
/* When the NIC keeps the VLAN tag in the frame, the size check below must
 * allow for it (the assignment to vlan_tag_size was lost in extraction). */
5193 if (pDevice->ReceiveMask & LM_KEEP_VLAN_TAG)
/* Loop over every packet the core has marked as received. */
5197 pPacket = (PLM_PACKET)
5198 QQ_PopHead(&pDevice->RxPacketReceivedQ.Container);
5201 pUmPacket = (PUM_PACKET) pPacket;
5202 #if ! defined(NO_PCI_UNMAP)
/* The buffer was DMA-mapped at fill time; unmap before the CPU reads it. */
5203 pci_unmap_single(pUmDevice->pdev,
5204 pci_unmap_addr(pUmPacket, map[0]),
5205 pPacket->u.Rx.RxBufferSize,
5206 PCI_DMA_FROMDEVICE);
/* Drop on hardware receive error or when the frame exceeds the MTU
 * (plus VLAN tag allowance).  The skb is kept attached so the
 * descriptor can be recycled without a fresh allocation. */
5208 if ((pPacket->PacketStatus != LM_STATUS_SUCCESS) ||
5209 ((size = pPacket->PacketSize) >
5210 (pDevice->RxMtu + vlan_tag_size))) {
5214 QQ_PushTail(&pUmDevice->rx_out_of_buf_q.Container, pPacket);
5216 QQ_PushTail(&pDevice->RxPacketFreeQ.Container, pPacket);
5218 pUmDevice->rx_misc_errors++;
5221 skb = pUmPacket->skbuff;
5224 skb->protocol = eth_type_trans(skb, skb->dev);
5225 if (size > pDevice->RxMtu) {
5226 /* Make sure we have a valid VLAN tag */
/* Oversized frames are only acceptable if they carry an 802.1Q tag
 * (ethertype 0x8100); otherwise count and free them. */
5227 if (htons(skb->protocol) != 0x8100) {
5228 dev_kfree_skb_irq(skb);
5229 pUmDevice->rx_misc_errors++;
/* Hardware RX checksum: a ones-complement result of 0xffff means the
 * TCP/UDP checksum verified, so the stack can skip re-checking. */
5233 if ((pPacket->Flags & RCV_BD_FLAG_TCP_UDP_CHKSUM_FIELD) &&
5234 (pDevice->TaskToOffload &
5235 LM_TASK_OFFLOAD_RX_TCP_CHECKSUM)) {
5236 if (pPacket->u.Rx.TcpUdpChecksum == 0xffff) {
5238 skb->ip_summed = CHECKSUM_UNNECESSARY;
5240 pUmDevice->rx_good_chksum_count++;
5244 skb->ip_summed = CHECKSUM_NONE;
5245 pUmDevice->rx_bad_chksum_count++;
5249 skb->ip_summed = CHECKSUM_NONE;
/* NICE (Broadcom's intercept API): pass VLAN info via skb->cb and hand
 * the frame to the registered receive callback instead of the stack. */
5252 if( pUmDevice->nice_rx ) {
5253 vlan_tag_t *vlan_tag;
5255 vlan_tag = (vlan_tag_t *) &skb->cb[0];
5256 if (pPacket->Flags & RCV_BD_FLAG_VLAN_TAG) {
5257 vlan_tag->signature = 0x7777;
5258 vlan_tag->tag = pPacket->VlanTag;
5261 vlan_tag->signature = 0;
5263 pUmDevice->nice_rx(skb, pUmDevice->nice_ctx);
/* Hardware-accelerated VLAN receive when a vlan group is registered
 * and the BD carried a tag; NAPI vs. non-NAPI variants. */
5269 if (pUmDevice->vlgrp &&
5270 (pPacket->Flags & RCV_BD_FLAG_VLAN_TAG)) {
5272 #ifdef BCM_NAPI_RXPOLL
5273 vlan_hwaccel_receive_skb(skb, pUmDevice->vlgrp,
5276 vlan_hwaccel_rx(skb, pUmDevice->vlgrp,
5283 #ifdef BCM_NAPI_RXPOLL
5284 netif_receive_skb(skb);
5290 pUmDevice->dev->last_rx = jiffies;
/* Descriptor refill: the delivered skb now belongs to the stack, so a
 * new one must be allocated before the descriptor can be reused.
 * (The ATOMIC_ALLOC path that parks on rx_out_of_buf_q appears first;
 * intervening lines were lost in extraction.) */
5294 pUmPacket->skbuff = 0;
5295 QQ_PushTail(&pUmDevice->rx_out_of_buf_q.Container, pPacket);
/* +2 gives room for the alignment reserve below. */
5297 skb = dev_alloc_skb(pPacket->u.Rx.RxBufferSize + 2);
5299 pUmPacket->skbuff = 0;
5300 QQ_PushTail(&pUmDevice->rx_out_of_buf_q.Container, pPacket);
5303 pUmPacket->skbuff = skb;
5304 skb->dev = pUmDevice->dev;
/* Align the IP header (typical 2-byte NET_IP_ALIGN-style reserve). */
5305 skb_reserve(skb, pUmDevice->rx_buf_align);
5306 QQ_PushTail(&pDevice->RxPacketFreeQ.Container, pPacket);
5310 return LM_STATUS_SUCCESS;
/*
 * MM_CoalesceTxBuffer - linearize a fragmented TX skb into a single
 * contiguous buffer.
 *
 * Called by the LM core when a packet has more fragments than the send
 * ring can describe.  All existing DMA mappings (head + page fragments)
 * are torn down, then skb_copy() produces a flat copy; the original skb
 * is replaced and FragCount is reset to 1 (the kfree of the old skb was
 * lost in extraction).
 *
 * Returns LM_STATUS_SUCCESS on success, LM_STATUS_FAILURE if the atomic
 * skb_copy() allocation fails (in which case pUmPacket->skbuff is cleared).
 */
5314 MM_CoalesceTxBuffer(PLM_DEVICE_BLOCK pDevice, PLM_PACKET pPacket)
5316 PUM_PACKET pUmPacket = (PUM_PACKET) pPacket;
5317 struct sk_buff *skb = pUmPacket->skbuff;
5318 struct sk_buff *nskb;
5319 #if ! defined(NO_PCI_UNMAP)
5320 PUM_DEVICE_BLOCK pUmDevice = (PUM_DEVICE_BLOCK) pDevice;
/* Unmap the linear head... */
5322 pci_unmap_single(pUmDevice->pdev,
5323 pci_unmap_addr(pUmPacket, map[0]),
5324 pci_unmap_len(pUmPacket, map_len[0]),
/* ...and every page fragment (map[] slot i+1 tracks frag i). */
5330 for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) {
5331 pci_unmap_page(pUmDevice->pdev,
5332 pci_unmap_addr(pUmPacket, map[i + 1]),
5333 pci_unmap_len(pUmPacket, map_len[i + 1]),
/* GFP_ATOMIC: may be called from the transmit path / softirq context. */
5339 if ((nskb = skb_copy(skb, GFP_ATOMIC))) {
5340 pUmPacket->lm_packet.u.Tx.FragCount = 1;
5342 pUmPacket->skbuff = nskb;
5343 return LM_STATUS_SUCCESS;
5346 pUmPacket->skbuff = 0;
5347 return LM_STATUS_FAILURE;
5350 /* Returns 1 if not all buffers are allocated */
/*
 * replenish_rx_buffers - refill RX descriptors parked on rx_out_of_buf_q.
 *
 * Each parked descriptor either still holds an skb (reused directly) or
 * gets a fresh dev_alloc_skb(); refilled descriptors move back to the LM
 * core's RxPacketFreeQ and LM_QueueRxPackets() republishes them to the
 * hardware.  @max bounds the number of new allocations per call
 * (presumably 0 means unlimited — the alloc_cnt/queue_rx declarations
 * were lost in extraction; TODO confirm against the full source).
 */
5352 replenish_rx_buffers(PUM_DEVICE_BLOCK pUmDevice, int max)
5355 PUM_PACKET pUmPacket;
5356 PLM_DEVICE_BLOCK pDevice = (PLM_DEVICE_BLOCK) pUmDevice;
5357 struct sk_buff *skb;
5362 while ((pUmPacket = (PUM_PACKET)
5363 QQ_PopHead(&pUmDevice->rx_out_of_buf_q.Container)) != 0) {
5364 pPacket = (PLM_PACKET) pUmPacket;
5365 if (pUmPacket->skbuff) {
5366 /* reuse an old skb */
5367 QQ_PushTail(&pDevice->RxPacketFreeQ.Container, pPacket);
/* Allocation failed: push the descriptor back (head, to retry it first)
 * and stop — the system is out of atomic memory right now. */
5371 if ((skb = dev_alloc_skb(pPacket->u.Rx.RxBufferSize + 2)) == 0) {
5372 QQ_PushHead(&pUmDevice->rx_out_of_buf_q.Container,
5377 pUmPacket->skbuff = skb;
5378 skb->dev = pUmDevice->dev;
/* Same IP-header alignment reserve as the receive path uses. */
5379 skb_reserve(skb, pUmDevice->rx_buf_align);
5380 QQ_PushTail(&pDevice->RxPacketFreeQ.Container, pPacket);
5384 if (alloc_cnt >= max)
/* Republish refilled descriptors to the hardware RX rings. */
5388 if (queue_rx || pDevice->QueueAgain) {
5389 LM_QueueRxPackets(pDevice);
/*
 * MM_IndicateTxPackets - reclaim transmit packets the hardware has
 * completed.
 *
 * Pops every descriptor from TxPacketXmittedQ, unmaps its DMA ranges
 * (linear head plus page fragments), frees the skb with
 * dev_kfree_skb_irq() (interrupt context), and returns the descriptor to
 * TxPacketFreeQ.  If the queue had been stopped for lack of descriptors,
 * it is woken once at least half of them are free again — hysteresis to
 * avoid start/stop thrash.
 *
 * Returns LM_STATUS_SUCCESS.
 */
5395 MM_IndicateTxPackets(PLM_DEVICE_BLOCK pDevice)
5397 PUM_DEVICE_BLOCK pUmDevice = (PUM_DEVICE_BLOCK) pDevice;
5399 PUM_PACKET pUmPacket;
5400 struct sk_buff *skb;
5401 #if ! defined(NO_PCI_UNMAP) && MAX_SKB_FRAGS
5406 pPacket = (PLM_PACKET)
5407 QQ_PopHead(&pDevice->TxPacketXmittedQ.Container);
5410 pUmPacket = (PUM_PACKET) pPacket;
5411 skb = pUmPacket->skbuff;
5412 #if ! defined(NO_PCI_UNMAP)
5413 pci_unmap_single(pUmDevice->pdev,
5414 pci_unmap_addr(pUmPacket, map[0]),
5415 pci_unmap_len(pUmPacket, map_len[0]),
/* map[] slot i+1 tracks skb fragment i, mirroring the mapping order. */
5418 for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) {
5419 pci_unmap_page(pUmDevice->pdev,
5420 pci_unmap_addr(pUmPacket, map[i + 1]),
5421 pci_unmap_len(pUmPacket, map_len[i + 1]),
5426 dev_kfree_skb_irq(skb);
5427 pUmPacket->skbuff = 0;
5428 QQ_PushTail(&pDevice->TxPacketFreeQ.Container, pPacket);
/* Wake the stalled netdev queue only after >= half the TX descriptors
 * are free, so we do not bounce between full and not-full. */
5430 if (pUmDevice->tx_full) {
5431 if (QQ_GetEntryCnt(&pDevice->TxPacketFreeQ.Container) >=
5432 (pDevice->TxPacketDescCnt >> 1)) {
5434 pUmDevice->tx_full = 0;
5435 netif_wake_queue(pUmDevice->dev);
5438 return LM_STATUS_SUCCESS;
/*
 * MM_IndicateStatus - report a link status change from the LM core to
 * Linux.
 *
 * Updates the netdev carrier state (unless suspended) and prints a
 * link-up/down message.  On link-up the message continues on the same
 * line with speed, duplex, and the negotiated RX/TX flow-control
 * settings.  No-op (success) if the interface has not been opened.
 *
 * Returns LM_STATUS_SUCCESS.
 */
5442 MM_IndicateStatus(PLM_DEVICE_BLOCK pDevice, LM_STATUS Status)
5444 PUM_DEVICE_BLOCK pUmDevice = (PUM_DEVICE_BLOCK) pDevice;
5445 struct net_device *dev = pUmDevice->dev;
5446 LM_FLOW_CONTROL flow_control;
5449 if (!pUmDevice->opened)
5450 return LM_STATUS_SUCCESS;
5452 if (!pUmDevice->suspended) {
5453 if (Status == LM_STATUS_LINK_DOWN) {
5454 netif_carrier_off(dev);
5456 else if (Status == LM_STATUS_LINK_ACTIVE) {
5457 netif_carrier_on(dev);
/* delayed_link_ind: a link event deferred from probe/open time — the
 * message wording differs ("DOWN"/"UP") so the two paths can be told
 * apart in the log. */
5461 if (pUmDevice->delayed_link_ind > 0) {
5462 pUmDevice->delayed_link_ind = 0;
5463 if (Status == LM_STATUS_LINK_DOWN) {
5464 printk(KERN_ERR "%s: %s NIC Link is DOWN\n", bcm5700_driver, dev->name);
5466 else if (Status == LM_STATUS_LINK_ACTIVE) {
5467 printk(KERN_INFO "%s: %s NIC Link is UP, ", bcm5700_driver, dev->name);
5471 if (Status == LM_STATUS_LINK_DOWN) {
5472 printk(KERN_ERR "%s: %s NIC Link is Down\n", bcm5700_driver, dev->name);
5474 else if (Status == LM_STATUS_LINK_ACTIVE) {
5475 printk(KERN_INFO "%s: %s NIC Link is Up, ", bcm5700_driver, dev->name);
/* Continuation printks below extend the "Link is Up, " line above;
 * the `speed` variable's declaration/assignments (1000/100/10) were
 * partially lost in extraction. */
5479 if (Status == LM_STATUS_LINK_ACTIVE) {
5480 if (pDevice->LineSpeed == LM_LINE_SPEED_1000MBPS)
5482 else if (pDevice->LineSpeed == LM_LINE_SPEED_100MBPS)
5484 else if (pDevice->LineSpeed == LM_LINE_SPEED_10MBPS)
5487 printk("%d Mbps ", speed);
5489 if (pDevice->DuplexMode == LM_DUPLEX_MODE_FULL)
5490 printk("full duplex");
5492 printk("half duplex");
/* Report which pause directions were negotiated, if any. */
5494 flow_control = pDevice->FlowControl &
5495 (LM_FLOW_CONTROL_RECEIVE_PAUSE |
5496 LM_FLOW_CONTROL_TRANSMIT_PAUSE);
5498 if (flow_control & LM_FLOW_CONTROL_RECEIVE_PAUSE) {
5499 printk(", receive ");
5500 if (flow_control & LM_FLOW_CONTROL_TRANSMIT_PAUSE)
5501 printk("& transmit ");
5504 printk(", transmit ");
5506 printk("flow control ON");
5510 return LM_STATUS_SUCCESS;
/*
 * MM_UnmapRxDma - tear down the streaming DMA mapping of one RX buffer.
 *
 * No-op when NO_PCI_UNMAP is defined (platforms where unmapping is
 * unnecessary) or when the packet has no skb attached (nothing mapped).
 */
5514 MM_UnmapRxDma(LM_DEVICE_BLOCK *pDevice, LM_PACKET *pPacket)
5516 #if ! defined(NO_PCI_UNMAP)
5517 UM_DEVICE_BLOCK *pUmDevice = (UM_DEVICE_BLOCK *) pDevice;
5518 UM_PACKET *pUmPacket = (UM_PACKET *) pPacket;
5520 if (!pUmPacket->skbuff)
5523 pci_unmap_single(pUmDevice->pdev,
5524 pci_unmap_addr(pUmPacket, map[0]),
5525 pPacket->u.Rx.RxBufferSize,
5526 PCI_DMA_FROMDEVICE);
/*
 * MM_FreeRxBuffer - release the skb attached to an RX packet descriptor.
 *
 * The DMA address must already be unmapped (see MM_UnmapRxDma).  Clears
 * pUmPacket->skbuff so the descriptor can be refilled later.  The actual
 * skb-free call between the if and the clear was lost in extraction.
 *
 * Returns LM_STATUS_SUCCESS.
 */
5531 MM_FreeRxBuffer(PLM_DEVICE_BLOCK pDevice, PLM_PACKET pPacket)
5533 PUM_PACKET pUmPacket;
5534 struct sk_buff *skb;
5537 return LM_STATUS_SUCCESS;
5538 pUmPacket = (PUM_PACKET) pPacket;
5539 if ((skb = pUmPacket->skbuff)) {
5540 /* DMA address already unmapped */
5543 pUmPacket->skbuff = 0;
5544 return LM_STATUS_SUCCESS;
/*
 * MM_Sleep - sleep for approximately @msec milliseconds in process
 * context.
 *
 * Uses an interruptible schedule_timeout(); returns LM_STATUS_FAILURE if
 * the sleep was cut short by a wake-up or a pending signal, otherwise
 * LM_STATUS_SUCCESS.  Must not be called from atomic context.
 */
5548 MM_Sleep(LM_DEVICE_BLOCK *pDevice, LM_UINT32 msec)
5550 current->state = TASK_INTERRUPTIBLE;
/* HZ * msec / 1000 converts milliseconds to jiffies; a nonzero return
 * means the timeout did not fully elapse. */
5551 if (schedule_timeout(HZ * msec / 1000) != 0) {
5552 return LM_STATUS_FAILURE;
5554 if (signal_pending(current))
5555 return LM_STATUS_FAILURE;
5557 return LM_STATUS_SUCCESS;
/*
 * bcm5700_shutdown - quiesce the device for close/suspend.
 *
 * Masks interrupts, drops the carrier, kills the deferred-work tasklet,
 * waits for any in-flight poll to finish, marks the LM core
 * uninitialized, and frees the RX buffers still parked on the
 * out-of-buffer queue.  (The LM_Halt/LM_Reset call between the poll wait
 * and InitDone = 0 was lost in extraction.)
 */
5561 bcm5700_shutdown(UM_DEVICE_BLOCK *pUmDevice)
5563 LM_DEVICE_BLOCK *pDevice = (LM_DEVICE_BLOCK *) pUmDevice;
5565 bcm5700_intr_off(pUmDevice);
5566 netif_carrier_off(pUmDevice->dev);
5568 tasklet_kill(&pUmDevice->tasklet);
5570 bcm5700_poll_wait(pUmDevice);
5574 pDevice->InitDone = 0;
5575 bcm5700_free_remaining_rx_bufs(pUmDevice);
/*
 * bcm5700_free_remaining_rx_bufs - free every RX buffer still parked on
 * rx_out_of_buf_q.
 *
 * For each parked descriptor: unmap its DMA buffer, free its skb, and
 * return the now-empty descriptor to the LM core's RxPacketFreeQ.
 * Called during shutdown after the hardware has been quiesced.
 */
5579 bcm5700_free_remaining_rx_bufs(UM_DEVICE_BLOCK *pUmDevice)
5581 LM_DEVICE_BLOCK *pDevice = &pUmDevice->lm_dev;
5582 UM_PACKET *pUmPacket;
/* Snapshot the count first: each iteration pops one entry, so iterating
 * by count drains exactly the packets present at entry. */
5585 cnt = QQ_GetEntryCnt(&pUmDevice->rx_out_of_buf_q.Container);
5586 for (i = 0; i < cnt; i++) {
5588 QQ_PopHead(&pUmDevice->rx_out_of_buf_q.Container))
5591 MM_UnmapRxDma(pDevice, (LM_PACKET *) pUmPacket);
5592 MM_FreeRxBuffer(pDevice, &pUmPacket->lm_packet);
5593 QQ_PushTail(&pDevice->RxPacketFreeQ.Container,
/*
 * bcm5700_validate_param_range - clamp a module parameter into [min, max].
 *
 * If *param is outside the range, a warning naming the parameter is
 * printed and the value is replaced with @deflt (the `*param = deflt;`
 * assignment after the printk was lost in extraction).  The unsigned
 * casts make a negative *param compare as out-of-range in one test.
 */
5600 bcm5700_validate_param_range(UM_DEVICE_BLOCK *pUmDevice, int *param,
5601 char *param_name, int min, int max, int deflt)
5603 if (((unsigned int) *param < (unsigned int) min) ||
5604 ((unsigned int) *param > (unsigned int) max)) {
5606 printk(KERN_WARNING "%s-%d: Invalid %s parameter (%u), using %u\n", bcm5700_driver, pUmDevice->index, param_name, (unsigned int) *param, (unsigned int) deflt);
/*
 * bcm5700_find_peer - locate the sibling port of a dual-port 5704 NIC.
 *
 * A BCM5704 exposes two PCI functions in the same slot.  Walks the
 * driver's device list (root_tigon3_dev / next_module chain) for another
 * device on the same PCI bus and slot as @dev.  Only meaningful for
 * 5704-revision ASICs.  Returns the peer net_device, or presumably NULL
 * when there is none (the result variable's declaration/return were lost
 * in extraction — TODO confirm against the full source).
 */
5612 bcm5700_find_peer(struct net_device *dev)
5614 struct net_device *tmp_dev;
5615 UM_DEVICE_BLOCK *pUmDevice, *pUmTmp;
5616 LM_DEVICE_BLOCK *pDevice;
5619 pUmDevice = (UM_DEVICE_BLOCK *) dev->priv;
5620 pDevice = &pUmDevice->lm_dev;
5621 if (T3_ASIC_REV(pDevice->ChipRevId) == T3_ASIC_REV_5704) {
5622 tmp_dev = root_tigon3_dev;
5624 pUmTmp = (PUM_DEVICE_BLOCK) tmp_dev->priv;
/* Peer = a different net_device in the same PCI bus/slot (other
 * function of the same physical chip). */
5625 if ((tmp_dev != dev) &&
5626 (pUmDevice->pdev->bus->number ==
5627 pUmTmp->pdev->bus->number) &&
5628 PCI_SLOT(pUmDevice->pdev->devfn) ==
5629 PCI_SLOT(pUmTmp->pdev->devfn)) {
5633 tmp_dev = pUmTmp->next_module;
/*
 * MM_FindPeerDev - LM-core wrapper around bcm5700_find_peer().
 *
 * Translates the peer net_device (if any) back into its LM_DEVICE_BLOCK
 * via dev->priv for the hardware-independent core.  The NULL check on
 * peer_dev between the call and the return was lost in extraction.
 */
5640 MM_FindPeerDev(LM_DEVICE_BLOCK *pDevice)
5642 UM_DEVICE_BLOCK *pUmDevice = (UM_DEVICE_BLOCK *) pDevice;
5643 struct net_device *dev = pUmDevice->dev;
5644 struct net_device *peer_dev;
5646 peer_dev = bcm5700_find_peer(dev);
5649 return ((LM_DEVICE_BLOCK *) peer_dev->priv);
/*
 * MM_FindCapability - LM-core wrapper around pci_find_capability().
 *
 * Returns the config-space offset of the requested PCI capability for
 * this device, or 0 if the capability is not present.
 */
5652 int MM_FindCapability(LM_DEVICE_BLOCK *pDevice, int capability)
5654 UM_DEVICE_BLOCK *pUmDevice = (UM_DEVICE_BLOCK *) pDevice;
5655 return (pci_find_capability(pUmDevice->pdev, capability));
5658 #if defined(HAVE_POLL_CONTROLLER)||defined(CONFIG_NET_POLL_CONTROLLER)
5660 poll_bcm5700(struct net_device *dev)
5662 UM_DEVICE_BLOCK *pUmDevice = dev->priv;
5664 #if defined(RED_HAT_LINUX_KERNEL) && (LINUX_VERSION_CODE < 0x020605)
5666 bcm5700_interrupt(pUmDevice->pdev->irq, dev, NULL);
5667 #ifdef BCM_NAPI_RXPOLL
5668 if (dev->poll_list.prev) {
5671 bcm5700_poll(dev, &budget);
5678 disable_irq(pUmDevice->pdev->irq);
5679 bcm5700_interrupt(pUmDevice->pdev->irq, dev, NULL);
5680 enable_irq(pUmDevice->pdev->irq);