1 /******************************************************************************/
3 /* Broadcom BCM5700 Linux Network Driver, Copyright (c) 2000 - 2006 Broadcom */
5 /* All rights reserved. */
7 /* This program is free software; you can redistribute it and/or modify */
8 /* it under the terms of the GNU General Public License as published by */
9 /* the Free Software Foundation, located in the file LICENSE. */
11 /******************************************************************************/
/* Driver identity strings: module name, release version and release date.
   NOTE(review): their consumers (probe banner / ethtool) are outside this
   chunk — confirm before relying on exact formatting. */
14 char bcm5700_driver[] = "bcm5700";
15 char bcm5700_version[] = "8.3.17b";
16 char bcm5700_date[] = "(02/21/06)";
21 /* A few user-configurable values. */
24 /* Used to pass the full-duplex flag, etc. */
/* Per-adapter link configuration, one slot per supported unit (the
   MODULE_PARM/module_param_array declarations later in this file expose
   these as module parameters).  line_speed 0 together with auto_speed 1
   defaults every port to autonegotiation; the flow-control flags default
   to enabled.  NOTE(review): defaults assume MAX_UNITS == 16 — the literal
   initializer lists have exactly 16 entries. */
25 static int line_speed[MAX_UNITS] = {0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0};
26 static int auto_speed[MAX_UNITS] = {1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1};
27 static int full_duplex[MAX_UNITS] = {1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1};
28 static int rx_flow_control[MAX_UNITS] = {1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1};
29 static int tx_flow_control[MAX_UNITS] = {1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1};
30 static int auto_flow_control[MAX_UNITS] = {1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1};
31 #if T3_JUMBO_RCV_RCB_ENTRY_COUNT
32 static int mtu[MAX_UNITS] = {1500,1500,1500,1500,1500,1500,1500,1500,1500,1500,1500,1500,1500,1500,1500,1500}; /* Jumbo MTU for interfaces. */
34 static int tx_checksum[MAX_UNITS] = {1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1};
35 static int rx_checksum[MAX_UNITS] = {1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1};
36 static int scatter_gather[MAX_UNITS] = {1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1};
38 #define TX_DESC_CNT DEFAULT_TX_PACKET_DESC_COUNT
39 static unsigned int tx_pkt_desc_cnt[MAX_UNITS] =
40 {TX_DESC_CNT,TX_DESC_CNT,TX_DESC_CNT,TX_DESC_CNT,TX_DESC_CNT,
41 TX_DESC_CNT,TX_DESC_CNT,TX_DESC_CNT,TX_DESC_CNT,TX_DESC_CNT,
42 TX_DESC_CNT,TX_DESC_CNT,TX_DESC_CNT,TX_DESC_CNT,TX_DESC_CNT,
45 #define RX_DESC_CNT DEFAULT_STD_RCV_DESC_COUNT
46 static unsigned int rx_std_desc_cnt[MAX_UNITS] =
47 {RX_DESC_CNT,RX_DESC_CNT,RX_DESC_CNT,RX_DESC_CNT,RX_DESC_CNT,
48 RX_DESC_CNT,RX_DESC_CNT,RX_DESC_CNT,RX_DESC_CNT,RX_DESC_CNT,
49 RX_DESC_CNT,RX_DESC_CNT,RX_DESC_CNT,RX_DESC_CNT,RX_DESC_CNT,
52 #if T3_JUMBO_RCV_RCB_ENTRY_COUNT
53 #define JBO_DESC_CNT DEFAULT_JUMBO_RCV_DESC_COUNT
54 static unsigned int rx_jumbo_desc_cnt[MAX_UNITS] =
55 {JBO_DESC_CNT,JBO_DESC_CNT,JBO_DESC_CNT,JBO_DESC_CNT,JBO_DESC_CNT,
56 JBO_DESC_CNT,JBO_DESC_CNT,JBO_DESC_CNT,JBO_DESC_CNT,JBO_DESC_CNT,
57 JBO_DESC_CNT,JBO_DESC_CNT,JBO_DESC_CNT,JBO_DESC_CNT,JBO_DESC_CNT,
62 #ifdef BCM_NAPI_RXPOLL
63 static unsigned int adaptive_coalesce[MAX_UNITS] =
64 {0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0};
66 static unsigned int adaptive_coalesce[MAX_UNITS] =
67 {1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1};
70 #define RX_COAL_TK DEFAULT_RX_COALESCING_TICKS
71 static unsigned int rx_coalesce_ticks[MAX_UNITS] =
72 {RX_COAL_TK,RX_COAL_TK,RX_COAL_TK,RX_COAL_TK,RX_COAL_TK,
73 RX_COAL_TK, RX_COAL_TK,RX_COAL_TK,RX_COAL_TK,RX_COAL_TK,
74 RX_COAL_TK,RX_COAL_TK, RX_COAL_TK,RX_COAL_TK,RX_COAL_TK,
77 #define RX_COAL_FM DEFAULT_RX_MAX_COALESCED_FRAMES
78 static unsigned int rx_max_coalesce_frames[MAX_UNITS] =
79 {RX_COAL_FM,RX_COAL_FM,RX_COAL_FM,RX_COAL_FM,RX_COAL_FM,
80 RX_COAL_FM,RX_COAL_FM,RX_COAL_FM,RX_COAL_FM,RX_COAL_FM,
81 RX_COAL_FM,RX_COAL_FM,RX_COAL_FM,RX_COAL_FM,RX_COAL_FM,
84 #define TX_COAL_TK DEFAULT_TX_COALESCING_TICKS
85 static unsigned int tx_coalesce_ticks[MAX_UNITS] =
86 {TX_COAL_TK,TX_COAL_TK,TX_COAL_TK,TX_COAL_TK,TX_COAL_TK,
87 TX_COAL_TK, TX_COAL_TK,TX_COAL_TK,TX_COAL_TK,TX_COAL_TK,
88 TX_COAL_TK,TX_COAL_TK, TX_COAL_TK,TX_COAL_TK,TX_COAL_TK,
91 #define TX_COAL_FM DEFAULT_TX_MAX_COALESCED_FRAMES
92 static unsigned int tx_max_coalesce_frames[MAX_UNITS] =
93 {TX_COAL_FM,TX_COAL_FM,TX_COAL_FM,TX_COAL_FM,TX_COAL_FM,
94 TX_COAL_FM,TX_COAL_FM,TX_COAL_FM,TX_COAL_FM,TX_COAL_FM,
95 TX_COAL_FM,TX_COAL_FM,TX_COAL_FM,TX_COAL_FM,TX_COAL_FM,
98 #define ST_COAL_TK DEFAULT_STATS_COALESCING_TICKS
99 static unsigned int stats_coalesce_ticks[MAX_UNITS] =
100 {ST_COAL_TK,ST_COAL_TK,ST_COAL_TK,ST_COAL_TK,ST_COAL_TK,
101 ST_COAL_TK,ST_COAL_TK,ST_COAL_TK,ST_COAL_TK,ST_COAL_TK,
102 ST_COAL_TK,ST_COAL_TK,ST_COAL_TK,ST_COAL_TK,ST_COAL_TK,
/* Per-adapter feature switches, again indexed by unit number. */
/* Wake-on-LAN: disabled by default on every port. */
107 static int enable_wol[MAX_UNITS] = {0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0};
/* TCP segmentation offload: enabled by default. */
110 static int enable_tso[MAX_UNITS] = {1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1};
112 #ifdef BCM_NIC_SEND_BD
/* Place TX buffer descriptors in NIC memory instead of host memory. */
113 static int nic_tx_bd[MAX_UNITS] = {0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0};
/* VLAN tagging mode selector — 0 is the default mode; the exact meaning
   of nonzero values is decided where bcm5700_set_vlan_mode() consumes it
   (not visible here — confirm). */
116 static int vlan_tag_mode[MAX_UNITS] = {0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0};
/* Delay reporting link-up; disabled by default. */
118 static int delay_link[MAX_UNITS] = {0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0};
/* Keep the device out of the D3hot power state when set. */
119 static int disable_d3hot[MAX_UNITS] = {0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0};
121 #if defined(CONFIG_PCI_MSI) || defined(CONFIG_PCI_USE_VECTOR)
/* Opt out of Message Signaled Interrupts per adapter. */
122 static int disable_msi[MAX_UNITS] = {0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0};
/* Nonzero once a host chipset with known-broken MSI support is detected. */
static int bcm_msi_chipset_bug;	/* file scope: implicitly zero-initialized */
/* Microseconds per timer tick (jiffy) on this kernel. */
126 #define BCM_TIMER_GRANULARITY (1000000 / HZ)
128 /* Operational parameters that usually are not changed. */
129 /* Time in jiffies before concluding the transmitter is hung. */
130 #define TX_TIMEOUT (2*HZ)
/* Driver's own, longer TX watchdog interval (5 s vs the 2 s stack value). */
132 #define BCM_TX_TIMEOUT (5*HZ)
134 #if (LINUX_VERSION_CODE < 0x02030d)
135 #define pci_resource_start(dev, bar) (dev->base_address[bar] & PCI_BASE_ADDRESS_MEM_MASK)
136 #elif (LINUX_VERSION_CODE < 0x02032b)
137 #define pci_resource_start(dev, bar) (dev->resource[bar] & PCI_BASE_ADDRESS_MEM_MASK)
140 #if (LINUX_VERSION_CODE < 0x02032b)
/* Pre-2.3.43 compatibility shims for the netif_* queue API. */
#define dev_kfree_skb_irq(skb) dev_kfree_skb(skb)
/*
 * netif_wake_queue expands to two statements, so wrap them in the
 * standard do { } while (0) idiom: the original "stmt1; stmt2" form
 * would leave mark_bh() unconditionally executed when the macro is
 * used as the body of an if/else.  Arguments are parenthesized so
 * non-trivial expressions expand correctly.
 */
#define netif_wake_queue(dev) \
	do { clear_bit(0, &(dev)->tbusy); mark_bh(NET_BH); } while (0)
#define netif_stop_queue(dev) set_bit(0, &(dev)->tbusy)
145 static inline void netif_start_queue(struct net_device *dev)
152 #define netif_queue_stopped(dev) dev->tbusy
153 #define netif_running(dev) dev->start
155 static inline void tasklet_schedule(struct tasklet_struct *tasklet)
157 queue_task(tasklet, &tq_immediate);
158 mark_bh(IMMEDIATE_BH);
161 static inline void tasklet_init(struct tasklet_struct *tasklet,
162 void (*func)(unsigned long),
165 tasklet->next = NULL;
167 tasklet->routine = (void (*)(void *))func;
168 tasklet->data = (void *)data;
171 #define tasklet_kill(tasklet)
175 #if (LINUX_VERSION_CODE < 0x020300)
176 struct pci_device_id {
177 unsigned int vendor, device; /* Vendor and device ID or PCI_ANY_ID */
178 unsigned int subvendor, subdevice; /* Subsystem ID's or PCI_ANY_ID */
179 unsigned int class, class_mask; /* (class,subclass,prog-if) triplet */
180 unsigned long driver_data; /* Data private to the driver */
185 #define pci_set_drvdata(pdev, dev)
186 #define pci_get_drvdata(pdev) 0
188 #define pci_enable_device(pdev) 0
190 #define __devinit __init
191 #define __devinitdata __initdata
194 #define SET_MODULE_OWNER(dev)
195 #define MODULE_DEVICE_TABLE(pci, pci_tbl)
199 #if (LINUX_VERSION_CODE < 0x020411)
201 #define __devexit_p(x) x
205 #ifndef MODULE_LICENSE
206 #define MODULE_LICENSE(license)
210 typedef void irqreturn_t;
211 #define IRQ_RETVAL(x)
214 #if (LINUX_VERSION_CODE < 0x02032a)
215 static inline void *pci_alloc_consistent(struct pci_dev *pdev, size_t size,
216 dma_addr_t *dma_handle)
220 /* Maximum in slab.c */
224 virt_ptr = kmalloc(size, GFP_KERNEL);
225 *dma_handle = virt_to_bus(virt_ptr);
228 #define pci_free_consistent(dev, size, ptr, dma_ptr) kfree(ptr)
230 #endif /*#if (LINUX_VERSION_CODE < 0x02032a) */
233 #if (LINUX_VERSION_CODE < 0x02040d)
235 #if (LINUX_VERSION_CODE >= 0x020409) && defined(RED_HAT_LINUX_KERNEL)
237 #define BCM_32BIT_DMA_MASK ((u64) 0x00000000ffffffffULL)
238 #define BCM_64BIT_DMA_MASK ((u64) 0xffffffffffffffffULL)
241 /* pci_set_dma_mask is using dma_addr_t */
243 #define BCM_32BIT_DMA_MASK ((dma_addr_t) 0xffffffff)
244 #define BCM_64BIT_DMA_MASK ((dma_addr_t) 0xffffffff)
248 #else /* (LINUX_VERSION_CODE < 0x02040d) */
250 #define BCM_32BIT_DMA_MASK ((u64) 0x00000000ffffffffULL)
251 #define BCM_64BIT_DMA_MASK ((u64) 0xffffffffffffffffULL)
254 #if (LINUX_VERSION_CODE < 0x020329)
255 #define pci_set_dma_mask(pdev, mask) (0)
257 #if (LINUX_VERSION_CODE < 0x020403)
259 pci_set_dma_mask(struct pci_dev *dev, dma_addr_t mask)
261 if(! pci_dma_supported(dev, mask))
264 dev->dma_mask = mask;
271 #if (LINUX_VERSION_CODE < 0x020547)
272 #define pci_set_consistent_dma_mask(pdev, mask) (0)
275 #if (LINUX_VERSION_CODE < 0x020402)
276 #define pci_request_regions(pdev, name) (0)
277 #define pci_release_regions(pdev)
/* Fallback for kernels whose spinlock.h lacks spin_is_locked(): bit 0 of
   the lock word is the held flag. */
280 #if ! defined(spin_is_locked)
281 #define spin_is_locked(lock) (test_bit(0,(lock)))
/* Conditionally take/release the per-device global lock.  The lock is
   only used when do_global_lock is set (decided at probe time, not
   visible here). */
284 #define BCM5700_LOCK(pUmDevice, flags) \
285 if ((pUmDevice)->do_global_lock) { \
286 spin_lock_irqsave(&(pUmDevice)->global_lock, flags); \
289 #define BCM5700_UNLOCK(pUmDevice, flags) \
290 if ((pUmDevice)->do_global_lock) { \
291 spin_unlock_irqrestore(&(pUmDevice)->global_lock, flags);\
294 /* Fix for RQM 289636 */
/* Stop the TX queue while refreshing trans_start first, so the stack's
   watchdog does not misread the stall as a TX timeout. */
296 bcm5700_netif_stop_queue(struct net_device *dev)
298 dev->trans_start = jiffies; /* prevent tx timeout */
299 netif_stop_queue(dev);
303 bcm5700_intr_lock(PUM_DEVICE_BLOCK pUmDevice)
305 if (pUmDevice->do_global_lock) {
306 spin_lock(&pUmDevice->global_lock);
311 bcm5700_intr_unlock(PUM_DEVICE_BLOCK pUmDevice)
313 if (pUmDevice->do_global_lock) {
314 spin_unlock(&pUmDevice->global_lock);
319 bcm5700_intr_off(PUM_DEVICE_BLOCK pUmDevice)
321 atomic_inc(&pUmDevice->intr_sem);
322 LM_DisableInterrupt(&pUmDevice->lm_dev);
323 #if (LINUX_VERSION_CODE >= 0x2051c)
324 synchronize_irq(pUmDevice->dev->irq);
328 LM_DisableInterrupt(&pUmDevice->lm_dev);
332 bcm5700_intr_on(PUM_DEVICE_BLOCK pUmDevice)
334 if (atomic_dec_and_test(&pUmDevice->intr_sem)) {
335 LM_EnableInterrupt(&pUmDevice->lm_dev);
340 * Broadcom NIC Extension support
351 #endif /* NICE_SUPPORT */
353 int MM_Packet_Desc_Size = sizeof(UM_PACKET);
356 MODULE_AUTHOR("Michael Chan <mchan at broadcom dot com> and Gary Zambrano <zambrano at broadcom dot com>");
357 MODULE_DESCRIPTION("BCM5700 Driver");
358 MODULE_LICENSE("GPL");
360 #if (LINUX_VERSION_CODE < 0x020605)
362 MODULE_PARM(debug, "i");
363 MODULE_PARM(line_speed, "1-" __MODULE_STRING(MAX_UNITS) "i");
364 MODULE_PARM(auto_speed, "1-" __MODULE_STRING(MAX_UNITS) "i");
365 MODULE_PARM(full_duplex, "1-" __MODULE_STRING(MAX_UNITS) "i");
366 MODULE_PARM(rx_flow_control, "1-" __MODULE_STRING(MAX_UNITS) "i");
367 MODULE_PARM(tx_flow_control, "1-" __MODULE_STRING(MAX_UNITS) "i");
368 MODULE_PARM(auto_flow_control, "1-" __MODULE_STRING(MAX_UNITS) "i");
369 #if T3_JUMBO_RCV_RCB_ENTRY_COUNT
370 MODULE_PARM(mtu, "1-" __MODULE_STRING(MAX_UNITS) "i");
372 MODULE_PARM(tx_checksum, "1-" __MODULE_STRING(MAX_UNITS) "i");
373 MODULE_PARM(rx_checksum, "1-" __MODULE_STRING(MAX_UNITS) "i");
374 MODULE_PARM(scatter_gather, "1-" __MODULE_STRING(MAX_UNITS) "i");
375 MODULE_PARM(tx_pkt_desc_cnt, "1-" __MODULE_STRING(MAX_UNITS) "i");
376 MODULE_PARM(rx_std_desc_cnt, "1-" __MODULE_STRING(MAX_UNITS) "i");
377 #if T3_JUMBO_RCV_RCB_ENTRY_COUNT
378 MODULE_PARM(rx_jumbo_desc_cnt, "1-" __MODULE_STRING(MAX_UNITS) "i");
381 MODULE_PARM(adaptive_coalesce, "1-" __MODULE_STRING(MAX_UNITS) "i");
382 MODULE_PARM(rx_coalesce_ticks, "1-" __MODULE_STRING(MAX_UNITS) "i");
383 MODULE_PARM(rx_max_coalesce_frames, "1-" __MODULE_STRING(MAX_UNITS) "i");
384 MODULE_PARM(tx_coalesce_ticks, "1-" __MODULE_STRING(MAX_UNITS) "i");
385 MODULE_PARM(tx_max_coalesce_frames, "1-" __MODULE_STRING(MAX_UNITS) "i");
386 MODULE_PARM(stats_coalesce_ticks, "1-" __MODULE_STRING(MAX_UNITS) "i");
389 MODULE_PARM(enable_wol, "1-" __MODULE_STRING(MAX_UNITS) "i");
392 MODULE_PARM(enable_tso, "1-" __MODULE_STRING(MAX_UNITS) "i");
394 #ifdef BCM_NIC_SEND_BD
395 MODULE_PARM(nic_tx_bd, "1-" __MODULE_STRING(MAX_UNITS) "i");
398 MODULE_PARM(vlan_tag_mode, "1-" __MODULE_STRING(MAX_UNITS) "i");
400 MODULE_PARM(delay_link, "1-" __MODULE_STRING(MAX_UNITS) "i");
401 MODULE_PARM(disable_d3hot, "1-" __MODULE_STRING(MAX_UNITS) "i");
403 #if defined(CONFIG_PCI_MSI) || defined(CONFIG_PCI_USE_VECTOR)
404 MODULE_PARM(disable_msi, "1-" __MODULE_STRING(MAX_UNITS) "i");
409 #if (LINUX_VERSION_CODE >= 0x020605) && (LINUX_VERSION_CODE < 0x02060a)
417 #if (LINUX_VERSION_CODE >= 0x2060a)
423 module_param_array(line_speed, int, numvar, 0);
424 module_param_array(auto_speed, int, numvar, 0);
425 module_param_array(full_duplex, int, numvar, 0);
426 module_param_array(rx_flow_control, int, numvar, 0);
427 module_param_array(tx_flow_control, int, numvar, 0);
428 module_param_array(auto_flow_control, int, numvar, 0);
429 #if T3_JUMBO_RCV_RCB_ENTRY_COUNT
430 module_param_array(mtu, int, numvar, 0);
432 module_param_array(tx_checksum, int, numvar, 0);
433 module_param_array(rx_checksum, int, numvar, 0);
434 module_param_array(scatter_gather, int, numvar, 0);
435 module_param_array(tx_pkt_desc_cnt, int, numvar, 0);
436 module_param_array(rx_std_desc_cnt, int, numvar, 0);
437 #if T3_JUMBO_RCV_RCB_ENTRY_COUNT
438 module_param_array(rx_jumbo_desc_cnt, int, numvar, 0);
441 module_param_array(adaptive_coalesce, int, numvar, 0);
442 module_param_array(rx_coalesce_ticks, int, numvar, 0);
443 module_param_array(rx_max_coalesce_frames, int, numvar, 0);
444 module_param_array(tx_coalesce_ticks, int, numvar, 0);
445 module_param_array(tx_max_coalesce_frames, int, numvar, 0);
446 module_param_array(stats_coalesce_ticks, int, numvar, 0);
449 module_param_array(enable_wol, int, numvar, 0);
452 module_param_array(enable_tso, int, numvar, 0);
454 #ifdef BCM_NIC_SEND_BD
455 module_param_array(nic_tx_bd, int, numvar, 0);
458 module_param_array(vlan_tag_mode, int, numvar, 0);
460 module_param_array(delay_link, int, numvar, 0);
461 module_param_array(disable_d3hot, int, numvar, 0);
463 #if defined(CONFIG_PCI_MSI) || defined(CONFIG_PCI_USE_VECTOR)
464 module_param_array(disable_msi, int, numvar, 0);
/* Convert a relative delay in jiffies to an absolute timer expiry. */
473 #define RUN_AT(x) (jiffies + (x))
475 char kernel_version[] = UTS_RELEASE;
477 #define PCI_SUPPORT_VER2
/* Very old kernels without the capability API: fall back to suser(). */
479 #if ! defined(CAP_NET_ADMIN)
480 #define capable(CAP_XXX) (suser())
/* tigon3_debug aliases the module "debug" parameter; the two following
   definitions are alternative defaults — presumably selected by #if/#else
   lines not visible in this chunk (confirm against the full file). */
483 #define tigon3_debug debug
485 static int tigon3_debug = TIGON3_DEBUG;
487 static int tigon3_debug = 0;
491 int bcm5700_open(struct net_device *dev);
492 STATIC void bcm5700_timer(unsigned long data);
493 STATIC void bcm5700_stats_timer(unsigned long data);
494 STATIC void bcm5700_reset(struct net_device *dev);
495 STATIC int bcm5700_start_xmit(struct sk_buff *skb, struct net_device *dev);
496 STATIC irqreturn_t bcm5700_interrupt(int irq, void *dev_instance, struct pt_regs *regs);
498 STATIC void bcm5700_tasklet(unsigned long data);
500 STATIC int bcm5700_close(struct net_device *dev);
501 STATIC struct net_device_stats *bcm5700_get_stats(struct net_device *dev);
502 STATIC int bcm5700_ioctl(struct net_device *dev, struct ifreq *rq, int cmd);
503 STATIC void bcm5700_do_rx_mode(struct net_device *dev);
504 STATIC void bcm5700_set_rx_mode(struct net_device *dev);
505 STATIC int bcm5700_set_mac_addr(struct net_device *dev, void *p);
506 #if T3_JUMBO_RCV_RCB_ENTRY_COUNT
507 STATIC int bcm5700_change_mtu(struct net_device *dev, int new_mtu);
509 #ifdef BCM_NAPI_RXPOLL
510 STATIC int bcm5700_poll(struct net_device *dev, int *budget);
512 STATIC int replenish_rx_buffers(PUM_DEVICE_BLOCK pUmDevice, int max);
513 STATIC int bcm5700_freemem(struct net_device *dev);
515 STATIC int bcm5700_freemem2(UM_DEVICE_BLOCK *pUmDevice, int index);
518 #ifndef BCM_NAPI_RXPOLL
519 STATIC int bcm5700_adapt_coalesce(PUM_DEVICE_BLOCK pUmDevice);
522 STATIC void bcm5700_set_vlan_mode(UM_DEVICE_BLOCK *pUmDevice);
523 STATIC int bcm5700_init_counters(PUM_DEVICE_BLOCK pUmDevice);
525 STATIC void bcm5700_vlan_rx_register(struct net_device *dev, struct vlan_group *vlgrp);
526 STATIC void bcm5700_vlan_rx_kill_vid(struct net_device *dev, uint16_t vid);
528 void bcm5700_shutdown(UM_DEVICE_BLOCK *pUmDevice);
529 void bcm5700_free_remaining_rx_bufs(UM_DEVICE_BLOCK *pUmDevice);
530 void bcm5700_validate_param_range(UM_DEVICE_BLOCK *pUmDevice, int *param,
531 char *param_name, int min, int max, int deflt);
533 #if defined(HAVE_POLL_CONTROLLER) || defined(CONFIG_NET_POLL_CONTROLLER)
534 STATIC void poll_bcm5700(struct net_device *dev);
537 /* A list of all installed bcm5700 devices. */
538 static struct net_device *root_tigon3_dev = NULL;
540 #if defined(CONFIG_SPARC64) || defined(CONFIG_X86_64) ||defined(CONFIG_PPC64)
543 #if (LINUX_VERSION_CODE < 0x20500)
544 extern int register_ioctl32_conversion(unsigned int cmd,
545 int (*handler)(unsigned int, unsigned int, unsigned long,
547 int unregister_ioctl32_conversion(unsigned int cmd);
549 #include <linux/ioctl32.h>
552 #define BCM_IOCTL32 1
554 atomic_t bcm5700_load_count = ATOMIC_INIT(0);
557 bcm5700_ioctl32(unsigned int fd, unsigned int cmd, unsigned long arg,
561 struct net_device *tmp_dev = root_tigon3_dev;
563 struct nice_req* nrq;
564 struct ifreq_nice32 {
572 if (!capable(CAP_NET_ADMIN))
575 if (mm_copy_from_user(&nrq32, (char *) arg, 32))
578 memcpy(rq.ifr_name, nrq32.ifnr_name, 16);
580 nrq = (struct nice_req*) &rq.ifr_ifru;
581 nrq->cmd = nrq32.cmd;
582 if (nrq->cmd == NICE_CMD_GET_STATS_BLOCK) {
583 nrq->nrq_stats_useraddr = (void *) ((__u64) nrq32.nrq1);
584 nrq->nrq_stats_size = nrq32.nrq2;
587 memcpy(&nrq->nrq_speed, &nrq32.nrq1, 12);
590 if (strcmp(rq.ifr_name, tmp_dev->name) == 0) {
591 ret = bcm5700_ioctl(tmp_dev, &rq, cmd);
593 if (nrq->cmd == NICE_CMD_GET_STATS_BLOCK)
596 memcpy(&nrq32.nrq1, &nrq->nrq_speed, 12);
597 if (mm_copy_to_user((char *) arg, &nrq32, 32))
602 tmp_dev = ((UM_DEVICE_BLOCK *)(tmp_dev->priv))->next_module;
606 #endif /* NICE_SUPPORT */
610 #ifdef MEFHACK_NOTFORPLANETLAB
659 #ifdef MEFHACK_NOTFORPLANETLAB
692 /* indexed by board_t, above */
695 } board_info[] __devinitdata = {
696 #ifdef MEFHACK_NOTFORPLANETLAB
697 { "Broadcom BCM5700 1000Base-T" },
698 { "Broadcom BCM5700 1000Base-SX" },
699 { "Broadcom BCM5700 1000Base-SX" },
700 { "Broadcom BCM5700 1000Base-T" },
701 { "Broadcom BCM5700" },
702 { "Broadcom BCM5701 1000Base-T" },
703 { "Broadcom BCM5701 1000Base-T" },
704 { "Broadcom BCM5701 1000Base-T" },
705 { "Broadcom BCM5701 1000Base-SX" },
706 { "Broadcom BCM5701 1000Base-T" },
707 { "Broadcom BCM5701 1000Base-T" },
708 { "Broadcom BCM5701" },
709 { "Broadcom BCM5702 1000Base-T" },
710 { "Broadcom BCM5703 1000Base-T" },
711 { "Broadcom BCM5703 1000Base-SX" },
712 { "Broadcom B5703 1000Base-SX" },
713 { "3Com 3C996 10/100/1000 Server NIC" },
714 { "3Com 3C996 10/100/1000 Server NIC" },
715 { "3Com 3C996 Gigabit Fiber-SX Server NIC" },
716 { "3Com 3C996 Gigabit Fiber-SX Server NIC" },
717 { "3Com 3C996B Gigabit Server NIC" },
718 { "3Com 3C997 Gigabit Server NIC" },
719 { "3Com 3C997 Gigabit Fiber-SX Server NIC" },
720 { "3Com 3C1000 Gigabit NIC" },
721 { "3Com 3C1000B-T 10/100/1000 PCI" },
722 { "3Com 3C940 Gigabit LOM (21X21)" },
723 { "3Com 3C942 Gigabit LOM (31X31)" },
724 { "3Com 3C998-T Dual Port 10/100/1000 PCI-X Server NIC" },
725 { "3Com 3C998-SX Dual Port 1000-SX PCI-X Server NIC" },
726 { "3Com 3C999-T Quad Port 10/100/1000 PCI-X Server NIC" },
728 { "HP NC6770 Gigabit Server Adapter" },
729 { "NC1020 HP ProLiant Gigabit Server Adapter 32 PCI" },
730 { "HP ProLiant NC 150T PCI 4-port Gigabit Combo Switch Adapter" },
731 { "HP NC7760 Gigabit Server Adapter" },
732 { "HP NC7761 Gigabit Server Adapter" },
733 { "HP NC7770 Gigabit Server Adapter" },
734 { "HP NC7771 Gigabit Server Adapter" },
735 { "HP NC7780 Gigabit Server Adapter" },
736 { "HP NC7781 Gigabit Server Adapter" },
737 { "HP NC7772 Gigabit Server Adapter" },
738 { "HP NC7782 Gigabit Server Adapter" },
739 { "HP NC7783 Gigabit Server Adapter" },
740 { "HP ProLiant NC 320T PCI Express Gigabit Server Adapter" },
741 { "HP ProLiant NC 320i PCI Express Gigabit Server Adapter" },
742 { "HP NC325i Integrated Dual Port PCI Express Gigabit Server Adapter" },
743 { "HP NC324i Integrated Dual Port PCI Express Gigabit Server Adapter" },
744 { "HP NC326i Integrated Dual Port PCI Express Gigabit Server Adapter" },
745 #ifdef MEFHACK_NOTFORPLANETLAB
746 { "Broadcom BCM5704 CIOB-E 1000Base-T" },
747 { "Broadcom BCM5704 1000Base-T" },
748 { "Broadcom BCM5704 1000Base-SX" },
749 { "Broadcom BCM5705 1000Base-T" },
750 { "Broadcom BCM5705M 1000Base-T" },
751 { "Broadcom 570x 10/100 Integrated Controller" },
752 { "Broadcom BCM5901 100Base-TX" },
753 { "Broadcom NetXtreme Gigabit Ethernet for hp" },
754 { "Broadcom BCM5788 NetLink 1000Base-T" },
755 { "Broadcom BCM5789 NetLink 1000Base-T PCI Express" },
756 { "Broadcom BCM5750 1000Base-T PCI" },
757 { "Broadcom BCM5750M 1000Base-T PCI" },
758 { "Broadcom BCM5720 1000Base-T PCI" },
759 { "Broadcom BCM5751 1000Base-T PCI Express" },
760 { "Broadcom BCM5751M 1000Base-T PCI Express" },
761 { "Broadcom BCM5751F 100Base-TX PCI Express" },
762 { "Broadcom BCM5721 1000Base-T PCI Express" },
763 { "Broadcom BCM5753 1000Base-T PCI Express" },
764 { "Broadcom BCM5753M 1000Base-T PCI Express" },
765 { "Broadcom BCM5753F 100Base-TX PCI Express" },
766 { "Broadcom BCM5781 NetLink 1000Base-T PCI Express" },
767 { "Broadcom BCM5752 1000Base-T PCI Express" },
768 { "Broadcom BCM5752M 1000Base-T PCI Express" },
769 { "Broadcom BCM5714 1000Base-T " },
770 { "Broadcom BCM5780 1000Base-T" },
771 { "Broadcom BCM5780S 1000Base-SX" },
772 { "Broadcom BCM5715 1000Base-T " },
773 { "Broadcom BCM5903M Gigabit Ethernet " },
778 static struct pci_device_id bcm5700_pci_tbl[] __devinitdata = {
779 #ifdef MEFHACK_NOTFORPLANETLAB
780 {0x14e4, 0x1644, 0x14e4, 0x1644, 0, 0, BCM5700A6 },
781 {0x14e4, 0x1644, 0x14e4, 0x2, 0, 0, BCM5700T6 },
782 {0x14e4, 0x1644, 0x14e4, 0x3, 0, 0, BCM5700A9 },
783 {0x14e4, 0x1644, 0x14e4, 0x4, 0, 0, BCM5700T9 },
784 {0x14e4, 0x1644, 0x1028, 0xd1, 0, 0, BCM5700 },
785 {0x14e4, 0x1644, 0x1028, 0x0106, 0, 0, BCM5700 },
786 {0x14e4, 0x1644, 0x1028, 0x0109, 0, 0, BCM5700 },
787 {0x14e4, 0x1644, 0x1028, 0x010a, 0, 0, BCM5700 },
788 {0x14e4, 0x1644, 0x10b7, 0x1000, 0, 0, TC996T },
789 {0x14e4, 0x1644, 0x10b7, 0x1001, 0, 0, TC996ST },
790 {0x14e4, 0x1644, 0x10b7, 0x1002, 0, 0, TC996SSX },
791 {0x14e4, 0x1644, 0x10b7, 0x1003, 0, 0, TC997T },
792 {0x14e4, 0x1644, 0x10b7, 0x1005, 0, 0, TC997SX },
793 {0x14e4, 0x1644, 0x10b7, 0x1008, 0, 0, TC942BR01 },
794 {0x14e4, 0x1644, PCI_ANY_ID, PCI_ANY_ID, 0, 0, BCM5700 },
795 {0x14e4, 0x1645, 0x14e4, 1, 0, 0, BCM5701A5 },
796 {0x14e4, 0x1645, 0x14e4, 5, 0, 0, BCM5701T1 },
797 {0x14e4, 0x1645, 0x14e4, 6, 0, 0, BCM5701T8 },
798 {0x14e4, 0x1645, 0x14e4, 7, 0, 0, BCM5701A7 },
799 {0x14e4, 0x1645, 0x14e4, 8, 0, 0, BCM5701A10 },
800 {0x14e4, 0x1645, 0x14e4, 0x8008, 0, 0, BCM5701A12 },
802 {0x14e4, 0x1645, 0x0e11, 0xc1, 0, 0, NC6770 },
803 {0x14e4, 0x1645, 0x0e11, 0x7c, 0, 0, NC7770 },
804 {0x14e4, 0x1645, 0x0e11, 0x85, 0, 0, NC7780 },
805 #ifdef MEFHACK_NOTFORPLANETLAB
806 {0x14e4, 0x1645, 0x1028, 0x0121, 0, 0, BCM5701 },
807 {0x14e4, 0x1645, 0x10b7, 0x1004, 0, 0, TC996SX },
808 {0x14e4, 0x1645, 0x10b7, 0x1006, 0, 0, TC996BT },
809 {0x14e4, 0x1645, 0x10b7, 0x1007, 0, 0, TC1000T },
810 {0x14e4, 0x1645, 0x10b7, 0x1008, 0, 0, TC940BR01 },
811 {0x14e4, 0x1645, PCI_ANY_ID, PCI_ANY_ID, 0, 0, BCM5701 },
812 {0x14e4, 0x1646, 0x14e4, 0x8009, 0, 0, BCM5702 },
813 {0x14e4, 0x1646, PCI_ANY_ID, PCI_ANY_ID, 0, 0, BCM5702 },
814 {0x14e4, 0x16a6, 0x14e4, 0x8009, 0, 0, BCM5702 },
815 {0x14e4, 0x16a6, 0x14e4, 0x000c, 0, 0, BCM5702 },
816 {0x14e4, 0x16a6, 0x0e11, 0xbb, 0, 0, NC7760 },
817 {0x14e4, 0x16a6, PCI_ANY_ID, PCI_ANY_ID, 0, 0, BCM5702 },
818 {0x14e4, 0x16c6, 0x10b7, 0x1100, 0, 0, TC1000BT },
819 {0x14e4, 0x16c6, PCI_ANY_ID, PCI_ANY_ID, 0, 0, BCM5702 },
820 {0x14e4, 0x1647, 0x14e4, 0x0009, 0, 0, BCM5703 },
821 {0x14e4, 0x1647, 0x14e4, 0x000a, 0, 0, BCM5703A31 },
822 {0x14e4, 0x1647, 0x14e4, 0x000b, 0, 0, BCM5703 },
823 {0x14e4, 0x1647, 0x14e4, 0x800a, 0, 0, BCM5703 },
824 {0x14e4, 0x1647, PCI_ANY_ID, PCI_ANY_ID, 0, 0, BCM5703 },
825 {0x14e4, 0x16a7, 0x14e4, 0x0009, 0, 0, BCM5703 },
826 {0x14e4, 0x16a7, 0x14e4, 0x000a, 0, 0, BCM5703A31 },
827 {0x14e4, 0x16a7, 0x14e4, 0x000b, 0, 0, BCM5703 },
828 {0x14e4, 0x16a7, 0x14e4, 0x800a, 0, 0, BCM5703 },
830 {0x14e4, 0x16a7, 0x0e11, 0xca, 0, 0, NC7771 },
831 {0x14e4, 0x16a7, 0x0e11, 0xcb, 0, 0, NC7781 },
832 #ifdef MEFHACK_NOTFORPLANETLAB
833 {0x14e4, 0x16a7, 0x1014, 0x0281, 0, 0, BCM5703ARBUCKLE },
834 {0x14e4, 0x16a7, PCI_ANY_ID, PCI_ANY_ID, 0, 0, BCM5703 },
835 {0x14e4, 0x16c7, 0x14e4, 0x000a, 0, 0, BCM5703A31 },
837 {0x14e4, 0x16c7, 0x0e11, 0xca, 0, 0, NC7771 },
838 {0x14e4, 0x16c7, 0x0e11, 0xcb, 0, 0, NC7781 },
839 #ifdef MEFHACK_NOTFORPLANETLAB
840 {0x14e4, 0x16c7, PCI_ANY_ID, PCI_ANY_ID, 0, 0, BCM5703 },
842 {0x14e4, 0x1648, 0x0e11, 0xcf, 0, 0, NC7772 },
843 {0x14e4, 0x1648, 0x0e11, 0xd0, 0, 0, NC7782 },
844 {0x14e4, 0x1648, 0x0e11, 0xd1, 0, 0, NC7783 },
845 #ifdef MEFHACK_NOTFORPLANETLAB
846 {0x14e4, 0x1648, 0x10b7, 0x2000, 0, 0, TC998T },
847 {0x14e4, 0x1648, 0x10b7, 0x3000, 0, 0, TC999T },
848 {0x14e4, 0x1648, 0x1166, 0x1648, 0, 0, BCM5704CIOBE },
849 {0x14e4, 0x1648, PCI_ANY_ID, PCI_ANY_ID, 0, 0, BCM5704 },
850 {0x14e4, 0x1649, PCI_ANY_ID, PCI_ANY_ID, 0, 0, BCM5704S },
851 {0x14e4, 0x16a8, 0x14e4, 0x16a8, 0, 0, BCM5704S },
852 {0x14e4, 0x16a8, 0x10b7, 0x2001, 0, 0, TC998SX },
853 {0x14e4, 0x16a8, PCI_ANY_ID, PCI_ANY_ID, 0, 0, BCM5704S },
855 {0x14e4, 0x1653, 0x0e11, 0x00e3, 0, 0, NC7761 },
856 #ifdef MEFHACK_NOTFORPLANETLAB
857 {0x14e4, 0x1653, PCI_ANY_ID, PCI_ANY_ID, 0, 0, BCM5705 },
859 {0x14e4, 0x1654, 0x0e11, 0x00e3, 0, 0, NC7761 },
860 {0x14e4, 0x1654, 0x103c, 0x3100, 0, 0, NC1020 },
861 {0x14e4, 0x1654, 0x103c, 0x3226, 0, 0, NC150T },
862 #ifdef MEFHACK_NOTFORPLANETLAB
863 {0x14e4, 0x1654, PCI_ANY_ID, PCI_ANY_ID, 0, 0, BCM5705 },
864 {0x14e4, 0x165d, PCI_ANY_ID, PCI_ANY_ID, 0, 0, BCM5705M },
865 {0x14e4, 0x165e, PCI_ANY_ID, PCI_ANY_ID, 0, 0, BCM5705M },
866 {0x14e4, 0x166e, PCI_ANY_ID, PCI_ANY_ID, 0, 0, BCM5705F },
867 {0x14e4, 0x1696, PCI_ANY_ID, PCI_ANY_ID, 0, 0, BCM5782 },
868 {0x14e4, 0x169c, PCI_ANY_ID, PCI_ANY_ID, 0, 0, BCM5788 },
869 {0x14e4, 0x169d, PCI_ANY_ID, PCI_ANY_ID, 0, 0, BCM5789 },
870 {0x14e4, 0x170d, PCI_ANY_ID, PCI_ANY_ID, 0, 0, BCM5901 },
871 {0x14e4, 0x170e, PCI_ANY_ID, PCI_ANY_ID, 0, 0, BCM5901 },
872 {0x14e4, 0x1676, PCI_ANY_ID, PCI_ANY_ID, 0, 0, BCM5750 },
873 {0x14e4, 0x167c, PCI_ANY_ID, PCI_ANY_ID, 0, 0, BCM5750M },
874 {0x14e4, 0x1677, PCI_ANY_ID, PCI_ANY_ID, 0, 0, BCM5751 },
875 {0x14e4, 0x167d, PCI_ANY_ID, PCI_ANY_ID, 0, 0, BCM5751M },
876 {0x14e4, 0x167e, PCI_ANY_ID, PCI_ANY_ID, 0, 0, BCM5751F },
877 {0x14e4, 0x1658, PCI_ANY_ID, PCI_ANY_ID, 0, 0, BCM5720 },
879 {0x14e4, 0x1659, 0x103c, 0x7031, 0, 0, NC320T },
880 {0x14e4, 0x1659, 0x103c, 0x7032, 0, 0, NC320T },
881 {0x14e4, 0x166a, 0x103c, 0x7035, 0, 0, NC325I },
882 {0x14e4, 0x166b, 0x103c, 0x7036, 0, 0, NC325I },
883 {0x14e4, 0x1668, 0x103c, 0x7039, 0, 0, NC324I },
884 {0x14e4, 0x1669, 0x103c, 0x703a, 0, 0, NC324I },
885 {0x14e4, 0x1678, 0x103c, 0x703e, 0, 0, NC326I },
886 {0x14e4, 0x1679, 0x103c, 0x703c, 0, 0, NC326I },
887 #ifdef MEFHACK_NOTFORPLANETLAB
888 {0x14e4, 0x1659, PCI_ANY_ID, PCI_ANY_ID, 0, 0, BCM5721 },
889 {0x14e4, 0x16f7, PCI_ANY_ID, PCI_ANY_ID, 0, 0, BCM5753 },
890 {0x14e4, 0x16fd, PCI_ANY_ID, PCI_ANY_ID, 0, 0, BCM5753M },
891 {0x14e4, 0x16fe, PCI_ANY_ID, PCI_ANY_ID, 0, 0, BCM5753F },
892 {0x14e4, 0x16dd, PCI_ANY_ID, PCI_ANY_ID, 0, 0, BCM5781 },
893 {0x14e4, 0x1600, PCI_ANY_ID, PCI_ANY_ID, 0, 0, BCM5752 },
894 {0x14e4, 0x1601, PCI_ANY_ID, PCI_ANY_ID, 0, 0, BCM5752M },
895 {0x14e4, 0x1668, PCI_ANY_ID, PCI_ANY_ID, 0, 0, BCM5714 },
896 {0x14e4, 0x1669, PCI_ANY_ID, PCI_ANY_ID, 0, 0, BCM5714S },
897 {0x14e4, 0x166a, PCI_ANY_ID, PCI_ANY_ID, 0, 0, BCM5780 },
898 {0x14e4, 0x166b, PCI_ANY_ID, PCI_ANY_ID, 0, 0, BCM5780S },
899 {0x14e4, 0x1678, PCI_ANY_ID, PCI_ANY_ID, 0, 0, BCM5715 },
900 {0x14e4, 0x1679, PCI_ANY_ID, PCI_ANY_ID, 0, 0, BCM5715S },
901 {0x14e4, 0x16ff, PCI_ANY_ID, PCI_ANY_ID, 0, 0, BCM5903M },
906 MODULE_DEVICE_TABLE(pci, bcm5700_pci_tbl);
909 extern int bcm5700_proc_create(void);
910 extern int bcm5700_proc_create_dev(struct net_device *dev);
911 extern int bcm5700_proc_remove_dev(struct net_device *dev);
912 extern int bcm5700_proc_remove_notifier(void);
915 #if (LINUX_VERSION_CODE >= 0x2060a)
916 static struct pci_device_id pci_AMD762id[]={
917 { PCI_DEVICE(PCI_VENDOR_ID_AMD,
918 PCI_DEVICE_ID_AMD_FE_GATE_700C) },
923 /*******************************************************************************
924 *******************************************************************************
/* Checksum-offload capability flag for a chip revision.  As written it
   reports IPv4 checksum offload (NETIF_F_IP_CSUM) for every ChipRevId —
   the parameter is currently unused. */
927 int get_csum_flag(LM_UINT32 ChipRevId)
929 return NETIF_F_IP_CSUM;
932 /*******************************************************************************
933 *******************************************************************************
935 This function returns true if the device passed to it is attached to an
936 ICH-ICH4. If the chip is not attached to an ICH, or is attached to an ICH5
937 or newer, it returns false.
939 This function determines which bridge it is attached to by scaning the pci
940 bus looking for bridge chips (hdr_type=1). When a bridge chip is detected,
941 the bridge's subordinate's secondary bus number is compared with this
942 devices bus number. If they match, then the device is attached to this
943 bridge. The bridge's device id is compared to a list of known device ids for
944 ICH-ICH4. Since many older ICH's (ICH2-ICH7) share the same device id, the
945 chip revision must also be checked to determine if the chip is older than an
948 To scan the bus, one of two functions is used depending on the kernel
949 version. For 2.4 kernels, the pci_find_device function is used. This
950 function has been depricated in the 2.6 kernel and replaced with the
951 fucntion pci_get_device. The macro walk_pci_bus determines which function to
952 use when the driver is built.
/* Version-portable PCI bus walk.  On 2.6.10+ pci_get_device() takes a
   reference, so unwalk_pci_bus() must drop it with pci_dev_put(); the
   older pci_find_device() path is unrefcounted and the release macro is
   a no-op. */
955 #if (LINUX_VERSION_CODE >= 0x2060a)
956 #define walk_pci_bus(d) while ((d = pci_get_device( \
957 PCI_ANY_ID, PCI_ANY_ID, d)) != NULL)
959 #define unwalk_pci_bus(d) pci_dev_put(d)
962 #define walk_pci_bus(d) while ((d = pci_find_device( \
963 PCI_ANY_ID, PCI_ANY_ID, d)) != NULL)
964 #define unwalk_pci_bus(d)
/* ICH revision IDs below this value are older than ICH5. */
968 #define ICH5_CHIP_VERSION 0xc0
/* Intel ICH bridge device IDs matched by the ICH workaround scan below.
   NOTE(review): the terminating all-zero sentinel row expected by the
   vendor==0 loop test is outside this view — confirm it exists. */
970 static struct pci_device_id pci_ICHtable[] = {
971 {0x8086, 0x2418}, /* PCI_DEVICE_ID_INTEL_82801AA_8 */
972 {0x8086, 0x2428}, /* PCI_DEVICE_ID_INTEL_82801AB_8 */
973 {0x8086, 0x244e}, /* PCI_DEVICE_ID_INTEL_82801BA_6 */
974 {0x8086, 0x2448}, /* PCI_DEVICE_ID_INTEL_82801BA_11 */
/* Returns true when pdev sits behind an ICH4-or-older bridge (see the
   long explanation above): walk every PCI device, find the bridge
   (hdr_type == 1) whose secondary bus number matches pdev's bus, then
   match its IDs against pci_ICHtable and compare the chip revision
   against ICH5_CHIP_VERSION. */
978 int attached_to_ICH4_or_older( struct pci_dev *pdev)
980 struct pci_dev *tmp_pdev = NULL;
981 struct pci_device_id *ich_table;
984 walk_pci_bus (tmp_pdev) {
985 if ((tmp_pdev->hdr_type == 1) &&
986 (tmp_pdev->subordinate != NULL) &&
987 (tmp_pdev->subordinate->secondary == pdev->bus->number)) {
989 ich_table = pci_ICHtable;
991 while (ich_table->vendor) {
992 if ((ich_table->vendor == tmp_pdev->vendor) &&
993 (ich_table->device == tmp_pdev->device)) {
995 pci_read_config_byte( tmp_pdev,
996 PCI_REVISION_ID, &chip_rev);
998 if (chip_rev < ICH5_CHIP_VERSION) {
999 unwalk_pci_bus( tmp_pdev);
/*
 * bcm5700_init_board - one-time board bring-up for a probed PCI device.
 *
 * Allocates the net_device (+ UM_DEVICE_BLOCK private area), enables the
 * PCI device, claims its regions, configures DMA masks, links the unit
 * into the driver's device list, applies chipset errata workarounds
 * (ICH4 special cycles, AMD762 write ordering) and queries the adapter
 * via LM_GetAdapterInfo().  On success *dev_out receives the new device.
 *
 * NOTE(review): extract is truncated - several error paths, #else/#endif
 * lines and closing braces are missing from this view.
 */
1010 static int __devinit bcm5700_init_board(struct pci_dev *pdev,
1011 struct net_device **dev_out,
1014 struct net_device *dev;
1015 PUM_DEVICE_BLOCK pUmDevice;
1016 PLM_DEVICE_BLOCK pDevice;
/* dev allocation differs between 2.4 (init_etherdev) and 2.6 kernels. */
1021 /* dev zeroed in init_etherdev */
1022 #if (LINUX_VERSION_CODE >= 0x20600)
1023 dev = alloc_etherdev(sizeof(*pUmDevice));
1025 dev = init_etherdev(NULL, sizeof(*pUmDevice));
1028 printk (KERN_ERR "%s: unable to alloc new ethernet\n",
1032 SET_MODULE_OWNER(dev);
1033 #if (LINUX_VERSION_CODE >= 0x20600)
1034 SET_NETDEV_DEV(dev, &pdev->dev);
1036 pUmDevice = (PUM_DEVICE_BLOCK) dev->priv;
1038 /* enable device (incl. PCI PM wakeup), and bus-mastering */
1039 rc = pci_enable_device (pdev);
1043 rc = pci_request_regions(pdev, bcm5700_driver);
1047 pci_set_master(pdev);
/* Prefer 64-bit DMA; fall back to 32-bit; fail if neither works. */
1049 if (pci_set_dma_mask(pdev, BCM_64BIT_DMA_MASK) == 0) {
1050 pUmDevice->using_dac = 1;
1051 if (pci_set_consistent_dma_mask(pdev, BCM_64BIT_DMA_MASK) != 0)
1053 printk(KERN_ERR "pci_set_consistent_dma_mask failed\n");
1054 pci_release_regions(pdev);
1058 else if (pci_set_dma_mask(pdev, BCM_32BIT_DMA_MASK) == 0) {
1059 pUmDevice->using_dac = 0;
1062 printk(KERN_ERR "System does not support DMA\n");
1063 pci_release_regions(pdev);
/* Link this unit into the driver's singly-linked device list. */
1067 pUmDevice->dev = dev;
1068 pUmDevice->pdev = pdev;
1069 pUmDevice->mem_list_num = 0;
1070 pUmDevice->next_module = root_tigon3_dev;
1071 pUmDevice->index = board_idx;
1072 root_tigon3_dev = dev;
1074 spin_lock_init(&pUmDevice->global_lock);
1076 spin_lock_init(&pUmDevice->undi_lock);
1078 spin_lock_init(&pUmDevice->phy_lock);
/* UM block embeds the LM block, so the cast below is valid. */
1080 pDevice = (PLM_DEVICE_BLOCK) pUmDevice;
1082 pDevice->FunctNum = PCI_FUNC(pUmDevice->pdev->devfn);
1084 #if T3_JUMBO_RCV_RCB_ENTRY_COUNT
1085 if (board_idx < MAX_UNITS) {
/* Clamp module-parameter MTU to the jumbo-capable 1500..9000 range. */
1086 bcm5700_validate_param_range(pUmDevice, &mtu[board_idx], "mtu", 1500, 9000, 1500);
1087 dev->mtu = mtu[board_idx];
1090 /* If we're attached to an ICH4 or older, we may need to implement a
1091 workaround for special cycles described in the BCM5704/357 Errata.
1092 This workaround is only need on 5703-A1/2 or 5704-A0 chips that
1093 are attached to a PCI-X bus. The NIC chip type and bus are checked
1094 later in the driver and the flag cleared if the workaround is not
1095 needed. The workaround is enabled by setting the flag UNDI_FIX_FLAG
1096 which casues the driver to use indirect pci-config cycles when
1097 accessing the low-priority mailboxes (MB_REG_WR/RD).
1100 if (attached_to_ICH4_or_older( pdev)) {
1101 pDevice->Flags |= UNDI_FIX_FLAG;
/* AMD762 northbridge posts writes out of order - force flushes and
 * disable NIC-memory send BDs as a workaround. */
1104 #if (LINUX_VERSION_CODE >= 0x2060a)
1105 if(pci_dev_present(pci_AMD762id)){
1106 pDevice->Flags |= FLUSH_POSTED_WRITE_FLAG;
1107 pDevice->Flags &= ~NIC_SEND_BD_FLAG;
1110 if (pci_find_device(0x1022, 0x700c, NULL)) {
1111 /* AMD762 writes I/O out of order */
1112 /* Setting bit 1 in 762's register 0x4C still doesn't work */
1114 pDevice->Flags |= FLUSH_POSTED_WRITE_FLAG;
1115 pDevice->Flags &= ~NIC_SEND_BD_FLAG;
1118 if (LM_GetAdapterInfo(pDevice) != LM_STATUS_SUCCESS) {
/* Revert to standard MTU on chips without jumbo-frame support. */
1123 if ( (pDevice->Flags & JUMBO_CAPABLE_FLAG) == 0 ) {
1124 if (dev->mtu > 1500) {
1126 printk(KERN_WARNING "%s-%d: Jumbo mtu sizes not supported, using mtu=1500\n", bcm5700_driver, pUmDevice->index);
1130 pUmDevice->do_global_lock = 0;
1131 if (T3_ASIC_REV(pUmDevice->lm_dev.ChipRevId) == T3_ASIC_REV_5700) {
1132 /* The 5700 chip works best without interleaved register */
1133 /* accesses on certain machines. */
1134 pUmDevice->do_global_lock = 1;
/* 5701 on a real PCI (non-PCI-X) bus needs no rx-buffer alignment. */
1137 if ((T3_ASIC_REV(pUmDevice->lm_dev.ChipRevId) == T3_ASIC_REV_5701) &&
1138 ((pDevice->PciState & T3_PCI_STATE_NOT_PCI_X_BUS) == 0)) {
1140 pUmDevice->rx_buf_align = 0;
1143 pUmDevice->rx_buf_align = 2;
1145 dev->mem_start = pci_resource_start(pdev, 0);
1146 dev->mem_end = dev->mem_start + sizeof(T3_STD_MEM_MAP);
1147 dev->irq = pdev->irq;
/* Error-unwind path (label presumably above; not visible in extract). */
1153 pci_release_regions(pdev);
1154 bcm5700_freemem(dev);
1157 #if (LINUX_VERSION_CODE < 0x020600)
1158 unregister_netdev(dev);
/*
 * bcm5700_print_ver - print the driver banner (name, optional NICE
 * extension notice, version and build date) once at load time.
 */
1166 static int __devinit
1167 bcm5700_print_ver(void)
1169 printk(KERN_INFO "Broadcom Gigabit Ethernet Driver %s ",
/* Emitted only when NICE ioctl support is compiled in (guard not visible
 * in this extract). */
1172 printk("with Broadcom NIC Extension (NICE) ");
1174 printk("ver. %s %s\n", bcm5700_version, bcm5700_date);
/*
 * bcm5700_init_one - PCI probe entry point for one adapter.
 *
 * Calls bcm5700_init_board() for the heavy lifting, wires up the
 * net_device method pointers, registers the netdev, prints the
 * discovered MAC address and PHY type, enables optional features
 * (SG, checksum offload, VLAN, TSO, NAPI) and applies the AMD762 /
 * ServerWorks-MSI chipset workarounds.
 *
 * NOTE(review): extract is truncated - error returns, #else/#endif lines
 * and closing braces are missing from this view.
 */
1178 static int __devinit
1179 bcm5700_init_one(struct pci_dev *pdev,
1180 const struct pci_device_id *ent)
1182 struct net_device *dev = NULL;
1183 PUM_DEVICE_BLOCK pUmDevice;
1184 PLM_DEVICE_BLOCK pDevice;
/* board_idx is static: it counts units across successive probe calls. */
1186 static int board_idx = -1;
1187 static int printed_version = 0;
1188 struct pci_dev *pci_dev;
1192 if (!printed_version) {
1193 bcm5700_print_ver();
1195 bcm5700_proc_create();
1197 printed_version = 1;
1200 i = bcm5700_init_board(pdev, &dev, board_idx);
/* Register the ioctl32 conversion only for the first loaded unit. */
1209 if (atomic_read(&bcm5700_load_count) == 0) {
1210 register_ioctl32_conversion(SIOCNICE, bcm5700_ioctl32);
1212 atomic_inc(&bcm5700_load_count);
/* Hook up net_device operations (pre-net_device_ops kernel style). */
1214 dev->open = bcm5700_open;
1215 dev->hard_start_xmit = bcm5700_start_xmit;
1216 dev->stop = bcm5700_close;
1217 dev->get_stats = bcm5700_get_stats;
1218 dev->set_multicast_list = bcm5700_set_rx_mode;
1219 dev->do_ioctl = bcm5700_ioctl;
1220 dev->set_mac_address = &bcm5700_set_mac_addr;
1221 #if T3_JUMBO_RCV_RCB_ENTRY_COUNT
1222 dev->change_mtu = &bcm5700_change_mtu;
1224 #if (LINUX_VERSION_CODE >= 0x20400)
1225 dev->tx_timeout = bcm5700_reset;
1226 /* Fix for RQM 289636 */
1227 /* dev->watchdog_timeo = TX_TIMEOUT; */
1228 dev->watchdog_timeo = BCM_TX_TIMEOUT;
1231 dev->vlan_rx_register = &bcm5700_vlan_rx_register;
1232 dev->vlan_rx_kill_vid = &bcm5700_vlan_rx_kill_vid;
1234 #ifdef BCM_NAPI_RXPOLL
1235 dev->poll = bcm5700_poll;
1239 pUmDevice = (PUM_DEVICE_BLOCK) dev->priv;
1240 pDevice = (PLM_DEVICE_BLOCK) pUmDevice;
1242 dev->base_addr = pci_resource_start(pdev, 0);
1243 dev->irq = pdev->irq;
1244 #if defined(HAVE_POLL_CONTROLLER) || defined(CONFIG_NET_POLL_CONTROLLER)
1245 dev->poll_controller = poll_bcm5700;
1248 #if (LINUX_VERSION_CODE >= 0x20600)
1249 if ((i = register_netdev(dev))) {
1250 printk(KERN_ERR "%s: Cannot register net device\n",
1252 if (pUmDevice->lm_dev.pMappedMemBase)
1253 iounmap(pUmDevice->lm_dev.pMappedMemBase);
1254 pci_release_regions(pdev);
1255 bcm5700_freemem(dev);
1262 pci_set_drvdata(pdev, dev);
1264 memcpy(dev->dev_addr, pDevice->NodeAddress, 6);
/* NOTE(review): trailing comma operator below fuses this statement with
 * the following printk; legal C but almost certainly meant to be ';'. */
1265 pUmDevice->name = board_info[ent->driver_data].name,
1266 printk(KERN_INFO "%s: %s found at mem %lx, IRQ %d, ",
1267 dev->name, pUmDevice->name, dev->base_addr,
1269 printk("node addr ");
1270 for (i = 0; i < 6; i++) {
1271 printk("%2.2x", dev->dev_addr[i]);
/* Identify and announce the attached PHY / SerDes type. */
1275 printk(KERN_INFO "%s: ", dev->name);
1276 if ((pDevice->PhyId & PHY_ID_MASK) == PHY_BCM5400_PHY_ID)
1277 printk("Broadcom BCM5400 Copper ");
1278 else if ((pDevice->PhyId & PHY_ID_MASK) == PHY_BCM5401_PHY_ID)
1279 printk("Broadcom BCM5401 Copper ");
1280 else if ((pDevice->PhyId & PHY_ID_MASK) == PHY_BCM5411_PHY_ID)
1281 printk("Broadcom BCM5411 Copper ");
1282 else if (((pDevice->PhyId & PHY_ID_MASK) == PHY_BCM5701_PHY_ID) &&
1283 !(pDevice->TbiFlags & ENABLE_TBI_FLAG)) {
1284 printk("Broadcom BCM5701 Integrated Copper ");
1286 else if ((pDevice->PhyId & PHY_ID_MASK) == PHY_BCM5703_PHY_ID) {
1287 printk("Broadcom BCM5703 Integrated ");
1288 if (pDevice->TbiFlags & ENABLE_TBI_FLAG)
1293 else if ((pDevice->PhyId & PHY_ID_MASK) == PHY_BCM5704_PHY_ID) {
1294 printk("Broadcom BCM5704 Integrated ");
1295 if (pDevice->TbiFlags & ENABLE_TBI_FLAG)
1300 else if (pDevice->PhyFlags & PHY_IS_FIBER){
1301 if(( pDevice->PhyId & PHY_ID_MASK ) == PHY_BCM5780_PHY_ID)
1302 printk("Broadcom BCM5780S Integrated Serdes ");
1304 if(( pDevice->PhyId & PHY_ID_MASK ) == PHY_BCM5714_PHY_ID)
1305 printk("Broadcom BCM5714S Integrated Serdes ");
1307 else if ((pDevice->PhyId & PHY_ID_MASK) == PHY_BCM5705_PHY_ID)
1308 printk("Broadcom BCM5705 Integrated Copper ");
1309 else if ((pDevice->PhyId & PHY_ID_MASK) == PHY_BCM5750_PHY_ID)
1310 printk("Broadcom BCM5750 Integrated Copper ");
1312 else if ((pDevice->PhyId & PHY_ID_MASK) == PHY_BCM5714_PHY_ID)
1313 printk("Broadcom BCM5714 Integrated Copper ");
1314 else if ((pDevice->PhyId & PHY_ID_MASK) == PHY_BCM5780_PHY_ID)
1315 printk("Broadcom BCM5780 Integrated Copper ");
1317 else if ((pDevice->PhyId & PHY_ID_MASK) == PHY_BCM5752_PHY_ID)
1318 printk("Broadcom BCM5752 Integrated Copper ");
1319 else if ((pDevice->PhyId & PHY_ID_MASK) == PHY_BCM8002_PHY_ID)
1320 printk("Broadcom BCM8002 SerDes ");
1321 else if (pDevice->TbiFlags & ENABLE_TBI_FLAG) {
1322 if (T3_ASIC_REV(pDevice->ChipRevId) == T3_ASIC_REV_5703) {
1323 printk("Broadcom BCM5703 Integrated SerDes ");
1325 else if (T3_ASIC_REV(pDevice->ChipRevId) == T3_ASIC_REV_5704) {
1326 printk("Broadcom BCM5704 Integrated SerDes ");
1329 printk("Agilent HDMP-1636 SerDes ");
1335 printk("transceiver found\n");
/* Optional offload features, gated by module parameters per unit. */
1337 #if (LINUX_VERSION_CODE >= 0x20400)
1338 if (scatter_gather[board_idx]) {
1339 dev->features |= NETIF_F_SG;
/* 5788 cannot do high-DMA even with a 64-bit mask set. */
1340 if (pUmDevice->using_dac && !(pDevice->Flags & BCM5788_FLAG))
1341 dev->features |= NETIF_F_HIGHDMA;
1343 if ((pDevice->TaskOffloadCap & LM_TASK_OFFLOAD_TX_TCP_CHECKSUM) &&
1344 tx_checksum[board_idx]) {
1346 dev->features |= get_csum_flag( pDevice->ChipRevId);
1349 dev->features |= NETIF_F_HW_VLAN_TX | NETIF_F_HW_VLAN_RX;
1352 /* On 5714/15/80 chips, Jumbo Frames and TSO cannot both be enabled at
1353 the same time. Since only one of these features can be enable at a
1354 time, we'll enable only Jumbo Frames and disable TSO when the user
1355 tries to enable both.
1357 dev->features &= ~NETIF_F_TSO;
1359 if ((pDevice->TaskToOffload & LM_TASK_OFFLOAD_TCP_SEGMENTATION) &&
1360 (enable_tso[board_idx])) {
1361 if (T3_ASIC_5714_FAMILY(pDevice->ChipRevId) &&
1362 (dev->mtu > 1500)) {
1363 printk(KERN_ALERT "%s: Jumbo Frames and TSO cannot simultaneously be enabled. Jumbo Frames enabled. TSO disabled.\n", dev->name);
1365 dev->features |= NETIF_F_TSO;
1369 printk(KERN_INFO "%s: Scatter-gather %s, 64-bit DMA %s, Tx Checksum %s, ",
1371 (char *) ((dev->features & NETIF_F_SG) ? "ON" : "OFF"),
1372 (char *) ((dev->features & NETIF_F_HIGHDMA) ? "ON" : "OFF"),
1373 (char *) ((dev->features & get_csum_flag( pDevice->ChipRevId)) ? "ON" : "OFF"));
/* 5700 B0 silicon has broken rx checksum - never advertise it there. */
1375 if ((pDevice->ChipRevId != T3_CHIP_ID_5700_B0) &&
1376 rx_checksum[board_idx])
1377 printk("Rx Checksum ON");
1379 printk("Rx Checksum OFF");
1381 printk(", 802.1Q VLAN ON");
1384 if (dev->features & NETIF_F_TSO) {
1389 #ifdef BCM_NAPI_RXPOLL
1390 printk(", NAPI ON");
1395 bcm5700_proc_create_dev(dev);
1398 tasklet_init(&pUmDevice->tasklet, bcm5700_tasklet,
1399 (unsigned long) pUmDevice);
/* Warn if the 5704 is strapped for unsupported HW-based teaming. */
1401 if (T3_ASIC_REV(pDevice->ChipRevId) == T3_ASIC_REV_5704) {
1402 if ((REG_RD(pDevice, PciCfg.DualMacCtrl) &
1403 T3_DUAL_MAC_CH_CTRL_MASK) == 3) {
1405 printk(KERN_WARNING "%s: Device is configured for Hardware Based Teaming which is not supported with this operating system. Please consult the user diagnostic guide to disable Turbo Teaming.\n", dev->name);
/* AMD762 northbridge: force PCI ordering compliance via reg 0x4C bit 1. */
1409 #if (LINUX_VERSION_CODE > 0x20605)
1411 if ((pci_dev = pci_get_device(0x1022, 0x700c, NULL))) {
1413 if ((pci_dev = pci_find_device(0x1022, 0x700c, NULL))) {
1417 /* Found AMD 762 North bridge */
1418 pci_read_config_dword(pci_dev, 0x4c, &val);
1419 if ((val & 0x02) == 0) {
1420 pci_write_config_dword(pci_dev, 0x4c, val | 0x02);
1421 printk(KERN_INFO "%s: Setting AMD762 Northbridge to enable PCI ordering compliance\n", bcm5700_driver);
1425 #if (LINUX_VERSION_CODE > 0x20605)
1427 pci_dev_put(pci_dev);
/* ServerWorks bridge 1066:0017 has broken MSI - remember to avoid it. */
1429 #if defined(CONFIG_PCI_MSI) || defined(CONFIG_PCI_USE_VECTOR)
1431 if ((pci_dev = pci_get_device(0x1066, 0x0017, NULL))) {
1432 bcm_msi_chipset_bug = 1;
1434 pci_dev_put(pci_dev);
/*
 * bcm5700_remove_one - PCI remove entry point: tear down one adapter.
 *
 * Removes the /proc entry, drops the module load count (unregistering
 * the ioctl32 conversion with the last unit), unregisters the netdev,
 * unmaps the register BAR, releases PCI regions and clears drvdata.
 */
1442 static void __devexit
1443 bcm5700_remove_one (struct pci_dev *pdev)
1445 struct net_device *dev = pci_get_drvdata (pdev);
1446 PUM_DEVICE_BLOCK pUmDevice = (PUM_DEVICE_BLOCK)dev->priv;
1449 bcm5700_proc_remove_dev(dev);
/* Mirror of the load-count bookkeeping done in bcm5700_init_one(). */
1452 atomic_dec(&bcm5700_load_count);
1453 if (atomic_read(&bcm5700_load_count) == 0)
1454 unregister_ioctl32_conversion(SIOCNICE);
1456 unregister_netdev(dev);
1458 if (pUmDevice->lm_dev.pMappedMemBase)
1459 iounmap(pUmDevice->lm_dev.pMappedMemBase);
1461 pci_release_regions(pdev);
1463 #if (LINUX_VERSION_CODE < 0x020600)
1469 pci_set_drvdata(pdev, NULL);
/* Forward declaration: interrupt self-test used by bcm5700_open() to
 * verify that MSI delivery actually works on this platform. */
1473 int b57_test_intr(UM_DEVICE_BLOCK *pUmDevice);
/*
 * bcm5700_open - net_device open (ifup) handler.
 *
 * Sets up timers/heartbeats, optionally enables MSI (with a runtime
 * self-test falling back to INTx), requests the IRQ, initializes the
 * adapter via LM_InitializeAdapter(), programs the MAC address, starts
 * the periodic and statistics timers and finally starts the tx queue.
 *
 * NOTE(review): extract is truncated - return statements, some braces
 * and #endif lines are not visible here.
 */
1476 bcm5700_open(struct net_device *dev)
1478 PUM_DEVICE_BLOCK pUmDevice = (PUM_DEVICE_BLOCK)dev->priv;
1479 PLM_DEVICE_BLOCK pDevice = (PLM_DEVICE_BLOCK) pUmDevice;
1482 if (pUmDevice->suspended){
1485 /* delay for 6 seconds */
1486 pUmDevice->delayed_link_ind = (6 * HZ) / pUmDevice->timer_interval;
1489 #ifndef BCM_NAPI_RXPOLL
1490 pUmDevice->adaptive_expiry = HZ / pUmDevice->timer_interval;
/* TBI (fiber) links may need software polling of link state. */
1494 #ifdef INCLUDE_TBI_SUPPORT
1495 if ((pDevice->TbiFlags & ENABLE_TBI_FLAG) &&
1496 (pDevice->TbiFlags & TBI_POLLING_FLAGS)) {
1497 pUmDevice->poll_tbi_interval = HZ / pUmDevice->timer_interval;
/* Poll 4x faster on the 5703 (reason not visible in this extract). */
1498 if (T3_ASIC_REV(pDevice->ChipRevId) == T3_ASIC_REV_5703) {
1499 pUmDevice->poll_tbi_interval /= 4;
1501 pUmDevice->poll_tbi_expiry = pUmDevice->poll_tbi_interval;
1504 /* set this timer for 2 seconds */
1505 pUmDevice->asf_heartbeat = (2 * HZ) / pUmDevice->timer_interval;
/* MSI is only attempted on 575X+ silicon excluding known-bad revisions,
 * and never on chipsets flagged by bcm_msi_chipset_bug. */
1507 #if defined(CONFIG_PCI_MSI) || defined(CONFIG_PCI_USE_VECTOR)
1510 if ( ( (T3_ASIC_IS_575X_PLUS(pDevice->ChipRevId) ) &&
1511 (T3_ASIC_REV(pDevice->ChipRevId) != T3_ASIC_REV_5714_A0 ) &&
1512 (T3_CHIP_REV(pDevice->ChipRevId) != T3_CHIP_REV_5750_AX ) &&
1513 (T3_CHIP_REV(pDevice->ChipRevId) != T3_CHIP_REV_5750_BX ) ) &&
1514 !bcm_msi_chipset_bug ){
1516 if (disable_msi[pUmDevice->index]==1){
1517 /* do nothing-it's not turned on */
1519 pDevice->Flags |= USING_MSI_FLAG;
1521 REG_WR(pDevice, Msi.Mode, 2 );
1523 rc = pci_enable_msi(pUmDevice->pdev);
/* pci_enable_msi failed: revert to INTx mode. */
1526 pDevice->Flags &= ~ USING_MSI_FLAG;
1527 REG_WR(pDevice, Msi.Mode, 1 );
1535 if ((rc= request_irq(pUmDevice->pdev->irq, &bcm5700_interrupt, SA_SHIRQ, dev->name, dev)))
1538 #if defined(CONFIG_PCI_MSI) || defined(CONFIG_PCI_USE_VECTOR)
/* request_irq failed while in MSI mode: disable MSI before bailing. */
1540 if(pDevice->Flags & USING_MSI_FLAG) {
1542 pci_disable_msi(pUmDevice->pdev);
1543 pDevice->Flags &= ~USING_MSI_FLAG;
1544 REG_WR(pDevice, Msi.Mode, 1 );
1551 pUmDevice->opened = 1;
1552 if (LM_InitializeAdapter(pDevice) != LM_STATUS_SUCCESS) {
1553 pUmDevice->opened = 0;
1554 free_irq(dev->irq, dev);
1555 bcm5700_freemem(dev);
1559 bcm5700_set_vlan_mode(pUmDevice);
1560 bcm5700_init_counters(pUmDevice);
1562 if (pDevice->Flags & UNDI_FIX_FLAG) {
1563 printk(KERN_INFO "%s: Using indirect register access\n", dev->name);
/* A MAC address was administratively set before open: apply it if valid,
 * otherwise fall back to the factory NodeAddress. */
1566 if (memcmp(dev->dev_addr, pDevice->NodeAddress, 6))
1568 /* Do not use invalid eth addrs: any multicast & all zeros */
1569 if( is_valid_ether_addr(dev->dev_addr) ){
1570 LM_SetMacAddress(pDevice, dev->dev_addr);
1574 printk(KERN_INFO "%s: Invalid administered node address\n",dev->name);
1575 memcpy(dev->dev_addr, pDevice->NodeAddress, 6);
1579 if (tigon3_debug > 1)
1580 printk(KERN_DEBUG "%s: tigon3_open() irq %d.\n", dev->name, dev->irq);
1582 QQ_InitQueue(&pUmDevice->rx_out_of_buf_q.Container,
1583 MAX_RX_PACKET_DESC_COUNT);
1586 #if (LINUX_VERSION_CODE < 0x020300)
1590 atomic_set(&pUmDevice->intr_sem, 0);
1592 LM_EnableInterrupt(pDevice);
/* Runtime MSI self-test: if no interrupt arrives, fall back to INTx and
 * reinitialize the adapter. */
1594 #if defined(CONFIG_PCI_MSI) || defined(CONFIG_PCI_USE_VECTOR)
1596 if (pDevice->Flags & USING_MSI_FLAG){
1598 /* int test to check support on older machines */
1599 if (b57_test_intr(pUmDevice) != 1) {
1601 LM_DisableInterrupt(pDevice);
1602 free_irq(pUmDevice->pdev->irq, dev);
1603 pci_disable_msi(pUmDevice->pdev);
1604 REG_WR(pDevice, Msi.Mode, 1 );
1605 pDevice->Flags &= ~USING_MSI_FLAG;
1607 rc = LM_ResetAdapter(pDevice);
1608 printk(KERN_ALERT " The MSI support in this system is not functional.\n");
1610 if (rc == LM_STATUS_SUCCESS)
1616 rc = request_irq(pUmDevice->pdev->irq, &bcm5700_interrupt,
1617 SA_SHIRQ, dev->name, dev);
1622 bcm5700_freemem(dev);
1623 pUmDevice->opened = 0;
1628 pDevice->InitDone = TRUE;
1629 atomic_set(&pUmDevice->intr_sem, 0);
1630 LM_EnableInterrupt(pDevice);
/* Periodic housekeeping timer; see bcm5700_timer(). */
1635 init_timer(&pUmDevice->timer);
1636 pUmDevice->timer.expires = RUN_AT(pUmDevice->timer_interval);
1637 pUmDevice->timer.data = (unsigned long)dev;
1638 pUmDevice->timer.function = &bcm5700_timer;
1639 add_timer(&pUmDevice->timer);
/* 5705+ chips need a separate timer to harvest hardware statistics. */
1641 if (T3_ASIC_IS_5705_BEYOND(pDevice->ChipRevId)) {
1642 init_timer(&pUmDevice->statstimer);
1643 pUmDevice->statstimer.expires = RUN_AT(pUmDevice->statstimer_interval);
1644 pUmDevice->statstimer.data = (unsigned long)dev;
1645 pUmDevice->statstimer.function = &bcm5700_stats_timer;
1646 add_timer(&pUmDevice->statstimer);
1649 if(pDevice->Flags & USING_MSI_FLAG)
1650 printk(KERN_INFO "%s: Using Message Signaled Interrupt (MSI) \n", dev->name);
1652 printk(KERN_INFO "%s: Using PCI INTX interrupt \n", dev->name);
1654 netif_start_queue(dev);
/*
 * bcm5700_stats_timer - periodic timer callback (5705+ chips) that pulls
 * hardware statistics via LM_GetStats() and re-arms itself.
 *
 * Skips the fetch while interrupts are gated (intr_sem), while the
 * device is suspended, or when the link is down.
 */
1661 bcm5700_stats_timer(unsigned long data)
1663 struct net_device *dev = (struct net_device *)data;
1664 PUM_DEVICE_BLOCK pUmDevice = (PUM_DEVICE_BLOCK)dev->priv;
1665 PLM_DEVICE_BLOCK pDevice = (PLM_DEVICE_BLOCK) pUmDevice;
1666 unsigned long flags = 0;
1668 if (!pUmDevice->opened)
1671 if (!atomic_read(&pUmDevice->intr_sem) &&
1672 !pUmDevice->suspended &&
1673 (pDevice->LinkStatus == LM_STATUS_LINK_ACTIVE)) {
/* LM_GetStats touches shared registers - take the global lock. */
1674 BCM5700_LOCK(pUmDevice, flags);
1675 LM_GetStats(pDevice);
1676 BCM5700_UNLOCK(pUmDevice, flags);
1679 pUmDevice->statstimer.expires = RUN_AT(pUmDevice->statstimer_interval);
1681 add_timer(&pUmDevice->statstimer);
/*
 * bcm5700_timer - main periodic housekeeping timer callback.
 *
 * Responsibilities visible in this extract: optional TBI link polling,
 * delayed link-status indication, recovering a stalled tx path (forcing
 * an interrupt when a status block sits unprocessed), adaptive interrupt
 * coalescing, rx-buffer starvation recovery, the ASF firmware heartbeat,
 * and 5714-family fiber link checking.  Always re-arms itself.
 *
 * NOTE(review): extract is truncated - some braces/returns are missing.
 */
1686 bcm5700_timer(unsigned long data)
1688 struct net_device *dev = (struct net_device *)data;
1689 PUM_DEVICE_BLOCK pUmDevice = (PUM_DEVICE_BLOCK)dev->priv;
1690 PLM_DEVICE_BLOCK pDevice = (PLM_DEVICE_BLOCK) pUmDevice;
1691 unsigned long flags = 0;
1694 if (!pUmDevice->opened)
/* While gated or suspended, just re-arm and try again next tick. */
1697 if (atomic_read(&pUmDevice->intr_sem) || pUmDevice->suspended) {
1698 pUmDevice->timer.expires = RUN_AT(pUmDevice->timer_interval);
1699 add_timer(&pUmDevice->timer);
/* Software polling of TBI (fiber) link state when the hardware cannot
 * interrupt on link changes. */
1703 #ifdef INCLUDE_TBI_SUPPORT
1704 if ((pDevice->TbiFlags & TBI_POLLING_FLAGS) &&
1705 (--pUmDevice->poll_tbi_expiry <= 0)) {
1707 BCM5700_PHY_LOCK(pUmDevice, flags);
1708 value32 = REG_RD(pDevice, MacCtrl.Status);
/* Re-run PHY setup if the link state and PCS status disagree. */
1709 if (((pDevice->LinkStatus == LM_STATUS_LINK_ACTIVE) &&
1710 ((value32 & (MAC_STATUS_LINK_STATE_CHANGED |
1711 MAC_STATUS_CFG_CHANGED)) ||
1712 !(value32 & MAC_STATUS_PCS_SYNCED)))
1714 ((pDevice->LinkStatus != LM_STATUS_LINK_ACTIVE) &&
1715 (value32 & (MAC_STATUS_PCS_SYNCED |
1716 MAC_STATUS_SIGNAL_DETECTED))))
1718 LM_SetupPhy(pDevice);
1720 BCM5700_PHY_UNLOCK(pUmDevice, flags);
1721 pUmDevice->poll_tbi_expiry = pUmDevice->poll_tbi_interval;
/* Countdown set at open time; indicate link status when it hits 1. */
1726 if (pUmDevice->delayed_link_ind > 0) {
1727 if (pUmDevice->delayed_link_ind == 1)
1728 MM_IndicateStatus(pDevice, pDevice->LinkStatus);
1730 pUmDevice->delayed_link_ind--;
1733 if (pUmDevice->crc_counter_expiry > 0)
1734 pUmDevice->crc_counter_expiry--;
/* Stall recovery: if a status block is pending but no ISR is running,
 * kick the chip to raise an interrupt. */
1736 if (!pUmDevice->interrupt) {
1737 if (!(pDevice->Flags & USE_TAGGED_STATUS_FLAG)) {
1738 BCM5700_LOCK(pUmDevice, flags);
1739 if (pDevice->pStatusBlkVirt->Status & STATUS_BLOCK_UPDATED) {
1740 /* This will generate an interrupt */
1741 REG_WR(pDevice, Grc.LocalCtrl,
1742 pDevice->GrcLocalCtrl |
1743 GRC_MISC_LOCAL_CTRL_SET_INT);
1746 REG_WR(pDevice, HostCoalesce.Mode,
1747 pDevice->CoalesceMode |
1748 HOST_COALESCE_ENABLE |
/* Write-DMA engine disabled means the chip is wedged mid-reset; skip. */
1751 if (!(REG_RD(pDevice, DmaWrite.Mode) &
1752 DMA_WRITE_MODE_ENABLE)) {
1753 BCM5700_UNLOCK(pUmDevice, flags);
1757 BCM5700_UNLOCK(pUmDevice, flags);
1759 if (pUmDevice->tx_queued) {
1760 pUmDevice->tx_queued = 0;
1761 netif_wake_queue(dev);
/* Pre-2.3.43 kernels have no watchdog - detect tx hangs by hand. */
1764 #if (LINUX_VERSION_CODE < 0x02032b)
1765 if ((QQ_GetEntryCnt(&pDevice->TxPacketFreeQ.Container) !=
1766 pDevice->TxPacketDescCnt) &&
1767 ((jiffies - dev->trans_start) > TX_TIMEOUT)) {
1769 printk(KERN_WARNING "%s: Tx hung\n", dev->name);
/* Re-tune interrupt coalescing once per second from traffic rates. */
1775 #ifndef BCM_NAPI_RXPOLL
1776 if (pUmDevice->adaptive_coalesce) {
1777 pUmDevice->adaptive_expiry--;
1778 if (pUmDevice->adaptive_expiry == 0) {
1779 pUmDevice->adaptive_expiry = HZ /
1780 pUmDevice->timer_interval;
1781 bcm5700_adapt_coalesce(pUmDevice);
/* Too many rx descriptors waiting for buffers: force an interrupt so
 * the ISR replenishes them. */
1786 if (QQ_GetEntryCnt(&pUmDevice->rx_out_of_buf_q.Container) >
1787 (unsigned int) pUmDevice->rx_buf_repl_panic_thresh) {
1788 /* Generate interrupt and let isr allocate buffers */
1789 REG_WR(pDevice, HostCoalesce.Mode, pDevice->CoalesceMode |
1790 HOST_COALESCE_ENABLE | HOST_COALESCE_NOW);
/* ASF heartbeat: tell management firmware the driver is alive, either
 * via mailbox registers or direct NIC memory writes. */
1794 if (pDevice->AsfFlags & ASF_ENABLED) {
1795 pUmDevice->asf_heartbeat--;
1796 if (pUmDevice->asf_heartbeat == 0) {
1797 if( (pDevice->Flags & UNDI_FIX_FLAG) ||
1798 (pDevice->Flags & ENABLE_PCIX_FIX_FLAG)) {
1799 MEM_WR_OFFSET(pDevice, T3_CMD_MAILBOX,
1800 T3_CMD_NICDRV_ALIVE2);
1801 MEM_WR_OFFSET(pDevice, T3_CMD_LENGTH_MAILBOX,
1803 MEM_WR_OFFSET(pDevice, T3_CMD_DATA_MAILBOX, 5);
1806 (T3_NIC_MBUF_POOL_ADDR +
1808 T3_CMD_NICDRV_ALIVE2, 1);
1810 (T3_NIC_MBUF_POOL_ADDR +
1811 T3_CMD_LENGTH_MAILBOX),4,1);
1813 (T3_NIC_MBUF_POOL_ADDR +
1814 T3_CMD_DATA_MAILBOX),5,1);
/* BIT_14 in RxCpuEvent notifies the rx CPU of the new heartbeat. */
1817 value32 = REG_RD(pDevice, Grc.RxCpuEvent);
1818 REG_WR(pDevice, Grc.RxCpuEvent, value32 | BIT_14);
1819 pUmDevice->asf_heartbeat = (2 * HZ) /
1820 pUmDevice->timer_interval;
1825 if (pDevice->PhyFlags & PHY_IS_FIBER){
1826 BCM5700_PHY_LOCK(pUmDevice, flags);
1827 LM_5714_FamFiberCheckLink(pDevice);
1828 BCM5700_PHY_UNLOCK(pUmDevice, flags);
1831 pUmDevice->timer.expires = RUN_AT(pUmDevice->timer_interval);
1832 add_timer(&pUmDevice->timer);
/*
 * bcm5700_init_counters - reset the per-device software counters.
 *
 * Seeds the adaptive-coalescing state from the current hardware
 * coalescing parameters (non-NAPI builds only) and zeroes all the
 * tx/rx bookkeeping counters.
 */
1836 bcm5700_init_counters(PUM_DEVICE_BLOCK pUmDevice)
1839 #ifndef BCM_NAPI_RXPOLL
1840 LM_DEVICE_BLOCK *pDevice = &pUmDevice->lm_dev;
1842 pUmDevice->rx_curr_coalesce_frames = pDevice->RxMaxCoalescedFrames;
1843 pUmDevice->rx_curr_coalesce_ticks = pDevice->RxCoalescingTicks;
1844 pUmDevice->tx_curr_coalesce_frames = pDevice->TxMaxCoalescedFrames;
1845 pUmDevice->rx_last_cnt = 0;
1846 pUmDevice->tx_last_cnt = 0;
1849 pUmDevice->phy_crc_count = 0;
1851 pUmDevice->tx_zc_count = 0;
1852 pUmDevice->tx_chksum_count = 0;
1853 pUmDevice->tx_himem_count = 0;
1854 pUmDevice->rx_good_chksum_count = 0;
1855 pUmDevice->rx_bad_chksum_count = 0;
1858 pUmDevice->tso_pkt_count = 0;
/*
 * bcm5700_do_adapt_coalesce - apply a new set of interrupt-coalescing
 * parameters (rx/tx frame counts and rx ticks) to the hardware and
 * record them in the UM device block.  Non-NAPI builds only.
 *
 * Bails out without changing anything if another CPU already holds the
 * global lock, so the timer path never spins against the ISR.
 */
1864 #ifndef BCM_NAPI_RXPOLL
1866 bcm5700_do_adapt_coalesce(PUM_DEVICE_BLOCK pUmDevice,
1867 int rx_frames, int rx_ticks, int tx_frames, int rx_frames_intr)
1869 unsigned long flags = 0;
1870 LM_DEVICE_BLOCK *pDevice = &pUmDevice->lm_dev;
1872 if (pUmDevice->do_global_lock) {
/* Contended: skip this round rather than block the timer callback. */
1873 if (spin_is_locked(&pUmDevice->global_lock))
1875 spin_lock_irqsave(&pUmDevice->global_lock, flags);
1877 pUmDevice->rx_curr_coalesce_frames = rx_frames;
1878 pUmDevice->rx_curr_coalesce_ticks = rx_ticks;
1879 pUmDevice->tx_curr_coalesce_frames = tx_frames;
1880 pUmDevice->rx_curr_coalesce_frames_intr = rx_frames_intr;
1881 REG_WR(pDevice, HostCoalesce.RxMaxCoalescedFrames, rx_frames);
1883 REG_WR(pDevice, HostCoalesce.RxCoalescingTicks, rx_ticks);
1885 REG_WR(pDevice, HostCoalesce.TxMaxCoalescedFrames, tx_frames);
1887 REG_WR(pDevice, HostCoalesce.RxMaxCoalescedFramesDuringInt,
1890 BCM5700_UNLOCK(pUmDevice, flags);
/*
 * bcm5700_adapt_coalesce - pick coalescing parameters from recent
 * traffic volume.
 *
 * Computes a weighted packet-rate estimate from the unicast rx/tx
 * counter deltas since the last call (rx weighted 2:1 over tx), then
 * selects the LO / DEFAULT / HI parameter set accordingly, writing the
 * hardware only when the setting actually changes.
 */
1895 bcm5700_adapt_coalesce(PUM_DEVICE_BLOCK pUmDevice)
1897 PLM_DEVICE_BLOCK pDevice = &pUmDevice->lm_dev;
1898 uint rx_curr_cnt, tx_curr_cnt, rx_delta, tx_delta, total_delta;
1900 rx_curr_cnt = pDevice->pStatsBlkVirt->ifHCInUcastPkts.Low;
1901 tx_curr_cnt = pDevice->pStatsBlkVirt->ifHCOutUcastPkts.Low;
1902 if ((rx_curr_cnt <= pUmDevice->rx_last_cnt) ||
1903 (tx_curr_cnt < pUmDevice->tx_last_cnt)) {
1905 /* skip if there is counter rollover */
1906 pUmDevice->rx_last_cnt = rx_curr_cnt;
1907 pUmDevice->tx_last_cnt = tx_curr_cnt;
1911 rx_delta = rx_curr_cnt - pUmDevice->rx_last_cnt;
1912 tx_delta = tx_curr_cnt - pUmDevice->tx_last_cnt;
/* (2*rx + tx)/3, doubled - rx traffic dominates the decision. */
1913 total_delta = (((rx_delta + rx_delta) + tx_delta) / 3) << 1;
1915 pUmDevice->rx_last_cnt = rx_curr_cnt;
1916 pUmDevice->tx_last_cnt = tx_curr_cnt;
1918 if (total_delta < ADAPTIVE_LO_PKT_THRESH) {
1919 if (pUmDevice->rx_curr_coalesce_frames !=
1920 ADAPTIVE_LO_RX_MAX_COALESCED_FRAMES) {
1922 bcm5700_do_adapt_coalesce(pUmDevice,
1923 ADAPTIVE_LO_RX_MAX_COALESCED_FRAMES,
1924 ADAPTIVE_LO_RX_COALESCING_TICKS,
1925 ADAPTIVE_LO_TX_MAX_COALESCED_FRAMES,
1926 ADAPTIVE_LO_RX_MAX_COALESCED_FRAMES_DURING_INT);
1929 else if (total_delta < ADAPTIVE_HI_PKT_THRESH) {
1930 if (pUmDevice->rx_curr_coalesce_frames !=
1931 DEFAULT_RX_MAX_COALESCED_FRAMES) {
1933 bcm5700_do_adapt_coalesce(pUmDevice,
1934 DEFAULT_RX_MAX_COALESCED_FRAMES,
1935 DEFAULT_RX_COALESCING_TICKS,
1936 DEFAULT_TX_MAX_COALESCED_FRAMES,
1937 DEFAULT_RX_MAX_COALESCED_FRAMES_DURING_INT);
1941 if (pUmDevice->rx_curr_coalesce_frames !=
1942 ADAPTIVE_HI_RX_MAX_COALESCED_FRAMES) {
1944 bcm5700_do_adapt_coalesce(pUmDevice,
1945 ADAPTIVE_HI_RX_MAX_COALESCED_FRAMES,
1946 ADAPTIVE_HI_RX_COALESCING_TICKS,
1947 ADAPTIVE_HI_TX_MAX_COALESCED_FRAMES,
1948 ADAPTIVE_HI_RX_MAX_COALESCED_FRAMES_DURING_INT);
/*
 * bcm5700_reset - tx-timeout handler: full adapter reset and restart.
 *
 * If the tx queue stalled while TSO was on, TSO is permanently disabled
 * for this device (known hardware/firmware issue path).  Stops the
 * queue, masks interrupts, resets and reprograms the adapter under the
 * PHY lock, then re-enables interrupts and wakes the queue.
 */
1957 bcm5700_reset(struct net_device *dev)
1959 PUM_DEVICE_BLOCK pUmDevice = (PUM_DEVICE_BLOCK)dev->priv;
1960 PLM_DEVICE_BLOCK pDevice = (PLM_DEVICE_BLOCK) pUmDevice;
1961 unsigned long flags;
/* Tx hang while TSO active: turn TSO off before restarting. */
1965 if( (dev->features & NETIF_F_TSO) &&
1966 (pUmDevice->tx_full) ) {
1968 dev->features &= ~NETIF_F_TSO;
1972 netif_stop_queue(dev);
1973 bcm5700_intr_off(pUmDevice);
1974 BCM5700_PHY_LOCK(pUmDevice, flags);
1975 LM_ResetAdapter(pDevice);
1976 pDevice->InitDone = TRUE;
1977 bcm5700_do_rx_mode(dev);
1978 bcm5700_set_vlan_mode(pUmDevice);
1979 bcm5700_init_counters(pUmDevice);
/* Reapply an administratively-set MAC address after the reset. */
1980 if (memcmp(dev->dev_addr, pDevice->NodeAddress, 6)) {
1981 LM_SetMacAddress(pDevice, dev->dev_addr);
1983 BCM5700_PHY_UNLOCK(pUmDevice, flags);
/* intr_sem=1 makes the next ISR pass re-sync state before servicing. */
1984 atomic_set(&pUmDevice->intr_sem, 1);
1985 bcm5700_intr_on(pUmDevice);
1986 netif_wake_queue(dev);
/*
 * bcm5700_set_vlan_mode - program whether the chip strips VLAN tags.
 *
 * AUTO mode resolves to FORCED strip when ASF management firmware is
 * active (firmware needs untagged frames), otherwise NORMAL strip.
 * In NORMAL mode tags are kept unless a VLAN group (or NICE rx hook)
 * is registered.  Writes the receive mask only when it changed.
 */
1990 bcm5700_set_vlan_mode(UM_DEVICE_BLOCK *pUmDevice)
1992 LM_DEVICE_BLOCK *pDevice = &pUmDevice->lm_dev;
1993 LM_UINT32 ReceiveMask = pDevice->ReceiveMask;
1994 int vlan_tag_mode = pUmDevice->vlan_tag_mode;
1996 if (vlan_tag_mode == VLAN_TAG_MODE_AUTO_STRIP) {
1997 if (pDevice->AsfFlags & ASF_ENABLED) {
1998 vlan_tag_mode = VLAN_TAG_MODE_FORCED_STRIP;
2001 vlan_tag_mode = VLAN_TAG_MODE_NORMAL_STRIP;
2004 if (vlan_tag_mode == VLAN_TAG_MODE_NORMAL_STRIP) {
2005 ReceiveMask |= LM_KEEP_VLAN_TAG;
/* With an 802.1Q group registered, let the stack handle stripping. */
2007 if (pUmDevice->vlgrp)
2008 ReceiveMask &= ~LM_KEEP_VLAN_TAG;
2011 if (pUmDevice->nice_rx)
2012 ReceiveMask &= ~LM_KEEP_VLAN_TAG;
2015 else if (vlan_tag_mode == VLAN_TAG_MODE_FORCED_STRIP) {
2016 ReceiveMask &= ~LM_KEEP_VLAN_TAG;
/* Avoid a redundant (and slow) mask write when nothing changed. */
2018 if (ReceiveMask != pDevice->ReceiveMask)
2020 LM_SetReceiveMask(pDevice, ReceiveMask);
/*
 * bcm5700_poll_wait - (NAPI builds) sleep in 1-jiffy increments until
 * the in-progress NAPI rx poll finishes; used before reconfiguring the
 * device so the poll handler is not running concurrently.
 */
2025 bcm5700_poll_wait(UM_DEVICE_BLOCK *pUmDevice)
2027 #ifdef BCM_NAPI_RXPOLL
2028 while (pUmDevice->lm_dev.RxPoll) {
2029 current->state = TASK_INTERRUPTIBLE;
2030 schedule_timeout(1);
/*
 * bcm5700_vlan_rx_register - 802.1Q callback: attach (or detach, when
 * vlgrp is NULL) the VLAN group, updating hardware tag stripping with
 * interrupts off and NAPI polling quiesced.
 */
2038 bcm5700_vlan_rx_register(struct net_device *dev, struct vlan_group *vlgrp)
2040 PUM_DEVICE_BLOCK pUmDevice = (PUM_DEVICE_BLOCK) dev->priv;
2042 bcm5700_intr_off(pUmDevice);
2043 bcm5700_poll_wait(pUmDevice);
2044 pUmDevice->vlgrp = vlgrp;
2045 bcm5700_set_vlan_mode(pUmDevice);
2046 bcm5700_intr_on(pUmDevice);
/*
 * bcm5700_vlan_rx_kill_vid - 802.1Q callback: remove one VLAN id from
 * the group's device table, with interrupts off and polling quiesced.
 */
2050 bcm5700_vlan_rx_kill_vid(struct net_device *dev, uint16_t vid)
2052 PUM_DEVICE_BLOCK pUmDevice = (PUM_DEVICE_BLOCK) dev->priv;
2054 bcm5700_intr_off(pUmDevice);
2055 bcm5700_poll_wait(pUmDevice);
2056 if (pUmDevice->vlgrp) {
2057 pUmDevice->vlgrp->vlan_devices[vid] = NULL;
2059 bcm5700_intr_on(pUmDevice);
/*
 * bcm5700_start_xmit - hard_start_xmit handler: queue one sk_buff.
 *
 * Drops frames while the link is down / device uninitialized, handles
 * tx-ring-full backpressure (stop queue, re-check, wake), fills in the
 * LM_PACKET descriptor (checksum flags, fragment count, VLAN tag, TSO
 * pseudo-header fixups) and hands it to LM_SendPacket() under the
 * global lock.
 *
 * NOTE(review): extract is truncated - several return statements and
 * braces are not visible here.
 */
2064 bcm5700_start_xmit(struct sk_buff *skb, struct net_device *dev)
2066 PUM_DEVICE_BLOCK pUmDevice = (PUM_DEVICE_BLOCK)dev->priv;
2067 PLM_DEVICE_BLOCK pDevice = (PLM_DEVICE_BLOCK) pUmDevice;
2069 PUM_PACKET pUmPacket;
2070 unsigned long flags = 0;
2073 vlan_tag_t *vlan_tag;
2077 uint16_t ip_tcp_len, tcp_opt_len, tcp_seg_flags;
2080 if ((pDevice->LinkStatus == LM_STATUS_LINK_DOWN) ||
2081 !pDevice->InitDone || pUmDevice->suspended)
2087 #if (LINUX_VERSION_CODE < 0x02032b)
2088 if (test_and_set_bit(0, &dev->tbusy)) {
/* If the ISR is active on another CPU, defer: stop the queue and let
 * the ISR/timer re-wake it (RQM 289636 workaround). */
2093 if (pUmDevice->do_global_lock && pUmDevice->interrupt) {
2094 /* Fix for RQM 289636 */
2095 /* netif_stop_queue(dev); */
2096 bcm5700_netif_stop_queue(dev);
2097 pUmDevice->tx_queued = 1;
/* Re-check: interrupt may have completed between test and stop. */
2098 if (!pUmDevice->interrupt) {
2099 netif_wake_queue(dev);
2100 pUmDevice->tx_queued = 0;
2105 pPacket = (PLM_PACKET)
2106 QQ_PopHead(&pDevice->TxPacketFreeQ.Container);
/* No free tx descriptor: apply backpressure, then re-check for a race
 * with the completion path freeing descriptors. */
2108 /* Fix for RQM 289636 */
2109 /* netif_stop_queue(dev); */
2110 bcm5700_netif_stop_queue(dev);
2111 pUmDevice->tx_full = 1;
2112 if (QQ_GetEntryCnt(&pDevice->TxPacketFreeQ.Container)) {
2113 netif_wake_queue(dev);
2114 pUmDevice->tx_full = 0;
2118 pUmPacket = (PUM_PACKET) pPacket;
2119 pUmPacket->skbuff = skb;
/* Stack requested hardware checksum for this frame. */
2121 if (skb->ip_summed == CHECKSUM_HW) {
2122 pPacket->Flags = SND_BD_FLAG_TCP_UDP_CKSUM;
2124 pUmDevice->tx_chksum_count++;
2131 frag_no = skb_shinfo(skb)->nr_frags;
/* Not enough send BDs for all fragments: backpressure as above. */
2135 if (atomic_read(&pDevice->SendBdLeft) < (frag_no + 1)) {
2136 /* Fix for RQM 289636 */
2137 /* netif_stop_queue(dev); */
2138 bcm5700_netif_stop_queue(dev);
2139 pUmDevice->tx_full = 1;
2140 QQ_PushHead(&pDevice->TxPacketFreeQ.Container, pPacket);
2141 if (atomic_read(&pDevice->SendBdLeft) >= (frag_no + 1)) {
2142 netif_wake_queue(dev);
2143 pUmDevice->tx_full = 0;
2148 pPacket->u.Tx.FragCount = frag_no + 1;
2150 if (pPacket->u.Tx.FragCount > 1)
2151 pUmDevice->tx_zc_count++;
/* VLAN tag from the 802.1Q layer, or from the NICE cb[] convention
 * (signature 0x5555) when no vlgrp is registered. */
2155 if (pUmDevice->vlgrp && vlan_tx_tag_present(skb)) {
2156 pPacket->VlanTag = vlan_tx_tag_get(skb);
2157 pPacket->Flags |= SND_BD_FLAG_VLAN_TAG;
2161 vlan_tag = (vlan_tag_t *) &skb->cb[0];
2162 if (vlan_tag->signature == 0x5555) {
2163 pPacket->VlanTag = vlan_tag->tag;
2164 pPacket->Flags |= SND_BD_FLAG_VLAN_TAG;
2165 vlan_tag->signature = 0;
/* TSO path: prepare pseudo-header checksums and per-chip segment
 * flags for frames larger than the MTU. */
2170 if ((mss = (LM_UINT32) skb_shinfo(skb)->tso_size) &&
2171 (skb->len > pDevice->TxMtu)) {
2173 #if (LINUX_VERSION_CODE >= 0x02060c)
/* Header must be writable for the checksum fixups below. */
2175 if (skb_header_cloned(skb) &&
2176 pskb_expand_head(skb, 0, 0, GFP_ATOMIC)) {
2182 pUmDevice->tso_pkt_count++;
2184 pPacket->Flags |= SND_BD_FLAG_CPU_PRE_DMA |
2185 SND_BD_FLAG_CPU_POST_DMA;
2188 if (skb->h.th->doff > 5) {
2189 tcp_opt_len = (skb->h.th->doff - 5) << 2;
2191 ip_tcp_len = (skb->nh.iph->ihl << 2) + sizeof(struct tcphdr);
2192 skb->nh.iph->check = 0;
/* 575X+ hardware computes the TCP checksum itself; older chips need
 * the pseudo-header checksum pre-seeded. */
2194 if ( T3_ASIC_IS_575X_PLUS(pDevice->ChipRevId) ){
2195 skb->h.th->check = 0;
2196 pPacket->Flags &= ~SND_BD_FLAG_TCP_UDP_CKSUM;
2199 skb->h.th->check = ~csum_tcpudp_magic(
2200 skb->nh.iph->saddr, skb->nh.iph->daddr,
2204 skb->nh.iph->tot_len = htons(mss + ip_tcp_len + tcp_opt_len);
/* Encode IP/TCP option lengths into the segment flags; the bit
 * position differs between 5705+ and older chips. */
2207 if (tcp_opt_len || (skb->nh.iph->ihl > 5)) {
2208 if ( T3_ASIC_IS_5705_BEYOND(pDevice->ChipRevId) ){
2210 ((skb->nh.iph->ihl - 5) +
2211 (tcp_opt_len >> 2)) << 11;
2215 ((skb->nh.iph->ihl - 5) +
2216 (tcp_opt_len >> 2)) << 12;
2219 pPacket->u.Tx.MaxSegmentSize = mss | tcp_seg_flags;
2223 pPacket->u.Tx.MaxSegmentSize = 0;
2226 BCM5700_LOCK(pUmDevice, flags);
2227 LM_SendPacket(pDevice, pPacket);
2228 BCM5700_UNLOCK(pUmDevice, flags);
2230 #if (LINUX_VERSION_CODE < 0x02032b)
2231 netif_wake_queue(dev);
2233 dev->trans_start = jiffies;
/*
 * bcm5700_poll - NAPI rx poll handler.
 *
 * Services up to min(*budget, dev->quota) rx packets through
 * LM_ServiceRxPoll(), replenishes rx buffers, indicates packets to the
 * stack, and when the budget was not exhausted completes the poll and
 * re-enables rx interrupts, taking care to re-trigger for status-block
 * updates that raced with the re-enable.
 *
 * NOTE(review): extract is truncated.  In particular the sequence
 * "pDevice->RxPoll = FALSE; if (pDevice->RxPoll)" looks like dead code
 * here - lines between the assignment and the test are missing from
 * this view; verify against the full source before changing anything.
 */
2239 #ifdef BCM_NAPI_RXPOLL
2241 bcm5700_poll(struct net_device *dev, int *budget)
2243 int orig_budget = *budget;
2245 UM_DEVICE_BLOCK *pUmDevice = (UM_DEVICE_BLOCK *) dev->priv;
2246 LM_DEVICE_BLOCK *pDevice = &pUmDevice->lm_dev;
2247 unsigned long flags = 0;
2250 if (orig_budget > dev->quota)
2251 orig_budget = dev->quota;
2253 BCM5700_LOCK(pUmDevice, flags);
2254 work_done = LM_ServiceRxPoll(pDevice, orig_budget);
2255 *budget -= work_done;
2256 dev->quota -= work_done;
/* Refill rx rings if descriptors are waiting for buffers. */
2258 if (QQ_GetEntryCnt(&pUmDevice->rx_out_of_buf_q.Container)) {
2259 replenish_rx_buffers(pUmDevice, 0);
2261 BCM5700_UNLOCK(pUmDevice, flags);
2263 MM_IndicateRxPackets(pDevice);
2264 BCM5700_LOCK(pUmDevice, flags);
2265 LM_QueueRxPackets(pDevice);
2266 BCM5700_UNLOCK(pUmDevice, flags);
/* Done for now: leave polling mode and re-enable rx interrupts. */
2268 if ((work_done < orig_budget) || atomic_read(&pUmDevice->intr_sem) ||
2269 pUmDevice->suspended) {
2271 netif_rx_complete(dev);
2272 BCM5700_LOCK(pUmDevice, flags);
2273 REG_WR(pDevice, Grc.Mode, pDevice->GrcMode);
2274 pDevice->RxPoll = FALSE;
2275 if (pDevice->RxPoll) {
2276 BCM5700_UNLOCK(pUmDevice, flags);
2279 /* Take care of possible missed rx interrupts */
2280 REG_RD_BACK(pDevice, Grc.Mode); /* flush the register write */
2281 tag = pDevice->pStatusBlkVirt->StatusTag;
/* A status block arrived while interrupts were masked: kick the
 * coalescing engine so an interrupt fires now. */
2282 if ((pDevice->pStatusBlkVirt->Status & STATUS_BLOCK_UPDATED) ||
2283 (pDevice->pStatusBlkVirt->Idx[0].RcvProdIdx !=
2284 pDevice->RcvRetConIdx)) {
2286 REG_WR(pDevice, HostCoalesce.Mode,
2287 pDevice->CoalesceMode | HOST_COALESCE_ENABLE |
2290 /* If a new status block is pending in the WDMA state machine */
2291 /* before the register write to enable the rx interrupt, */
2292 /* the new status block may DMA with no interrupt. In this */
2293 /* scenario, the tag read above will be older than the tag in */
2294 /* the pending status block and writing the older tag will */
2295 /* cause interrupt to be generated. */
2296 else if (pDevice->Flags & USE_TAGGED_STATUS_FLAG) {
2297 MB_REG_WR(pDevice, Mailbox.Interrupt[0].Low,
2299 /* Make sure we service tx in case some tx interrupts */
2301 if (atomic_read(&pDevice->SendBdLeft) <
2302 (T3_SEND_RCB_ENTRY_COUNT / 2)) {
2303 REG_WR(pDevice, HostCoalesce.Mode,
2304 pDevice->CoalesceMode |
2305 HOST_COALESCE_ENABLE |
2309 BCM5700_UNLOCK(pUmDevice, flags);
/*
 * Hard-IRQ handler for the BCM5700 device.
 * Acknowledges/masks the chip interrupt through the Interrupt[0] mailbox,
 * drains events via LM_ServiceInterrupts(), replenishes RX buffers (inline
 * in panic conditions, else via tasklet), and re-enables interrupts.
 * Returns IRQ_RETVAL(handled).
 * NOTE(review): this excerpt has lines elided (dropped braces / #else arms);
 * comments describe only the code that is visible here.
 */
2317 bcm5700_interrupt(int irq, void *dev_instance, struct pt_regs *regs)
2319 struct net_device *dev = (struct net_device *)dev_instance;
2320 PUM_DEVICE_BLOCK pUmDevice = (PUM_DEVICE_BLOCK)dev->priv;
2321 PLM_DEVICE_BLOCK pDevice = (PLM_DEVICE_BLOCK) pUmDevice;
2322 LM_UINT32 oldtag, newtag;
2323 int i, max_intr_loop;
2327 unsigned int handled = 1;
/* Device not fully initialized yet: do nothing. */
2329 if (!pDevice->InitDone) {
2331 return IRQ_RETVAL(handled);
2334 bcm5700_intr_lock(pUmDevice);
/* Interrupts administratively disabled (intr_sem held): leave the chip
 * masked (mailbox write of 1) and bail out. */
2335 if (atomic_read(&pUmDevice->intr_sem)) {
2336 MB_REG_WR(pDevice, Mailbox.Interrupt[0].Low, 1);
2337 bcm5700_intr_unlock(pUmDevice);
2339 return IRQ_RETVAL(handled);
/* Re-entrancy guard: the handler must not nest on itself. */
2342 if (test_and_set_bit(0, (void*)&pUmDevice->interrupt)) {
2343 printk(KERN_ERR "%s: Duplicate entry of the interrupt handler\n",
2345 bcm5700_intr_unlock(pUmDevice);
2347 return IRQ_RETVAL(handled);
/* Claim the interrupt if MSI is in use, the status block was updated,
 * or the PCI INTA line is actually asserted (shared-IRQ check). */
2350 if ((pDevice->Flags & USING_MSI_FLAG) ||
2351 (pDevice->pStatusBlkVirt->Status & STATUS_BLOCK_UPDATED) ||
2352 !(REG_RD(pDevice,PciCfg.PciState) & T3_PCI_STATE_INTERRUPT_NOT_ACTIVE) )
/* Interrupt self-test in progress: record success if the line (or MSI)
 * fired, then clear the test flag. */
2355 if (pUmDevice->intr_test) {
2356 if (!(REG_RD(pDevice, PciCfg.PciState) &
2357 T3_PCI_STATE_INTERRUPT_NOT_ACTIVE) ||
2358 pDevice->Flags & USING_MSI_FLAG ) {
2359 pUmDevice->intr_test_result = 1;
2361 pUmDevice->intr_test = 0;
2364 #ifdef BCM_NAPI_RXPOLL
/* Tagged-status mode: loop servicing events until the status tag stops
 * changing (or max_intr_loop iterations), then ack with the last tag. */
2369 if (pDevice->Flags & USE_TAGGED_STATUS_FLAG) {
2370 MB_REG_WR(pDevice, Mailbox.Interrupt[0].Low, 1);
2371 oldtag = pDevice->pStatusBlkVirt->StatusTag;
2373 for (i = 0; ; i++) {
2374 pDevice->pStatusBlkVirt->Status &= ~STATUS_BLOCK_UPDATED;
2376 LM_ServiceInterrupts(pDevice);
2377 newtag = pDevice->pStatusBlkVirt->StatusTag;
2378 if ((newtag == oldtag) || (i > max_intr_loop)) {
/* Ack by writing the tag into bits 31:24 of the mailbox. */
2379 MB_REG_WR(pDevice, Mailbox.Interrupt[0].Low, oldtag << 24);
2380 pDevice->LastTag = oldtag;
2381 if (pDevice->Flags & UNDI_FIX_FLAG) {
2382 REG_WR(pDevice, Grc.LocalCtrl,
2383 pDevice->GrcLocalCtrl | 0x2);
/* Non-tagged mode: mask (write 1), service, unmask (write 0), and
 * re-check STATUS_BLOCK_UPDATED in a bounded do/while loop. */
2396 MB_REG_WR(pDevice, Mailbox.Interrupt[0].Low, 1);
2397 pDevice->pStatusBlkVirt->Status &= ~STATUS_BLOCK_UPDATED;
2398 LM_ServiceInterrupts(pDevice);
2399 MB_REG_WR(pDevice, Mailbox.Interrupt[0].Low, 0);
/* Read back to flush the posted mailbox write. */
2400 dummy = MB_REG_RD(pDevice, Mailbox.Interrupt[0].Low);
2403 while ((pDevice->pStatusBlkVirt->Status & STATUS_BLOCK_UPDATED) &&
2404 (i < max_intr_loop));
2406 if (pDevice->Flags & UNDI_FIX_FLAG) {
2407 REG_WR(pDevice, Grc.LocalCtrl,
2408 pDevice->GrcLocalCtrl | 0x2);
2414 /* not my interrupt */
/* RX buffer replenish: if we are past the panic threshold (or a queue
 * retry is pending) do it right here in the ISR; otherwise defer the
 * lighter case to the tasklet. */
2419 repl_buf_count = QQ_GetEntryCnt(&pUmDevice->rx_out_of_buf_q.Container);
2420 if (((repl_buf_count > pUmDevice->rx_buf_repl_panic_thresh) ||
2421 pDevice->QueueAgain) &&
2422 (!test_and_set_bit(0, &pUmDevice->tasklet_busy))) {
2424 replenish_rx_buffers(pUmDevice, pUmDevice->rx_buf_repl_isr_limit);
2425 clear_bit(0, (void*)&pUmDevice->tasklet_busy);
2427 else if ((repl_buf_count > pUmDevice->rx_buf_repl_thresh) &&
2428 !pUmDevice->tasklet_pending) {
2430 pUmDevice->tasklet_pending = 1;
2431 tasklet_schedule(&pUmDevice->tasklet);
2434 #ifdef BCM_NAPI_RXPOLL
/* NAPI build: schedule the RX poll instead of refilling inline. */
2435 if (!pDevice->RxPoll &&
2436 QQ_GetEntryCnt(&pUmDevice->rx_out_of_buf_q.Container)) {
2437 pDevice->RxPoll = 1;
2438 MM_ScheduleRxPoll(pDevice);
2441 if (QQ_GetEntryCnt(&pUmDevice->rx_out_of_buf_q.Container)) {
2442 replenish_rx_buffers(pUmDevice, 0);
2445 if (QQ_GetEntryCnt(&pDevice->RxPacketFreeQ.Container) ||
2446 pDevice->QueueAgain) {
2448 LM_QueueRxPackets(pDevice);
/* Release the re-entrancy bit, drop the lock, and restart the TX queue
 * if transmit was throttled while we were servicing. */
2453 clear_bit(0, (void*)&pUmDevice->interrupt);
2454 bcm5700_intr_unlock(pUmDevice);
2455 if (pUmDevice->tx_queued) {
2456 pUmDevice->tx_queued = 0;
2457 netif_wake_queue(dev);
2459 return IRQ_RETVAL(handled);
/*
 * Tasklet body: replenishes RX buffers outside hard-IRQ context.
 * tasklet_busy is a re-entrancy guard (the comment below notes RH 7.2
 * Beta 3 tasklets could re-enter); hardware is only touched while the
 * device is open and not suspended.
 */
2465 bcm5700_tasklet(unsigned long data)
2467 PUM_DEVICE_BLOCK pUmDevice = (PUM_DEVICE_BLOCK)data;
2468 unsigned long flags = 0;
2470 /* RH 7.2 Beta 3 tasklets are reentrant */
/* Another instance is already running: just drop the pending flag. */
2471 if (test_and_set_bit(0, &pUmDevice->tasklet_busy)) {
2472 pUmDevice->tasklet_pending = 0;
2476 pUmDevice->tasklet_pending = 0;
2477 if (pUmDevice->opened && !pUmDevice->suspended) {
2478 BCM5700_LOCK(pUmDevice, flags);
2479 replenish_rx_buffers(pUmDevice, 0);
2480 BCM5700_UNLOCK(pUmDevice, flags);
2483 clear_bit(0, &pUmDevice->tasklet_busy);
/*
 * net_device close (ifdown) handler.
 * Stops the TX queue, shuts the chip down, kills timers, frees the IRQ
 * (and MSI vector if used), drops to D3 power unless WOL/ASF requires
 * otherwise, and releases the driver's DMA/kmalloc memory.
 * NOTE(review): several lines (braces, #else arms) are elided in this
 * excerpt; comments cover only the visible statements.
 */
2488 bcm5700_close(struct net_device *dev)
2491 PUM_DEVICE_BLOCK pUmDevice = (PUM_DEVICE_BLOCK)dev->priv;
2492 PLM_DEVICE_BLOCK pDevice = (PLM_DEVICE_BLOCK) pUmDevice;
2494 #if (LINUX_VERSION_CODE < 0x02032b)
2497 /* Fix for RQM 289636 */
2498 /* netif_stop_queue(dev); */
2499 bcm5700_netif_stop_queue(dev);
2500 pUmDevice->opened = 0;
/* Only power down fully when neither ASF management nor WOL needs the
 * chip alive (presumably — the guarded statements are elided here). */
2503 if( !(pDevice->AsfFlags & ASF_ENABLED) )
2506 if( enable_wol[pUmDevice->index] == 0 )
2508 printk(KERN_INFO "%s: %s NIC Link is DOWN\n", bcm5700_driver, dev->name);
2510 if (tigon3_debug > 1)
2511 printk(KERN_DEBUG "%s: Shutting down Tigon3\n",
2514 LM_MulticastClear(pDevice);
2515 bcm5700_shutdown(pUmDevice);
/* 5705+ chips keep statistics with a host timer; stop it. */
2517 if (T3_ASIC_IS_5705_BEYOND(pDevice->ChipRevId)) {
2518 del_timer_sync(&pUmDevice->statstimer);
2521 del_timer_sync(&pUmDevice->timer);
2523 free_irq(pUmDevice->pdev->irq, dev);
2525 #if defined(CONFIG_PCI_MSI) || defined(CONFIG_PCI_USE_VECTOR)
/* Tear down MSI and restore the chip's MSI mode register default. */
2527 if(pDevice->Flags & USING_MSI_FLAG) {
2528 pci_disable_msi(pUmDevice->pdev);
2529 REG_WR(pDevice, Msi.Mode, 1 );
2530 pDevice->Flags &= ~USING_MSI_FLAG;
2536 #if (LINUX_VERSION_CODE < 0x020300)
2541 LM_SetPowerState(pDevice, LM_POWER_STATE_D3);
2544 bcm5700_freemem(dev);
/* Reset the RX packet free queue to its full capacity for the next open. */
2546 QQ_InitQueue(&pDevice->RxPacketFreeQ.Container,
2547 MAX_RX_PACKET_DESC_COUNT);
/*
 * Frees every entry in the device's memory bookkeeping list.
 * Entries recorded with size 0 were kmalloc'ed and are kfree'd; all
 * others are PCI-consistent DMA buffers released with
 * pci_free_consistent(). Clears the status/stats block pointers and,
 * if the device is closed, also frees the ioctl-allocated list2 blocks.
 */
2553 bcm5700_freemem(struct net_device *dev)
2556 PUM_DEVICE_BLOCK pUmDevice = (PUM_DEVICE_BLOCK)dev->priv;
2557 LM_DEVICE_BLOCK *pDevice = &pUmDevice->lm_dev;
2559 for (i = 0; i < pUmDevice->mem_list_num; i++) {
/* size 0 marks a plain kmalloc allocation, not a DMA mapping. */
2560 if (pUmDevice->mem_size_list[i] == 0) {
2561 kfree(pUmDevice->mem_list[i]);
2564 pci_free_consistent(pUmDevice->pdev,
2565 (size_t) pUmDevice->mem_size_list[i],
2566 pUmDevice->mem_list[i],
2567 pUmDevice->dma_list[i]);
/* The status/stats blocks lived in the list just freed; null the
 * cached pointers so nothing dereferences freed memory. */
2571 pDevice->pStatusBlkVirt = 0;
2572 pDevice->pStatsBlkVirt = 0;
2573 pUmDevice->mem_list_num = 0;
2576 if (!pUmDevice->opened) {
2577 for (i = 0; i < MAX_MEM2; i++) {
2578 if (pUmDevice->mem_size_list2[i]) {
2579 bcm5700_freemem2(pUmDevice, i);
2588 /* Frees consistent memory allocated through ioctl */
2589 /* The memory to be freed is in mem_list2[index] */
2591 bcm5700_freemem2(UM_DEVICE_BLOCK *pUmDevice, int index)
2593 #if (LINUX_VERSION_CODE >= 0x020400)
2595 struct page *pg, *last_pg;
/* Un-reserve each page of the region (they were marked reserved so
 * they could be mmap'ed) before returning the DMA memory. */
2597 /* Probably won't work on some architectures */
/* NOTE(review): trailing comma chains this into the next assignment via
 * the comma operator; a semicolon was almost certainly intended. Works,
 * but should be fixed when this file is edited with full context. */
2598 ptr = pUmDevice->mem_list2[index],
2599 pg = virt_to_page(ptr);
2600 last_pg = virt_to_page(ptr + pUmDevice->mem_size_list2[index] - 1);
2602 #if (LINUX_VERSION_CODE > 0x020500)
2603 ClearPageReserved(pg);
2605 mem_map_unreserve(pg);
2610 pci_free_consistent(pUmDevice->pdev,
2611 (size_t) pUmDevice->mem_size_list2[index],
2612 pUmDevice->mem_list2[index],
2613 pUmDevice->dma_list2[index]);
/* Size 0 marks the slot as free for reuse. */
2614 pUmDevice->mem_size_list2[index] = 0;
/*
 * Returns the accumulated RX CRC error count.
 * On 5700/5701 copper PHYs the MAC stats are unreliable for CRC, so the
 * count is read from PHY register 0x14 (after enabling the counter via
 * bit 15 of register 0x1e) and accumulated in phy_crc_count; the MDIO
 * read is rate-limited to once per ~5 seconds via crc_counter_expiry.
 * All other chips read dot3StatsFCSErrors from the DMA'd stats block.
 */
2621 bcm5700_crc_count(PUM_DEVICE_BLOCK pUmDevice)
2623 PLM_DEVICE_BLOCK pDevice = &pUmDevice->lm_dev;
2625 PT3_STATS_BLOCK pStats = (PT3_STATS_BLOCK) pDevice->pStatsBlkVirt;
2626 unsigned long flags;
2628 if ((T3_ASIC_REV(pDevice->ChipRevId) == T3_ASIC_REV_5700 ||
2629 T3_ASIC_REV(pDevice->ChipRevId) == T3_ASIC_REV_5701) &&
2630 !(pDevice->TbiFlags & ENABLE_TBI_FLAG)) {
2632 if (!pUmDevice->opened || !pDevice->InitDone)
2638 /* regulate MDIO access during run time */
2639 if (pUmDevice->crc_counter_expiry > 0)
2640 return pUmDevice->phy_crc_count;
2642 pUmDevice->crc_counter_expiry = (5 * HZ) /
2643 pUmDevice->timer_interval;
2645 BCM5700_PHY_LOCK(pUmDevice, flags);
/* Enable the PHY CRC counter (bit 15 of shadow register 0x1e) if it
 * is not already on, then read the counter from register 0x14. */
2646 LM_ReadPhy(pDevice, 0x1e, &Value32);
2647 if ((Value32 & 0x8000) == 0)
2648 LM_WritePhy(pDevice, 0x1e, Value32 | 0x8000);
2649 LM_ReadPhy(pDevice, 0x14, &Value32);
2650 BCM5700_PHY_UNLOCK(pUmDevice, flags);
2651 /* Sometimes data on the MDIO bus can be corrupted */
2652 if (Value32 != 0xffff)
2653 pUmDevice->phy_crc_count += Value32;
2654 return pUmDevice->phy_crc_count;
/* No stats block mapped yet (elided branch presumably returns 0). */
2656 else if (pStats == 0) {
2660 return (MM_GETSTATS64(pStats->dot3StatsFCSErrors));
/*
 * Returns the total RX error count: CRC errors (via bcm5700_crc_count,
 * which handles the 5700/5701 PHY-counter quirk) plus alignment,
 * undersize, fragment, oversize and jabber counters from the stats block.
 * NOTE(review): the elided lines presumably guard against a NULL pStats.
 */
2665 bcm5700_rx_err_count(UM_DEVICE_BLOCK *pUmDevice)
2667 LM_DEVICE_BLOCK *pDevice = &pUmDevice->lm_dev;
2668 T3_STATS_BLOCK *pStats = (T3_STATS_BLOCK *) pDevice->pStatsBlkVirt;
2672 return (bcm5700_crc_count(pUmDevice) +
2673 MM_GETSTATS64(pStats->dot3StatsAlignmentErrors) +
2674 MM_GETSTATS64(pStats->etherStatsUndersizePkts) +
2675 MM_GETSTATS64(pStats->etherStatsFragments) +
2676 MM_GETSTATS64(pStats->dot3StatsFramesTooLong) +
2677 MM_GETSTATS64(pStats->etherStatsJabbers));
/*
 * net_device get_stats handler: translates the chip's DMA'd statistics
 * block into the kernel's struct net_device_stats (cached in
 * pUmDevice->stats). CRC and aggregate RX error counts go through the
 * helpers above so the 5700/5701 PHY workaround is applied.
 * NOTE(review): the elided lines presumably handle a NULL pStats.
 */
2680 STATIC struct net_device_stats *
2681 bcm5700_get_stats(struct net_device *dev)
2683 PUM_DEVICE_BLOCK pUmDevice = (PUM_DEVICE_BLOCK)dev->priv;
2684 PLM_DEVICE_BLOCK pDevice = (PLM_DEVICE_BLOCK) pUmDevice;
2685 PT3_STATS_BLOCK pStats = (PT3_STATS_BLOCK) pDevice->pStatsBlkVirt;
2686 struct net_device_stats *p_netstats = &pUmDevice->stats;
2691 /* Get stats from LM */
/* rx/tx packet totals are the sum of ucast + mcast + bcast counters. */
2692 p_netstats->rx_packets =
2693 MM_GETSTATS(pStats->ifHCInUcastPkts) +
2694 MM_GETSTATS(pStats->ifHCInMulticastPkts) +
2695 MM_GETSTATS(pStats->ifHCInBroadcastPkts);
2696 p_netstats->tx_packets =
2697 MM_GETSTATS(pStats->ifHCOutUcastPkts) +
2698 MM_GETSTATS(pStats->ifHCOutMulticastPkts) +
2699 MM_GETSTATS(pStats->ifHCOutBroadcastPkts);
2700 p_netstats->rx_bytes = MM_GETSTATS(pStats->ifHCInOctets);
2701 p_netstats->tx_bytes = MM_GETSTATS(pStats->ifHCOutOctets);
2702 p_netstats->tx_errors =
2703 MM_GETSTATS(pStats->dot3StatsInternalMacTransmitErrors) +
2704 MM_GETSTATS(pStats->dot3StatsCarrierSenseErrors) +
2705 MM_GETSTATS(pStats->ifOutDiscards) +
2706 MM_GETSTATS(pStats->ifOutErrors);
2707 p_netstats->multicast = MM_GETSTATS(pStats->ifHCInMulticastPkts);
2708 p_netstats->collisions = MM_GETSTATS(pStats->etherStatsCollisions);
2709 p_netstats->rx_length_errors =
2710 MM_GETSTATS(pStats->dot3StatsFramesTooLong) +
2711 MM_GETSTATS(pStats->etherStatsUndersizePkts);
2712 p_netstats->rx_over_errors = MM_GETSTATS(pStats->nicNoMoreRxBDs);
2713 p_netstats->rx_frame_errors =
2714 MM_GETSTATS(pStats->dot3StatsAlignmentErrors);
/* Helpers return 64-bit values; net_device_stats fields are long. */
2715 p_netstats->rx_crc_errors = (unsigned long)
2716 bcm5700_crc_count(pUmDevice);
2717 p_netstats->rx_errors = (unsigned long)
2718 bcm5700_rx_err_count(pUmDevice);
2720 p_netstats->tx_aborted_errors = MM_GETSTATS(pStats->ifOutDiscards);
2721 p_netstats->tx_carrier_errors =
2722 MM_GETSTATS(pStats->dot3StatsCarrierSenseErrors);
/*
 * Quiesces the chip for suspend / offline self-test: disables interrupts,
 * marks the link down, stops TX, kills the replenish tasklet, waits for
 * in-flight handlers via bcm5700_poll_wait(), then sets suspended=1 and
 * performs a suspend-style chip reset.
 */
2728 b57_suspend_chip(UM_DEVICE_BLOCK *pUmDevice)
2730 LM_DEVICE_BLOCK *pDevice = &pUmDevice->lm_dev;
2732 if (pUmDevice->opened) {
2733 bcm5700_intr_off(pUmDevice);
2734 netif_carrier_off(pUmDevice->dev);
2735 netif_stop_queue(pUmDevice->dev);
2737 tasklet_kill(&pUmDevice->tasklet);
/* Wait for any handler already running before resetting the chip. */
2739 bcm5700_poll_wait(pUmDevice);
2741 pUmDevice->suspended = 1;
2742 LM_ShutdownChip(pDevice, LM_SUSPEND_RESET);
/*
 * Counterpart of b57_suspend_chip(): clears the suspended flag and, when
 * the interface is open, reinitializes the chip via bcm5700_reset().
 * NOTE(review): the branch structure around LM_ShutdownChip() is elided
 * in this excerpt — presumably it is the "not opened" path; confirm
 * against the full source before relying on it.
 */
2746 b57_resume_chip(UM_DEVICE_BLOCK *pUmDevice)
2748 LM_DEVICE_BLOCK *pDevice = &pUmDevice->lm_dev;
2750 if (pUmDevice->suspended) {
2751 pUmDevice->suspended = 0;
2752 if (pUmDevice->opened) {
2753 bcm5700_reset(pUmDevice->dev);
2756 LM_ShutdownChip(pDevice, LM_SHUTDOWN_RESET);
2761 /* Returns 0 on failure, 1 on success */
/*
 * Interrupt self-test: sets intr_test, forces an interrupt by writing
 * NOW_FORCE-style bits into HostCoalesce.Mode, and polls up to 10 times
 * (1 ms apart) for the ISR to set intr_test_result (see the intr_test
 * branch in bcm5700_interrupt).
 */
2763 b57_test_intr(UM_DEVICE_BLOCK *pUmDevice)
2765 LM_DEVICE_BLOCK *pDevice = &pUmDevice->lm_dev;
2768 if (!pUmDevice->opened)
2770 pUmDevice->intr_test_result = 0;
2771 pUmDevice->intr_test = 1;
2773 REG_WR(pDevice, HostCoalesce.Mode,
2774 pDevice->CoalesceMode | HOST_COALESCE_ENABLE |
2777 for (j = 0; j < 10; j++) {
2778 if (pUmDevice->intr_test_result){
/* Retry the forced interrupt each iteration. */
2782 REG_WR(pDevice, HostCoalesce.Mode,
2783 pDevice->CoalesceMode | HOST_COALESCE_ENABLE |
2786 MM_Sleep(pDevice, 1);
2789 return pUmDevice->intr_test_result;
2795 #ifdef ETHTOOL_GSTRINGS
/* ethtool -S support: stat names and their offsets in T3_STATS_BLOCK.
 * RX_CRC_IDX / RX_MAC_ERR_IDX mark the two entries with no hardware
 * counter; they are computed by bcm5700_crc_count()/bcm5700_rx_err_count()
 * in the ETHTOOL_GSTATS handler (their offset entries are absent/zero). */
2797 #define ETH_NUM_STATS 30
2798 #define RX_CRC_IDX 5
2799 #define RX_MAC_ERR_IDX 14
2802 char string[ETH_GSTRING_LEN];
2803 } bcm5700_stats_str_arr[ETH_NUM_STATS] = {
2804 { "rx_unicast_packets" },
2805 { "rx_multicast_packets" },
2806 { "rx_broadcast_packets" },
2809 { "rx_crc_errors" }, /* this needs to be calculated */
2810 { "rx_align_errors" },
2811 { "rx_xon_frames" },
2812 { "rx_xoff_frames" },
2813 { "rx_long_frames" },
2814 { "rx_short_frames" },
2818 { "rx_mac_errors" }, /* this needs to be calculated */
2819 { "tx_unicast_packets" },
2820 { "tx_multicast_packets" },
2821 { "tx_broadcast_packets" },
2824 { "tx_single_collisions" },
2825 { "tx_multi_collisions" },
2826 { "tx_total_collisions" },
2827 { "tx_excess_collisions" },
2828 { "tx_late_collisions" },
2829 { "tx_xon_frames" },
2830 { "tx_xoff_frames" },
2831 { "tx_internal_mac_errors" },
2832 { "tx_carrier_errors" },
/* Offset of a 64-bit counter within T3_STATS_BLOCK, in uint64_t units. */
2836 #define STATS_OFFSET(offset_name) ((OFFSETOF(T3_STATS_BLOCK, offset_name)) / sizeof(uint64_t))
/* NOTE(review): the two SWAP_DWORD_64 definitions below sit under an
 * endianness #if that is elided in this excerpt — the first (identity)
 * form is presumably the big-endian arm, the word-swap the little-endian
 * arm; confirm against the full source. */
2839 #define SWAP_DWORD_64(x) (x)
2841 #define SWAP_DWORD_64(x) ((x << 32) | (x >> 32))
/* Parallel to bcm5700_stats_str_arr: stats-block offset per stat name. */
2844 unsigned long bcm5700_stats_offset_arr[ETH_NUM_STATS] = {
2845 STATS_OFFSET(ifHCInUcastPkts),
2846 STATS_OFFSET(ifHCInMulticastPkts),
2847 STATS_OFFSET(ifHCInBroadcastPkts),
2848 STATS_OFFSET(ifHCInOctets),
2849 STATS_OFFSET(etherStatsFragments),
2851 STATS_OFFSET(dot3StatsAlignmentErrors),
2852 STATS_OFFSET(xonPauseFramesReceived),
2853 STATS_OFFSET(xoffPauseFramesReceived),
2854 STATS_OFFSET(dot3StatsFramesTooLong),
2855 STATS_OFFSET(etherStatsUndersizePkts),
2856 STATS_OFFSET(etherStatsJabbers),
2857 STATS_OFFSET(ifInDiscards),
2858 STATS_OFFSET(ifInErrors),
2860 STATS_OFFSET(ifHCOutUcastPkts),
2861 STATS_OFFSET(ifHCOutMulticastPkts),
2862 STATS_OFFSET(ifHCOutBroadcastPkts),
2863 STATS_OFFSET(ifHCOutOctets),
2864 STATS_OFFSET(dot3StatsDeferredTransmissions),
2865 STATS_OFFSET(dot3StatsSingleCollisionFrames),
2866 STATS_OFFSET(dot3StatsMultipleCollisionFrames),
2867 STATS_OFFSET(etherStatsCollisions),
2868 STATS_OFFSET(dot3StatsExcessiveCollisions),
2869 STATS_OFFSET(dot3StatsLateCollisions),
2870 STATS_OFFSET(outXonSent),
2871 STATS_OFFSET(outXoffSent),
2872 STATS_OFFSET(dot3StatsInternalMacTransmitErrors),
2873 STATS_OFFSET(dot3StatsCarrierSenseErrors),
2874 STATS_OFFSET(ifOutErrors),
2877 #endif /* ETHTOOL_GSTRINGS */
/* ethtool self-test (ETHTOOL_TEST) support: test names reported via
 * ETHTOOL_GSTRINGS, and the prototypes of the test routines implemented
 * in the diagnostics module. Order matches the tests[] result array in
 * the ETHTOOL_TEST handler. */
2880 #define ETH_NUM_TESTS 6
2882 char string[ETH_GSTRING_LEN];
2883 } bcm5700_tests_str_arr[ETH_NUM_TESTS] = {
2884 { "register test (offline)" },
2885 { "memory test (offline)" },
2886 { "loopback test (offline)" },
2887 { "nvram test (online)" },
2888 { "interrupt test (online)" },
2889 { "link test (online)" },
2892 extern LM_STATUS b57_test_registers(UM_DEVICE_BLOCK *pUmDevice);
2893 extern LM_STATUS b57_test_memory(UM_DEVICE_BLOCK *pUmDevice);
2894 extern LM_STATUS b57_test_nvram(UM_DEVICE_BLOCK *pUmDevice);
2895 extern LM_STATUS b57_test_link(UM_DEVICE_BLOCK *pUmDevice);
2896 extern LM_STATUS b57_test_loopback(UM_DEVICE_BLOCK *pUmDevice, int looptype, int linespeed);
2899 #ifdef ETHTOOL_GREGS
2900 #if (LINUX_VERSION_CODE >= 0x02040f)
/*
 * Copies the register range [start, end) into *buf and advances *buf past
 * it, for the ETHTOOL_GREGS dump. The elided trailing parameter
 * (presumably a "reserved" flag — confirm against full source) selects the
 * zero-fill path below instead of reading the hardware; 5705+ chips also
 * skip three hole ranges that are unimplemented on those ASICs.
 */
2902 bcm5700_get_reg_blk(UM_DEVICE_BLOCK *pUmDevice, u32 **buf, u32 start, u32 end,
2906 LM_DEVICE_BLOCK *pDevice = &pUmDevice->lm_dev;
/* Reserved range: emit zeros instead of touching the hardware. */
2909 memset(*buf, 0, end - start);
2910 *buf = *buf + (end - start)/4;
2913 for (offset = start; offset < end; offset+=4, *buf = *buf + 1) {
/* Register holes on 5705-and-later ASICs are skipped (left as-is). */
2914 if (T3_ASIC_IS_5705_BEYOND(pDevice->ChipRevId)){
2915 if (((offset >= 0x3400) && (offset < 0x3c00)) ||
2916 ((offset >= 0x5400) && (offset < 0x5800)) ||
2917 ((offset >= 0x6400) && (offset < 0x6800))) {
2922 **buf = REG_RD_OFFSET(pDevice, offset);
2928 static int netdev_ethtool_ioctl(struct net_device *dev, void *useraddr)
2930 struct ethtool_cmd ethcmd;
2931 PUM_DEVICE_BLOCK pUmDevice = (PUM_DEVICE_BLOCK)dev->priv;
2932 PLM_DEVICE_BLOCK pDevice = (PLM_DEVICE_BLOCK) pUmDevice;
2934 if (mm_copy_from_user(ðcmd, useraddr, sizeof(ethcmd)))
2937 switch (ethcmd.cmd) {
2938 #ifdef ETHTOOL_GDRVINFO
2939 case ETHTOOL_GDRVINFO: {
2940 struct ethtool_drvinfo info = {ETHTOOL_GDRVINFO};
2942 strcpy(info.driver, bcm5700_driver);
2943 #ifdef INCLUDE_5701_AX_FIX
2944 if(pDevice->ChipRevId == T3_CHIP_ID_5701_A0) {
2945 extern int t3FwReleaseMajor;
2946 extern int t3FwReleaseMinor;
2947 extern int t3FwReleaseFix;
2949 sprintf(info.fw_version, "%i.%i.%i",
2950 t3FwReleaseMajor, t3FwReleaseMinor,
2954 strcpy(info.fw_version, pDevice->BootCodeVer);
2955 strcpy(info.version, bcm5700_version);
2956 #if (LINUX_VERSION_CODE <= 0x020422)
2957 strcpy(info.bus_info, pUmDevice->pdev->slot_name);
2959 strcpy(info.bus_info, pci_name(pUmDevice->pdev));
2964 #ifdef ETHTOOL_GEEPROM
2965 BCM_EEDUMP_LEN(&info, pDevice->NvramSize);
2967 #ifdef ETHTOOL_GREGS
2968 /* dump everything, including holes in the register space */
2969 info.regdump_len = 0x6c00;
2971 #ifdef ETHTOOL_GSTATS
2972 info.n_stats = ETH_NUM_STATS;
2975 info.testinfo_len = ETH_NUM_TESTS;
2977 if (mm_copy_to_user(useraddr, &info, sizeof(info)))
2982 case ETHTOOL_GSET: {
2983 if ((pDevice->TbiFlags & ENABLE_TBI_FLAG)||
2984 (pDevice->PhyFlags & PHY_IS_FIBER)) {
2986 (SUPPORTED_1000baseT_Full |
2988 ethcmd.supported |= SUPPORTED_FIBRE;
2989 ethcmd.port = PORT_FIBRE;
2993 (SUPPORTED_10baseT_Half |
2994 SUPPORTED_10baseT_Full |
2995 SUPPORTED_100baseT_Half |
2996 SUPPORTED_100baseT_Full |
2997 SUPPORTED_1000baseT_Half |
2998 SUPPORTED_1000baseT_Full |
3000 ethcmd.supported |= SUPPORTED_TP;
3001 ethcmd.port = PORT_TP;
3004 ethcmd.transceiver = XCVR_INTERNAL;
3005 ethcmd.phy_address = 0;
3007 if (pDevice->LineSpeed == LM_LINE_SPEED_1000MBPS)
3008 ethcmd.speed = SPEED_1000;
3009 else if (pDevice->LineSpeed == LM_LINE_SPEED_100MBPS)
3010 ethcmd.speed = SPEED_100;
3011 else if (pDevice->LineSpeed == LM_LINE_SPEED_10MBPS)
3012 ethcmd.speed = SPEED_10;
3016 if (pDevice->DuplexMode == LM_DUPLEX_MODE_FULL)
3017 ethcmd.duplex = DUPLEX_FULL;
3019 ethcmd.duplex = DUPLEX_HALF;
3021 if (pDevice->DisableAutoNeg == FALSE) {
3022 ethcmd.autoneg = AUTONEG_ENABLE;
3023 ethcmd.advertising = ADVERTISED_Autoneg;
3024 if ((pDevice->TbiFlags & ENABLE_TBI_FLAG) ||
3025 (pDevice->PhyFlags & PHY_IS_FIBER)) {
3026 ethcmd.advertising |=
3027 ADVERTISED_1000baseT_Full |
3031 ethcmd.advertising |=
3033 if (pDevice->advertising &
3034 PHY_AN_AD_10BASET_HALF) {
3036 ethcmd.advertising |=
3037 ADVERTISED_10baseT_Half;
3039 if (pDevice->advertising &
3040 PHY_AN_AD_10BASET_FULL) {
3042 ethcmd.advertising |=
3043 ADVERTISED_10baseT_Full;
3045 if (pDevice->advertising &
3046 PHY_AN_AD_100BASETX_HALF) {
3048 ethcmd.advertising |=
3049 ADVERTISED_100baseT_Half;
3051 if (pDevice->advertising &
3052 PHY_AN_AD_100BASETX_FULL) {
3054 ethcmd.advertising |=
3055 ADVERTISED_100baseT_Full;
3057 if (pDevice->advertising1000 &
3058 BCM540X_AN_AD_1000BASET_HALF) {
3060 ethcmd.advertising |=
3061 ADVERTISED_1000baseT_Half;
3063 if (pDevice->advertising1000 &
3064 BCM540X_AN_AD_1000BASET_FULL) {
3066 ethcmd.advertising |=
3067 ADVERTISED_1000baseT_Full;
3072 ethcmd.autoneg = AUTONEG_DISABLE;
3073 ethcmd.advertising = 0;
3076 ethcmd.maxtxpkt = pDevice->TxMaxCoalescedFrames;
3077 ethcmd.maxrxpkt = pDevice->RxMaxCoalescedFrames;
3079 if(mm_copy_to_user(useraddr, ðcmd, sizeof(ethcmd)))
3083 case ETHTOOL_SSET: {
3084 unsigned long flags;
3086 if(!capable(CAP_NET_ADMIN))
3088 if (ethcmd.autoneg == AUTONEG_ENABLE) {
3089 pDevice->RequestedLineSpeed = LM_LINE_SPEED_AUTO;
3090 pDevice->RequestedDuplexMode = LM_DUPLEX_MODE_UNKNOWN;
3091 pDevice->DisableAutoNeg = FALSE;
3094 if (ethcmd.speed == SPEED_1000 &&
3095 pDevice->PhyFlags & PHY_NO_GIGABIT)
3098 if (ethcmd.speed == SPEED_1000 &&
3099 (pDevice->TbiFlags & ENABLE_TBI_FLAG ||
3100 pDevice->PhyFlags & PHY_IS_FIBER ) ) {
3102 pDevice->RequestedLineSpeed =
3103 LM_LINE_SPEED_1000MBPS;
3105 pDevice->RequestedDuplexMode =
3106 LM_DUPLEX_MODE_FULL;
3108 else if (ethcmd.speed == SPEED_100 &&
3109 !(pDevice->TbiFlags & ENABLE_TBI_FLAG) &&
3110 !(pDevice->PhyFlags & PHY_IS_FIBER)) {
3112 pDevice->RequestedLineSpeed =
3113 LM_LINE_SPEED_100MBPS;
3115 else if (ethcmd.speed == SPEED_10 &&
3116 !(pDevice->TbiFlags & ENABLE_TBI_FLAG) &&
3117 !(pDevice->PhyFlags & PHY_IS_FIBER)) {
3119 pDevice->RequestedLineSpeed =
3120 LM_LINE_SPEED_10MBPS;
3126 pDevice->DisableAutoNeg = TRUE;
3127 if (ethcmd.duplex == DUPLEX_FULL) {
3128 pDevice->RequestedDuplexMode =
3129 LM_DUPLEX_MODE_FULL;
3132 if (!(pDevice->TbiFlags & ENABLE_TBI_FLAG) &&
3133 !(pDevice->PhyFlags & PHY_IS_FIBER) ) {
3135 pDevice->RequestedDuplexMode =
3136 LM_DUPLEX_MODE_HALF;
3140 if (netif_running(dev)) {
3141 BCM5700_PHY_LOCK(pUmDevice, flags);
3142 LM_SetupPhy(pDevice);
3143 BCM5700_PHY_UNLOCK(pUmDevice, flags);
3149 case ETHTOOL_GWOL: {
3150 struct ethtool_wolinfo wol = {ETHTOOL_GWOL};
3152 if (((pDevice->TbiFlags & ENABLE_TBI_FLAG) &&
3153 !(pDevice->Flags & FIBER_WOL_CAPABLE_FLAG)) ||
3154 (pDevice->Flags & DISABLE_D3HOT_FLAG)) {
3159 wol.supported = WAKE_MAGIC;
3160 if (pDevice->WakeUpMode == LM_WAKE_UP_MODE_MAGIC_PACKET)
3162 wol.wolopts = WAKE_MAGIC;
3168 if (mm_copy_to_user(useraddr, &wol, sizeof(wol)))
3172 case ETHTOOL_SWOL: {
3173 struct ethtool_wolinfo wol;
3175 if(!capable(CAP_NET_ADMIN))
3177 if (mm_copy_from_user(&wol, useraddr, sizeof(wol)))
3179 if ((((pDevice->TbiFlags & ENABLE_TBI_FLAG) &&
3180 !(pDevice->Flags & FIBER_WOL_CAPABLE_FLAG)) ||
3181 (pDevice->Flags & DISABLE_D3HOT_FLAG)) &&
3186 if ((wol.wolopts & ~WAKE_MAGIC) != 0) {
3189 if (wol.wolopts & WAKE_MAGIC) {
3190 pDevice->WakeUpModeCap = LM_WAKE_UP_MODE_MAGIC_PACKET;
3191 pDevice->WakeUpMode = LM_WAKE_UP_MODE_MAGIC_PACKET;
3194 pDevice->WakeUpModeCap = LM_WAKE_UP_MODE_NONE;
3195 pDevice->WakeUpMode = LM_WAKE_UP_MODE_NONE;
3201 #ifdef ETHTOOL_GLINK
3202 case ETHTOOL_GLINK: {
3203 struct ethtool_value edata = {ETHTOOL_GLINK};
3205 /* workaround for DHCP using ifup script */
3206 /* ifup only waits for 5 seconds for link up */
3207 /* NIC may take more than 5 seconds to establish link */
3208 if ((pUmDevice->delayed_link_ind > 0) &&
3209 delay_link[pUmDevice->index])
3212 if (pDevice->LinkStatus == LM_STATUS_LINK_ACTIVE) {
3218 if (mm_copy_to_user(useraddr, &edata, sizeof(edata)))
3223 #ifdef ETHTOOL_NWAY_RST
3224 case ETHTOOL_NWAY_RST: {
3226 unsigned long flags;
3228 if(!capable(CAP_NET_ADMIN))
3230 if (pDevice->DisableAutoNeg) {
3233 if (!netif_running(dev))
3235 BCM5700_PHY_LOCK(pUmDevice, flags);
3236 if (pDevice->TbiFlags & ENABLE_TBI_FLAG) {
3237 pDevice->RequestedLineSpeed = LM_LINE_SPEED_1000MBPS;
3238 pDevice->DisableAutoNeg = TRUE;
3239 LM_SetupPhy(pDevice);
3241 pDevice->RequestedLineSpeed = LM_LINE_SPEED_AUTO;
3242 pDevice->DisableAutoNeg = FALSE;
3243 LM_SetupPhy(pDevice);
3246 if ((T3_ASIC_REV(pDevice->ChipRevId) ==
3247 T3_ASIC_REV_5703) ||
3248 (T3_ASIC_REV(pDevice->ChipRevId) ==
3249 T3_ASIC_REV_5704) ||
3250 (T3_ASIC_REV(pDevice->ChipRevId) ==
3253 LM_ResetPhy(pDevice);
3254 LM_SetupPhy(pDevice);
3256 pDevice->PhyFlags &= ~PHY_FIBER_FALLBACK;
3257 LM_ReadPhy(pDevice, PHY_CTRL_REG, &phyctrl);
3258 LM_WritePhy(pDevice, PHY_CTRL_REG, phyctrl |
3259 PHY_CTRL_AUTO_NEG_ENABLE |
3260 PHY_CTRL_RESTART_AUTO_NEG);
3262 BCM5700_PHY_UNLOCK(pUmDevice, flags);
3266 #ifdef ETHTOOL_GEEPROM
3267 case ETHTOOL_GEEPROM: {
3268 struct ethtool_eeprom eeprom;
3270 LM_UINT32 buf1[64/4];
3271 int i, j, offset, len;
3273 if (mm_copy_from_user(&eeprom, useraddr, sizeof(eeprom)))
3276 if (eeprom.offset >= pDevice->NvramSize)
3279 /* maximum data limited */
3280 /* to read more, call again with a different offset */
3281 if (eeprom.len > 0x800) {
3283 if (mm_copy_to_user(useraddr, &eeprom, sizeof(eeprom)))
3287 if (eeprom.len > 64) {
3288 buf = kmalloc(eeprom.len, GFP_KERNEL);
3295 useraddr += offsetof(struct ethtool_eeprom, data);
3297 offset = eeprom.offset;
3300 offset &= 0xfffffffc;
3301 len += (offset & 3);
3303 len = (len + 3) & 0xfffffffc;
3304 for (i = 0, j = 0; j < len; i++, j += 4) {
3305 if (LM_NvramRead(pDevice, offset + j, buf + i) !=
3306 LM_STATUS_SUCCESS) {
3311 buf += (eeprom.offset & 3);
3312 i = mm_copy_to_user(useraddr, buf, eeprom.len);
3314 if (eeprom.len > 64) {
3321 case ETHTOOL_SEEPROM: {
3322 struct ethtool_eeprom eeprom;
3323 LM_UINT32 buf[64/4];
3326 if(!capable(CAP_NET_ADMIN))
3328 if (mm_copy_from_user(&eeprom, useraddr, sizeof(eeprom)))
3331 if ((eeprom.offset & 3) || (eeprom.len & 3) ||
3332 (eeprom.offset >= pDevice->NvramSize)) {
3336 if ((eeprom.offset + eeprom.len) >= pDevice->NvramSize) {
3337 eeprom.len = pDevice->NvramSize - eeprom.offset;
3340 useraddr += offsetof(struct ethtool_eeprom, data);
3343 offset = eeprom.offset;
3349 if (mm_copy_from_user(&buf, useraddr, i))
3352 bcm5700_intr_off(pUmDevice);
3353 /* Prevent race condition on Grc.Mode register */
3354 bcm5700_poll_wait(pUmDevice);
3356 if (LM_NvramWriteBlock(pDevice, offset, buf, i/4) !=
3357 LM_STATUS_SUCCESS) {
3358 bcm5700_intr_on(pUmDevice);
3361 bcm5700_intr_on(pUmDevice);
3369 #ifdef ETHTOOL_GREGS
3370 #if (LINUX_VERSION_CODE >= 0x02040f)
3371 case ETHTOOL_GREGS: {
3372 struct ethtool_regs eregs;
3373 LM_UINT32 *buf, *buf1;
3376 if(!capable(CAP_NET_ADMIN))
3378 if (pDevice->Flags & UNDI_FIX_FLAG)
3380 if (mm_copy_from_user(&eregs, useraddr, sizeof(eregs)))
3382 if (eregs.len > 0x6c00)
3384 eregs.version = 0x0;
3385 if (mm_copy_to_user(useraddr, &eregs, sizeof(eregs)))
3387 buf = buf1 = kmalloc(eregs.len, GFP_KERNEL);
3390 bcm5700_get_reg_blk(pUmDevice, &buf, 0, 0xb0, 0);
3391 bcm5700_get_reg_blk(pUmDevice, &buf, 0xb0, 0x200, 1);
3392 bcm5700_get_reg_blk(pUmDevice, &buf, 0x200, 0x8f0, 0);
3393 bcm5700_get_reg_blk(pUmDevice, &buf, 0x8f0, 0xc00, 1);
3394 bcm5700_get_reg_blk(pUmDevice, &buf, 0xc00, 0xce0, 0);
3395 bcm5700_get_reg_blk(pUmDevice, &buf, 0xce0, 0x1000, 1);
3396 bcm5700_get_reg_blk(pUmDevice, &buf, 0x1000, 0x1004, 0);
3397 bcm5700_get_reg_blk(pUmDevice, &buf, 0x1004, 0x1400, 1);
3398 bcm5700_get_reg_blk(pUmDevice, &buf, 0x1400, 0x1480, 0);
3399 bcm5700_get_reg_blk(pUmDevice, &buf, 0x1480, 0x1800, 1);
3400 bcm5700_get_reg_blk(pUmDevice, &buf, 0x1800, 0x1848, 0);
3401 bcm5700_get_reg_blk(pUmDevice, &buf, 0x1848, 0x1c00, 1);
3402 bcm5700_get_reg_blk(pUmDevice, &buf, 0x1c00, 0x1c04, 0);
3403 bcm5700_get_reg_blk(pUmDevice, &buf, 0x1c04, 0x2000, 1);
3404 bcm5700_get_reg_blk(pUmDevice, &buf, 0x2000, 0x225c, 0);
3405 bcm5700_get_reg_blk(pUmDevice, &buf, 0x225c, 0x2400, 1);
3406 bcm5700_get_reg_blk(pUmDevice, &buf, 0x2400, 0x24c4, 0);
3407 bcm5700_get_reg_blk(pUmDevice, &buf, 0x24c4, 0x2800, 1);
3408 bcm5700_get_reg_blk(pUmDevice, &buf, 0x2800, 0x2804, 0);
3409 bcm5700_get_reg_blk(pUmDevice, &buf, 0x2804, 0x2c00, 1);
3410 bcm5700_get_reg_blk(pUmDevice, &buf, 0x2c00, 0x2c20, 0);
3411 bcm5700_get_reg_blk(pUmDevice, &buf, 0x2c20, 0x3000, 1);
3412 bcm5700_get_reg_blk(pUmDevice, &buf, 0x3000, 0x3014, 0);
3413 bcm5700_get_reg_blk(pUmDevice, &buf, 0x3014, 0x3400, 1);
3414 bcm5700_get_reg_blk(pUmDevice, &buf, 0x3400, 0x3408, 0);
3415 bcm5700_get_reg_blk(pUmDevice, &buf, 0x3408, 0x3800, 1);
3416 bcm5700_get_reg_blk(pUmDevice, &buf, 0x3800, 0x3808, 0);
3417 bcm5700_get_reg_blk(pUmDevice, &buf, 0x3808, 0x3c00, 1);
3418 bcm5700_get_reg_blk(pUmDevice, &buf, 0x3c00, 0x3d00, 0);
3419 bcm5700_get_reg_blk(pUmDevice, &buf, 0x3d00, 0x4000, 1);
3420 bcm5700_get_reg_blk(pUmDevice, &buf, 0x4000, 0x4010, 0);
3421 bcm5700_get_reg_blk(pUmDevice, &buf, 0x4010, 0x4400, 1);
3422 bcm5700_get_reg_blk(pUmDevice, &buf, 0x4400, 0x4458, 0);
3423 bcm5700_get_reg_blk(pUmDevice, &buf, 0x4458, 0x4800, 1);
3424 bcm5700_get_reg_blk(pUmDevice, &buf, 0x4800, 0x4808, 0);
3425 bcm5700_get_reg_blk(pUmDevice, &buf, 0x4808, 0x4c00, 1);
3426 bcm5700_get_reg_blk(pUmDevice, &buf, 0x4c00, 0x4c08, 0);
3427 bcm5700_get_reg_blk(pUmDevice, &buf, 0x4c08, 0x5000, 1);
3428 bcm5700_get_reg_blk(pUmDevice, &buf, 0x5000, 0x5050, 0);
3429 bcm5700_get_reg_blk(pUmDevice, &buf, 0x5050, 0x5400, 1);
3430 bcm5700_get_reg_blk(pUmDevice, &buf, 0x5400, 0x5450, 0);
3431 bcm5700_get_reg_blk(pUmDevice, &buf, 0x5450, 0x5800, 1);
3432 bcm5700_get_reg_blk(pUmDevice, &buf, 0x5800, 0x5a10, 0);
3433 bcm5700_get_reg_blk(pUmDevice, &buf, 0x5a10, 0x6000, 1);
3434 bcm5700_get_reg_blk(pUmDevice, &buf, 0x6000, 0x600c, 0);
3435 bcm5700_get_reg_blk(pUmDevice, &buf, 0x600c, 0x6400, 1);
3436 bcm5700_get_reg_blk(pUmDevice, &buf, 0x6400, 0x6404, 0);
3437 bcm5700_get_reg_blk(pUmDevice, &buf, 0x6404, 0x6800, 1);
3438 bcm5700_get_reg_blk(pUmDevice, &buf, 0x6800, 0x6848, 0);
3439 bcm5700_get_reg_blk(pUmDevice, &buf, 0x6848, 0x6c00, 1);
3441 i = mm_copy_to_user(useraddr + sizeof(eregs), buf1, eregs.len);
3449 #ifdef ETHTOOL_GPAUSEPARAM
3450 case ETHTOOL_GPAUSEPARAM: {
3451 struct ethtool_pauseparam epause = { ETHTOOL_GPAUSEPARAM };
3453 if (!pDevice->DisableAutoNeg) {
3454 epause.autoneg = (pDevice->FlowControlCap &
3455 LM_FLOW_CONTROL_AUTO_PAUSE) != 0;
3461 (pDevice->FlowControl &
3462 LM_FLOW_CONTROL_RECEIVE_PAUSE) != 0;
3464 (pDevice->FlowControl &
3465 LM_FLOW_CONTROL_TRANSMIT_PAUSE) != 0;
3466 if (mm_copy_to_user(useraddr, &epause, sizeof(epause)))
3471 case ETHTOOL_SPAUSEPARAM: {
3472 struct ethtool_pauseparam epause;
3473 unsigned long flags;
3475 if(!capable(CAP_NET_ADMIN))
3477 if (mm_copy_from_user(&epause, useraddr, sizeof(epause)))
3479 pDevice->FlowControlCap = 0;
3480 if (epause.autoneg && !pDevice->DisableAutoNeg) {
3481 pDevice->FlowControlCap |= LM_FLOW_CONTROL_AUTO_PAUSE;
3483 if (epause.rx_pause) {
3484 pDevice->FlowControlCap |=
3485 LM_FLOW_CONTROL_RECEIVE_PAUSE;
3487 if (epause.tx_pause) {
3488 pDevice->FlowControlCap |=
3489 LM_FLOW_CONTROL_TRANSMIT_PAUSE;
3491 if (netif_running(dev)) {
3492 BCM5700_PHY_LOCK(pUmDevice, flags);
3493 LM_SetupPhy(pDevice);
3494 BCM5700_PHY_UNLOCK(pUmDevice, flags);
3500 #ifdef ETHTOOL_GRXCSUM
3501 case ETHTOOL_GRXCSUM: {
3502 struct ethtool_value edata = { ETHTOOL_GRXCSUM };
3505 (pDevice->TaskToOffload &
3506 LM_TASK_OFFLOAD_RX_TCP_CHECKSUM) != 0;
3507 if (mm_copy_to_user(useraddr, &edata, sizeof(edata)))
3512 case ETHTOOL_SRXCSUM: {
3513 struct ethtool_value edata;
3515 if(!capable(CAP_NET_ADMIN))
3517 if (mm_copy_from_user(&edata, useraddr, sizeof(edata)))
3520 if (!(pDevice->TaskOffloadCap &
3521 LM_TASK_OFFLOAD_TX_TCP_CHECKSUM)) {
3525 pDevice->TaskToOffload |=
3526 LM_TASK_OFFLOAD_RX_TCP_CHECKSUM |
3527 LM_TASK_OFFLOAD_RX_UDP_CHECKSUM;
3530 pDevice->TaskToOffload &=
3531 ~(LM_TASK_OFFLOAD_RX_TCP_CHECKSUM |
3532 LM_TASK_OFFLOAD_RX_UDP_CHECKSUM);
3536 case ETHTOOL_GTXCSUM: {
3537 struct ethtool_value edata = { ETHTOOL_GTXCSUM };
3540 (dev->features & get_csum_flag( pDevice->ChipRevId)) != 0;
3541 if (mm_copy_to_user(useraddr, &edata, sizeof(edata)))
3546 case ETHTOOL_STXCSUM: {
3547 struct ethtool_value edata;
3549 if(!capable(CAP_NET_ADMIN))
3551 if (mm_copy_from_user(&edata, useraddr, sizeof(edata)))
3554 if (!(pDevice->TaskOffloadCap &
3555 LM_TASK_OFFLOAD_TX_TCP_CHECKSUM)) {
3559 dev->features |= get_csum_flag( pDevice->ChipRevId);
3560 pDevice->TaskToOffload |=
3561 LM_TASK_OFFLOAD_TX_TCP_CHECKSUM |
3562 LM_TASK_OFFLOAD_TX_UDP_CHECKSUM;
3565 dev->features &= ~get_csum_flag( pDevice->ChipRevId);
3566 pDevice->TaskToOffload &=
3567 ~(LM_TASK_OFFLOAD_TX_TCP_CHECKSUM |
3568 LM_TASK_OFFLOAD_TX_UDP_CHECKSUM);
3573 struct ethtool_value edata = { ETHTOOL_GSG };
3576 (dev->features & NETIF_F_SG) != 0;
3577 if (mm_copy_to_user(useraddr, &edata, sizeof(edata)))
3582 struct ethtool_value edata;
3584 if(!capable(CAP_NET_ADMIN))
3586 if (mm_copy_from_user(&edata, useraddr, sizeof(edata)))
3589 dev->features |= NETIF_F_SG;
3592 dev->features &= ~NETIF_F_SG;
3597 #ifdef ETHTOOL_GRINGPARAM
3598 case ETHTOOL_GRINGPARAM: {
3599 struct ethtool_ringparam ering = { ETHTOOL_GRINGPARAM };
3601 ering.rx_max_pending = T3_STD_RCV_RCB_ENTRY_COUNT - 1;
3602 ering.rx_pending = pDevice->RxStdDescCnt;
3603 ering.rx_mini_max_pending = 0;
3604 ering.rx_mini_pending = 0;
3605 #if T3_JUMBO_RCV_RCB_ENTRY_COUNT
3606 ering.rx_jumbo_max_pending = T3_JUMBO_RCV_RCB_ENTRY_COUNT - 1;
3607 ering.rx_jumbo_pending = pDevice->RxJumboDescCnt;
3609 ering.rx_jumbo_max_pending = 0;
3610 ering.rx_jumbo_pending = 0;
3612 ering.tx_max_pending = MAX_TX_PACKET_DESC_COUNT - 1;
3613 ering.tx_pending = pDevice->TxPacketDescCnt;
3614 if (mm_copy_to_user(useraddr, &ering, sizeof(ering)))
3619 #ifdef ETHTOOL_PHYS_ID
3620 case ETHTOOL_PHYS_ID: {
3621 struct ethtool_value edata;
3623 if(!capable(CAP_NET_ADMIN))
3625 if (mm_copy_from_user(&edata, useraddr, sizeof(edata)))
3627 if (LM_BlinkLED(pDevice, edata.data) == LM_STATUS_SUCCESS)
3632 #ifdef ETHTOOL_GSTRINGS
3633 case ETHTOOL_GSTRINGS: {
3634 struct ethtool_gstrings egstr = { ETHTOOL_GSTRINGS };
3636 if (mm_copy_from_user(&egstr, useraddr, sizeof(egstr)))
3638 switch(egstr.string_set) {
3639 #ifdef ETHTOOL_GSTATS
3641 egstr.len = ETH_NUM_STATS;
3642 if (mm_copy_to_user(useraddr, &egstr, sizeof(egstr)))
3644 if (mm_copy_to_user(useraddr + sizeof(egstr),
3645 bcm5700_stats_str_arr,
3646 sizeof(bcm5700_stats_str_arr)))
3652 egstr.len = ETH_NUM_TESTS;
3653 if (mm_copy_to_user(useraddr, &egstr, sizeof(egstr)))
3655 if (mm_copy_to_user(useraddr + sizeof(egstr),
3656 bcm5700_tests_str_arr,
3657 sizeof(bcm5700_tests_str_arr)))
3666 #ifdef ETHTOOL_GSTATS
3667 case ETHTOOL_GSTATS: {
3668 struct ethtool_stats estats = { ETHTOOL_GSTATS };
3669 uint64_t stats[ETH_NUM_STATS];
3672 (uint64_t *) pDevice->pStatsBlkVirt;
3674 estats.n_stats = ETH_NUM_STATS;
3676 memset(stats, 0, sizeof(stats));
3680 for (i = 0; i < ETH_NUM_STATS; i++) {
3681 if (bcm5700_stats_offset_arr[i] != 0) {
3682 stats[i] = SWAP_DWORD_64(*(pStats +
3683 bcm5700_stats_offset_arr[i]));
3685 else if (i == RX_CRC_IDX) {
3687 bcm5700_crc_count(pUmDevice);
3689 else if (i == RX_MAC_ERR_IDX) {
3691 bcm5700_rx_err_count(pUmDevice);
3695 if (mm_copy_to_user(useraddr, &estats, sizeof(estats))) {
3698 if (mm_copy_to_user(useraddr + sizeof(estats), &stats,
3706 case ETHTOOL_TEST: {
3707 struct ethtool_test etest;
3708 uint64_t tests[ETH_NUM_TESTS] = {0, 0, 0, 0, 0, 0};
3709 LM_POWER_STATE old_power_level;
3711 printk( KERN_ALERT "Performing ethtool test.\n"
3712 "This test will take a few seconds to complete.\n" );
3714 if (mm_copy_from_user(&etest, useraddr, sizeof(etest)))
3717 etest.len = ETH_NUM_TESTS;
3718 old_power_level = pDevice->PowerLevel;
3719 if (old_power_level != LM_POWER_STATE_D0) {
3720 LM_SetPowerState(pDevice, LM_POWER_STATE_D0);
3721 LM_SwitchClocks(pDevice);
3723 MM_Sleep(pDevice, 1000);
3724 if (etest.flags & ETH_TEST_FL_OFFLINE) {
3725 b57_suspend_chip(pUmDevice);
3726 MM_Sleep(pDevice, 1000);
3727 LM_HaltCpu(pDevice,T3_RX_CPU_ID | T3_TX_CPU_ID);
3728 MM_Sleep(pDevice, 1000);
3729 if (b57_test_registers(pUmDevice) == 0) {
3730 etest.flags |= ETH_TEST_FL_FAILED;
3733 MM_Sleep(pDevice, 1000);
3734 if (b57_test_memory(pUmDevice) == 0) {
3735 etest.flags |= ETH_TEST_FL_FAILED;
3738 MM_Sleep(pDevice, 1000);
3739 if (b57_test_loopback(pUmDevice, NICE_LOOPBACK_TESTTYPE_MAC, 0) == 0) {
3740 etest.flags |= ETH_TEST_FL_FAILED;
3743 MM_Sleep(pDevice, 1000);
3744 b57_resume_chip(pUmDevice);
3745 /* wait for link to come up for the link test */
3746 MM_Sleep(pDevice, 4000);
3747 if ((pDevice->LinkStatus != LM_STATUS_LINK_ACTIVE) &&
3748 !(pDevice->TbiFlags & ENABLE_TBI_FLAG)) {
3750 /* wait a little longer for linkup on copper */
3751 MM_Sleep(pDevice, 3000);
3754 if (b57_test_nvram(pUmDevice) == 0) {
3755 etest.flags |= ETH_TEST_FL_FAILED;
3758 MM_Sleep(pDevice, 1000);
3759 if (b57_test_intr(pUmDevice) == 0) {
3760 etest.flags |= ETH_TEST_FL_FAILED;
3763 MM_Sleep(pDevice, 1000);
3764 if (b57_test_link(pUmDevice) == 0) {
3765 etest.flags |= ETH_TEST_FL_FAILED;
3768 MM_Sleep(pDevice, 1000);
3769 if (old_power_level != LM_POWER_STATE_D0) {
3770 LM_SetPowerState(pDevice, old_power_level);
3772 if (mm_copy_to_user(useraddr, &etest, sizeof(etest))) {
3775 if (mm_copy_to_user(useraddr + sizeof(etest), tests,
3783 case ETHTOOL_GTSO: {
3784 struct ethtool_value edata = { ETHTOOL_GTSO };
3788 (dev->features & NETIF_F_TSO) != 0;
3792 if (mm_copy_to_user(useraddr, &edata, sizeof(edata)))
3798 case ETHTOOL_STSO: {
3800 struct ethtool_value edata;
3802 if (!capable(CAP_NET_ADMIN))
3805 if (mm_copy_from_user(&edata, useraddr, sizeof(edata)))
3808 if (!(pDevice->TaskToOffload &
3809 LM_TASK_OFFLOAD_TCP_SEGMENTATION)) {
3813 dev->features &= ~NETIF_F_TSO;
3816 if (T3_ASIC_5714_FAMILY(pDevice->ChipRevId) &&
3817 (dev->mtu > 1500)) {
3818 printk(KERN_ALERT "%s: Jumbo Frames and TSO cannot simultaneously be enabled. Jumbo Frames enabled. TSO disabled.\n", dev->name);
3821 dev->features |= NETIF_F_TSO;
3834 #endif /* #ifdef SIOCETHTOOL */
3836 #if (LINUX_VERSION_CODE >= 0x20400) && (LINUX_VERSION_CODE < 0x20600)
3837 #include <linux/iobuf.h>
3840 /* Provide ioctl() calls to examine the MII xcvr state. */
/*
 * bcm5700_ioctl() - private ioctl entry point for the net_device.
 *
 * Handles the classic MII SIOCDEVPRIVATE triple (get PHY address, read
 * MII register, write MII register) plus the Broadcom NICE diagnostic
 * ioctl set (register/memory/PCI-config peek+poke, loopback control,
 * LED blink, stats block access, DMA buffer alloc/free).
 *
 * NOTE(review): this excerpt is a lossy listing — brace/break/return
 * lines are missing — so the comments below describe only the visible
 * statements.
 *
 * BUGFIX: in the NICE_CMD_GET_SPEED branch the 10 Mbps case previously
 * reported SPEED_100 (copy/paste from the 100 Mbps case); it now
 * reports SPEED_10.
 */
3841 STATIC int bcm5700_ioctl(struct net_device *dev, struct ifreq *rq, int cmd)
3843 PUM_DEVICE_BLOCK pUmDevice = (PUM_DEVICE_BLOCK)dev->priv;
3844 PLM_DEVICE_BLOCK pDevice = (PLM_DEVICE_BLOCK) pUmDevice;
3845 u16 *data = (u16 *)&rq->ifr_data;
3847 unsigned long flags;
/* --- Legacy MII ioctls --- */
3853 case SIOCDEVPRIVATE: /* Get the address of the PHY in use. */
3854 data[0] = pDevice->PhyAddr;
3859 case SIOCDEVPRIVATE+1: /* Read the specified MII register. */
3860 if (pDevice->TbiFlags & ENABLE_TBI_FLAG)
3863 /* workaround for DHCP using ifup script */
3864 /* ifup only waits for 5 seconds for link up */
3865 /* NIC may take more than 5 seconds to establish link */
3866 if ((pUmDevice->delayed_link_ind > 0) &&
3867 delay_link[pUmDevice->index]) {
/* PHY accesses are serialized under the driver's PHY spinlock. */
3871 BCM5700_PHY_LOCK(pUmDevice, flags);
3872 LM_ReadPhy(pDevice, data[1] & 0x1f, (LM_UINT32 *) &value);
3873 BCM5700_PHY_UNLOCK(pUmDevice, flags);
3874 data[3] = value & 0xffff;
3880 case SIOCDEVPRIVATE+2: /* Write the specified MII register */
3881 if (!capable(CAP_NET_ADMIN))
3884 if (pDevice->TbiFlags & ENABLE_TBI_FLAG)
3887 BCM5700_PHY_LOCK(pUmDevice, flags);
3888 LM_WritePhy(pDevice, data[1] & 0x1f, data[2]);
3889 BCM5700_PHY_UNLOCK(pUmDevice, flags);
/* --- NICE diagnostic ioctl (privileged) --- */
3895 struct nice_req* nrq;
3897 if (!capable(CAP_NET_ADMIN))
3900 nrq = (struct nice_req*)&rq->ifr_ifru;
/* Capability query: report which NICE features this driver supports. */
3901 if( nrq->cmd == NICE_CMD_QUERY_SUPPORT ) {
3902 nrq->nrq_magic = NICE_DEVICE_MAGIC;
3903 nrq->nrq_support_rx = 1;
3904 nrq->nrq_support_vlan = 1;
3905 nrq->nrq_support_get_speed = 1;
3906 #ifdef BCM_NAPI_RXPOLL
3907 nrq->nrq_support_rx_napi = 1;
/* Install/read the caller-supplied rx hook and context. */
3911 #ifdef BCM_NAPI_RXPOLL
3912 else if( nrq->cmd == NICE_CMD_SET_RX_NAPI )
3914 else if( nrq->cmd == NICE_CMD_SET_RX )
3917 pUmDevice->nice_rx = nrq->nrq_rx;
3918 pUmDevice->nice_ctx = nrq->nrq_ctx;
3919 bcm5700_set_vlan_mode(pUmDevice);
3922 #ifdef BCM_NAPI_RXPOLL
3923 else if( nrq->cmd == NICE_CMD_GET_RX_NAPI )
3925 else if( nrq->cmd == NICE_CMD_GET_RX )
3928 nrq->nrq_rx = pUmDevice->nice_rx;
3929 nrq->nrq_ctx = pUmDevice->nice_ctx;
/* Report the negotiated line speed (only meaningful with link up). */
3932 else if( nrq->cmd == NICE_CMD_GET_SPEED ) {
3933 if(pDevice->LinkStatus != LM_STATUS_LINK_ACTIVE){
3936 else if (pDevice->LineSpeed == LM_LINE_SPEED_1000MBPS) {
3937 nrq->nrq_speed = SPEED_1000;
3938 } else if (pDevice->LineSpeed == LM_LINE_SPEED_100MBPS) {
3939 nrq->nrq_speed = SPEED_100;
3940 } else if (pDevice->LineSpeed == LM_LINE_SPEED_10MBPS) {
3941 nrq->nrq_speed = SPEED_10; /* BUGFIX: was SPEED_100 (copy/paste) */
/* Remaining NICE commands require the interface to be up. */
3948 if (!pUmDevice->opened)
3952 case NICE_CMD_BLINK_LED:
3953 if (LM_BlinkLED(pDevice, nrq->nrq_blink_time) ==
3954 LM_STATUS_SUCCESS) {
3959 case NICE_CMD_DIAG_SUSPEND:
3960 b57_suspend_chip(pUmDevice);
3963 case NICE_CMD_DIAG_RESUME:
3964 b57_resume_chip(pUmDevice);
/* Offsets >= 0x10000 are accessed indirectly (beyond the BAR window). */
3967 case NICE_CMD_REG_READ:
3968 if (nrq->nrq_offset >= 0x10000) {
3969 nrq->nrq_data = LM_RegRdInd(pDevice,
3973 nrq->nrq_data = LM_RegRd(pDevice,
3978 case NICE_CMD_REG_WRITE:
3979 if (nrq->nrq_offset >= 0x10000) {
3980 LM_RegWrInd(pDevice, nrq->nrq_offset,
3984 LM_RegWr(pDevice, nrq->nrq_offset,
3985 nrq->nrq_data, FALSE);
3989 case NICE_CMD_REG_READ_DIRECT:
3990 case NICE_CMD_REG_WRITE_DIRECT:
/* Direct access is invalid out of range or with the UNDI workaround. */
3991 if ((nrq->nrq_offset >= 0x10000) ||
3992 (pDevice->Flags & UNDI_FIX_FLAG)) {
3996 if (nrq->cmd == NICE_CMD_REG_READ_DIRECT) {
3997 nrq->nrq_data = REG_RD_OFFSET(pDevice,
4001 REG_WR_OFFSET(pDevice, nrq->nrq_offset,
4006 case NICE_CMD_MEM_READ:
4007 nrq->nrq_data = LM_MemRdInd(pDevice,
4011 case NICE_CMD_MEM_WRITE:
4012 LM_MemWrInd(pDevice, nrq->nrq_offset,
/* PCI configuration space peek/poke in 32/16/8-bit widths. */
4016 case NICE_CMD_CFG_READ32:
4017 pci_read_config_dword(pUmDevice->pdev,
4018 nrq->nrq_offset, (u32 *)&nrq->nrq_data);
4021 case NICE_CMD_CFG_READ16:
4022 pci_read_config_word(pUmDevice->pdev,
4023 nrq->nrq_offset, (u16 *)&nrq->nrq_data);
4026 case NICE_CMD_CFG_READ8:
4027 pci_read_config_byte(pUmDevice->pdev,
4028 nrq->nrq_offset, (u8 *)&nrq->nrq_data);
4031 case NICE_CMD_CFG_WRITE32:
4032 pci_write_config_dword(pUmDevice->pdev,
4033 nrq->nrq_offset, (u32)nrq->nrq_data);
4036 case NICE_CMD_CFG_WRITE16:
4037 pci_write_config_word(pUmDevice->pdev,
4038 nrq->nrq_offset, (u16)nrq->nrq_data);
4041 case NICE_CMD_CFG_WRITE8:
4042 pci_write_config_byte(pUmDevice->pdev,
4043 nrq->nrq_offset, (u8)nrq->nrq_data);
4046 case NICE_CMD_RESET:
/* Loopback enable/disable; each checks the current LoopBackMode first. */
4050 case NICE_CMD_ENABLE_MAC_LOOPBACK:
4051 if (pDevice->LoopBackMode != 0) {
4055 BCM5700_PHY_LOCK(pUmDevice, flags);
4056 LM_EnableMacLoopBack(pDevice);
4057 BCM5700_PHY_UNLOCK(pUmDevice, flags);
4060 case NICE_CMD_DISABLE_MAC_LOOPBACK:
4061 if (pDevice->LoopBackMode !=
4062 LM_MAC_LOOP_BACK_MODE) {
4066 BCM5700_PHY_LOCK(pUmDevice, flags);
4067 LM_DisableMacLoopBack(pDevice);
4068 BCM5700_PHY_UNLOCK(pUmDevice, flags);
4071 case NICE_CMD_ENABLE_PHY_LOOPBACK:
4072 if (pDevice->LoopBackMode != 0) {
4076 BCM5700_PHY_LOCK(pUmDevice, flags);
4077 LM_EnablePhyLoopBack(pDevice);
4078 BCM5700_PHY_UNLOCK(pUmDevice, flags);
4081 case NICE_CMD_DISABLE_PHY_LOOPBACK:
4082 if (pDevice->LoopBackMode !=
4083 LM_PHY_LOOP_BACK_MODE) {
4087 BCM5700_PHY_LOCK(pUmDevice, flags);
4088 LM_DisablePhyLoopBack(pDevice);
4089 BCM5700_PHY_UNLOCK(pUmDevice, flags);
4092 case NICE_CMD_ENABLE_EXT_LOOPBACK:
4093 if (pDevice->LoopBackMode != 0) {
/* Fiber (TBI) only supports 1000; copper allows 10/100/1000. */
4097 if (pDevice->TbiFlags & ENABLE_TBI_FLAG) {
4098 if (nrq->nrq_speed != 1000)
4102 if ((nrq->nrq_speed != 1000) &&
4103 (nrq->nrq_speed != 100) &&
4104 (nrq->nrq_speed != 10)) {
4108 BCM5700_PHY_LOCK(pUmDevice, flags);
4109 LM_EnableExtLoopBack(pDevice, nrq->nrq_speed);
4110 BCM5700_PHY_UNLOCK(pUmDevice, flags);
4113 case NICE_CMD_DISABLE_EXT_LOOPBACK:
4114 if (pDevice->LoopBackMode !=
4115 LM_EXT_LOOP_BACK_MODE) {
4119 BCM5700_PHY_LOCK(pUmDevice, flags);
4120 LM_DisableExtLoopBack(pDevice);
4121 BCM5700_PHY_UNLOCK(pUmDevice, flags);
4124 case NICE_CMD_INTERRUPT_TEST:
4125 nrq->nrq_intr_test_result =
4126 b57_test_intr(pUmDevice);
4129 case NICE_CMD_LOOPBACK_TEST:
4131 switch (nrq->nrq_looptype) {
4132 case NICE_LOOPBACK_TESTTYPE_EXT:
/* Exactly one speed bit must be set for the external loopback test. */
4133 if ((nrq->nrq_loopspeed & ~NICE_LOOPBACK_TEST_SPEEDMASK) ||
4134 !(nrq->nrq_loopspeed & NICE_LOOPBACK_TEST_SPEEDMASK))
4136 switch (nrq->nrq_loopspeed) {
4137 case NICE_LOOPBACK_TEST_10MBPS:
4138 value = LM_LINE_SPEED_10MBPS;
4140 case NICE_LOOPBACK_TEST_100MBPS:
4141 value = LM_LINE_SPEED_100MBPS;
4143 case NICE_LOOPBACK_TEST_1000MBPS:
4144 value = LM_LINE_SPEED_1000MBPS;
4149 case NICE_LOOPBACK_TESTTYPE_MAC:
4150 case NICE_LOOPBACK_TESTTYPE_PHY:
4151 b57_suspend_chip(pUmDevice);
4152 value = b57_test_loopback(pUmDevice,
4153 nrq->nrq_looptype, value);
4154 b57_resume_chip(pUmDevice);
4159 /* A '1' indicates success */
/* Allocate a DMA-coherent buffer and hand its physical address to user. */
4167 case NICE_CMD_KMALLOC_PHYS: {
4168 #if (LINUX_VERSION_CODE >= 0x020400)
4173 struct page *pg, *last_pg;
/* Find a free slot in the per-device diagnostic-buffer table. */
4175 for (i = 0; i < MAX_MEM2; i++) {
4176 if (pUmDevice->mem_size_list2[i] == 0)
4181 ptr = pci_alloc_consistent(pUmDevice->pdev,
4182 nrq->nrq_size, &mapping);
4186 pUmDevice->mem_size_list2[i] = nrq->nrq_size;
4187 pUmDevice->mem_list2[i] = ptr;
4188 pUmDevice->dma_list2[i] = mapping;
4190 /* put pci mapping at the beginning of buffer */
4191 *((__u64 *) ptr) = (__u64) mapping;
4193 /* Probably won't work on some architectures */
4194 /* get CPU mapping */
4195 cpu_pa = (__u64) virt_to_phys(ptr);
4196 pUmDevice->cpu_pa_list2[i] = cpu_pa;
4197 nrq->nrq_phys_addr_lo = (__u32) cpu_pa;
4198 nrq->nrq_phys_addr_hi = (__u32) (cpu_pa >> 32);
/* Reserve the backing pages so they can be mmap'ed by the diagnostic. */
4200 pg = virt_to_page(ptr);
4201 last_pg = virt_to_page(ptr + nrq->nrq_size - 1);
4203 #if (LINUX_VERSION_CODE > 0x020500)
4204 SetPageReserved(pg);
4206 mem_map_reserve(pg);
4217 case NICE_CMD_KFREE_PHYS: {
/* Locate the buffer by the CPU physical address previously returned. */
4221 cpu_pa = (__u64) nrq->nrq_phys_addr_lo +
4222 ((__u64) nrq->nrq_phys_addr_hi << 32);
4223 for (i = 0; i < MAX_MEM2; i++) {
4224 if (pUmDevice->cpu_pa_list2[i] ==
4233 bcm5700_freemem2(pUmDevice, i);
4237 case NICE_CMD_SET_WRITE_PROTECT:
4238 if (nrq->nrq_write_protect)
4239 pDevice->Flags |= EEPROM_WP_FLAG;
4241 pDevice->Flags &= ~EEPROM_WP_FLAG;
4243 case NICE_CMD_GET_STATS_BLOCK: {
4244 PT3_STATS_BLOCK pStats =
4245 (PT3_STATS_BLOCK)pDevice->pStatsBlkVirt;
4246 if (mm_copy_to_user(nrq->nrq_stats_useraddr,
4247 pStats, nrq->nrq_stats_size)) {
4252 case NICE_CMD_CLR_STATS_BLOCK: {
4254 PT3_STATS_BLOCK pStats =
4255 (PT3_STATS_BLOCK)pDevice->pStatsBlkVirt;
4257 memset(pStats, 0, sizeof(T3_STATS_BLOCK));
/* Some ASIC revs also need the on-chip stats memory cleared by hand. */
4258 if (T3_ASIC_REV(pDevice->ChipRevId) ==
4262 for(j = 0x0300; j < 0x0b00; j = j + 4) {
4263 MEM_WR_OFFSET(pDevice, j, 0);
4273 #endif /* NICE_SUPPORT */
/* Everything else falls through to the ethtool handler. */
4276 return netdev_ethtool_ioctl(dev, (void *) rq->ifr_data);
/*
 * bcm5700_do_rx_mode() - program the hardware receive filter from dev state.
 *
 * Rebuilds the multicast address filter from dev->mc_list, then toggles
 * the accept-all-multicast and promiscuous bits in pDevice->ReceiveMask
 * to track IFF_ALLMULTI / IFF_PROMISC — each bit is written only when it
 * actually differs from the current mask, to avoid redundant hardware
 * updates. NOTE(review): unlike bcm5700_set_rx_mode() below, this
 * variant takes no PHY lock — presumably the caller already holds it
 * (see bcm5700_change_mtu); confirm before calling from a new path.
 */
4284 STATIC void bcm5700_do_rx_mode(struct net_device *dev)
4286 PUM_DEVICE_BLOCK pUmDevice = (PUM_DEVICE_BLOCK)dev->priv;
4287 PLM_DEVICE_BLOCK pDevice = (PLM_DEVICE_BLOCK) pUmDevice;
4289 struct dev_mc_list *mclist;
/* Reload the multicast filter from scratch. */
4291 LM_MulticastClear(pDevice);
4292 for (i = 0, mclist = dev->mc_list; mclist && i < dev->mc_count;
4293 i++, mclist = mclist->next) {
4294 LM_MulticastAdd(pDevice, (PLM_UINT8) &mclist->dmi_addr);
4296 if (dev->flags & IFF_ALLMULTI) {
4297 if (!(pDevice->ReceiveMask & LM_ACCEPT_ALL_MULTICAST)) {
4298 LM_SetReceiveMask(pDevice,
4299 pDevice->ReceiveMask | LM_ACCEPT_ALL_MULTICAST);
4302 else if (pDevice->ReceiveMask & LM_ACCEPT_ALL_MULTICAST) {
4303 LM_SetReceiveMask(pDevice,
4304 pDevice->ReceiveMask & ~LM_ACCEPT_ALL_MULTICAST);
4306 if (dev->flags & IFF_PROMISC) {
4307 if (!(pDevice->ReceiveMask & LM_PROMISCUOUS_MODE)) {
4308 LM_SetReceiveMask(pDevice,
4309 pDevice->ReceiveMask | LM_PROMISCUOUS_MODE);
4312 else if (pDevice->ReceiveMask & LM_PROMISCUOUS_MODE) {
4313 LM_SetReceiveMask(pDevice,
4314 pDevice->ReceiveMask & ~LM_PROMISCUOUS_MODE);
/*
 * bcm5700_set_rx_mode() - net_device set_multicast_list entry point.
 *
 * Same filter-programming logic as bcm5700_do_rx_mode() (multicast list,
 * IFF_ALLMULTI, IFF_PROMISC), but wrapped in the PHY spinlock since it
 * is invoked directly by the network stack.
 */
4319 STATIC void bcm5700_set_rx_mode(struct net_device *dev)
4321 PUM_DEVICE_BLOCK pUmDevice = (PUM_DEVICE_BLOCK)dev->priv;
4322 PLM_DEVICE_BLOCK pDevice = (PLM_DEVICE_BLOCK) pUmDevice;
4324 struct dev_mc_list *mclist;
4325 unsigned long flags;
/* Serialize against other register/PHY access. */
4327 BCM5700_PHY_LOCK(pUmDevice, flags);
4329 LM_MulticastClear(pDevice);
4330 for (i = 0, mclist = dev->mc_list; mclist && i < dev->mc_count;
4331 i++, mclist = mclist->next) {
4332 LM_MulticastAdd(pDevice, (PLM_UINT8) &mclist->dmi_addr);
/* Toggle each receive-mask bit only when it differs from the flag. */
4334 if (dev->flags & IFF_ALLMULTI) {
4335 if (!(pDevice->ReceiveMask & LM_ACCEPT_ALL_MULTICAST)) {
4336 LM_SetReceiveMask(pDevice,
4337 pDevice->ReceiveMask | LM_ACCEPT_ALL_MULTICAST);
4340 else if (pDevice->ReceiveMask & LM_ACCEPT_ALL_MULTICAST) {
4341 LM_SetReceiveMask(pDevice,
4342 pDevice->ReceiveMask & ~LM_ACCEPT_ALL_MULTICAST);
4344 if (dev->flags & IFF_PROMISC) {
4345 if (!(pDevice->ReceiveMask & LM_PROMISCUOUS_MODE)) {
4346 LM_SetReceiveMask(pDevice,
4347 pDevice->ReceiveMask | LM_PROMISCUOUS_MODE);
4350 else if (pDevice->ReceiveMask & LM_PROMISCUOUS_MODE) {
4351 LM_SetReceiveMask(pDevice,
4352 pDevice->ReceiveMask & ~LM_PROMISCUOUS_MODE);
4355 BCM5700_PHY_UNLOCK(pUmDevice, flags);
4359 * Set the hardware MAC address.
/*
 * bcm5700_set_mac_addr() - net_device set_mac_address entry point.
 *
 * Validates the new address, copies it into dev->dev_addr, and writes it
 * to the hardware only when the interface is open (otherwise the address
 * is presumably programmed at open time — confirm against bcm5700_open).
 */
4361 STATIC int bcm5700_set_mac_addr(struct net_device *dev, void *p)
4363 struct sockaddr *addr=p;
4364 PLM_DEVICE_BLOCK pDevice = (PLM_DEVICE_BLOCK) dev->priv;
4365 UM_DEVICE_BLOCK *pUmDevice = (UM_DEVICE_BLOCK *) pDevice;
/* Reject multicast/zero addresses before touching device state. */
4367 if(is_valid_ether_addr(addr->sa_data)){
4369 memcpy(dev->dev_addr, addr->sa_data,dev->addr_len);
4370 if (pUmDevice->opened)
4371 LM_SetMacAddress(pDevice, dev->dev_addr);
4377 #if T3_JUMBO_RCV_RCB_ENTRY_COUNT
/*
 * bcm5700_change_mtu() - net_device change_mtu entry point (jumbo builds).
 *
 * Validates the requested MTU against the chip's packet-size limits and
 * jumbo capability, then — if the interface is open — stops the queue,
 * shuts the chip down, resizes the rx descriptor rings and buffer sizes,
 * and reinitializes the adapter under the PHY lock. Rejected while the
 * device is suspended. On 5714-family parts TSO is forcibly disabled
 * when jumbo frames are enabled (hardware restriction per the printk).
 */
4378 STATIC int bcm5700_change_mtu(struct net_device *dev, int new_mtu)
4380 int pkt_size = new_mtu + ETHERNET_PACKET_HEADER_SIZE;
4381 PUM_DEVICE_BLOCK pUmDevice = (PUM_DEVICE_BLOCK)dev->priv;
4382 PLM_DEVICE_BLOCK pDevice = &pUmDevice->lm_dev;
4383 unsigned long flags;
/* Range check: full frame must fit between min and jumbo max. */
4386 if ((pkt_size < MIN_ETHERNET_PACKET_SIZE_NO_CRC) ||
4387 (pkt_size > MAX_ETHERNET_JUMBO_PACKET_SIZE_NO_CRC)) {
/* Non-jumbo-capable chips may not exceed the standard frame size. */
4391 if ( !(pDevice->Flags & JUMBO_CAPABLE_FLAG) &&
4392 (pkt_size > MAX_ETHERNET_PACKET_SIZE_NO_CRC) ) {
4396 if (pUmDevice->suspended)
4399 if (pUmDevice->opened && (new_mtu != dev->mtu) &&
4400 (pDevice->Flags & JUMBO_CAPABLE_FLAG)) {
4404 BCM5700_PHY_LOCK(pUmDevice, flags);
4406 /* Fix for RQM 289636 */
4407 /* netif_stop_queue(dev); */
4408 bcm5700_netif_stop_queue(dev);
4409 bcm5700_shutdown(pUmDevice);
4410 bcm5700_freemem(dev);
/* Never program an Rx/Tx MTU below the standard frame size. */
4414 if (pkt_size < MAX_ETHERNET_PACKET_SIZE_NO_CRC) {
4415 pDevice->RxMtu = pDevice->TxMtu =
4416 MAX_ETHERNET_PACKET_SIZE_NO_CRC;
4419 pDevice->RxMtu = pDevice->TxMtu = pkt_size;
/* Size the jumbo ring only when a jumbo MTU is in effect. */
4422 if (dev->mtu <= 1514) {
4423 pDevice->RxJumboDescCnt = 0;
4425 else if (pDevice->Flags & JUMBO_CAPABLE_FLAG){
4426 pDevice->RxJumboDescCnt =
4427 rx_jumbo_desc_cnt[pUmDevice->index];
4429 pDevice->RxPacketDescCnt = pDevice->RxJumboDescCnt +
4430 pDevice->RxStdDescCnt;
/* Round the jumbo buffer size up to a cache-line multiple. */
4432 pDevice->RxJumboBufferSize = (pDevice->RxMtu + 8 /* CRC + VLAN */ +
4433 COMMON_CACHE_LINE_SIZE-1) & ~COMMON_CACHE_LINE_MASK;
4436 if (T3_ASIC_5714_FAMILY(pDevice->ChipRevId) &&
4437 (dev->mtu > 1514) ) {
4438 if (dev->features & NETIF_F_TSO) {
4439 dev->features &= ~NETIF_F_TSO;
4440 printk(KERN_ALERT "%s: TSO previously enabled. Jumbo Frames and TSO cannot simultaneously be enabled. Jumbo Frames enabled. TSO disabled.\n", dev->name);
/* Bring the adapter back up with the new ring/buffer configuration. */
4446 LM_InitializeAdapter(pDevice);
4447 bcm5700_do_rx_mode(dev);
4448 bcm5700_set_vlan_mode(pUmDevice);
4449 bcm5700_init_counters(pUmDevice);
4450 if (memcmp(dev->dev_addr, pDevice->NodeAddress, 6)) {
4451 LM_SetMacAddress(pDevice, dev->dev_addr);
4453 netif_start_queue(dev);
4454 bcm5700_intr_on(pUmDevice);
4456 BCM5700_PHY_UNLOCK(pUmDevice, flags);
4463 #if (LINUX_VERSION_CODE < 0x020300)
/*
 * bcm5700_probe() - legacy (pre-2.3 kernel) device discovery.
 *
 * Walks every PCI Ethernet-class device, matches vendor/device and
 * subsystem IDs against bcm5700_pci_tbl, and calls bcm5700_init_one()
 * for each match. Returns 0 if at least one card was initialized,
 * -ENODEV otherwise. The dev argument is unused by the visible code.
 */
4465 bcm5700_probe(struct net_device *dev)
4467 int cards_found = 0;
4468 struct pci_dev *pdev = NULL;
4469 struct pci_device_id *pci_tbl;
4472 if ( ! pci_present())
4475 pci_tbl = bcm5700_pci_tbl;
/* Enumerate all Ethernet-class PCI functions. */
4476 while ((pdev = pci_find_class(PCI_CLASS_NETWORK_ETHERNET << 8, pdev))) {
4479 pci_read_config_word(pdev, PCI_SUBSYSTEM_VENDOR_ID, &ssvid);
4480 pci_read_config_word(pdev, PCI_SUBSYSTEM_ID, &ssid);
/* Match against the driver's ID table; PCI_ANY_ID wildcards apply. */
4481 for (idx = 0; pci_tbl[idx].vendor; idx++) {
4482 if ((pci_tbl[idx].vendor == PCI_ANY_ID ||
4483 pci_tbl[idx].vendor == pdev->vendor) &&
4484 (pci_tbl[idx].device == PCI_ANY_ID ||
4485 pci_tbl[idx].device == pdev->device) &&
4486 (pci_tbl[idx].subvendor == PCI_ANY_ID ||
4487 pci_tbl[idx].subvendor == ssvid) &&
4488 (pci_tbl[idx].subdevice == PCI_ANY_ID ||
4489 pci_tbl[idx].subdevice == ssid))
/* vendor == 0 marks the table terminator: no match for this device. */
4495 if (pci_tbl[idx].vendor == 0)
4499 if (bcm5700_init_one(pdev, &pci_tbl[idx]) == 0)
4503 return cards_found ? 0 : -ENODEV;
/* Legacy module entry point (pre-2.3 kernels): probe all devices. */
4507 int init_module(void)
4509 return bcm5700_probe(NULL);
/*
 * cleanup_module() - legacy module exit: tear down every registered NIC.
 *
 * Walks the root_tigon3_dev linked list, removing each device's /proc
 * entry, unregistering the netdev, unmapping its registers, and freeing
 * the net_device (kfree pre-2.6, free_netdev on 2.6+).
 */
4512 void cleanup_module(void)
4514 struct net_device *next_dev;
4515 PUM_DEVICE_BLOCK pUmDevice;
4518 bcm5700_proc_remove_notifier();
4520 /* No need to check MOD_IN_USE, as sys_delete_module() checks. */
4521 while (root_tigon3_dev) {
4522 pUmDevice = (PUM_DEVICE_BLOCK)root_tigon3_dev->priv;
4524 bcm5700_proc_remove_dev(root_tigon3_dev);
/* Save the link before freeing the current list head. */
4526 next_dev = pUmDevice->next_module;
4527 unregister_netdev(root_tigon3_dev);
4528 if (pUmDevice->lm_dev.pMappedMemBase)
4529 iounmap(pUmDevice->lm_dev.pMappedMemBase);
4530 #if (LINUX_VERSION_CODE < 0x020600)
4531 kfree(root_tigon3_dev);
4533 free_netdev(root_tigon3_dev);
4535 root_tigon3_dev = next_dev;
4538 unregister_ioctl32_conversion(SIOCNICE);
4543 #else /* LINUX_VERSION_CODE < 0x020300 */
4546 #if (LINUX_VERSION_CODE >= 0x2060b)
/*
 * bcm5700_suspend() - PCI power-management suspend hook.
 *
 * Signature varies with kernel version (pm_message_t / u32 / void).
 * If the interface is running: detach it from the stack, shut the chip
 * down, and drop the device to D3.
 */
4547 static int bcm5700_suspend(struct pci_dev *pdev, pm_message_t state)
4549 #if (LINUX_VERSION_CODE >= 0x020406)
4550 static int bcm5700_suspend (struct pci_dev *pdev, u32 state)
4552 static void bcm5700_suspend (struct pci_dev *pdev)
4556 struct net_device *dev = (struct net_device *) pci_get_drvdata(pdev);
4557 PUM_DEVICE_BLOCK pUmDevice = (PUM_DEVICE_BLOCK) dev->priv;
4558 PLM_DEVICE_BLOCK pDevice = &pUmDevice->lm_dev;
/* Nothing to do if the interface is down. */
4560 if (!netif_running(dev))
4561 #if (LINUX_VERSION_CODE >= 0x020406)
4567 netif_device_detach (dev);
4568 bcm5700_shutdown(pUmDevice);
4570 LM_SetPowerState(pDevice, LM_POWER_STATE_D3);
4572 /* pci_power_off(pdev, -1);*/
4573 #if (LINUX_VERSION_CODE >= 0x020406)
/*
 * bcm5700_resume() - PCI power-management resume hook.
 *
 * Mirror of bcm5700_suspend(): if the interface was running, reattach
 * it, restore D0 power, and replenish the receive buffers.
 */
4579 #if (LINUX_VERSION_CODE >= 0x020406)
4580 static int bcm5700_resume(struct pci_dev *pdev)
4582 static void bcm5700_resume(struct pci_dev *pdev)
4585 struct net_device *dev = (struct net_device *) pci_get_drvdata(pdev);
4586 PUM_DEVICE_BLOCK pUmDevice = (PUM_DEVICE_BLOCK) dev->priv;
4587 PLM_DEVICE_BLOCK pDevice = &pUmDevice->lm_dev;
4589 if (!netif_running(dev))
4590 #if (LINUX_VERSION_CODE >= 0x020406)
4595 /* pci_power_on(pdev);*/
4596 netif_device_attach(dev);
4597 LM_SetPowerState(pDevice, LM_POWER_STATE_D0);
/* Refill rx packet buffers that were released during suspend. */
4598 MM_InitializeUmPackets(pDevice);
4600 #if (LINUX_VERSION_CODE >= 0x020406)
/* PCI driver descriptor (GNU-style named initializers, pre-C99 form). */
4606 static struct pci_driver bcm5700_pci_driver = {
4607 name: bcm5700_driver,
4608 id_table: bcm5700_pci_tbl,
4609 probe: bcm5700_init_one,
4610 remove: __devexit_p(bcm5700_remove_one),
4611 suspend: bcm5700_suspend,
4612 resume: bcm5700_resume,
/* Module init (2.3+ kernels): register the PCI driver. */
4616 static int __init bcm5700_init_module (void)
4618 return pci_module_init(&bcm5700_pci_driver);
/* Module exit (2.3+ kernels): drop /proc hooks, unregister PCI driver. */
4622 static void __exit bcm5700_cleanup_module (void)
4625 bcm5700_proc_remove_notifier();
4627 pci_unregister_driver(&bcm5700_pci_driver);
4631 module_init(bcm5700_init_module);
4632 module_exit(bcm5700_cleanup_module);
4641 #ifdef BCM_NAPI_RXPOLL
/*
 * MM_ScheduleRxPoll() - queue the netdev for NAPI rx polling.
 *
 * Returns LM_STATUS_SUCCESS if the poll was scheduled, or
 * LM_STATUS_FAILURE if it was already pending (netif_rx_schedule_prep
 * refused).
 */
4643 MM_ScheduleRxPoll(LM_DEVICE_BLOCK *pDevice)
4645 struct net_device *dev = ((UM_DEVICE_BLOCK *) pDevice)->dev;
4647 if (netif_rx_schedule_prep(dev)) {
4648 __netif_rx_schedule(dev);
4649 return LM_STATUS_SUCCESS;
4651 return LM_STATUS_FAILURE;
/* OS-abstraction shim: 16-bit PCI config read; always returns success. */
4656 MM_ReadConfig16(PLM_DEVICE_BLOCK pDevice, LM_UINT32 Offset,
4657 LM_UINT16 *pValue16)
4659 UM_DEVICE_BLOCK *pUmDevice;
4661 pUmDevice = (UM_DEVICE_BLOCK *) pDevice;
4662 pci_read_config_word(pUmDevice->pdev, Offset, (u16 *) pValue16);
4663 return LM_STATUS_SUCCESS;
/* OS-abstraction shim: 32-bit PCI config read; always returns success. */
4667 MM_ReadConfig32(PLM_DEVICE_BLOCK pDevice, LM_UINT32 Offset,
4668 LM_UINT32 *pValue32)
4670 UM_DEVICE_BLOCK *pUmDevice;
4672 pUmDevice = (UM_DEVICE_BLOCK *) pDevice;
4673 pci_read_config_dword(pUmDevice->pdev, Offset, (u32 *) pValue32);
4674 return LM_STATUS_SUCCESS;
/* OS-abstraction shim: 16-bit PCI config write; always returns success. */
4678 MM_WriteConfig16(PLM_DEVICE_BLOCK pDevice, LM_UINT32 Offset,
4681 UM_DEVICE_BLOCK *pUmDevice;
4683 pUmDevice = (UM_DEVICE_BLOCK *) pDevice;
4684 pci_write_config_word(pUmDevice->pdev, Offset, Value16);
4685 return LM_STATUS_SUCCESS;
/* OS-abstraction shim: 32-bit PCI config write; always returns success. */
4689 MM_WriteConfig32(PLM_DEVICE_BLOCK pDevice, LM_UINT32 Offset,
4692 UM_DEVICE_BLOCK *pUmDevice;
4694 pUmDevice = (UM_DEVICE_BLOCK *) pDevice;
4695 pci_write_config_dword(pUmDevice->pdev, Offset, Value32);
4696 return LM_STATUS_SUCCESS;
/*
 * MM_AllocateSharedMemory() - allocate a DMA-coherent block for the LM core.
 *
 * Allocates via pci_alloc_consistent, records virt/dma/size in the
 * per-device tracking lists (freed later by the matching teardown path),
 * zeroes the block, and returns both the CPU pointer and the bus address.
 * Returns LM_STATUS_FAILURE on allocation failure.
 */
4700 MM_AllocateSharedMemory(PLM_DEVICE_BLOCK pDevice, LM_UINT32 BlockSize,
4701 PLM_VOID *pMemoryBlockVirt, PLM_PHYSICAL_ADDRESS pMemoryBlockPhy,
4705 PUM_DEVICE_BLOCK pUmDevice = (PUM_DEVICE_BLOCK) pDevice;
4708 pvirt = pci_alloc_consistent(pUmDevice->pdev, BlockSize,
4711 return LM_STATUS_FAILURE;
/* Track the allocation so teardown can free it. */
4713 pUmDevice->mem_list[pUmDevice->mem_list_num] = pvirt;
4714 pUmDevice->dma_list[pUmDevice->mem_list_num] = mapping;
4715 pUmDevice->mem_size_list[pUmDevice->mem_list_num++] = BlockSize;
4716 memset(pvirt, 0, BlockSize);
4717 *pMemoryBlockVirt = (PLM_VOID) pvirt;
4718 MM_SetAddr(pMemoryBlockPhy, mapping);
4719 return LM_STATUS_SUCCESS;
/*
 * MM_AllocateMemory() - allocate plain (non-DMA) kernel memory for LM core.
 *
 * Uses kmalloc(GFP_ATOMIC) — caller may be in atomic context — capped at
 * 128 KB (the slab allocator's historical limit). The block is tracked in
 * the same lists as DMA memory but with size 0, which the free path uses
 * to distinguish kmalloc'd blocks from pci_alloc_consistent blocks (per
 * the original comment at line 4742). Zeroes the block before returning.
 */
4723 MM_AllocateMemory(PLM_DEVICE_BLOCK pDevice, LM_UINT32 BlockSize,
4724 PLM_VOID *pMemoryBlockVirt)
4727 PUM_DEVICE_BLOCK pUmDevice = (PUM_DEVICE_BLOCK) pDevice;
4730 /* Maximum in slab.c */
4731 if (BlockSize > 131072) {
4732 goto MM_Alloc_error;
4735 pvirt = kmalloc(BlockSize,GFP_ATOMIC);
4737 goto MM_Alloc_error;
4739 pUmDevice->mem_list[pUmDevice->mem_list_num] = pvirt;
4740 pUmDevice->dma_list[pUmDevice->mem_list_num] = 0;
4741 pUmDevice->mem_size_list[pUmDevice->mem_list_num++] = 0;
4742 /* mem_size_list[i] == 0 indicates that the memory should be freed */
4744 memset(pvirt, 0, BlockSize);
4745 *pMemoryBlockVirt = pvirt;
4746 return LM_STATUS_SUCCESS;
/* Single failure exit: warn the admin that parameters may be too high. */
4749 printk(KERN_WARNING "%s: Memory allocation failed - buffer parameters may be set too high\n", pUmDevice->dev->name);
4750 return LM_STATUS_FAILURE;
/*
 * MM_MapMemBase() - ioremap BAR 0 (the chip's standard register window).
 *
 * Maps sizeof(T3_STD_MEM_MAP) bytes uncached and stores the pointer in
 * pDevice->pMappedMemBase; unmapped at module cleanup via iounmap.
 */
4754 MM_MapMemBase(PLM_DEVICE_BLOCK pDevice)
4756 PUM_DEVICE_BLOCK pUmDevice = (PUM_DEVICE_BLOCK) pDevice;
4758 pDevice->pMappedMemBase = ioremap_nocache(
4759 pci_resource_start(pUmDevice->pdev, 0), sizeof(T3_STD_MEM_MAP));
4760 if (pDevice->pMappedMemBase == 0)
4761 return LM_STATUS_FAILURE;
4763 return LM_STATUS_SUCCESS;
/*
 * MM_InitializeUmPackets() - attach skbuffs to every rx packet descriptor
 * and compute the rx buffer replenish thresholds.
 *
 * For each descriptor on the RxPacketFreeQ lacking an skb, allocates one
 * (+2 bytes for alignment headroom); descriptors whose allocation fails
 * are parked on rx_out_of_buf_q to be retried later. Thresholds are then
 * derived from the ring size, with a special case forcing all
 * replenishment into the ISR on 5700-rev ASICs, and clamping when a
 * jumbo ring is configured so the threshold never exceeds either ring.
 */
4767 MM_InitializeUmPackets(PLM_DEVICE_BLOCK pDevice)
4770 struct sk_buff *skb;
4771 PUM_DEVICE_BLOCK pUmDevice = (PUM_DEVICE_BLOCK) pDevice;
4772 PUM_PACKET pUmPacket;
4775 for (i = 0; i < pDevice->RxPacketDescCnt; i++) {
4776 pPacket = QQ_PopHead(&pDevice->RxPacketFreeQ.Container);
4777 pUmPacket = (PUM_PACKET) pPacket;
4779 printk(KERN_DEBUG "Bad RxPacketFreeQ\n");
4781 if (pUmPacket->skbuff == 0) {
/* +2 leaves room for the rx_buf_align reserve below. */
4782 skb = dev_alloc_skb(pPacket->u.Rx.RxBufferSize + 2);
4784 pUmPacket->skbuff = 0;
/* Allocation failed: park on out-of-buffer queue for later retry. */
4786 &pUmDevice->rx_out_of_buf_q.Container,
4790 pUmPacket->skbuff = skb;
4791 skb->dev = pUmDevice->dev;
4792 skb_reserve(skb, pUmDevice->rx_buf_align);
4794 QQ_PushTail(&pDevice->RxPacketFreeQ.Container, pPacket);
4796 if (T3_ASIC_REV(pUmDevice->lm_dev.ChipRevId) == T3_ASIC_REV_5700) {
4797 /* reallocate buffers in the ISR */
4798 pUmDevice->rx_buf_repl_thresh = 0;
4799 pUmDevice->rx_buf_repl_panic_thresh = 0;
4800 pUmDevice->rx_buf_repl_isr_limit = 0;
/* Normal case: replenish at 1/8 used, panic at 7/8 used. */
4803 pUmDevice->rx_buf_repl_thresh = pDevice->RxPacketDescCnt / 8;
4804 pUmDevice->rx_buf_repl_panic_thresh =
4805 pDevice->RxPacketDescCnt * 7 / 8;
4807 /* This limits the time spent in the ISR when the receiver */
4808 /* is in a steady state of being overrun. */
4809 pUmDevice->rx_buf_repl_isr_limit = pDevice->RxPacketDescCnt / 8;
4811 #if T3_JUMBO_RCV_RCB_ENTRY_COUNT
/* Clamp so the thresholds never exceed the smaller of the two rings. */
4812 if (pDevice->RxJumboDescCnt != 0) {
4813 if (pUmDevice->rx_buf_repl_thresh >=
4814 pDevice->RxJumboDescCnt) {
4816 pUmDevice->rx_buf_repl_thresh =
4817 pUmDevice->rx_buf_repl_panic_thresh =
4818 pDevice->RxJumboDescCnt - 1;
4820 if (pUmDevice->rx_buf_repl_thresh >=
4821 pDevice->RxStdDescCnt) {
4823 pUmDevice->rx_buf_repl_thresh =
4824 pUmDevice->rx_buf_repl_panic_thresh =
4825 pDevice->RxStdDescCnt - 1;
4830 return LM_STATUS_SUCCESS;
4834 MM_GetConfig(PLM_DEVICE_BLOCK pDevice)
4836 PUM_DEVICE_BLOCK pUmDevice = (PUM_DEVICE_BLOCK) pDevice;
4837 int index = pUmDevice->index;
4838 struct net_device *dev = pUmDevice->dev;
4840 if (index >= MAX_UNITS)
4841 return LM_STATUS_SUCCESS;
4843 #if LINUX_KERNEL_VERSION < 0x0020609
4845 bcm5700_validate_param_range(pUmDevice, &auto_speed[index], "auto_speed",
4847 if (auto_speed[index] == 0)
4848 pDevice->DisableAutoNeg = TRUE;
4850 pDevice->DisableAutoNeg = FALSE;
4852 if (line_speed[index] == 0) {
4853 pDevice->RequestedLineSpeed = LM_LINE_SPEED_AUTO;
4854 pDevice->DisableAutoNeg = FALSE;
4857 bcm5700_validate_param_range(pUmDevice, &full_duplex[index],
4858 "full_duplex", 0, 1, 1);
4859 if (full_duplex[index]) {
4860 pDevice->RequestedDuplexMode = LM_DUPLEX_MODE_FULL;
4863 pDevice->RequestedDuplexMode = LM_DUPLEX_MODE_HALF;
4866 if (line_speed[index] == 1000) {
4867 pDevice->RequestedLineSpeed = LM_LINE_SPEED_1000MBPS;
4868 if (pDevice->PhyFlags & PHY_NO_GIGABIT) {
4869 pDevice->RequestedLineSpeed =
4870 LM_LINE_SPEED_100MBPS;
4871 printk(KERN_WARNING "%s-%d: Invalid line_speed parameter (1000), using 100\n", bcm5700_driver, index);
4874 if ((pDevice->TbiFlags & ENABLE_TBI_FLAG) &&
4875 !full_duplex[index]) {
4876 printk(KERN_WARNING "%s-%d: Invalid full_duplex parameter (0) for fiber, using 1\n", bcm5700_driver, index);
4877 pDevice->RequestedDuplexMode =
4878 LM_DUPLEX_MODE_FULL;
4881 if (!(pDevice->TbiFlags & ENABLE_TBI_FLAG) &&
4882 !auto_speed[index] && !(pDevice->PhyFlags & PHY_IS_FIBER) ) {
4883 printk(KERN_WARNING "%s-%d: Invalid auto_speed parameter (0) for copper, using 1\n", bcm5700_driver, index);
4884 pDevice->DisableAutoNeg = FALSE;
4888 else if ((pDevice->TbiFlags & ENABLE_TBI_FLAG) ||
4889 (pDevice->PhyFlags & PHY_IS_FIBER)){
4890 pDevice->RequestedLineSpeed = LM_LINE_SPEED_AUTO;
4891 pDevice->RequestedDuplexMode = LM_DUPLEX_MODE_FULL;
4892 pDevice->DisableAutoNeg = FALSE;
4893 printk(KERN_WARNING "%s-%d: Invalid line_speed parameter (%d), using auto\n", bcm5700_driver, index, line_speed[index]);
4895 else if (line_speed[index] == 100) {
4897 pDevice->RequestedLineSpeed = LM_LINE_SPEED_100MBPS;
4899 else if (line_speed[index] == 10) {
4901 pDevice->RequestedLineSpeed = LM_LINE_SPEED_10MBPS;
4904 pDevice->RequestedLineSpeed = LM_LINE_SPEED_AUTO;
4905 pDevice->DisableAutoNeg = FALSE;
4906 printk(KERN_WARNING "%s-%d: Invalid line_speed parameter (%d), using 0\n", bcm5700_driver, index, line_speed[index]);
4911 #endif /* LINUX_KERNEL_VERSION */
4913 /* This is an unmanageable switch nic and will have link problems if
4916 if(pDevice->SubsystemVendorId==0x103c && pDevice->SubsystemId==0x3226)
4918 if(pDevice->RequestedLineSpeed != LM_LINE_SPEED_AUTO)
4920 printk(KERN_WARNING "%s-%d: Invalid line_speed parameter (%d), using 0\n",
4921 bcm5700_driver, index, line_speed[index]);
4923 pDevice->RequestedLineSpeed = LM_LINE_SPEED_AUTO;
4924 pDevice->DisableAutoNeg = FALSE;
4927 #if LINUX_KERNEL_VERSION < 0x0020609
4929 pDevice->FlowControlCap = 0;
4930 bcm5700_validate_param_range(pUmDevice, &rx_flow_control[index],
4931 "rx_flow_control", 0, 1, 0);
4932 if (rx_flow_control[index] != 0) {
4933 pDevice->FlowControlCap |= LM_FLOW_CONTROL_RECEIVE_PAUSE;
4935 bcm5700_validate_param_range(pUmDevice, &tx_flow_control[index],
4936 "tx_flow_control", 0, 1, 0);
4937 if (tx_flow_control[index] != 0) {
4938 pDevice->FlowControlCap |= LM_FLOW_CONTROL_TRANSMIT_PAUSE;
4940 bcm5700_validate_param_range(pUmDevice, &auto_flow_control[index],
4941 "auto_flow_control", 0, 1, 0);
4942 if (auto_flow_control[index] != 0) {
4943 if (pDevice->DisableAutoNeg == FALSE) {
4945 pDevice->FlowControlCap |= LM_FLOW_CONTROL_AUTO_PAUSE;
4946 if ((tx_flow_control[index] == 0) &&
4947 (rx_flow_control[index] == 0)) {
4949 pDevice->FlowControlCap |=
4950 LM_FLOW_CONTROL_TRANSMIT_PAUSE |
4951 LM_FLOW_CONTROL_RECEIVE_PAUSE;
4956 if (dev->mtu > 1500) {
4958 if (T3_ASIC_5714_FAMILY(pDevice->ChipRevId) &&
4959 (dev->features & NETIF_F_TSO)) {
4960 dev->features &= ~NETIF_F_TSO;
4961 printk(KERN_ALERT "%s: TSO previously enabled. Jumbo Frames and TSO cannot simultaneously be enabled. Jumbo Frames enabled. TSO disabled.\n", dev->name);
4964 pDevice->RxMtu = dev->mtu + 14;
4967 if ((T3_ASIC_REV(pDevice->ChipRevId) != T3_ASIC_REV_5700) &&
4968 !(pDevice->Flags & BCM5788_FLAG)) {
4969 pDevice->Flags |= USE_TAGGED_STATUS_FLAG;
4970 pUmDevice->timer_interval = HZ;
4971 if ((T3_ASIC_REV(pDevice->ChipRevId) == T3_ASIC_REV_5703) &&
4972 (pDevice->TbiFlags & ENABLE_TBI_FLAG)) {
4973 pUmDevice->timer_interval = HZ/4;
4977 pUmDevice->timer_interval = HZ/10;
4980 bcm5700_validate_param_range(pUmDevice, &tx_pkt_desc_cnt[index],
4981 "tx_pkt_desc_cnt", 1, MAX_TX_PACKET_DESC_COUNT-1, TX_DESC_CNT);
4982 pDevice->TxPacketDescCnt = tx_pkt_desc_cnt[index];
4983 bcm5700_validate_param_range(pUmDevice, &rx_std_desc_cnt[index],
4984 "rx_std_desc_cnt", 1, T3_STD_RCV_RCB_ENTRY_COUNT-1,
4986 pDevice->RxStdDescCnt = rx_std_desc_cnt[index];
4988 #if T3_JUMBO_RCV_RCB_ENTRY_COUNT
4989 bcm5700_validate_param_range(pUmDevice, &rx_jumbo_desc_cnt[index],
4990 "rx_jumbo_desc_cnt", 1, T3_JUMBO_RCV_RCB_ENTRY_COUNT-1,
4993 if (mtu[index] <= 1514)
4994 pDevice->RxJumboDescCnt = 0;
4995 else if(!T3_ASIC_IS_5705_BEYOND(pDevice->ChipRevId)){
4996 pDevice->RxJumboDescCnt = rx_jumbo_desc_cnt[index];
5001 bcm5700_validate_param_range(pUmDevice, &adaptive_coalesce[index],
5002 "adaptive_coalesce", 0, 1, 1);
5003 #ifdef BCM_NAPI_RXPOLL
5004 if (adaptive_coalesce[index]) {
5005 printk(KERN_WARNING "%s-%d: adaptive_coalesce not used in NAPI mode\n", bcm5700_driver, index);
5006 adaptive_coalesce[index] = 0;
5010 pUmDevice->adaptive_coalesce = adaptive_coalesce[index];
5011 if (!pUmDevice->adaptive_coalesce) {
5012 bcm5700_validate_param_range(pUmDevice,
5013 &rx_coalesce_ticks[index], "rx_coalesce_ticks", 0,
5014 MAX_RX_COALESCING_TICKS, RX_COAL_TK);
5015 if ((rx_coalesce_ticks[index] == 0) &&
5016 (rx_max_coalesce_frames[index] == 0)) {
5018 printk(KERN_WARNING "%s-%d: Conflicting rx_coalesce_ticks (0) and rx_max_coalesce_frames (0) parameters, using %d and %d respectively\n",
5019 bcm5700_driver, index, RX_COAL_TK, RX_COAL_FM);
5021 rx_coalesce_ticks[index] = RX_COAL_TK;
5022 rx_max_coalesce_frames[index] = RX_COAL_FM;
5024 pDevice->RxCoalescingTicks = pUmDevice->rx_curr_coalesce_ticks =
5025 rx_coalesce_ticks[index];
5026 #ifdef BCM_NAPI_RXPOLL
5027 pDevice->RxCoalescingTicksDuringInt = rx_coalesce_ticks[index];
5030 bcm5700_validate_param_range(pUmDevice,
5031 &rx_max_coalesce_frames[index],
5032 "rx_max_coalesce_frames", 0,
5033 MAX_RX_MAX_COALESCED_FRAMES, RX_COAL_FM);
5035 pDevice->RxMaxCoalescedFrames =
5036 pUmDevice->rx_curr_coalesce_frames =
5037 rx_max_coalesce_frames[index];
5038 #ifdef BCM_NAPI_RXPOLL
5039 pDevice->RxMaxCoalescedFramesDuringInt =
5040 rx_max_coalesce_frames[index];
5043 bcm5700_validate_param_range(pUmDevice,
5044 &tx_coalesce_ticks[index], "tx_coalesce_ticks", 0,
5045 MAX_TX_COALESCING_TICKS, TX_COAL_TK);
5046 if ((tx_coalesce_ticks[index] == 0) &&
5047 (tx_max_coalesce_frames[index] == 0)) {
5049 printk(KERN_WARNING "%s-%d: Conflicting tx_coalesce_ticks (0) and tx_max_coalesce_frames (0) parameters, using %d and %d respectively\n",
5050 bcm5700_driver, index, TX_COAL_TK, TX_COAL_FM);
5052 tx_coalesce_ticks[index] = TX_COAL_TK;
5053 tx_max_coalesce_frames[index] = TX_COAL_FM;
5055 pDevice->TxCoalescingTicks = tx_coalesce_ticks[index];
5056 bcm5700_validate_param_range(pUmDevice,
5057 &tx_max_coalesce_frames[index],
5058 "tx_max_coalesce_frames", 0,
5059 MAX_TX_MAX_COALESCED_FRAMES, TX_COAL_FM);
5060 pDevice->TxMaxCoalescedFrames = tx_max_coalesce_frames[index];
5061 pUmDevice->tx_curr_coalesce_frames =
5062 pDevice->TxMaxCoalescedFrames;
5064 bcm5700_validate_param_range(pUmDevice,
5065 &stats_coalesce_ticks[index], "stats_coalesce_ticks",
5066 0, MAX_STATS_COALESCING_TICKS, ST_COAL_TK);
5067 if (adaptive_coalesce[index]) {
5068 printk(KERN_WARNING "%s-%d: Invalid stats_coalesce_ticks parameter set with with adaptive_coalesce parameter. Using adaptive_coalesce.\n", bcm5700_driver, index);
5070 if ((stats_coalesce_ticks[index] > 0) &&
5071 (stats_coalesce_ticks[index] < 100)) {
5072 printk(KERN_WARNING "%s-%d: Invalid stats_coalesce_ticks parameter (%u), using 100\n", bcm5700_driver, index, (unsigned int) stats_coalesce_ticks[index]);
5073 stats_coalesce_ticks[index] = 100;
5074 pDevice->StatsCoalescingTicks = stats_coalesce_ticks[index];
5075 pDevice->StatsCoalescingTicks = stats_coalesce_ticks[index];
5080 pUmDevice->rx_curr_coalesce_frames = RX_COAL_FM;
5081 pUmDevice->rx_curr_coalesce_ticks = RX_COAL_TK;
5082 pUmDevice->tx_curr_coalesce_frames = TX_COAL_FM;
5086 if (T3_ASIC_IS_5705_BEYOND(pDevice->ChipRevId)) {
5087 unsigned int tmpvar;
5089 tmpvar = pDevice->StatsCoalescingTicks / BCM_TIMER_GRANULARITY;
5092 * If the result is zero, the request is too demanding.
5098 pDevice->StatsCoalescingTicks = tmpvar * BCM_TIMER_GRANULARITY;
5100 pUmDevice->statstimer_interval = tmpvar;
5104 bcm5700_validate_param_range(pUmDevice, &enable_wol[index],
5105 "enable_wol", 0, 1, 0);
5106 if (enable_wol[index]) {
5107 pDevice->WakeUpModeCap = LM_WAKE_UP_MODE_MAGIC_PACKET;
5108 pDevice->WakeUpMode = LM_WAKE_UP_MODE_MAGIC_PACKET;
5111 #ifdef INCLUDE_TBI_SUPPORT
5112 if (pDevice->TbiFlags & ENABLE_TBI_FLAG) {
5113 if ((T3_ASIC_REV(pDevice->ChipRevId) == T3_ASIC_REV_5704) ||
5114 (T3_ASIC_REV(pDevice->ChipRevId) == T3_ASIC_REV_5703)) {
5115 /* just poll since we have hardware autoneg. in 5704 */
5116 pDevice->TbiFlags |= TBI_PURE_POLLING_FLAG;
5119 pDevice->TbiFlags |= TBI_POLLING_INTR_FLAG;
5123 bcm5700_validate_param_range(pUmDevice, &scatter_gather[index],
5124 "scatter_gather", 0, 1, 1);
5125 bcm5700_validate_param_range(pUmDevice, &tx_checksum[index],
5126 "tx_checksum", 0, 1, 1);
5127 bcm5700_validate_param_range(pUmDevice, &rx_checksum[index],
5128 "rx_checksum", 0, 1, 1);
5129 if (!(pDevice->TaskOffloadCap & LM_TASK_OFFLOAD_TX_TCP_CHECKSUM)) {
5130 if (tx_checksum[index] || rx_checksum[index]) {
5132 pDevice->TaskToOffload = LM_TASK_OFFLOAD_NONE;
5133 printk(KERN_WARNING "%s-%d: Checksum offload not available on this NIC\n", bcm5700_driver, index);
5137 if (rx_checksum[index]) {
5138 pDevice->TaskToOffload |=
5139 LM_TASK_OFFLOAD_RX_TCP_CHECKSUM |
5140 LM_TASK_OFFLOAD_RX_UDP_CHECKSUM;
5142 if (tx_checksum[index]) {
5143 pDevice->TaskToOffload |=
5144 LM_TASK_OFFLOAD_TX_TCP_CHECKSUM |
5145 LM_TASK_OFFLOAD_TX_UDP_CHECKSUM;
5146 pDevice->Flags |= NO_TX_PSEUDO_HDR_CSUM_FLAG;
5150 bcm5700_validate_param_range(pUmDevice, &enable_tso[index],
5151 "enable_tso", 0, 1, 1);
5153 /* Always enable TSO firmware if supported */
5154 /* This way we can turn it on or off on the fly */
5155 if (pDevice->TaskOffloadCap & LM_TASK_OFFLOAD_TCP_SEGMENTATION)
5157 pDevice->TaskToOffload |=
5158 LM_TASK_OFFLOAD_TCP_SEGMENTATION;
5160 if (enable_tso[index] &&
5161 !(pDevice->TaskToOffload & LM_TASK_OFFLOAD_TCP_SEGMENTATION))
5163 printk(KERN_WARNING "%s-%d: TSO not available on this NIC\n", bcm5700_driver, index);
5167 bcm5700_validate_param_range(pUmDevice, &vlan_tag_mode[index],
5168 "vlan_strip_mode", 0, 2, 0);
5169 pUmDevice->vlan_tag_mode = vlan_tag_mode[index];
5171 pUmDevice->vlan_tag_mode = VLAN_TAG_MODE_NORMAL_STRIP;
5174 #endif /* LINUX_KERNEL_VERSION */
5176 #ifdef BCM_NIC_SEND_BD
5177 bcm5700_validate_param_range(pUmDevice, &nic_tx_bd[index], "nic_tx_bd",
5179 if (nic_tx_bd[index])
5180 pDevice->Flags |= NIC_SEND_BD_FLAG;
5181 if ((pDevice->Flags & ENABLE_PCIX_FIX_FLAG) ||
5182 (T3_ASIC_REV(pDevice->ChipRevId) == T3_ASIC_REV_5705)) {
5183 if (pDevice->Flags & NIC_SEND_BD_FLAG) {
5184 pDevice->Flags &= ~NIC_SEND_BD_FLAG;
5185 printk(KERN_WARNING "%s-%d: Nic Send BDs not available on this NIC or not possible on this system\n", bcm5700_driver, index);
5189 #if defined(CONFIG_PCI_MSI) || defined(CONFIG_PCI_USE_VECTOR)
5190 bcm5700_validate_param_range(pUmDevice, &disable_msi[pUmDevice->index],
5191 "disable_msi", 0, 1, 0);
5194 bcm5700_validate_param_range(pUmDevice, &delay_link[index],
5195 "delay_link", 0, 1, 0);
5197 bcm5700_validate_param_range(pUmDevice, &disable_d3hot[index],
5198 "disable_d3hot", 0, 1, 0);
5199 if (disable_d3hot[index]) {
5202 if (enable_wol[index]) {
5203 pDevice->WakeUpModeCap = LM_WAKE_UP_MODE_NONE;
5204 pDevice->WakeUpMode = LM_WAKE_UP_MODE_NONE;
5205 printk(KERN_WARNING "%s-%d: Wake-On-Lan disabled because D3Hot is disabled\n", bcm5700_driver, index);
5208 pDevice->Flags |= DISABLE_D3HOT_FLAG;
5211 return LM_STATUS_SUCCESS;
/*
 * MM_IndicateRxPackets - OS callback invoked by the LM (low-level/common)
 * layer to hand received packets to Linux.  Drains RxPacketReceivedQ,
 * unmaps each buffer, attaches checksum/VLAN metadata to the skb, pushes
 * it up the stack, then recycles the descriptor with a fresh skb (or parks
 * it on rx_out_of_buf_q when allocation fails).
 * NOTE(review): interior lines are elided in this view (brace structure and
 * some statements are not visible); comments describe only the visible code.
 */
5215 MM_IndicateRxPackets(PLM_DEVICE_BLOCK pDevice)
5217 PUM_DEVICE_BLOCK pUmDevice = (PUM_DEVICE_BLOCK) pDevice;
5219 PUM_PACKET pUmPacket;
5220 struct sk_buff *skb;
5222 int vlan_tag_size = 0;
/* When the NIC keeps the VLAN tag in the frame, allow 4 extra bytes
 * beyond RxMtu in the oversize check below (assignment line elided). */
5224 if (pDevice->ReceiveMask & LM_KEEP_VLAN_TAG)
/* Pop the next completed receive packet from the LM layer's queue. */
5228 pPacket = (PLM_PACKET)
5229 QQ_PopHead(&pDevice->RxPacketReceivedQ.Container);
5232 pUmPacket = (PUM_PACKET) pPacket;
5233 #if ! defined(NO_PCI_UNMAP)
/* Release the streaming DMA mapping before the CPU touches the data. */
5234 pci_unmap_single(pUmDevice->pdev,
5235 pci_unmap_addr(pUmPacket, map[0]),
5236 pPacket->u.Rx.RxBufferSize,
5237 PCI_DMA_FROMDEVICE);
/* Drop errored or oversized frames; the buffer is recycled, not freed. */
5239 if ((pPacket->PacketStatus != LM_STATUS_SUCCESS) ||
5240 ((size = pPacket->PacketSize) >
5241 (pDevice->RxMtu + vlan_tag_size))) {
5245 QQ_PushTail(&pUmDevice->rx_out_of_buf_q.Container, pPacket);
5247 QQ_PushTail(&pDevice->RxPacketFreeQ.Container, pPacket);
5249 pUmDevice->rx_misc_errors++;
5252 skb = pUmPacket->skbuff;
5255 skb->protocol = eth_type_trans(skb, skb->dev);
5256 if (size > pDevice->RxMtu) {
5257 /* Make sure we have a valid VLAN tag */
/* Only a frame carrying an 802.1Q tag (ethertype 0x8100) may legally
 * exceed RxMtu here; anything else is counted and dropped. */
5258 if (htons(skb->protocol) != 0x8100) {
5259 dev_kfree_skb_irq(skb);
5260 pUmDevice->rx_misc_errors++;
/* Propagate hardware TCP/UDP checksum result when rx offload is on:
 * 0xffff (ones-complement "all ok") => stack may skip verification. */
5264 if ((pPacket->Flags & RCV_BD_FLAG_TCP_UDP_CHKSUM_FIELD) &&
5265 (pDevice->TaskToOffload &
5266 LM_TASK_OFFLOAD_RX_TCP_CHECKSUM)) {
5267 if (pPacket->u.Rx.TcpUdpChecksum == 0xffff) {
5269 skb->ip_summed = CHECKSUM_UNNECESSARY;
5271 pUmDevice->rx_good_chksum_count++;
5275 skb->ip_summed = CHECKSUM_NONE;
5276 pUmDevice->rx_bad_chksum_count++;
5280 skb->ip_summed = CHECKSUM_NONE;
/* NICE extension hook: pass the skb to the registered receive handler,
 * smuggling the VLAN tag through skb->cb[] with a 0x7777 signature. */
5283 if( pUmDevice->nice_rx ) {
5284 vlan_tag_t *vlan_tag;
5286 vlan_tag = (vlan_tag_t *) &skb->cb[0];
5287 if (pPacket->Flags & RCV_BD_FLAG_VLAN_TAG) {
5288 vlan_tag->signature = 0x7777;
5289 vlan_tag->tag = pPacket->VlanTag;
5292 vlan_tag->signature = 0;
5294 pUmDevice->nice_rx(skb, pUmDevice->nice_ctx);
/* Normal path: hardware-accelerated VLAN receive when a vlan group is
 * registered, otherwise plain (NAPI or legacy) receive. */
5300 if (pUmDevice->vlgrp &&
5301 (pPacket->Flags & RCV_BD_FLAG_VLAN_TAG)) {
5303 #ifdef BCM_NAPI_RXPOLL
5304 vlan_hwaccel_receive_skb(skb, pUmDevice->vlgrp,
5307 vlan_hwaccel_rx(skb, pUmDevice->vlgrp,
5314 #ifdef BCM_NAPI_RXPOLL
5315 netif_receive_skb(skb);
5321 pUmDevice->dev->last_rx = jiffies;
/* The skb was consumed by the stack; detach it and queue the descriptor
 * for refill (elided branch), or allocate a replacement skb now. */
5325 pUmPacket->skbuff = 0;
5326 QQ_PushTail(&pUmDevice->rx_out_of_buf_q.Container, pPacket);
/* +2 so the IP header can be 16-byte aligned after skb_reserve below. */
5328 skb = dev_alloc_skb(pPacket->u.Rx.RxBufferSize + 2);
/* Allocation failed: park the empty descriptor on rx_out_of_buf_q so
 * replenish_rx_buffers() can retry later. */
5330 pUmPacket->skbuff = 0;
5331 QQ_PushTail(&pUmDevice->rx_out_of_buf_q.Container, pPacket);
5334 pUmPacket->skbuff = skb;
5335 skb->dev = pUmDevice->dev;
5336 skb_reserve(skb, pUmDevice->rx_buf_align);
5337 QQ_PushTail(&pDevice->RxPacketFreeQ.Container, pPacket);
5341 return LM_STATUS_SUCCESS;
/*
 * MM_CoalesceTxBuffer - linearize a fragmented tx skb into a single
 * contiguous buffer.  The LM layer calls this when a packet has more
 * fragments than the send ring can describe.  All existing DMA mappings
 * are released first, then the skb is duplicated with skb_copy() so the
 * packet can be sent as one fragment.
 * Returns LM_STATUS_SUCCESS on success; on allocation failure the skb
 * reference is dropped from the packet and LM_STATUS_FAILURE is returned.
 * NOTE(review): some interior lines (frees, unmap directions) are elided
 * from this view.
 */
5345 MM_CoalesceTxBuffer(PLM_DEVICE_BLOCK pDevice, PLM_PACKET pPacket)
5347 PUM_PACKET pUmPacket = (PUM_PACKET) pPacket;
5348 struct sk_buff *skb = pUmPacket->skbuff;
5349 struct sk_buff *nskb;
5350 #if ! defined(NO_PCI_UNMAP)
5351 PUM_DEVICE_BLOCK pUmDevice = (PUM_DEVICE_BLOCK) pDevice;
/* Unmap the linear part (map[0]) ... */
5353 pci_unmap_single(pUmDevice->pdev,
5354 pci_unmap_addr(pUmPacket, map[0]),
5355 pci_unmap_len(pUmPacket, map_len[0]),
/* ... and every page fragment (map[1..nr_frags]). */
5361 for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) {
5362 pci_unmap_page(pUmDevice->pdev,
5363 pci_unmap_addr(pUmPacket, map[i + 1]),
5364 pci_unmap_len(pUmPacket, map_len[i + 1]),
/* skb_copy produces a fully linear copy; the packet now has exactly
 * one DMA fragment. */
5370 if ((nskb = skb_copy(skb, GFP_ATOMIC))) {
5371 pUmPacket->lm_packet.u.Tx.FragCount = 1;
5373 pUmPacket->skbuff = nskb;
5374 return LM_STATUS_SUCCESS;
5377 pUmPacket->skbuff = 0;
5378 return LM_STATUS_FAILURE;
5381 /* Returns 1 if not all buffers are allocated */
/*
 * replenish_rx_buffers - move descriptors from rx_out_of_buf_q back onto
 * the LM RxPacketFreeQ, allocating a fresh skb for each one that lost its
 * buffer.  'max' caps how many new skbs are allocated in one call (the
 * visible alloc_cnt/max check); 0 appears to mean unlimited — confirm
 * against callers.  Finishes by re-queueing rx packets to the hardware
 * when anything was replenished.
 */
5383 replenish_rx_buffers(PUM_DEVICE_BLOCK pUmDevice, int max)
5386 PUM_PACKET pUmPacket;
5387 PLM_DEVICE_BLOCK pDevice = (PLM_DEVICE_BLOCK) pUmDevice;
5388 struct sk_buff *skb;
5393 while ((pUmPacket = (PUM_PACKET)
5394 QQ_PopHead(&pUmDevice->rx_out_of_buf_q.Container)) != 0) {
5395 pPacket = (PLM_PACKET) pUmPacket;
5396 if (pUmPacket->skbuff) {
5397 /* reuse an old skb */
5398 QQ_PushTail(&pDevice->RxPacketFreeQ.Container, pPacket);
/* No skb attached: allocate one (+2 for IP-header alignment).  On
 * failure, push the packet back to the head so order is preserved
 * and a later call retries it first. */
5402 if ((skb = dev_alloc_skb(pPacket->u.Rx.RxBufferSize + 2)) == 0) {
5403 QQ_PushHead(&pUmDevice->rx_out_of_buf_q.Container,
5408 pUmPacket->skbuff = skb;
5409 skb->dev = pUmDevice->dev;
5410 skb_reserve(skb, pUmDevice->rx_buf_align);
5411 QQ_PushTail(&pDevice->RxPacketFreeQ.Container, pPacket);
/* Stop once this call's allocation budget is exhausted. */
5415 if (alloc_cnt >= max)
/* Hand the refilled descriptors back to the hardware rings. */
5419 if (queue_rx || pDevice->QueueAgain) {
5420 LM_QueueRxPackets(pDevice);
/*
 * MM_IndicateTxPackets - OS callback invoked by the LM layer after the NIC
 * has transmitted packets.  Drains TxPacketXmittedQ: unmaps each packet's
 * DMA buffers, frees its skb (in irq context), and returns the descriptor
 * to TxPacketFreeQ.  Restarts the netdev queue once at least half of the
 * tx descriptors are free again.
 */
5426 MM_IndicateTxPackets(PLM_DEVICE_BLOCK pDevice)
5428 PUM_DEVICE_BLOCK pUmDevice = (PUM_DEVICE_BLOCK) pDevice;
5430 PUM_PACKET pUmPacket;
5431 struct sk_buff *skb;
5432 #if ! defined(NO_PCI_UNMAP) && MAX_SKB_FRAGS
5437 pPacket = (PLM_PACKET)
5438 QQ_PopHead(&pDevice->TxPacketXmittedQ.Container);
5441 pUmPacket = (PUM_PACKET) pPacket;
5442 skb = pUmPacket->skbuff;
5443 #if ! defined(NO_PCI_UNMAP)
/* Unmap the linear part first, then every page fragment. */
5444 pci_unmap_single(pUmDevice->pdev,
5445 pci_unmap_addr(pUmPacket, map[0]),
5446 pci_unmap_len(pUmPacket, map_len[0]),
5449 for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) {
5450 pci_unmap_page(pUmDevice->pdev,
5451 pci_unmap_addr(pUmPacket, map[i + 1]),
5452 pci_unmap_len(pUmPacket, map_len[i + 1]),
/* Safe to call from hard-irq context, unlike dev_kfree_skb(). */
5457 dev_kfree_skb_irq(skb);
5458 pUmPacket->skbuff = 0;
5459 QQ_PushTail(&pDevice->TxPacketFreeQ.Container, pPacket);
/* Wake the stack only after half the ring has drained, to avoid
 * ping-ponging between stopped and started states. */
5461 if (pUmDevice->tx_full) {
5462 if (QQ_GetEntryCnt(&pDevice->TxPacketFreeQ.Container) >=
5463 (pDevice->TxPacketDescCnt >> 1)) {
5465 pUmDevice->tx_full = 0;
5466 netif_wake_queue(pUmDevice->dev);
5469 return LM_STATUS_SUCCESS;
/*
 * MM_IndicateStatus - OS callback for link-state changes.  Updates the
 * netdev carrier state and prints a human-readable link message including
 * speed, duplex, and flow-control settings.  No-op while the interface is
 * not opened; carrier is left untouched while suspended.
 */
5473 MM_IndicateStatus(PLM_DEVICE_BLOCK pDevice, LM_STATUS Status)
5475 PUM_DEVICE_BLOCK pUmDevice = (PUM_DEVICE_BLOCK) pDevice;
5476 struct net_device *dev = pUmDevice->dev;
5477 LM_FLOW_CONTROL flow_control;
5480 if (!pUmDevice->opened)
5481 return LM_STATUS_SUCCESS;
5483 if (!pUmDevice->suspended) {
5484 if (Status == LM_STATUS_LINK_DOWN) {
5485 netif_carrier_off(dev);
5487 else if (Status == LM_STATUS_LINK_ACTIVE) {
5488 netif_carrier_on(dev);
/* Two message variants: "DOWN"/"UP" for a delayed (deferred) link
 * indication, "Down"/"Up" for an immediate one. */
5492 if (pUmDevice->delayed_link_ind > 0) {
5493 pUmDevice->delayed_link_ind = 0;
5494 if (Status == LM_STATUS_LINK_DOWN) {
5495 printk(KERN_ERR "%s: %s NIC Link is DOWN\n", bcm5700_driver, dev->name);
5497 else if (Status == LM_STATUS_LINK_ACTIVE) {
5498 printk(KERN_INFO "%s: %s NIC Link is UP, ", bcm5700_driver, dev->name);
5502 if (Status == LM_STATUS_LINK_DOWN) {
5503 printk(KERN_ERR "%s: %s NIC Link is Down\n", bcm5700_driver, dev->name);
5505 else if (Status == LM_STATUS_LINK_ACTIVE) {
5506 printk(KERN_INFO "%s: %s NIC Link is Up, ", bcm5700_driver, dev->name);
/* On link-up, complete the message with speed/duplex/flow control
 * (speed assignments for each LineSpeed case are elided here). */
5510 if (Status == LM_STATUS_LINK_ACTIVE) {
5511 if (pDevice->LineSpeed == LM_LINE_SPEED_1000MBPS)
5513 else if (pDevice->LineSpeed == LM_LINE_SPEED_100MBPS)
5515 else if (pDevice->LineSpeed == LM_LINE_SPEED_10MBPS)
5518 printk("%d Mbps ", speed);
5520 if (pDevice->DuplexMode == LM_DUPLEX_MODE_FULL)
5521 printk("full duplex");
5523 printk("half duplex");
5525 flow_control = pDevice->FlowControl &
5526 (LM_FLOW_CONTROL_RECEIVE_PAUSE |
5527 LM_FLOW_CONTROL_TRANSMIT_PAUSE);
5529 if (flow_control & LM_FLOW_CONTROL_RECEIVE_PAUSE) {
5530 printk(", receive ");
5531 if (flow_control & LM_FLOW_CONTROL_TRANSMIT_PAUSE)
5532 printk("& transmit ");
5535 printk(", transmit ");
5537 printk("flow control ON");
5541 return LM_STATUS_SUCCESS;
/*
 * MM_UnmapRxDma - release the streaming DMA mapping of a receive buffer.
 * Compiled away entirely when NO_PCI_UNMAP is defined (platforms where
 * unmapping is a no-op).  Does nothing if the packet has no skb attached.
 */
5545 MM_UnmapRxDma(LM_DEVICE_BLOCK *pDevice, LM_PACKET *pPacket)
5547 #if ! defined(NO_PCI_UNMAP)
5548 UM_DEVICE_BLOCK *pUmDevice = (UM_DEVICE_BLOCK *) pDevice;
5549 UM_PACKET *pUmPacket = (UM_PACKET *) pPacket;
/* Nothing mapped if there is no buffer. */
5551 if (!pUmPacket->skbuff)
5554 pci_unmap_single(pUmDevice->pdev,
5555 pci_unmap_addr(pUmPacket, map[0]),
5556 pPacket->u.Rx.RxBufferSize,
5557 PCI_DMA_FROMDEVICE);
/*
 * MM_FreeRxBuffer - release the skb attached to a receive packet and clear
 * the reference.  The caller must have already unmapped the DMA address
 * (see MM_UnmapRxDma); the actual skb-free call is on an elided line —
 * presumably dev_kfree_skb() or similar, verify in the full source.
 */
5562 MM_FreeRxBuffer(PLM_DEVICE_BLOCK pDevice, PLM_PACKET pPacket)
5564 PUM_PACKET pUmPacket;
5565 struct sk_buff *skb;
5568 return LM_STATUS_SUCCESS;
5569 pUmPacket = (PUM_PACKET) pPacket;
5570 if ((skb = pUmPacket->skbuff)) {
5571 /* DMA address already unmapped */
5574 pUmPacket->skbuff = 0;
5575 return LM_STATUS_SUCCESS;
/*
 * MM_Sleep - interruptible sleep for approximately 'msec' milliseconds.
 * Returns LM_STATUS_FAILURE when the sleep was cut short (non-zero
 * remaining timeout) or a signal is pending, LM_STATUS_SUCCESS otherwise.
 * NOTE(review): HZ * msec / 1000 truncates sub-jiffy delays to 0 and can
 * overflow 32 bits for very large msec — msecs_to_jiffies() would be the
 * modern replacement; left as-is since surrounding lines are elided.
 */
5579 MM_Sleep(LM_DEVICE_BLOCK *pDevice, LM_UINT32 msec)
5581 current->state = TASK_INTERRUPTIBLE;
5582 if (schedule_timeout(HZ * msec / 1000) != 0) {
5583 return LM_STATUS_FAILURE;
5585 if (signal_pending(current))
5586 return LM_STATUS_FAILURE;
5588 return LM_STATUS_SUCCESS;
/*
 * bcm5700_shutdown - quiesce the device: mask interrupts, drop carrier,
 * kill the deferred-work tasklet, wait for any in-progress poll to finish,
 * mark the LM layer uninitialized, and reclaim all outstanding rx buffers.
 * (The LM halt call between poll-wait and InitDone=0 is on elided lines.)
 */
5592 bcm5700_shutdown(UM_DEVICE_BLOCK *pUmDevice)
5594 LM_DEVICE_BLOCK *pDevice = (LM_DEVICE_BLOCK *) pUmDevice;
5596 bcm5700_intr_off(pUmDevice);
5597 netif_carrier_off(pUmDevice->dev);
5599 tasklet_kill(&pUmDevice->tasklet);
5601 bcm5700_poll_wait(pUmDevice);
5605 pDevice->InitDone = 0;
5606 bcm5700_free_remaining_rx_bufs(pUmDevice);
/*
 * bcm5700_free_remaining_rx_bufs - during teardown, drain rx_out_of_buf_q:
 * unmap and free each packet's buffer, then return the bare descriptor to
 * the LM RxPacketFreeQ so accounting stays consistent.
 */
5610 bcm5700_free_remaining_rx_bufs(UM_DEVICE_BLOCK *pUmDevice)
5612 LM_DEVICE_BLOCK *pDevice = &pUmDevice->lm_dev;
5613 UM_PACKET *pUmPacket;
/* Snapshot the count first; the loop pops exactly that many entries. */
5616 cnt = QQ_GetEntryCnt(&pUmDevice->rx_out_of_buf_q.Container);
5617 for (i = 0; i < cnt; i++) {
5619 QQ_PopHead(&pUmDevice->rx_out_of_buf_q.Container))
5622 MM_UnmapRxDma(pDevice, (LM_PACKET *) pUmPacket);
5623 MM_FreeRxBuffer(pDevice, &pUmPacket->lm_packet);
5624 QQ_PushTail(&pDevice->RxPacketFreeQ.Container,
/*
 * bcm5700_validate_param_range - clamp a module parameter to [min, max],
 * resetting it to 'deflt' (assignment on an elided line) and logging a
 * warning when out of range.  The casts to unsigned int make any negative
 * *param compare as a huge value, so negatives are rejected by the
 * "> max" test even though min is typically 0.
 */
5631 bcm5700_validate_param_range(UM_DEVICE_BLOCK *pUmDevice, int *param,
5632 char *param_name, int min, int max, int deflt)
5634 if (((unsigned int) *param < (unsigned int) min) ||
5635 ((unsigned int) *param > (unsigned int) max)) {
5637 printk(KERN_WARNING "%s-%d: Invalid %s parameter (%u), using %u\n", bcm5700_driver, pUmDevice->index, param_name, (unsigned int) *param, (unsigned int) deflt);
/*
 * bcm5700_find_peer - locate the other port of a dual-port BCM5704.
 * Walks the driver's global device list (root_tigon3_dev, linked via
 * next_module) looking for a different netdev that shares this device's
 * PCI bus number and slot — i.e. the second function of the same chip.
 * For non-5704 chips the list walk is skipped (result handling is on
 * elided lines; presumably returns NULL when no peer exists).
 */
5643 bcm5700_find_peer(struct net_device *dev)
5645 struct net_device *tmp_dev;
5646 UM_DEVICE_BLOCK *pUmDevice, *pUmTmp;
5647 LM_DEVICE_BLOCK *pDevice;
5650 pUmDevice = (UM_DEVICE_BLOCK *) dev->priv;
5651 pDevice = &pUmDevice->lm_dev;
/* Only the 5704 is a dual-port device with a peer function. */
5652 if (T3_ASIC_REV(pDevice->ChipRevId) == T3_ASIC_REV_5704) {
5653 tmp_dev = root_tigon3_dev;
5655 pUmTmp = (PUM_DEVICE_BLOCK) tmp_dev->priv;
/* Same bus + same slot but a different netdev => the peer port. */
5656 if ((tmp_dev != dev) &&
5657 (pUmDevice->pdev->bus->number ==
5658 pUmTmp->pdev->bus->number) &&
5659 PCI_SLOT(pUmDevice->pdev->devfn) ==
5660 PCI_SLOT(pUmTmp->pdev->devfn)) {
5664 tmp_dev = pUmTmp->next_module;
/*
 * MM_FindPeerDev - LM-layer wrapper around bcm5700_find_peer(): returns
 * the peer port's LM_DEVICE_BLOCK (stored in the peer netdev's priv),
 * for the NULL-peer case see the elided line between 5677 and 5680.
 */
5671 MM_FindPeerDev(LM_DEVICE_BLOCK *pDevice)
5673 UM_DEVICE_BLOCK *pUmDevice = (UM_DEVICE_BLOCK *) pDevice;
5674 struct net_device *dev = pUmDevice->dev;
5675 struct net_device *peer_dev;
5677 peer_dev = bcm5700_find_peer(dev);
5680 return ((LM_DEVICE_BLOCK *) peer_dev->priv);
/*
 * MM_FindCapability - LM-layer wrapper around pci_find_capability():
 * returns the config-space offset of the given PCI capability for this
 * device, or 0 if the capability is not present.
 */
5683 int MM_FindCapability(LM_DEVICE_BLOCK *pDevice, int capability)
5685 UM_DEVICE_BLOCK *pUmDevice = (UM_DEVICE_BLOCK *) pDevice;
5686 return (pci_find_capability(pUmDevice->pdev, capability));
5689 #if defined(HAVE_POLL_CONTROLLER)||defined(CONFIG_NET_POLL_CONTROLLER)
5691 poll_bcm5700(struct net_device *dev)
5693 UM_DEVICE_BLOCK *pUmDevice = dev->priv;
5695 #if defined(RED_HAT_LINUX_KERNEL) && (LINUX_VERSION_CODE < 0x020605)
5697 bcm5700_interrupt(pUmDevice->pdev->irq, dev, NULL);
5698 #ifdef BCM_NAPI_RXPOLL
5699 if (dev->poll_list.prev) {
5702 bcm5700_poll(dev, &budget);
5709 disable_irq(pUmDevice->pdev->irq);
5710 bcm5700_interrupt(pUmDevice->pdev->irq, dev, NULL);
5711 enable_irq(pUmDevice->pdev->irq);