1 /******************************************************************************/
3 /* Broadcom BCM5700 Linux Network Driver, Copyright (c) 2000 - 2006 Broadcom */
5 /* All rights reserved. */
7 /* This program is free software; you can redistribute it and/or modify */
8 /* it under the terms of the GNU General Public License as published by */
9 /* the Free Software Foundation, located in the file LICENSE. */
11 /******************************************************************************/
/* Driver identity strings (module name, version, build date). */
14 char bcm5700_driver[] = "bcm5700";
15 char bcm5700_version[] = "8.3.17c";
16 char bcm5700_date[] = "(03/10/06)";
21 /* A few user-configurable values. */
24 /* Used to pass the full-duplex flag, etc. */
/* Per-adapter link tunables, one slot per possible unit (MAX_UNITS = 16
 * judging by the initializers).  line_speed defaults to 0, which appears
 * to mean "not forced" (autonegotiate) -- TODO confirm against
 * bcm5700_validate_param_range() usage elsewhere in this file. */
25 static int line_speed[MAX_UNITS] = {0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0};
26 static int auto_speed[MAX_UNITS] = {1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1};   /* default: enabled (1) */
27 static int full_duplex[MAX_UNITS] = {1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1};  /* default: enabled (1) */
28 static int rx_flow_control[MAX_UNITS] = {1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1};  /* default: enabled (1) */
29 static int tx_flow_control[MAX_UNITS] = {1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1};  /* default: enabled (1) */
30 static int auto_flow_control[MAX_UNITS] = {1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1};  /* default: enabled (1) */
31 #if T3_JUMBO_RCV_RCB_ENTRY_COUNT
32 static int mtu[MAX_UNITS] = {1500,1500,1500,1500,1500,1500,1500,1500,1500,1500,1500,1500,1500,1500,1500,1500}; /* Jumbo MTU for interfaces. */
34 static int tx_checksum[MAX_UNITS] = {1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1};
35 static int rx_checksum[MAX_UNITS] = {1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1};
36 static int scatter_gather[MAX_UNITS] = {1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1};
38 #define TX_DESC_CNT DEFAULT_TX_PACKET_DESC_COUNT
39 static unsigned int tx_pkt_desc_cnt[MAX_UNITS] =
40 {TX_DESC_CNT,TX_DESC_CNT,TX_DESC_CNT,TX_DESC_CNT,TX_DESC_CNT,
41 TX_DESC_CNT,TX_DESC_CNT,TX_DESC_CNT,TX_DESC_CNT,TX_DESC_CNT,
42 TX_DESC_CNT,TX_DESC_CNT,TX_DESC_CNT,TX_DESC_CNT,TX_DESC_CNT,
45 #define RX_DESC_CNT DEFAULT_STD_RCV_DESC_COUNT
46 static unsigned int rx_std_desc_cnt[MAX_UNITS] =
47 {RX_DESC_CNT,RX_DESC_CNT,RX_DESC_CNT,RX_DESC_CNT,RX_DESC_CNT,
48 RX_DESC_CNT,RX_DESC_CNT,RX_DESC_CNT,RX_DESC_CNT,RX_DESC_CNT,
49 RX_DESC_CNT,RX_DESC_CNT,RX_DESC_CNT,RX_DESC_CNT,RX_DESC_CNT,
52 #if T3_JUMBO_RCV_RCB_ENTRY_COUNT
53 #define JBO_DESC_CNT DEFAULT_JUMBO_RCV_DESC_COUNT
54 static unsigned int rx_jumbo_desc_cnt[MAX_UNITS] =
55 {JBO_DESC_CNT,JBO_DESC_CNT,JBO_DESC_CNT,JBO_DESC_CNT,JBO_DESC_CNT,
56 JBO_DESC_CNT,JBO_DESC_CNT,JBO_DESC_CNT,JBO_DESC_CNT,JBO_DESC_CNT,
57 JBO_DESC_CNT,JBO_DESC_CNT,JBO_DESC_CNT,JBO_DESC_CNT,JBO_DESC_CNT,
62 #ifdef BCM_NAPI_RXPOLL
63 static unsigned int adaptive_coalesce[MAX_UNITS] =
64 {0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0};
66 static unsigned int adaptive_coalesce[MAX_UNITS] =
67 {1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1};
70 #define RX_COAL_TK DEFAULT_RX_COALESCING_TICKS
71 static unsigned int rx_coalesce_ticks[MAX_UNITS] =
72 {RX_COAL_TK,RX_COAL_TK,RX_COAL_TK,RX_COAL_TK,RX_COAL_TK,
73 RX_COAL_TK, RX_COAL_TK,RX_COAL_TK,RX_COAL_TK,RX_COAL_TK,
74 RX_COAL_TK,RX_COAL_TK, RX_COAL_TK,RX_COAL_TK,RX_COAL_TK,
77 #define RX_COAL_FM DEFAULT_RX_MAX_COALESCED_FRAMES
78 static unsigned int rx_max_coalesce_frames[MAX_UNITS] =
79 {RX_COAL_FM,RX_COAL_FM,RX_COAL_FM,RX_COAL_FM,RX_COAL_FM,
80 RX_COAL_FM,RX_COAL_FM,RX_COAL_FM,RX_COAL_FM,RX_COAL_FM,
81 RX_COAL_FM,RX_COAL_FM,RX_COAL_FM,RX_COAL_FM,RX_COAL_FM,
84 #define TX_COAL_TK DEFAULT_TX_COALESCING_TICKS
85 static unsigned int tx_coalesce_ticks[MAX_UNITS] =
86 {TX_COAL_TK,TX_COAL_TK,TX_COAL_TK,TX_COAL_TK,TX_COAL_TK,
87 TX_COAL_TK, TX_COAL_TK,TX_COAL_TK,TX_COAL_TK,TX_COAL_TK,
88 TX_COAL_TK,TX_COAL_TK, TX_COAL_TK,TX_COAL_TK,TX_COAL_TK,
91 #define TX_COAL_FM DEFAULT_TX_MAX_COALESCED_FRAMES
92 static unsigned int tx_max_coalesce_frames[MAX_UNITS] =
93 {TX_COAL_FM,TX_COAL_FM,TX_COAL_FM,TX_COAL_FM,TX_COAL_FM,
94 TX_COAL_FM,TX_COAL_FM,TX_COAL_FM,TX_COAL_FM,TX_COAL_FM,
95 TX_COAL_FM,TX_COAL_FM,TX_COAL_FM,TX_COAL_FM,TX_COAL_FM,
98 #define ST_COAL_TK DEFAULT_STATS_COALESCING_TICKS
99 static unsigned int stats_coalesce_ticks[MAX_UNITS] =
100 {ST_COAL_TK,ST_COAL_TK,ST_COAL_TK,ST_COAL_TK,ST_COAL_TK,
101 ST_COAL_TK,ST_COAL_TK,ST_COAL_TK,ST_COAL_TK,ST_COAL_TK,
102 ST_COAL_TK,ST_COAL_TK,ST_COAL_TK,ST_COAL_TK,ST_COAL_TK,
107 static int enable_wol[MAX_UNITS] = {0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0};
110 static int enable_tso[MAX_UNITS] = {1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1};
112 #ifdef BCM_NIC_SEND_BD
113 static int nic_tx_bd[MAX_UNITS] = {0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0};
116 static int vlan_tag_mode[MAX_UNITS] = {0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0};
118 static int delay_link[MAX_UNITS] = {0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0};
119 static int disable_d3hot[MAX_UNITS] = {0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0};
121 #if defined(CONFIG_PCI_MSI) || defined(CONFIG_PCI_USE_VECTOR)
122 static int disable_msi[MAX_UNITS] = {0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0};
123 static int bcm_msi_chipset_bug = 0;
/* Microseconds per timer tick (jiffy). */
126 #define BCM_TIMER_GRANULARITY (1000000 / HZ)
128 /* Operational parameters that usually are not changed. */
129 /* Time in jiffies before concluding the transmitter is hung. */
130 #define TX_TIMEOUT (2*HZ)
/* NOTE(review): a second, longer TX watchdog value is also defined --
 * confirm which of TX_TIMEOUT / BCM_TX_TIMEOUT the driver installs. */
132 #define BCM_TX_TIMEOUT (5*HZ)
134 #if (LINUX_VERSION_CODE < 0x02030d)
135 #define pci_resource_start(dev, bar) (dev->base_address[bar] & PCI_BASE_ADDRESS_MEM_MASK)
136 #elif (LINUX_VERSION_CODE < 0x02032b)
137 #define pci_resource_start(dev, bar) (dev->resource[bar] & PCI_BASE_ADDRESS_MEM_MASK)
140 #if (LINUX_VERSION_CODE < 0x02032b)
/* Compat: kernels older than 2.3.43 (see the #if above) have a single
 * skb free routine; map the IRQ-context variant onto it. */
141 #define dev_kfree_skb_irq(skb) dev_kfree_skb(skb)
142 #define netif_wake_queue(dev) clear_bit(0, &dev->tbusy); mark_bh(NET_BH)
/* Compat shim for pre-2.3.43 kernels: mark the device transmit-busy.
 * Macro argument parenthesized so expression arguments expand safely. */
143 #define netif_stop_queue(dev) set_bit(0, &(dev)->tbusy)
145 static inline void netif_start_queue(struct net_device *dev)
152 #define netif_queue_stopped(dev) dev->tbusy
153 #define netif_running(dev) dev->start
155 static inline void tasklet_schedule(struct tasklet_struct *tasklet)
157 queue_task(tasklet, &tq_immediate);
158 mark_bh(IMMEDIATE_BH);
161 static inline void tasklet_init(struct tasklet_struct *tasklet,
162 void (*func)(unsigned long),
165 tasklet->next = NULL;
167 tasklet->routine = (void (*)(void *))func;
168 tasklet->data = (void *)data;
171 #define tasklet_kill(tasklet)
175 #if (LINUX_VERSION_CODE < 0x020300)
176 struct pci_device_id {
177 unsigned int vendor, device; /* Vendor and device ID or PCI_ANY_ID */
178 unsigned int subvendor, subdevice; /* Subsystem ID's or PCI_ANY_ID */
179 unsigned int class, class_mask; /* (class,subclass,prog-if) triplet */
180 unsigned long driver_data; /* Data private to the driver */
185 #define pci_set_drvdata(pdev, dev)
186 #define pci_get_drvdata(pdev) 0
188 #define pci_enable_device(pdev) 0
190 #define __devinit __init
191 #define __devinitdata __initdata
194 #define SET_MODULE_OWNER(dev)
195 #define MODULE_DEVICE_TABLE(pci, pci_tbl)
199 #if (LINUX_VERSION_CODE < 0x020411)
201 #define __devexit_p(x) x
205 #ifndef MODULE_LICENSE
206 #define MODULE_LICENSE(license)
210 typedef void irqreturn_t;
211 #define IRQ_RETVAL(x)
214 #if (LINUX_VERSION_CODE < 0x02032a)
215 static inline void *pci_alloc_consistent(struct pci_dev *pdev, size_t size,
216 dma_addr_t *dma_handle)
220 /* Maximum in slab.c */
224 virt_ptr = kmalloc(size, GFP_KERNEL);
225 *dma_handle = virt_to_bus(virt_ptr);
228 #define pci_free_consistent(dev, size, ptr, dma_ptr) kfree(ptr)
230 #endif /*#if (LINUX_VERSION_CODE < 0x02032a) */
233 #if (LINUX_VERSION_CODE < 0x02040d)
235 #if (LINUX_VERSION_CODE >= 0x020409) && defined(RED_HAT_LINUX_KERNEL)
237 #define BCM_32BIT_DMA_MASK ((u64) 0x00000000ffffffffULL)
238 #define BCM_64BIT_DMA_MASK ((u64) 0xffffffffffffffffULL)
241 /* pci_set_dma_mask is using dma_addr_t */
243 #define BCM_32BIT_DMA_MASK ((dma_addr_t) 0xffffffff)
244 #define BCM_64BIT_DMA_MASK ((dma_addr_t) 0xffffffff)
248 #else /* (LINUX_VERSION_CODE < 0x02040d) */
250 #define BCM_32BIT_DMA_MASK ((u64) 0x00000000ffffffffULL)
251 #define BCM_64BIT_DMA_MASK ((u64) 0xffffffffffffffffULL)
254 #if (LINUX_VERSION_CODE < 0x020329)
255 #define pci_set_dma_mask(pdev, mask) (0)
257 #if (LINUX_VERSION_CODE < 0x020403)
259 pci_set_dma_mask(struct pci_dev *dev, dma_addr_t mask)
261 if(! pci_dma_supported(dev, mask))
264 dev->dma_mask = mask;
271 #if (LINUX_VERSION_CODE < 0x020547)
272 #define pci_set_consistent_dma_mask(pdev, mask) (0)
275 #if (LINUX_VERSION_CODE < 0x020402)
276 #define pci_request_regions(pdev, name) (0)
277 #define pci_release_regions(pdev)
280 #if ! defined(spin_is_locked)
281 #define spin_is_locked(lock) (test_bit(0,(lock)))
284 #define BCM5700_LOCK(pUmDevice, flags) \
285 if ((pUmDevice)->do_global_lock) { \
286 spin_lock_irqsave(&(pUmDevice)->global_lock, flags); \
289 #define BCM5700_UNLOCK(pUmDevice, flags) \
290 if ((pUmDevice)->do_global_lock) { \
291 spin_unlock_irqrestore(&(pUmDevice)->global_lock, flags);\
294 /* Fix for RQM 289636 */
296 bcm5700_netif_stop_queue(struct net_device *dev)
298 dev->trans_start = jiffies; /* prevent tx timeout */
299 netif_stop_queue(dev);
303 bcm5700_intr_lock(PUM_DEVICE_BLOCK pUmDevice)
305 if (pUmDevice->do_global_lock) {
306 spin_lock(&pUmDevice->global_lock);
311 bcm5700_intr_unlock(PUM_DEVICE_BLOCK pUmDevice)
313 if (pUmDevice->do_global_lock) {
314 spin_unlock(&pUmDevice->global_lock);
319 bcm5700_intr_off(PUM_DEVICE_BLOCK pUmDevice)
321 atomic_inc(&pUmDevice->intr_sem);
322 LM_DisableInterrupt(&pUmDevice->lm_dev);
323 #if (LINUX_VERSION_CODE >= 0x2051c)
324 synchronize_irq(pUmDevice->dev->irq);
328 LM_DisableInterrupt(&pUmDevice->lm_dev);
332 bcm5700_intr_on(PUM_DEVICE_BLOCK pUmDevice)
334 if (atomic_dec_and_test(&pUmDevice->intr_sem)) {
335 LM_EnableInterrupt(&pUmDevice->lm_dev);
340 * Broadcom NIC Extension support
351 #endif /* NICE_SUPPORT */
353 int MM_Packet_Desc_Size = sizeof(UM_PACKET);
356 MODULE_AUTHOR("Michael Chan <mchan at broadcom dot com> and Gary Zambrano <zambrano at broadcom dot com>");
357 MODULE_DESCRIPTION("BCM5700 Driver");
358 MODULE_LICENSE("GPL");
360 #if (LINUX_VERSION_CODE < 0x020605)
362 MODULE_PARM(debug, "i");
363 MODULE_PARM(line_speed, "1-" __MODULE_STRING(MAX_UNITS) "i");
364 MODULE_PARM(auto_speed, "1-" __MODULE_STRING(MAX_UNITS) "i");
365 MODULE_PARM(full_duplex, "1-" __MODULE_STRING(MAX_UNITS) "i");
366 MODULE_PARM(rx_flow_control, "1-" __MODULE_STRING(MAX_UNITS) "i");
367 MODULE_PARM(tx_flow_control, "1-" __MODULE_STRING(MAX_UNITS) "i");
368 MODULE_PARM(auto_flow_control, "1-" __MODULE_STRING(MAX_UNITS) "i");
369 #if T3_JUMBO_RCV_RCB_ENTRY_COUNT
370 MODULE_PARM(mtu, "1-" __MODULE_STRING(MAX_UNITS) "i");
372 MODULE_PARM(tx_checksum, "1-" __MODULE_STRING(MAX_UNITS) "i");
373 MODULE_PARM(rx_checksum, "1-" __MODULE_STRING(MAX_UNITS) "i");
374 MODULE_PARM(scatter_gather, "1-" __MODULE_STRING(MAX_UNITS) "i");
375 MODULE_PARM(tx_pkt_desc_cnt, "1-" __MODULE_STRING(MAX_UNITS) "i");
376 MODULE_PARM(rx_std_desc_cnt, "1-" __MODULE_STRING(MAX_UNITS) "i");
377 #if T3_JUMBO_RCV_RCB_ENTRY_COUNT
378 MODULE_PARM(rx_jumbo_desc_cnt, "1-" __MODULE_STRING(MAX_UNITS) "i");
381 MODULE_PARM(adaptive_coalesce, "1-" __MODULE_STRING(MAX_UNITS) "i");
382 MODULE_PARM(rx_coalesce_ticks, "1-" __MODULE_STRING(MAX_UNITS) "i");
383 MODULE_PARM(rx_max_coalesce_frames, "1-" __MODULE_STRING(MAX_UNITS) "i");
384 MODULE_PARM(tx_coalesce_ticks, "1-" __MODULE_STRING(MAX_UNITS) "i");
385 MODULE_PARM(tx_max_coalesce_frames, "1-" __MODULE_STRING(MAX_UNITS) "i");
386 MODULE_PARM(stats_coalesce_ticks, "1-" __MODULE_STRING(MAX_UNITS) "i");
389 MODULE_PARM(enable_wol, "1-" __MODULE_STRING(MAX_UNITS) "i");
392 MODULE_PARM(enable_tso, "1-" __MODULE_STRING(MAX_UNITS) "i");
394 #ifdef BCM_NIC_SEND_BD
395 MODULE_PARM(nic_tx_bd, "1-" __MODULE_STRING(MAX_UNITS) "i");
398 MODULE_PARM(vlan_tag_mode, "1-" __MODULE_STRING(MAX_UNITS) "i");
400 MODULE_PARM(delay_link, "1-" __MODULE_STRING(MAX_UNITS) "i");
401 MODULE_PARM(disable_d3hot, "1-" __MODULE_STRING(MAX_UNITS) "i");
403 #if defined(CONFIG_PCI_MSI) || defined(CONFIG_PCI_USE_VECTOR)
404 MODULE_PARM(disable_msi, "1-" __MODULE_STRING(MAX_UNITS) "i");
409 #if (LINUX_VERSION_CODE >= 0x020605) && (LINUX_VERSION_CODE < 0x02060a)
417 #if (LINUX_VERSION_CODE >= 0x2060a)
423 module_param_array(line_speed, int, numvar, 0);
424 module_param_array(auto_speed, int, numvar, 0);
425 module_param_array(full_duplex, int, numvar, 0);
426 module_param_array(rx_flow_control, int, numvar, 0);
427 module_param_array(tx_flow_control, int, numvar, 0);
428 module_param_array(auto_flow_control, int, numvar, 0);
429 #if T3_JUMBO_RCV_RCB_ENTRY_COUNT
430 module_param_array(mtu, int, numvar, 0);
432 module_param_array(tx_checksum, int, numvar, 0);
433 module_param_array(rx_checksum, int, numvar, 0);
434 module_param_array(scatter_gather, int, numvar, 0);
435 module_param_array(tx_pkt_desc_cnt, int, numvar, 0);
436 module_param_array(rx_std_desc_cnt, int, numvar, 0);
437 #if T3_JUMBO_RCV_RCB_ENTRY_COUNT
438 module_param_array(rx_jumbo_desc_cnt, int, numvar, 0);
441 module_param_array(adaptive_coalesce, int, numvar, 0);
442 module_param_array(rx_coalesce_ticks, int, numvar, 0);
443 module_param_array(rx_max_coalesce_frames, int, numvar, 0);
444 module_param_array(tx_coalesce_ticks, int, numvar, 0);
445 module_param_array(tx_max_coalesce_frames, int, numvar, 0);
446 module_param_array(stats_coalesce_ticks, int, numvar, 0);
449 module_param_array(enable_wol, int, numvar, 0);
452 module_param_array(enable_tso, int, numvar, 0);
454 #ifdef BCM_NIC_SEND_BD
455 module_param_array(nic_tx_bd, int, numvar, 0);
458 module_param_array(vlan_tag_mode, int, numvar, 0);
460 module_param_array(delay_link, int, numvar, 0);
461 module_param_array(disable_d3hot, int, numvar, 0);
463 #if defined(CONFIG_PCI_MSI) || defined(CONFIG_PCI_USE_VECTOR)
464 module_param_array(disable_msi, int, numvar, 0);
/* Convert a relative delay in jiffies into an absolute jiffies deadline. */
473 #define RUN_AT(x) (jiffies + (x))
/* Kernel release string this module was built against. */
475 char kernel_version[] = UTS_RELEASE;
/* NOTE(review): flag consumed elsewhere; purpose not visible in this chunk. */
477 #define PCI_SUPPORT_VER2
/* Compat: kernels without the capability API fall back to the old
 * superuser check. */
479 #if ! defined(CAP_NET_ADMIN)
480 #define capable(CAP_XXX) (suser())
483 #define tigon3_debug debug
485 static int tigon3_debug = TIGON3_DEBUG;
487 static int tigon3_debug = 0;
491 int bcm5700_open(struct net_device *dev);
492 STATIC void bcm5700_timer(unsigned long data);
493 STATIC void bcm5700_stats_timer(unsigned long data);
494 STATIC void bcm5700_reset(struct net_device *dev);
495 STATIC int bcm5700_start_xmit(struct sk_buff *skb, struct net_device *dev);
496 STATIC irqreturn_t bcm5700_interrupt(int irq, void *dev_instance, struct pt_regs *regs);
498 STATIC void bcm5700_tasklet(unsigned long data);
500 STATIC int bcm5700_close(struct net_device *dev);
501 STATIC struct net_device_stats *bcm5700_get_stats(struct net_device *dev);
502 STATIC int bcm5700_ioctl(struct net_device *dev, struct ifreq *rq, int cmd);
503 STATIC void bcm5700_do_rx_mode(struct net_device *dev);
504 STATIC void bcm5700_set_rx_mode(struct net_device *dev);
505 STATIC int bcm5700_set_mac_addr(struct net_device *dev, void *p);
506 #if T3_JUMBO_RCV_RCB_ENTRY_COUNT
507 STATIC int bcm5700_change_mtu(struct net_device *dev, int new_mtu);
509 #ifdef BCM_NAPI_RXPOLL
510 STATIC int bcm5700_poll(struct net_device *dev, int *budget);
512 STATIC int replenish_rx_buffers(PUM_DEVICE_BLOCK pUmDevice, int max);
513 STATIC int bcm5700_freemem(struct net_device *dev);
515 STATIC int bcm5700_freemem2(UM_DEVICE_BLOCK *pUmDevice, int index);
518 #ifndef BCM_NAPI_RXPOLL
519 STATIC int bcm5700_adapt_coalesce(PUM_DEVICE_BLOCK pUmDevice);
522 STATIC void bcm5700_set_vlan_mode(UM_DEVICE_BLOCK *pUmDevice);
523 STATIC int bcm5700_init_counters(PUM_DEVICE_BLOCK pUmDevice);
525 STATIC void bcm5700_vlan_rx_register(struct net_device *dev, struct vlan_group *vlgrp);
526 STATIC void bcm5700_vlan_rx_kill_vid(struct net_device *dev, uint16_t vid);
528 void bcm5700_shutdown(UM_DEVICE_BLOCK *pUmDevice);
529 void bcm5700_free_remaining_rx_bufs(UM_DEVICE_BLOCK *pUmDevice);
530 void bcm5700_validate_param_range(UM_DEVICE_BLOCK *pUmDevice, int *param,
531 char *param_name, int min, int max, int deflt);
533 #if defined(HAVE_POLL_CONTROLLER) || defined(CONFIG_NET_POLL_CONTROLLER)
534 STATIC void poll_bcm5700(struct net_device *dev);
537 /* A list of all installed bcm5700 devices. */
538 static struct net_device *root_tigon3_dev = NULL;
540 #if defined(CONFIG_SPARC64) || defined(CONFIG_X86_64) ||defined(CONFIG_PPC64)
543 #if (LINUX_VERSION_CODE < 0x20500)
544 extern int register_ioctl32_conversion(unsigned int cmd,
545 int (*handler)(unsigned int, unsigned int, unsigned long,
547 int unregister_ioctl32_conversion(unsigned int cmd);
549 #include <linux/ioctl32.h>
552 #define BCM_IOCTL32 1
554 atomic_t bcm5700_load_count = ATOMIC_INIT(0);
557 bcm5700_ioctl32(unsigned int fd, unsigned int cmd, unsigned long arg,
561 struct net_device *tmp_dev = root_tigon3_dev;
563 struct nice_req* nrq;
564 struct ifreq_nice32 {
572 if (!capable(CAP_NET_ADMIN))
575 if (mm_copy_from_user(&nrq32, (char *) arg, 32))
578 memcpy(rq.ifr_name, nrq32.ifnr_name, 16);
580 nrq = (struct nice_req*) &rq.ifr_ifru;
581 nrq->cmd = nrq32.cmd;
582 if (nrq->cmd == NICE_CMD_GET_STATS_BLOCK) {
583 nrq->nrq_stats_useraddr = (void *) ((__u64) nrq32.nrq1);
584 nrq->nrq_stats_size = nrq32.nrq2;
587 memcpy(&nrq->nrq_speed, &nrq32.nrq1, 12);
590 if (strcmp(rq.ifr_name, tmp_dev->name) == 0) {
591 ret = bcm5700_ioctl(tmp_dev, &rq, cmd);
593 if (nrq->cmd == NICE_CMD_GET_STATS_BLOCK)
596 memcpy(&nrq32.nrq1, &nrq->nrq_speed, 12);
597 if (mm_copy_to_user((char *) arg, &nrq32, 32))
602 tmp_dev = ((UM_DEVICE_BLOCK *)(tmp_dev->priv))->next_module;
606 #endif /* NICE_SUPPORT */
610 #ifdef MEFHACK_NOTFORPLANETLAB
661 #ifdef MEFHACK_NOTFORPLANETLAB
696 /* indexed by board_t, above */
699 } board_info[] __devinitdata = {
700 #ifdef MEFHACK_NOTFORPLANETLAB
701 { "Broadcom BCM5700 1000Base-T" },
702 { "Broadcom BCM5700 1000Base-SX" },
703 { "Broadcom BCM5700 1000Base-SX" },
704 { "Broadcom BCM5700 1000Base-T" },
705 { "Broadcom BCM5700" },
706 { "Broadcom BCM5701 1000Base-T" },
707 { "Broadcom BCM5701 1000Base-T" },
708 { "Broadcom BCM5701 1000Base-T" },
709 { "Broadcom BCM5701 1000Base-SX" },
710 { "Broadcom BCM5701 1000Base-T" },
711 { "Broadcom BCM5701 1000Base-T" },
712 { "Broadcom BCM5701" },
713 { "Broadcom BCM5702 1000Base-T" },
714 { "Broadcom BCM5703 1000Base-T" },
715 { "Broadcom BCM5703 1000Base-SX" },
716 { "Broadcom B5703 1000Base-SX" },
717 { "3Com 3C996 10/100/1000 Server NIC" },
718 { "3Com 3C996 10/100/1000 Server NIC" },
719 { "3Com 3C996 Gigabit Fiber-SX Server NIC" },
720 { "3Com 3C996 Gigabit Fiber-SX Server NIC" },
721 { "3Com 3C996B Gigabit Server NIC" },
722 { "3Com 3C997 Gigabit Server NIC" },
723 { "3Com 3C997 Gigabit Fiber-SX Server NIC" },
724 { "3Com 3C1000 Gigabit NIC" },
725 { "3Com 3C1000B-T 10/100/1000 PCI" },
726 { "3Com 3C940 Gigabit LOM (21X21)" },
727 { "3Com 3C942 Gigabit LOM (31X31)" },
728 { "3Com 3C998-T Dual Port 10/100/1000 PCI-X Server NIC" },
729 { "3Com 3C998-SX Dual Port 1000-SX PCI-X Server NIC" },
730 { "3Com 3C999-T Quad Port 10/100/1000 PCI-X Server NIC" },
732 { "HP NC6770 Gigabit Server Adapter" },
733 { "NC1020 HP ProLiant Gigabit Server Adapter 32 PCI" },
734 { "HP ProLiant NC 150T PCI 4-port Gigabit Combo Switch Adapter" },
735 { "HP NC7760 Gigabit Server Adapter" },
736 { "HP NC7761 Gigabit Server Adapter" },
737 { "HP NC7770 Gigabit Server Adapter" },
738 { "HP NC7771 Gigabit Server Adapter" },
739 { "HP NC7780 Gigabit Server Adapter" },
740 { "HP NC7781 Gigabit Server Adapter" },
741 { "HP NC7772 Gigabit Server Adapter" },
742 { "HP NC7782 Gigabit Server Adapter" },
743 { "HP NC7783 Gigabit Server Adapter" },
744 { "HP ProLiant NC 320T PCI Express Gigabit Server Adapter" },
745 { "HP ProLiant NC 320i PCI Express Gigabit Server Adapter" },
746 { "HP NC325i Integrated Dual Port PCI Express Gigabit Server Adapter" },
747 { "HP NC325m Quad Port PCI Express Gigabit Server Adapter" },
748 { "HP NC324i Integrated Dual Port PCIe Gigabit Server Adapter" },
749 { "HP NC326i Integrated Dual Port PCIe Gigabit Server Adapter" },
750 { "HP NC326m Dual Port PCI Express Gigabit Server Adapter" },
751 #ifdef MEFHACK_NOTFORPLANETLAB
752 { "Broadcom BCM5704 CIOB-E 1000Base-T" },
753 { "Broadcom BCM5704 1000Base-T" },
754 { "Broadcom BCM5704 1000Base-SX" },
755 { "Broadcom BCM5705 1000Base-T" },
756 { "Broadcom BCM5705M 1000Base-T" },
757 { "Broadcom 570x 10/100 Integrated Controller" },
758 { "Broadcom BCM5901 100Base-TX" },
759 { "Broadcom NetXtreme Gigabit Ethernet for hp" },
760 { "Broadcom BCM5788 NetLink 1000Base-T" },
761 { "Broadcom BCM5789 NetLink 1000Base-T PCI Express" },
762 { "Broadcom BCM5750 1000Base-T PCI" },
763 { "Broadcom BCM5750M 1000Base-T PCI" },
764 { "Broadcom BCM5720 1000Base-T PCI" },
765 { "Broadcom BCM5751 1000Base-T PCI Express" },
766 { "Broadcom BCM5751M 1000Base-T PCI Express" },
767 { "Broadcom BCM5751F 100Base-TX PCI Express" },
768 { "Broadcom BCM5721 1000Base-T PCI Express" },
769 { "Broadcom BCM5753 1000Base-T PCI Express" },
770 { "Broadcom BCM5753M 1000Base-T PCI Express" },
771 { "Broadcom BCM5753F 100Base-TX PCI Express" },
772 { "Broadcom BCM5781 NetLink 1000Base-T PCI Express" },
773 { "Broadcom BCM5752 1000Base-T PCI Express" },
774 { "Broadcom BCM5752M 1000Base-T PCI Express" },
775 { "Broadcom BCM5714 1000Base-T " },
776 { "Broadcom BCM5714S 1000Base-SX " },
777 { "Broadcom BCM5780 1000Base-T" },
778 { "Broadcom BCM5780S 1000Base-SX" },
779 { "Broadcom BCM5715 1000Base-T " },
780 { "Broadcom BCM5715S 1000Base-SX " },
781 { "Broadcom BCM5903M Gigabit Ethernet " },
786 static struct pci_device_id bcm5700_pci_tbl[] __devinitdata = {
787 #ifdef MEFHACK_NOTFORPLANETLAB
788 {0x14e4, 0x1644, 0x14e4, 0x1644, 0, 0, BCM5700A6 },
789 {0x14e4, 0x1644, 0x14e4, 0x2, 0, 0, BCM5700T6 },
790 {0x14e4, 0x1644, 0x14e4, 0x3, 0, 0, BCM5700A9 },
791 {0x14e4, 0x1644, 0x14e4, 0x4, 0, 0, BCM5700T9 },
792 {0x14e4, 0x1644, 0x1028, 0xd1, 0, 0, BCM5700 },
793 {0x14e4, 0x1644, 0x1028, 0x0106, 0, 0, BCM5700 },
794 {0x14e4, 0x1644, 0x1028, 0x0109, 0, 0, BCM5700 },
795 {0x14e4, 0x1644, 0x1028, 0x010a, 0, 0, BCM5700 },
796 {0x14e4, 0x1644, 0x10b7, 0x1000, 0, 0, TC996T },
797 {0x14e4, 0x1644, 0x10b7, 0x1001, 0, 0, TC996ST },
798 {0x14e4, 0x1644, 0x10b7, 0x1002, 0, 0, TC996SSX },
799 {0x14e4, 0x1644, 0x10b7, 0x1003, 0, 0, TC997T },
800 {0x14e4, 0x1644, 0x10b7, 0x1005, 0, 0, TC997SX },
801 {0x14e4, 0x1644, 0x10b7, 0x1008, 0, 0, TC942BR01 },
802 {0x14e4, 0x1644, PCI_ANY_ID, PCI_ANY_ID, 0, 0, BCM5700 },
803 {0x14e4, 0x1645, 0x14e4, 1, 0, 0, BCM5701A5 },
804 {0x14e4, 0x1645, 0x14e4, 5, 0, 0, BCM5701T1 },
805 {0x14e4, 0x1645, 0x14e4, 6, 0, 0, BCM5701T8 },
806 {0x14e4, 0x1645, 0x14e4, 7, 0, 0, BCM5701A7 },
807 {0x14e4, 0x1645, 0x14e4, 8, 0, 0, BCM5701A10 },
808 {0x14e4, 0x1645, 0x14e4, 0x8008, 0, 0, BCM5701A12 },
810 {0x14e4, 0x1645, 0x0e11, 0xc1, 0, 0, NC6770 },
811 {0x14e4, 0x1645, 0x0e11, 0x7c, 0, 0, NC7770 },
812 {0x14e4, 0x1645, 0x0e11, 0x85, 0, 0, NC7780 },
813 #ifdef MEFHACK_NOTFORPLANETLAB
814 {0x14e4, 0x1645, 0x1028, 0x0121, 0, 0, BCM5701 },
815 {0x14e4, 0x1645, 0x10b7, 0x1004, 0, 0, TC996SX },
816 {0x14e4, 0x1645, 0x10b7, 0x1006, 0, 0, TC996BT },
817 {0x14e4, 0x1645, 0x10b7, 0x1007, 0, 0, TC1000T },
818 {0x14e4, 0x1645, 0x10b7, 0x1008, 0, 0, TC940BR01 },
819 {0x14e4, 0x1645, PCI_ANY_ID, PCI_ANY_ID, 0, 0, BCM5701 },
820 {0x14e4, 0x1646, 0x14e4, 0x8009, 0, 0, BCM5702 },
821 {0x14e4, 0x1646, PCI_ANY_ID, PCI_ANY_ID, 0, 0, BCM5702 },
822 {0x14e4, 0x16a6, 0x14e4, 0x8009, 0, 0, BCM5702 },
823 {0x14e4, 0x16a6, 0x14e4, 0x000c, 0, 0, BCM5702 },
824 {0x14e4, 0x16a6, 0x0e11, 0xbb, 0, 0, NC7760 },
825 {0x14e4, 0x16a6, PCI_ANY_ID, PCI_ANY_ID, 0, 0, BCM5702 },
826 {0x14e4, 0x16c6, 0x10b7, 0x1100, 0, 0, TC1000BT },
827 {0x14e4, 0x16c6, PCI_ANY_ID, PCI_ANY_ID, 0, 0, BCM5702 },
828 {0x14e4, 0x1647, 0x14e4, 0x0009, 0, 0, BCM5703 },
829 {0x14e4, 0x1647, 0x14e4, 0x000a, 0, 0, BCM5703A31 },
830 {0x14e4, 0x1647, 0x14e4, 0x000b, 0, 0, BCM5703 },
831 {0x14e4, 0x1647, 0x14e4, 0x800a, 0, 0, BCM5703 },
832 {0x14e4, 0x1647, PCI_ANY_ID, PCI_ANY_ID, 0, 0, BCM5703 },
833 {0x14e4, 0x16a7, 0x14e4, 0x0009, 0, 0, BCM5703 },
834 {0x14e4, 0x16a7, 0x14e4, 0x000a, 0, 0, BCM5703A31 },
835 {0x14e4, 0x16a7, 0x14e4, 0x000b, 0, 0, BCM5703 },
836 {0x14e4, 0x16a7, 0x14e4, 0x800a, 0, 0, BCM5703 },
838 {0x14e4, 0x16a7, 0x0e11, 0xca, 0, 0, NC7771 },
839 {0x14e4, 0x16a7, 0x0e11, 0xcb, 0, 0, NC7781 },
840 #ifdef MEFHACK_NOTFORPLANETLAB
841 {0x14e4, 0x16a7, 0x1014, 0x0281, 0, 0, BCM5703ARBUCKLE },
842 {0x14e4, 0x16a7, PCI_ANY_ID, PCI_ANY_ID, 0, 0, BCM5703 },
843 {0x14e4, 0x16c7, 0x14e4, 0x000a, 0, 0, BCM5703A31 },
845 {0x14e4, 0x16c7, 0x0e11, 0xca, 0, 0, NC7771 },
846 {0x14e4, 0x16c7, 0x0e11, 0xcb, 0, 0, NC7781 },
847 #ifdef MEFHACK_NOTFORPLANETLAB
848 {0x14e4, 0x16c7, PCI_ANY_ID, PCI_ANY_ID, 0, 0, BCM5703 },
850 {0x14e4, 0x1648, 0x0e11, 0xcf, 0, 0, NC7772 },
851 {0x14e4, 0x1648, 0x0e11, 0xd0, 0, 0, NC7782 },
852 {0x14e4, 0x1648, 0x0e11, 0xd1, 0, 0, NC7783 },
853 #ifdef MEFHACK_NOTFORPLANETLAB
854 {0x14e4, 0x1648, 0x10b7, 0x2000, 0, 0, TC998T },
855 {0x14e4, 0x1648, 0x10b7, 0x3000, 0, 0, TC999T },
856 {0x14e4, 0x1648, 0x1166, 0x1648, 0, 0, BCM5704CIOBE },
857 {0x14e4, 0x1648, PCI_ANY_ID, PCI_ANY_ID, 0, 0, BCM5704 },
858 {0x14e4, 0x1649, PCI_ANY_ID, PCI_ANY_ID, 0, 0, BCM5704S },
859 {0x14e4, 0x16a8, 0x14e4, 0x16a8, 0, 0, BCM5704S },
860 {0x14e4, 0x16a8, 0x10b7, 0x2001, 0, 0, TC998SX },
861 {0x14e4, 0x16a8, PCI_ANY_ID, PCI_ANY_ID, 0, 0, BCM5704S },
863 {0x14e4, 0x1653, 0x0e11, 0x00e3, 0, 0, NC7761 },
864 #ifdef MEFHACK_NOTFORPLANETLAB
865 {0x14e4, 0x1653, PCI_ANY_ID, PCI_ANY_ID, 0, 0, BCM5705 },
867 {0x14e4, 0x1654, 0x0e11, 0x00e3, 0, 0, NC7761 },
868 {0x14e4, 0x1654, 0x103c, 0x3100, 0, 0, NC1020 },
869 {0x14e4, 0x1654, 0x103c, 0x3226, 0, 0, NC150T },
870 #ifdef MEFHACK_NOTFORPLANETLAB
871 {0x14e4, 0x1654, PCI_ANY_ID, PCI_ANY_ID, 0, 0, BCM5705 },
872 {0x14e4, 0x165d, PCI_ANY_ID, PCI_ANY_ID, 0, 0, BCM5705M },
873 {0x14e4, 0x165e, PCI_ANY_ID, PCI_ANY_ID, 0, 0, BCM5705M },
874 {0x14e4, 0x166e, PCI_ANY_ID, PCI_ANY_ID, 0, 0, BCM5705F },
875 {0x14e4, 0x1696, PCI_ANY_ID, PCI_ANY_ID, 0, 0, BCM5782 },
876 {0x14e4, 0x169c, PCI_ANY_ID, PCI_ANY_ID, 0, 0, BCM5788 },
877 {0x14e4, 0x169d, PCI_ANY_ID, PCI_ANY_ID, 0, 0, BCM5789 },
878 {0x14e4, 0x170d, PCI_ANY_ID, PCI_ANY_ID, 0, 0, BCM5901 },
879 {0x14e4, 0x170e, PCI_ANY_ID, PCI_ANY_ID, 0, 0, BCM5901 },
880 {0x14e4, 0x1676, PCI_ANY_ID, PCI_ANY_ID, 0, 0, BCM5750 },
881 {0x14e4, 0x167c, PCI_ANY_ID, PCI_ANY_ID, 0, 0, BCM5750M },
882 {0x14e4, 0x1677, PCI_ANY_ID, PCI_ANY_ID, 0, 0, BCM5751 },
883 {0x14e4, 0x167d, PCI_ANY_ID, PCI_ANY_ID, 0, 0, BCM5751M },
884 {0x14e4, 0x167e, PCI_ANY_ID, PCI_ANY_ID, 0, 0, BCM5751F },
885 {0x14e4, 0x1658, PCI_ANY_ID, PCI_ANY_ID, 0, 0, BCM5720 },
887 {0x14e4, 0x1659, 0x103c, 0x7031, 0, 0, NC320T },
888 {0x14e4, 0x1659, 0x103c, 0x7032, 0, 0, NC320T },
889 {0x14e4, 0x166a, 0x103c, 0x7035, 0, 0, NC325I },
890 {0x14e4, 0x166b, 0x103c, 0x7036, 0, 0, NC325I },
891 {0x14e4, 0x1668, 0x103c, 0x7039, 0, 0, NC324I },
892 {0x14e4, 0x1669, 0x103c, 0x703a, 0, 0, NC324I },
893 {0x14e4, 0x1678, 0x103c, 0x703e, 0, 0, NC326I },
894 {0x14e4, 0x1679, 0x103c, 0x703c, 0, 0, NC326I },
895 {0x14e4, 0x1679, 0x103c, 0x170c, 0, 0, NC325m },
896 {0x14e4, 0x1679, 0x103c, 0x1707, 0, 0, NC326m },
897 #ifdef MEFHACK_NOTFORPLANETLAB
898 {0x14e4, 0x1659, PCI_ANY_ID, PCI_ANY_ID, 0, 0, BCM5721 },
899 {0x14e4, 0x16f7, PCI_ANY_ID, PCI_ANY_ID, 0, 0, BCM5753 },
900 {0x14e4, 0x16fd, PCI_ANY_ID, PCI_ANY_ID, 0, 0, BCM5753M },
901 {0x14e4, 0x16fe, PCI_ANY_ID, PCI_ANY_ID, 0, 0, BCM5753F },
902 {0x14e4, 0x16dd, PCI_ANY_ID, PCI_ANY_ID, 0, 0, BCM5781 },
903 {0x14e4, 0x1600, PCI_ANY_ID, PCI_ANY_ID, 0, 0, BCM5752 },
904 {0x14e4, 0x1601, PCI_ANY_ID, PCI_ANY_ID, 0, 0, BCM5752M },
905 {0x14e4, 0x1668, PCI_ANY_ID, PCI_ANY_ID, 0, 0, BCM5714 },
906 {0x14e4, 0x1669, PCI_ANY_ID, PCI_ANY_ID, 0, 0, BCM5714S },
907 {0x14e4, 0x166a, PCI_ANY_ID, PCI_ANY_ID, 0, 0, BCM5780 },
908 {0x14e4, 0x166b, PCI_ANY_ID, PCI_ANY_ID, 0, 0, BCM5780S },
909 {0x14e4, 0x1678, PCI_ANY_ID, PCI_ANY_ID, 0, 0, BCM5715 },
910 {0x14e4, 0x1679, PCI_ANY_ID, PCI_ANY_ID, 0, 0, BCM5715S },
911 {0x14e4, 0x16ff, PCI_ANY_ID, PCI_ANY_ID, 0, 0, BCM5903M },
916 MODULE_DEVICE_TABLE(pci, bcm5700_pci_tbl);
919 extern int bcm5700_proc_create(void);
920 extern int bcm5700_proc_create_dev(struct net_device *dev);
921 extern int bcm5700_proc_remove_dev(struct net_device *dev);
922 extern int bcm5700_proc_remove_notifier(void);
925 #if (LINUX_VERSION_CODE >= 0x2060a)
926 static struct pci_device_id pci_AMD762id[]={
927 { PCI_DEVICE(PCI_VENDOR_ID_AMD,
928 PCI_DEVICE_ID_AMD_FE_GATE_700C) },
933 /*******************************************************************************
934 *******************************************************************************
937 int get_csum_flag(LM_UINT32 ChipRevId)
939 return NETIF_F_IP_CSUM;
942 /*******************************************************************************
943 *******************************************************************************
945 This function returns true if the device passed to it is attached to an
946 ICH-ICH4. If the chip is not attached to an ICH, or is attached to an ICH5
947 or newer, it returns false.
949 This function determines which bridge it is attached to by scanning the pci
950 bus looking for bridge chips (hdr_type=1). When a bridge chip is detected,
951 the bridge's subordinate's secondary bus number is compared with this
952 devices bus number. If they match, then the device is attached to this
953 bridge. The bridge's device id is compared to a list of known device ids for
954 ICH-ICH4. Since many older ICH's (ICH2-ICH7) share the same device id, the
955 chip revision must also be checked to determine if the chip is older than an
958 To scan the bus, one of two functions is used depending on the kernel
959 version. For 2.4 kernels, the pci_find_device function is used. This
960 function has been deprecated in the 2.6 kernel and replaced with the
961 function pci_get_device. The macro walk_pci_bus determines which function to
962 use when the driver is built.
965 #if (LINUX_VERSION_CODE >= 0x2060a)
966 #define walk_pci_bus(d) while ((d = pci_get_device( \
967 PCI_ANY_ID, PCI_ANY_ID, d)) != NULL)
969 #define unwalk_pci_bus(d) pci_dev_put(d)
972 #define walk_pci_bus(d) while ((d = pci_find_device( \
973 PCI_ANY_ID, PCI_ANY_ID, d)) != NULL)
974 #define unwalk_pci_bus(d)
978 #define ICH5_CHIP_VERSION 0xc0
980 static struct pci_device_id pci_ICHtable[] = {
981 {0x8086, 0x2418}, /* PCI_DEVICE_ID_INTEL_82801AA_8 */
982 {0x8086, 0x2428}, /* PCI_DEVICE_ID_INTEL_82801AB_8 */
983 {0x8086, 0x244e}, /* PCI_DEVICE_ID_INTEL_82801BA_6 */
984 {0x8086, 0x2448}, /* PCI_DEVICE_ID_INTEL_82801BA_11 */
988 int attached_to_ICH4_or_older( struct pci_dev *pdev)
990 struct pci_dev *tmp_pdev = NULL;
991 struct pci_device_id *ich_table;
994 walk_pci_bus (tmp_pdev) {
995 if ((tmp_pdev->hdr_type == 1) &&
996 (tmp_pdev->subordinate != NULL) &&
997 (tmp_pdev->subordinate->secondary == pdev->bus->number)) {
999 ich_table = pci_ICHtable;
1001 while (ich_table->vendor) {
1002 if ((ich_table->vendor == tmp_pdev->vendor) &&
1003 (ich_table->device == tmp_pdev->device)) {
1005 pci_read_config_byte( tmp_pdev,
1006 PCI_REVISION_ID, &chip_rev);
1008 if (chip_rev < ICH5_CHIP_VERSION) {
1009 unwalk_pci_bus( tmp_pdev);
1020 static int __devinit bcm5700_init_board(struct pci_dev *pdev,
1021 struct net_device **dev_out,
1024 struct net_device *dev;
1025 PUM_DEVICE_BLOCK pUmDevice;
1026 PLM_DEVICE_BLOCK pDevice;
1031 /* dev zeroed in init_etherdev */
1032 #if (LINUX_VERSION_CODE >= 0x20600)
1033 dev = alloc_etherdev(sizeof(*pUmDevice));
1035 dev = init_etherdev(NULL, sizeof(*pUmDevice));
1038 printk (KERN_ERR "%s: unable to alloc new ethernet\n",
1042 SET_MODULE_OWNER(dev);
1043 #if (LINUX_VERSION_CODE >= 0x20600)
1044 SET_NETDEV_DEV(dev, &pdev->dev);
1046 pUmDevice = (PUM_DEVICE_BLOCK) dev->priv;
1048 /* enable device (incl. PCI PM wakeup), and bus-mastering */
1049 rc = pci_enable_device (pdev);
1053 rc = pci_request_regions(pdev, bcm5700_driver);
1057 pci_set_master(pdev);
1059 if (pci_set_dma_mask(pdev, BCM_64BIT_DMA_MASK) == 0) {
1060 pUmDevice->using_dac = 1;
1061 if (pci_set_consistent_dma_mask(pdev, BCM_64BIT_DMA_MASK) != 0)
1063 printk(KERN_ERR "pci_set_consistent_dma_mask failed\n");
1064 pci_release_regions(pdev);
1068 else if (pci_set_dma_mask(pdev, BCM_32BIT_DMA_MASK) == 0) {
1069 pUmDevice->using_dac = 0;
1072 printk(KERN_ERR "System does not support DMA\n");
1073 pci_release_regions(pdev);
1077 pUmDevice->dev = dev;
1078 pUmDevice->pdev = pdev;
1079 pUmDevice->mem_list_num = 0;
1080 pUmDevice->next_module = root_tigon3_dev;
1081 pUmDevice->index = board_idx;
1082 root_tigon3_dev = dev;
1084 spin_lock_init(&pUmDevice->global_lock);
1086 spin_lock_init(&pUmDevice->undi_lock);
1088 spin_lock_init(&pUmDevice->phy_lock);
1090 pDevice = (PLM_DEVICE_BLOCK) pUmDevice;
1092 pDevice->FunctNum = PCI_FUNC(pUmDevice->pdev->devfn);
1094 #if T3_JUMBO_RCV_RCB_ENTRY_COUNT
1095 if (board_idx < MAX_UNITS) {
1096 bcm5700_validate_param_range(pUmDevice, &mtu[board_idx], "mtu", 1500, 9000, 1500);
1097 dev->mtu = mtu[board_idx];
1100 /* If we're attached to an ICH4 or older, we may need to implement a
1101 workaround for special cycles described in the BCM5704/357 Errata.
1102 This workaround is only need on 5703-A1/2 or 5704-A0 chips that
1103 are attached to a PCI-X bus. The NIC chip type and bus are checked
1104 later in the driver and the flag cleared if the workaround is not
1105 needed. The workaround is enabled by setting the flag UNDI_FIX_FLAG
1106 which casues the driver to use indirect pci-config cycles when
1107 accessing the low-priority mailboxes (MB_REG_WR/RD).
1110 if (attached_to_ICH4_or_older( pdev)) {
1111 pDevice->Flags |= UNDI_FIX_FLAG;
1114 #if (LINUX_VERSION_CODE >= 0x2060a)
1115 if(pci_dev_present(pci_AMD762id)){
1116 pDevice->Flags |= FLUSH_POSTED_WRITE_FLAG;
1117 pDevice->Flags &= ~NIC_SEND_BD_FLAG;
1120 if (pci_find_device(0x1022, 0x700c, NULL)) {
1121 /* AMD762 writes I/O out of order */
1122 /* Setting bit 1 in 762's register 0x4C still doesn't work */
1124 pDevice->Flags |= FLUSH_POSTED_WRITE_FLAG;
1125 pDevice->Flags &= ~NIC_SEND_BD_FLAG;
1128 if (LM_GetAdapterInfo(pDevice) != LM_STATUS_SUCCESS) {
1133 if ( (pDevice->Flags & JUMBO_CAPABLE_FLAG) == 0 ) {
1134 if (dev->mtu > 1500) {
1136 printk(KERN_WARNING "%s-%d: Jumbo mtu sizes not supported, using mtu=1500\n", bcm5700_driver, pUmDevice->index);
1140 pUmDevice->do_global_lock = 0;
1141 if (T3_ASIC_REV(pUmDevice->lm_dev.ChipRevId) == T3_ASIC_REV_5700) {
1142 /* The 5700 chip works best without interleaved register */
1143 /* accesses on certain machines. */
1144 pUmDevice->do_global_lock = 1;
1147 if ((T3_ASIC_REV(pUmDevice->lm_dev.ChipRevId) == T3_ASIC_REV_5701) &&
1148 ((pDevice->PciState & T3_PCI_STATE_NOT_PCI_X_BUS) == 0)) {
1150 pUmDevice->rx_buf_align = 0;
1153 pUmDevice->rx_buf_align = 2;
1155 dev->mem_start = pci_resource_start(pdev, 0);
1156 dev->mem_end = dev->mem_start + sizeof(T3_STD_MEM_MAP);
1157 dev->irq = pdev->irq;
1163 pci_release_regions(pdev);
1164 bcm5700_freemem(dev);
1167 #if (LINUX_VERSION_CODE < 0x020600)
1168 unregister_netdev(dev);
1176 static int __devinit
1177 bcm5700_print_ver(void)
1179 printk(KERN_INFO "Broadcom Gigabit Ethernet Driver %s ",
1182 printk("with Broadcom NIC Extension (NICE) ");
1184 printk("ver. %s %s\n", bcm5700_version, bcm5700_date);
1188 static int __devinit
1189 bcm5700_init_one(struct pci_dev *pdev,
1190 const struct pci_device_id *ent)
1192 struct net_device *dev = NULL;
1193 PUM_DEVICE_BLOCK pUmDevice;
1194 PLM_DEVICE_BLOCK pDevice;
1196 static int board_idx = -1;
1197 static int printed_version = 0;
1198 struct pci_dev *pci_dev;
1202 if (!printed_version) {
1203 bcm5700_print_ver();
1205 bcm5700_proc_create();
1207 printed_version = 1;
1210 i = bcm5700_init_board(pdev, &dev, board_idx);
1219 if (atomic_read(&bcm5700_load_count) == 0) {
1220 register_ioctl32_conversion(SIOCNICE, bcm5700_ioctl32);
1222 atomic_inc(&bcm5700_load_count);
1224 dev->open = bcm5700_open;
1225 dev->hard_start_xmit = bcm5700_start_xmit;
1226 dev->stop = bcm5700_close;
1227 dev->get_stats = bcm5700_get_stats;
1228 dev->set_multicast_list = bcm5700_set_rx_mode;
1229 dev->do_ioctl = bcm5700_ioctl;
1230 dev->set_mac_address = &bcm5700_set_mac_addr;
1231 #if T3_JUMBO_RCV_RCB_ENTRY_COUNT
1232 dev->change_mtu = &bcm5700_change_mtu;
1234 #if (LINUX_VERSION_CODE >= 0x20400)
1235 dev->tx_timeout = bcm5700_reset;
1236 /* Fix for RQM 289636 */
1237 /* dev->watchdog_timeo = TX_TIMEOUT; */
1238 dev->watchdog_timeo = BCM_TX_TIMEOUT;
1241 dev->vlan_rx_register = &bcm5700_vlan_rx_register;
1242 dev->vlan_rx_kill_vid = &bcm5700_vlan_rx_kill_vid;
1244 #ifdef BCM_NAPI_RXPOLL
1245 dev->poll = bcm5700_poll;
1249 pUmDevice = (PUM_DEVICE_BLOCK) dev->priv;
1250 pDevice = (PLM_DEVICE_BLOCK) pUmDevice;
1252 dev->base_addr = pci_resource_start(pdev, 0);
1253 dev->irq = pdev->irq;
1254 #if defined(HAVE_POLL_CONTROLLER) || defined(CONFIG_NET_POLL_CONTROLLER)
1255 dev->poll_controller = poll_bcm5700;
1258 #if (LINUX_VERSION_CODE >= 0x20600)
1259 if ((i = register_netdev(dev))) {
1260 printk(KERN_ERR "%s: Cannot register net device\n",
1262 if (pUmDevice->lm_dev.pMappedMemBase)
1263 iounmap(pUmDevice->lm_dev.pMappedMemBase);
1264 pci_release_regions(pdev);
1265 bcm5700_freemem(dev);
1272 pci_set_drvdata(pdev, dev);
1274 memcpy(dev->dev_addr, pDevice->NodeAddress, 6);
1275 pUmDevice->name = board_info[ent->driver_data].name,
1276 printk(KERN_INFO "%s: %s found at mem %lx, IRQ %d, ",
1277 dev->name, pUmDevice->name, dev->base_addr,
1279 printk("node addr ");
1280 for (i = 0; i < 6; i++) {
1281 printk("%2.2x", dev->dev_addr[i]);
1285 printk(KERN_INFO "%s: ", dev->name);
1286 if ((pDevice->PhyId & PHY_ID_MASK) == PHY_BCM5400_PHY_ID)
1287 printk("Broadcom BCM5400 Copper ");
1288 else if ((pDevice->PhyId & PHY_ID_MASK) == PHY_BCM5401_PHY_ID)
1289 printk("Broadcom BCM5401 Copper ");
1290 else if ((pDevice->PhyId & PHY_ID_MASK) == PHY_BCM5411_PHY_ID)
1291 printk("Broadcom BCM5411 Copper ");
1292 else if (((pDevice->PhyId & PHY_ID_MASK) == PHY_BCM5701_PHY_ID) &&
1293 !(pDevice->TbiFlags & ENABLE_TBI_FLAG)) {
1294 printk("Broadcom BCM5701 Integrated Copper ");
1296 else if ((pDevice->PhyId & PHY_ID_MASK) == PHY_BCM5703_PHY_ID) {
1297 printk("Broadcom BCM5703 Integrated ");
1298 if (pDevice->TbiFlags & ENABLE_TBI_FLAG)
1303 else if ((pDevice->PhyId & PHY_ID_MASK) == PHY_BCM5704_PHY_ID) {
1304 printk("Broadcom BCM5704 Integrated ");
1305 if (pDevice->TbiFlags & ENABLE_TBI_FLAG)
1310 else if (pDevice->PhyFlags & PHY_IS_FIBER){
1311 if(( pDevice->PhyId & PHY_ID_MASK ) == PHY_BCM5780_PHY_ID)
1312 printk("Broadcom BCM5780S Integrated Serdes ");
1314 if(( pDevice->PhyId & PHY_ID_MASK ) == PHY_BCM5714_PHY_ID)
1315 printk("Broadcom BCM5714S Integrated Serdes ");
1317 else if ((pDevice->PhyId & PHY_ID_MASK) == PHY_BCM5705_PHY_ID)
1318 printk("Broadcom BCM5705 Integrated Copper ");
1319 else if ((pDevice->PhyId & PHY_ID_MASK) == PHY_BCM5750_PHY_ID)
1320 printk("Broadcom BCM5750 Integrated Copper ");
1322 else if ((pDevice->PhyId & PHY_ID_MASK) == PHY_BCM5714_PHY_ID)
1323 printk("Broadcom BCM5714 Integrated Copper ");
1324 else if ((pDevice->PhyId & PHY_ID_MASK) == PHY_BCM5780_PHY_ID)
1325 printk("Broadcom BCM5780 Integrated Copper ");
1327 else if ((pDevice->PhyId & PHY_ID_MASK) == PHY_BCM5752_PHY_ID)
1328 printk("Broadcom BCM5752 Integrated Copper ");
1329 else if ((pDevice->PhyId & PHY_ID_MASK) == PHY_BCM8002_PHY_ID)
1330 printk("Broadcom BCM8002 SerDes ");
1331 else if (pDevice->TbiFlags & ENABLE_TBI_FLAG) {
1332 if (T3_ASIC_REV(pDevice->ChipRevId) == T3_ASIC_REV_5703) {
1333 printk("Broadcom BCM5703 Integrated SerDes ");
1335 else if (T3_ASIC_REV(pDevice->ChipRevId) == T3_ASIC_REV_5704) {
1336 printk("Broadcom BCM5704 Integrated SerDes ");
1339 printk("Agilent HDMP-1636 SerDes ");
1345 printk("transceiver found\n");
1347 #if (LINUX_VERSION_CODE >= 0x20400)
1348 if (scatter_gather[board_idx]) {
1349 dev->features |= NETIF_F_SG;
1350 if (pUmDevice->using_dac && !(pDevice->Flags & BCM5788_FLAG))
1351 dev->features |= NETIF_F_HIGHDMA;
1353 if ((pDevice->TaskOffloadCap & LM_TASK_OFFLOAD_TX_TCP_CHECKSUM) &&
1354 tx_checksum[board_idx]) {
1356 dev->features |= get_csum_flag( pDevice->ChipRevId);
1359 dev->features |= NETIF_F_HW_VLAN_TX | NETIF_F_HW_VLAN_RX;
1362 /* On 5714/15/80 chips, Jumbo Frames and TSO cannot both be enabled at
1363 the same time. Since only one of these features can be enable at a
1364 time, we'll enable only Jumbo Frames and disable TSO when the user
1365 tries to enable both.
1367 dev->features &= ~NETIF_F_TSO;
1369 if ((pDevice->TaskToOffload & LM_TASK_OFFLOAD_TCP_SEGMENTATION) &&
1370 (enable_tso[board_idx])) {
1371 if (T3_ASIC_5714_FAMILY(pDevice->ChipRevId) &&
1372 (dev->mtu > 1500)) {
1373 printk(KERN_ALERT "%s: Jumbo Frames and TSO cannot simultaneously be enabled. Jumbo Frames enabled. TSO disabled.\n", dev->name);
1375 dev->features |= NETIF_F_TSO;
1379 printk(KERN_INFO "%s: Scatter-gather %s, 64-bit DMA %s, Tx Checksum %s, ",
1381 (char *) ((dev->features & NETIF_F_SG) ? "ON" : "OFF"),
1382 (char *) ((dev->features & NETIF_F_HIGHDMA) ? "ON" : "OFF"),
1383 (char *) ((dev->features & get_csum_flag( pDevice->ChipRevId)) ? "ON" : "OFF"));
1385 if ((pDevice->ChipRevId != T3_CHIP_ID_5700_B0) &&
1386 rx_checksum[board_idx])
1387 printk("Rx Checksum ON");
1389 printk("Rx Checksum OFF");
1391 printk(", 802.1Q VLAN ON");
1394 if (dev->features & NETIF_F_TSO) {
1399 #ifdef BCM_NAPI_RXPOLL
1400 printk(", NAPI ON");
1405 bcm5700_proc_create_dev(dev);
1408 tasklet_init(&pUmDevice->tasklet, bcm5700_tasklet,
1409 (unsigned long) pUmDevice);
1411 if (T3_ASIC_REV(pDevice->ChipRevId) == T3_ASIC_REV_5704) {
1412 if ((REG_RD(pDevice, PciCfg.DualMacCtrl) &
1413 T3_DUAL_MAC_CH_CTRL_MASK) == 3) {
1415 printk(KERN_WARNING "%s: Device is configured for Hardware Based Teaming which is not supported with this operating system. Please consult the user diagnostic guide to disable Turbo Teaming.\n", dev->name);
1419 #if (LINUX_VERSION_CODE > 0x20605)
1421 if ((pci_dev = pci_get_device(0x1022, 0x700c, NULL))) {
1423 if ((pci_dev = pci_find_device(0x1022, 0x700c, NULL))) {
1427 /* Found AMD 762 North bridge */
1428 pci_read_config_dword(pci_dev, 0x4c, &val);
1429 if ((val & 0x02) == 0) {
1430 pci_write_config_dword(pci_dev, 0x4c, val | 0x02);
1431 printk(KERN_INFO "%s: Setting AMD762 Northbridge to enable PCI ordering compliance\n", bcm5700_driver);
1435 #if (LINUX_VERSION_CODE > 0x20605)
1437 pci_dev_put(pci_dev);
1439 #if defined(CONFIG_PCI_MSI) || defined(CONFIG_PCI_USE_VECTOR)
1441 if ((pci_dev = pci_get_device(0x1066, 0x0017, NULL))) {
1442 bcm_msi_chipset_bug = 1;
1444 pci_dev_put(pci_dev);
1452 static void __devexit
1453 bcm5700_remove_one (struct pci_dev *pdev)
1455 struct net_device *dev = pci_get_drvdata (pdev);
1456 PUM_DEVICE_BLOCK pUmDevice = (PUM_DEVICE_BLOCK)dev->priv;
1459 bcm5700_proc_remove_dev(dev);
1462 atomic_dec(&bcm5700_load_count);
1463 if (atomic_read(&bcm5700_load_count) == 0)
1464 unregister_ioctl32_conversion(SIOCNICE);
1466 unregister_netdev(dev);
1468 if (pUmDevice->lm_dev.pMappedMemBase)
1469 iounmap(pUmDevice->lm_dev.pMappedMemBase);
1471 pci_release_regions(pdev);
1473 #if (LINUX_VERSION_CODE < 0x020600)
1479 pci_set_drvdata(pdev, NULL);
1483 int b57_test_intr(UM_DEVICE_BLOCK *pUmDevice);
1486 bcm5700_open(struct net_device *dev)
1488 PUM_DEVICE_BLOCK pUmDevice = (PUM_DEVICE_BLOCK)dev->priv;
1489 PLM_DEVICE_BLOCK pDevice = (PLM_DEVICE_BLOCK) pUmDevice;
1492 if (pUmDevice->suspended){
1495 /* delay for 6 seconds */
1496 pUmDevice->delayed_link_ind = (6 * HZ) / pUmDevice->timer_interval;
1499 #ifndef BCM_NAPI_RXPOLL
1500 pUmDevice->adaptive_expiry = HZ / pUmDevice->timer_interval;
1504 #ifdef INCLUDE_TBI_SUPPORT
1505 if ((pDevice->TbiFlags & ENABLE_TBI_FLAG) &&
1506 (pDevice->TbiFlags & TBI_POLLING_FLAGS)) {
1507 pUmDevice->poll_tbi_interval = HZ / pUmDevice->timer_interval;
1508 if (T3_ASIC_REV(pDevice->ChipRevId) == T3_ASIC_REV_5703) {
1509 pUmDevice->poll_tbi_interval /= 4;
1511 pUmDevice->poll_tbi_expiry = pUmDevice->poll_tbi_interval;
1514 /* set this timer for 2 seconds */
1515 pUmDevice->asf_heartbeat = (2 * HZ) / pUmDevice->timer_interval;
1517 #if defined(CONFIG_PCI_MSI) || defined(CONFIG_PCI_USE_VECTOR)
1520 if ( ( (T3_ASIC_IS_575X_PLUS(pDevice->ChipRevId) ) &&
1521 (T3_ASIC_REV(pDevice->ChipRevId) != T3_ASIC_REV_5714_A0 ) &&
1522 (T3_CHIP_REV(pDevice->ChipRevId) != T3_CHIP_REV_5750_AX ) &&
1523 (T3_CHIP_REV(pDevice->ChipRevId) != T3_CHIP_REV_5750_BX ) ) &&
1524 !bcm_msi_chipset_bug ){
1526 if (disable_msi[pUmDevice->index]==1){
1527 /* do nothing-it's not turned on */
1529 pDevice->Flags |= USING_MSI_FLAG;
1531 REG_WR(pDevice, Msi.Mode, 2 );
1533 rc = pci_enable_msi(pUmDevice->pdev);
1536 pDevice->Flags &= ~ USING_MSI_FLAG;
1537 REG_WR(pDevice, Msi.Mode, 1 );
1545 if ((rc= request_irq(pUmDevice->pdev->irq, &bcm5700_interrupt, SA_SHIRQ, dev->name, dev)))
1548 #if defined(CONFIG_PCI_MSI) || defined(CONFIG_PCI_USE_VECTOR)
1550 if(pDevice->Flags & USING_MSI_FLAG) {
1552 pci_disable_msi(pUmDevice->pdev);
1553 pDevice->Flags &= ~USING_MSI_FLAG;
1554 REG_WR(pDevice, Msi.Mode, 1 );
1561 pUmDevice->opened = 1;
1562 if (LM_InitializeAdapter(pDevice) != LM_STATUS_SUCCESS) {
1563 pUmDevice->opened = 0;
1564 free_irq(dev->irq, dev);
1565 bcm5700_freemem(dev);
1569 bcm5700_set_vlan_mode(pUmDevice);
1570 bcm5700_init_counters(pUmDevice);
1572 if (pDevice->Flags & UNDI_FIX_FLAG) {
1573 printk(KERN_INFO "%s: Using indirect register access\n", dev->name);
1576 if (memcmp(dev->dev_addr, pDevice->NodeAddress, 6))
1578 /* Do not use invalid eth addrs: any multicast & all zeros */
1579 if( is_valid_ether_addr(dev->dev_addr) ){
1580 LM_SetMacAddress(pDevice, dev->dev_addr);
1584 printk(KERN_INFO "%s: Invalid administered node address\n",dev->name);
1585 memcpy(dev->dev_addr, pDevice->NodeAddress, 6);
1589 if (tigon3_debug > 1)
1590 printk(KERN_DEBUG "%s: tigon3_open() irq %d.\n", dev->name, dev->irq);
1592 QQ_InitQueue(&pUmDevice->rx_out_of_buf_q.Container,
1593 MAX_RX_PACKET_DESC_COUNT);
1596 #if (LINUX_VERSION_CODE < 0x020300)
1600 atomic_set(&pUmDevice->intr_sem, 0);
1602 LM_EnableInterrupt(pDevice);
1604 #if defined(CONFIG_PCI_MSI) || defined(CONFIG_PCI_USE_VECTOR)
1606 if (pDevice->Flags & USING_MSI_FLAG){
1608 /* int test to check support on older machines */
1609 if (b57_test_intr(pUmDevice) != 1) {
1611 LM_DisableInterrupt(pDevice);
1612 free_irq(pUmDevice->pdev->irq, dev);
1613 pci_disable_msi(pUmDevice->pdev);
1614 REG_WR(pDevice, Msi.Mode, 1 );
1615 pDevice->Flags &= ~USING_MSI_FLAG;
1617 rc = LM_ResetAdapter(pDevice);
1618 printk(KERN_ALERT " The MSI support in this system is not functional.\n");
1620 if (rc == LM_STATUS_SUCCESS)
1626 rc = request_irq(pUmDevice->pdev->irq, &bcm5700_interrupt,
1627 SA_SHIRQ, dev->name, dev);
1632 bcm5700_freemem(dev);
1633 pUmDevice->opened = 0;
1638 pDevice->InitDone = TRUE;
1639 atomic_set(&pUmDevice->intr_sem, 0);
1640 LM_EnableInterrupt(pDevice);
1645 init_timer(&pUmDevice->timer);
1646 pUmDevice->timer.expires = RUN_AT(pUmDevice->timer_interval);
1647 pUmDevice->timer.data = (unsigned long)dev;
1648 pUmDevice->timer.function = &bcm5700_timer;
1649 add_timer(&pUmDevice->timer);
1651 if (T3_ASIC_IS_5705_BEYOND(pDevice->ChipRevId)) {
1652 init_timer(&pUmDevice->statstimer);
1653 pUmDevice->statstimer.expires = RUN_AT(pUmDevice->statstimer_interval);
1654 pUmDevice->statstimer.data = (unsigned long)dev;
1655 pUmDevice->statstimer.function = &bcm5700_stats_timer;
1656 add_timer(&pUmDevice->statstimer);
1659 if(pDevice->Flags & USING_MSI_FLAG)
1660 printk(KERN_INFO "%s: Using Message Signaled Interrupt (MSI) \n", dev->name);
1662 printk(KERN_INFO "%s: Using PCI INTX interrupt \n", dev->name);
1664 netif_start_queue(dev);
1671 bcm5700_stats_timer(unsigned long data)
1673 struct net_device *dev = (struct net_device *)data;
1674 PUM_DEVICE_BLOCK pUmDevice = (PUM_DEVICE_BLOCK)dev->priv;
1675 PLM_DEVICE_BLOCK pDevice = (PLM_DEVICE_BLOCK) pUmDevice;
1676 unsigned long flags = 0;
1678 if (!pUmDevice->opened)
1681 if (!atomic_read(&pUmDevice->intr_sem) &&
1682 !pUmDevice->suspended &&
1683 (pDevice->LinkStatus == LM_STATUS_LINK_ACTIVE)) {
1684 BCM5700_LOCK(pUmDevice, flags);
1685 LM_GetStats(pDevice);
1686 BCM5700_UNLOCK(pUmDevice, flags);
1689 pUmDevice->statstimer.expires = RUN_AT(pUmDevice->statstimer_interval);
1691 add_timer(&pUmDevice->statstimer);
1696 bcm5700_timer(unsigned long data)
1698 struct net_device *dev = (struct net_device *)data;
1699 PUM_DEVICE_BLOCK pUmDevice = (PUM_DEVICE_BLOCK)dev->priv;
1700 PLM_DEVICE_BLOCK pDevice = (PLM_DEVICE_BLOCK) pUmDevice;
1701 unsigned long flags = 0;
1704 if (!pUmDevice->opened)
1707 if (atomic_read(&pUmDevice->intr_sem) || pUmDevice->suspended) {
1708 pUmDevice->timer.expires = RUN_AT(pUmDevice->timer_interval);
1709 add_timer(&pUmDevice->timer);
1713 #ifdef INCLUDE_TBI_SUPPORT
1714 if ((pDevice->TbiFlags & TBI_POLLING_FLAGS) &&
1715 (--pUmDevice->poll_tbi_expiry <= 0)) {
1717 BCM5700_PHY_LOCK(pUmDevice, flags);
1718 value32 = REG_RD(pDevice, MacCtrl.Status);
1719 if (((pDevice->LinkStatus == LM_STATUS_LINK_ACTIVE) &&
1720 ((value32 & (MAC_STATUS_LINK_STATE_CHANGED |
1721 MAC_STATUS_CFG_CHANGED)) ||
1722 !(value32 & MAC_STATUS_PCS_SYNCED)))
1724 ((pDevice->LinkStatus != LM_STATUS_LINK_ACTIVE) &&
1725 (value32 & (MAC_STATUS_PCS_SYNCED |
1726 MAC_STATUS_SIGNAL_DETECTED))))
1728 LM_SetupPhy(pDevice);
1730 BCM5700_PHY_UNLOCK(pUmDevice, flags);
1731 pUmDevice->poll_tbi_expiry = pUmDevice->poll_tbi_interval;
1736 if (pUmDevice->delayed_link_ind > 0) {
1737 if (pUmDevice->delayed_link_ind == 1)
1738 MM_IndicateStatus(pDevice, pDevice->LinkStatus);
1740 pUmDevice->delayed_link_ind--;
1743 if (pUmDevice->crc_counter_expiry > 0)
1744 pUmDevice->crc_counter_expiry--;
1746 if (!pUmDevice->interrupt) {
1747 if (!(pDevice->Flags & USE_TAGGED_STATUS_FLAG)) {
1748 BCM5700_LOCK(pUmDevice, flags);
1749 if (pDevice->pStatusBlkVirt->Status & STATUS_BLOCK_UPDATED) {
1750 /* This will generate an interrupt */
1751 REG_WR(pDevice, Grc.LocalCtrl,
1752 pDevice->GrcLocalCtrl |
1753 GRC_MISC_LOCAL_CTRL_SET_INT);
1756 REG_WR(pDevice, HostCoalesce.Mode,
1757 pDevice->CoalesceMode |
1758 HOST_COALESCE_ENABLE |
1761 if (!(REG_RD(pDevice, DmaWrite.Mode) &
1762 DMA_WRITE_MODE_ENABLE)) {
1763 BCM5700_UNLOCK(pUmDevice, flags);
1767 BCM5700_UNLOCK(pUmDevice, flags);
1769 if (pUmDevice->tx_queued) {
1770 pUmDevice->tx_queued = 0;
1771 netif_wake_queue(dev);
1774 #if (LINUX_VERSION_CODE < 0x02032b)
1775 if ((QQ_GetEntryCnt(&pDevice->TxPacketFreeQ.Container) !=
1776 pDevice->TxPacketDescCnt) &&
1777 ((jiffies - dev->trans_start) > TX_TIMEOUT)) {
1779 printk(KERN_WARNING "%s: Tx hung\n", dev->name);
1785 #ifndef BCM_NAPI_RXPOLL
1786 if (pUmDevice->adaptive_coalesce) {
1787 pUmDevice->adaptive_expiry--;
1788 if (pUmDevice->adaptive_expiry == 0) {
1789 pUmDevice->adaptive_expiry = HZ /
1790 pUmDevice->timer_interval;
1791 bcm5700_adapt_coalesce(pUmDevice);
1796 if (QQ_GetEntryCnt(&pUmDevice->rx_out_of_buf_q.Container) >
1797 (unsigned int) pUmDevice->rx_buf_repl_panic_thresh) {
1798 /* Generate interrupt and let isr allocate buffers */
1799 REG_WR(pDevice, HostCoalesce.Mode, pDevice->CoalesceMode |
1800 HOST_COALESCE_ENABLE | HOST_COALESCE_NOW);
1804 if (pDevice->AsfFlags & ASF_ENABLED) {
1805 pUmDevice->asf_heartbeat--;
1806 if (pUmDevice->asf_heartbeat == 0) {
1807 if( (pDevice->Flags & UNDI_FIX_FLAG) ||
1808 (pDevice->Flags & ENABLE_PCIX_FIX_FLAG)) {
1809 MEM_WR_OFFSET(pDevice, T3_CMD_MAILBOX,
1810 T3_CMD_NICDRV_ALIVE2);
1811 MEM_WR_OFFSET(pDevice, T3_CMD_LENGTH_MAILBOX,
1813 MEM_WR_OFFSET(pDevice, T3_CMD_DATA_MAILBOX, 5);
1816 (T3_NIC_MBUF_POOL_ADDR +
1818 T3_CMD_NICDRV_ALIVE2, 1);
1820 (T3_NIC_MBUF_POOL_ADDR +
1821 T3_CMD_LENGTH_MAILBOX),4,1);
1823 (T3_NIC_MBUF_POOL_ADDR +
1824 T3_CMD_DATA_MAILBOX),5,1);
1827 value32 = REG_RD(pDevice, Grc.RxCpuEvent);
1828 REG_WR(pDevice, Grc.RxCpuEvent, value32 | BIT_14);
1829 pUmDevice->asf_heartbeat = (2 * HZ) /
1830 pUmDevice->timer_interval;
1835 if (pDevice->PhyFlags & PHY_IS_FIBER){
1836 BCM5700_PHY_LOCK(pUmDevice, flags);
1837 LM_5714_FamFiberCheckLink(pDevice);
1838 BCM5700_PHY_UNLOCK(pUmDevice, flags);
1841 pUmDevice->timer.expires = RUN_AT(pUmDevice->timer_interval);
1842 add_timer(&pUmDevice->timer);
1846 bcm5700_init_counters(PUM_DEVICE_BLOCK pUmDevice)
1849 #ifndef BCM_NAPI_RXPOLL
1850 LM_DEVICE_BLOCK *pDevice = &pUmDevice->lm_dev;
1852 pUmDevice->rx_curr_coalesce_frames = pDevice->RxMaxCoalescedFrames;
1853 pUmDevice->rx_curr_coalesce_ticks = pDevice->RxCoalescingTicks;
1854 pUmDevice->tx_curr_coalesce_frames = pDevice->TxMaxCoalescedFrames;
1855 pUmDevice->rx_last_cnt = 0;
1856 pUmDevice->tx_last_cnt = 0;
1859 pUmDevice->phy_crc_count = 0;
1861 pUmDevice->tx_zc_count = 0;
1862 pUmDevice->tx_chksum_count = 0;
1863 pUmDevice->tx_himem_count = 0;
1864 pUmDevice->rx_good_chksum_count = 0;
1865 pUmDevice->rx_bad_chksum_count = 0;
1868 pUmDevice->tso_pkt_count = 0;
1874 #ifndef BCM_NAPI_RXPOLL
1876 bcm5700_do_adapt_coalesce(PUM_DEVICE_BLOCK pUmDevice,
1877 int rx_frames, int rx_ticks, int tx_frames, int rx_frames_intr)
1879 unsigned long flags = 0;
1880 LM_DEVICE_BLOCK *pDevice = &pUmDevice->lm_dev;
1882 if (pUmDevice->do_global_lock) {
1883 if (spin_is_locked(&pUmDevice->global_lock))
1885 spin_lock_irqsave(&pUmDevice->global_lock, flags);
1887 pUmDevice->rx_curr_coalesce_frames = rx_frames;
1888 pUmDevice->rx_curr_coalesce_ticks = rx_ticks;
1889 pUmDevice->tx_curr_coalesce_frames = tx_frames;
1890 pUmDevice->rx_curr_coalesce_frames_intr = rx_frames_intr;
1891 REG_WR(pDevice, HostCoalesce.RxMaxCoalescedFrames, rx_frames);
1893 REG_WR(pDevice, HostCoalesce.RxCoalescingTicks, rx_ticks);
1895 REG_WR(pDevice, HostCoalesce.TxMaxCoalescedFrames, tx_frames);
1897 REG_WR(pDevice, HostCoalesce.RxMaxCoalescedFramesDuringInt,
1900 BCM5700_UNLOCK(pUmDevice, flags);
1905 bcm5700_adapt_coalesce(PUM_DEVICE_BLOCK pUmDevice)
1907 PLM_DEVICE_BLOCK pDevice = &pUmDevice->lm_dev;
1908 uint rx_curr_cnt, tx_curr_cnt, rx_delta, tx_delta, total_delta;
1910 rx_curr_cnt = pDevice->pStatsBlkVirt->ifHCInUcastPkts.Low;
1911 tx_curr_cnt = pDevice->pStatsBlkVirt->ifHCOutUcastPkts.Low;
1912 if ((rx_curr_cnt <= pUmDevice->rx_last_cnt) ||
1913 (tx_curr_cnt < pUmDevice->tx_last_cnt)) {
1915 /* skip if there is counter rollover */
1916 pUmDevice->rx_last_cnt = rx_curr_cnt;
1917 pUmDevice->tx_last_cnt = tx_curr_cnt;
1921 rx_delta = rx_curr_cnt - pUmDevice->rx_last_cnt;
1922 tx_delta = tx_curr_cnt - pUmDevice->tx_last_cnt;
1923 total_delta = (((rx_delta + rx_delta) + tx_delta) / 3) << 1;
1925 pUmDevice->rx_last_cnt = rx_curr_cnt;
1926 pUmDevice->tx_last_cnt = tx_curr_cnt;
1928 if (total_delta < ADAPTIVE_LO_PKT_THRESH) {
1929 if (pUmDevice->rx_curr_coalesce_frames !=
1930 ADAPTIVE_LO_RX_MAX_COALESCED_FRAMES) {
1932 bcm5700_do_adapt_coalesce(pUmDevice,
1933 ADAPTIVE_LO_RX_MAX_COALESCED_FRAMES,
1934 ADAPTIVE_LO_RX_COALESCING_TICKS,
1935 ADAPTIVE_LO_TX_MAX_COALESCED_FRAMES,
1936 ADAPTIVE_LO_RX_MAX_COALESCED_FRAMES_DURING_INT);
1939 else if (total_delta < ADAPTIVE_HI_PKT_THRESH) {
1940 if (pUmDevice->rx_curr_coalesce_frames !=
1941 DEFAULT_RX_MAX_COALESCED_FRAMES) {
1943 bcm5700_do_adapt_coalesce(pUmDevice,
1944 DEFAULT_RX_MAX_COALESCED_FRAMES,
1945 DEFAULT_RX_COALESCING_TICKS,
1946 DEFAULT_TX_MAX_COALESCED_FRAMES,
1947 DEFAULT_RX_MAX_COALESCED_FRAMES_DURING_INT);
1951 if (pUmDevice->rx_curr_coalesce_frames !=
1952 ADAPTIVE_HI_RX_MAX_COALESCED_FRAMES) {
1954 bcm5700_do_adapt_coalesce(pUmDevice,
1955 ADAPTIVE_HI_RX_MAX_COALESCED_FRAMES,
1956 ADAPTIVE_HI_RX_COALESCING_TICKS,
1957 ADAPTIVE_HI_TX_MAX_COALESCED_FRAMES,
1958 ADAPTIVE_HI_RX_MAX_COALESCED_FRAMES_DURING_INT);
1967 bcm5700_reset(struct net_device *dev)
1969 PUM_DEVICE_BLOCK pUmDevice = (PUM_DEVICE_BLOCK)dev->priv;
1970 PLM_DEVICE_BLOCK pDevice = (PLM_DEVICE_BLOCK) pUmDevice;
1971 unsigned long flags;
1975 if( (dev->features & NETIF_F_TSO) &&
1976 (pUmDevice->tx_full) ) {
1978 dev->features &= ~NETIF_F_TSO;
1982 netif_stop_queue(dev);
1983 bcm5700_intr_off(pUmDevice);
1984 BCM5700_PHY_LOCK(pUmDevice, flags);
1985 LM_ResetAdapter(pDevice);
1986 pDevice->InitDone = TRUE;
1987 bcm5700_do_rx_mode(dev);
1988 bcm5700_set_vlan_mode(pUmDevice);
1989 bcm5700_init_counters(pUmDevice);
1990 if (memcmp(dev->dev_addr, pDevice->NodeAddress, 6)) {
1991 LM_SetMacAddress(pDevice, dev->dev_addr);
1993 BCM5700_PHY_UNLOCK(pUmDevice, flags);
1994 atomic_set(&pUmDevice->intr_sem, 1);
1995 bcm5700_intr_on(pUmDevice);
1996 netif_wake_queue(dev);
2000 bcm5700_set_vlan_mode(UM_DEVICE_BLOCK *pUmDevice)
2002 LM_DEVICE_BLOCK *pDevice = &pUmDevice->lm_dev;
2003 LM_UINT32 ReceiveMask = pDevice->ReceiveMask;
2004 int vlan_tag_mode = pUmDevice->vlan_tag_mode;
2006 if (vlan_tag_mode == VLAN_TAG_MODE_AUTO_STRIP) {
2007 if (pDevice->AsfFlags & ASF_ENABLED) {
2008 vlan_tag_mode = VLAN_TAG_MODE_FORCED_STRIP;
2011 vlan_tag_mode = VLAN_TAG_MODE_NORMAL_STRIP;
2014 if (vlan_tag_mode == VLAN_TAG_MODE_NORMAL_STRIP) {
2015 ReceiveMask |= LM_KEEP_VLAN_TAG;
2017 if (pUmDevice->vlgrp)
2018 ReceiveMask &= ~LM_KEEP_VLAN_TAG;
2021 if (pUmDevice->nice_rx)
2022 ReceiveMask &= ~LM_KEEP_VLAN_TAG;
2025 else if (vlan_tag_mode == VLAN_TAG_MODE_FORCED_STRIP) {
2026 ReceiveMask &= ~LM_KEEP_VLAN_TAG;
2028 if (ReceiveMask != pDevice->ReceiveMask)
2030 LM_SetReceiveMask(pDevice, ReceiveMask);
2035 bcm5700_poll_wait(UM_DEVICE_BLOCK *pUmDevice)
2037 #ifdef BCM_NAPI_RXPOLL
2038 while (pUmDevice->lm_dev.RxPoll) {
2039 current->state = TASK_INTERRUPTIBLE;
2040 schedule_timeout(1);
2048 bcm5700_vlan_rx_register(struct net_device *dev, struct vlan_group *vlgrp)
2050 PUM_DEVICE_BLOCK pUmDevice = (PUM_DEVICE_BLOCK) dev->priv;
2052 bcm5700_intr_off(pUmDevice);
2053 bcm5700_poll_wait(pUmDevice);
2054 pUmDevice->vlgrp = vlgrp;
2055 bcm5700_set_vlan_mode(pUmDevice);
2056 bcm5700_intr_on(pUmDevice);
2060 bcm5700_vlan_rx_kill_vid(struct net_device *dev, uint16_t vid)
2062 PUM_DEVICE_BLOCK pUmDevice = (PUM_DEVICE_BLOCK) dev->priv;
2064 bcm5700_intr_off(pUmDevice);
2065 bcm5700_poll_wait(pUmDevice);
2066 if (pUmDevice->vlgrp) {
2067 pUmDevice->vlgrp->vlan_devices[vid] = NULL;
2069 bcm5700_intr_on(pUmDevice);
2074 bcm5700_start_xmit(struct sk_buff *skb, struct net_device *dev)
2076 PUM_DEVICE_BLOCK pUmDevice = (PUM_DEVICE_BLOCK)dev->priv;
2077 PLM_DEVICE_BLOCK pDevice = (PLM_DEVICE_BLOCK) pUmDevice;
2079 PUM_PACKET pUmPacket;
2080 unsigned long flags = 0;
2083 vlan_tag_t *vlan_tag;
2087 uint16_t ip_tcp_len, tcp_opt_len, tcp_seg_flags;
2090 if ((pDevice->LinkStatus == LM_STATUS_LINK_DOWN) ||
2091 !pDevice->InitDone || pUmDevice->suspended)
2097 #if (LINUX_VERSION_CODE < 0x02032b)
2098 if (test_and_set_bit(0, &dev->tbusy)) {
2103 if (pUmDevice->do_global_lock && pUmDevice->interrupt) {
2104 /* Fix for RQM 289636 */
2105 /* netif_stop_queue(dev); */
2106 bcm5700_netif_stop_queue(dev);
2107 pUmDevice->tx_queued = 1;
2108 if (!pUmDevice->interrupt) {
2109 netif_wake_queue(dev);
2110 pUmDevice->tx_queued = 0;
2115 pPacket = (PLM_PACKET)
2116 QQ_PopHead(&pDevice->TxPacketFreeQ.Container);
2118 /* Fix for RQM 289636 */
2119 /* netif_stop_queue(dev); */
2120 bcm5700_netif_stop_queue(dev);
2121 pUmDevice->tx_full = 1;
2122 if (QQ_GetEntryCnt(&pDevice->TxPacketFreeQ.Container)) {
2123 netif_wake_queue(dev);
2124 pUmDevice->tx_full = 0;
2128 pUmPacket = (PUM_PACKET) pPacket;
2129 pUmPacket->skbuff = skb;
2131 if (skb->ip_summed == CHECKSUM_HW) {
2132 pPacket->Flags = SND_BD_FLAG_TCP_UDP_CKSUM;
2134 pUmDevice->tx_chksum_count++;
2141 frag_no = skb_shinfo(skb)->nr_frags;
2145 if (atomic_read(&pDevice->SendBdLeft) < (frag_no + 1)) {
2146 /* Fix for RQM 289636 */
2147 /* netif_stop_queue(dev); */
2148 bcm5700_netif_stop_queue(dev);
2149 pUmDevice->tx_full = 1;
2150 QQ_PushHead(&pDevice->TxPacketFreeQ.Container, pPacket);
2151 if (atomic_read(&pDevice->SendBdLeft) >= (frag_no + 1)) {
2152 netif_wake_queue(dev);
2153 pUmDevice->tx_full = 0;
2158 pPacket->u.Tx.FragCount = frag_no + 1;
2160 if (pPacket->u.Tx.FragCount > 1)
2161 pUmDevice->tx_zc_count++;
2165 if (pUmDevice->vlgrp && vlan_tx_tag_present(skb)) {
2166 pPacket->VlanTag = vlan_tx_tag_get(skb);
2167 pPacket->Flags |= SND_BD_FLAG_VLAN_TAG;
2171 vlan_tag = (vlan_tag_t *) &skb->cb[0];
2172 if (vlan_tag->signature == 0x5555) {
2173 pPacket->VlanTag = vlan_tag->tag;
2174 pPacket->Flags |= SND_BD_FLAG_VLAN_TAG;
2175 vlan_tag->signature = 0;
2180 if ((mss = (LM_UINT32) skb_shinfo(skb)->tso_size) &&
2181 (skb->len > pDevice->TxMtu)) {
2183 #if (LINUX_VERSION_CODE >= 0x02060c)
2185 if (skb_header_cloned(skb) &&
2186 pskb_expand_head(skb, 0, 0, GFP_ATOMIC)) {
2192 pUmDevice->tso_pkt_count++;
2194 pPacket->Flags |= SND_BD_FLAG_CPU_PRE_DMA |
2195 SND_BD_FLAG_CPU_POST_DMA;
2198 if (skb->h.th->doff > 5) {
2199 tcp_opt_len = (skb->h.th->doff - 5) << 2;
2201 ip_tcp_len = (skb->nh.iph->ihl << 2) + sizeof(struct tcphdr);
2202 skb->nh.iph->check = 0;
2204 if ( T3_ASIC_IS_575X_PLUS(pDevice->ChipRevId) ){
2205 skb->h.th->check = 0;
2206 pPacket->Flags &= ~SND_BD_FLAG_TCP_UDP_CKSUM;
2209 skb->h.th->check = ~csum_tcpudp_magic(
2210 skb->nh.iph->saddr, skb->nh.iph->daddr,
2214 skb->nh.iph->tot_len = htons(mss + ip_tcp_len + tcp_opt_len);
2217 if (tcp_opt_len || (skb->nh.iph->ihl > 5)) {
2218 if ( T3_ASIC_IS_5705_BEYOND(pDevice->ChipRevId) ){
2220 ((skb->nh.iph->ihl - 5) +
2221 (tcp_opt_len >> 2)) << 11;
2225 ((skb->nh.iph->ihl - 5) +
2226 (tcp_opt_len >> 2)) << 12;
2229 pPacket->u.Tx.MaxSegmentSize = mss | tcp_seg_flags;
2233 pPacket->u.Tx.MaxSegmentSize = 0;
2236 BCM5700_LOCK(pUmDevice, flags);
2237 LM_SendPacket(pDevice, pPacket);
2238 BCM5700_UNLOCK(pUmDevice, flags);
2240 #if (LINUX_VERSION_CODE < 0x02032b)
2241 netif_wake_queue(dev);
2243 dev->trans_start = jiffies;
2249 #ifdef BCM_NAPI_RXPOLL
2251 bcm5700_poll(struct net_device *dev, int *budget)
2253 int orig_budget = *budget;
2255 UM_DEVICE_BLOCK *pUmDevice = (UM_DEVICE_BLOCK *) dev->priv;
2256 LM_DEVICE_BLOCK *pDevice = &pUmDevice->lm_dev;
2257 unsigned long flags = 0;
2260 if (orig_budget > dev->quota)
2261 orig_budget = dev->quota;
2263 BCM5700_LOCK(pUmDevice, flags);
2264 work_done = LM_ServiceRxPoll(pDevice, orig_budget);
2265 *budget -= work_done;
2266 dev->quota -= work_done;
2268 if (QQ_GetEntryCnt(&pUmDevice->rx_out_of_buf_q.Container)) {
2269 replenish_rx_buffers(pUmDevice, 0);
2271 BCM5700_UNLOCK(pUmDevice, flags);
2273 MM_IndicateRxPackets(pDevice);
2274 BCM5700_LOCK(pUmDevice, flags);
2275 LM_QueueRxPackets(pDevice);
2276 BCM5700_UNLOCK(pUmDevice, flags);
2278 if ((work_done < orig_budget) || atomic_read(&pUmDevice->intr_sem) ||
2279 pUmDevice->suspended) {
2281 netif_rx_complete(dev);
2282 BCM5700_LOCK(pUmDevice, flags);
2283 REG_WR(pDevice, Grc.Mode, pDevice->GrcMode);
2284 pDevice->RxPoll = FALSE;
2285 if (pDevice->RxPoll) {
2286 BCM5700_UNLOCK(pUmDevice, flags);
2289 /* Take care of possible missed rx interrupts */
2290 REG_RD_BACK(pDevice, Grc.Mode); /* flush the register write */
2291 tag = pDevice->pStatusBlkVirt->StatusTag;
2292 if ((pDevice->pStatusBlkVirt->Status & STATUS_BLOCK_UPDATED) ||
2293 (pDevice->pStatusBlkVirt->Idx[0].RcvProdIdx !=
2294 pDevice->RcvRetConIdx)) {
2296 REG_WR(pDevice, HostCoalesce.Mode,
2297 pDevice->CoalesceMode | HOST_COALESCE_ENABLE |
2300 /* If a new status block is pending in the WDMA state machine */
2301 /* before the register write to enable the rx interrupt, */
2302 /* the new status block may DMA with no interrupt. In this */
2303 /* scenario, the tag read above will be older than the tag in */
2304 /* the pending status block and writing the older tag will */
2305 /* cause interrupt to be generated. */
2306 else if (pDevice->Flags & USE_TAGGED_STATUS_FLAG) {
2307 MB_REG_WR(pDevice, Mailbox.Interrupt[0].Low,
2309 /* Make sure we service tx in case some tx interrupts */
2311 if (atomic_read(&pDevice->SendBdLeft) <
2312 (T3_SEND_RCB_ENTRY_COUNT / 2)) {
2313 REG_WR(pDevice, HostCoalesce.Mode,
2314 pDevice->CoalesceMode |
2315 HOST_COALESCE_ENABLE |
2319 BCM5700_UNLOCK(pUmDevice, flags);
2324 #endif /* BCM_NAPI_RXPOLL */
/* Hardware interrupt handler.
 *
 * Serializes against soft-disable (intr_sem) and re-entry, decides whether
 * the interrupt belongs to this device, services the status block (with a
 * tagged-status fast path when USE_TAGGED_STATUS_FLAG is set), replenishes
 * rx buffers either inline or via the tasklet, and finally wakes the tx
 * queue if transmit was throttled.
 *
 * NOTE(review): this listing has gaps (missing original lines), so some
 * braces/else arms and the declarations of repl_buf_count/dummy are not
 * visible here; comments below are hedged accordingly.
 */
2327 bcm5700_interrupt(int irq, void *dev_instance, struct pt_regs *regs)
2329 struct net_device *dev = (struct net_device *)dev_instance;
2330 PUM_DEVICE_BLOCK pUmDevice = (PUM_DEVICE_BLOCK)dev->priv;
2331 PLM_DEVICE_BLOCK pDevice = (PLM_DEVICE_BLOCK) pUmDevice;
2332 LM_UINT32 oldtag, newtag;
2333 int i, max_intr_loop;
2337 unsigned int handled = 1;
/* Ignore interrupts arriving before chip initialization completes. */
2339 if (!pDevice->InitDone) {
2341 return IRQ_RETVAL(handled);
2344 bcm5700_intr_lock(pUmDevice);
/* Interrupts are soft-disabled (intr_sem raised, e.g. around reset):
 * re-assert the mailbox mask and leave without servicing. */
2345 if (atomic_read(&pUmDevice->intr_sem)) {
2346 MB_REG_WR(pDevice, Mailbox.Interrupt[0].Low, 1);
2347 bcm5700_intr_unlock(pUmDevice);
2349 return IRQ_RETVAL(handled);
/* Re-entry guard: only one context may run the handler at a time. */
2352 if (test_and_set_bit(0, (void*)&pUmDevice->interrupt)) {
2353 printk(KERN_ERR "%s: Duplicate entry of the interrupt handler\n",
2355 bcm5700_intr_unlock(pUmDevice);
2357 return IRQ_RETVAL(handled);
/* Interrupt ownership test: MSI is never shared; for INTx, check the
 * status-block updated bit or the PCI interrupt-active state bit. */
2360 if ((pDevice->Flags & USING_MSI_FLAG) ||
2361 (pDevice->pStatusBlkVirt->Status & STATUS_BLOCK_UPDATED) ||
2362 !(REG_RD(pDevice,PciCfg.PciState) & T3_PCI_STATE_INTERRUPT_NOT_ACTIVE) )
/* ethtool interrupt self-test in progress: record whether this
 * interrupt was really asserted by our device. */
2365 if (pUmDevice->intr_test) {
2366 if (!(REG_RD(pDevice, PciCfg.PciState) &
2367 T3_PCI_STATE_INTERRUPT_NOT_ACTIVE) ||
2368 pDevice->Flags & USING_MSI_FLAG ) {
2369 pUmDevice->intr_test_result = 1;
2371 pUmDevice->intr_test = 0;
2374 #ifdef BCM_NAPI_RXPOLL
/* Tagged-status mode: ack via mailbox, then loop servicing until the
 * status tag stops changing (or max_intr_loop iterations), writing the
 * last-seen tag back to re-enable interrupts. */
2379 if (pDevice->Flags & USE_TAGGED_STATUS_FLAG) {
2380 MB_REG_WR(pDevice, Mailbox.Interrupt[0].Low, 1);
2381 oldtag = pDevice->pStatusBlkVirt->StatusTag;
2383 for (i = 0; ; i++) {
2384 pDevice->pStatusBlkVirt->Status &= ~STATUS_BLOCK_UPDATED;
2386 LM_ServiceInterrupts(pDevice);
2387 newtag = pDevice->pStatusBlkVirt->StatusTag;
/* Tag unchanged => no new events were DMA'd while servicing;
 * write the tag to the mailbox to re-arm the interrupt. */
2388 if ((newtag == oldtag) || (i > max_intr_loop)) {
2389 MB_REG_WR(pDevice, Mailbox.Interrupt[0].Low, oldtag << 24);
2390 pDevice->LastTag = oldtag;
/* UNDI workaround: pulse GRC local control bit 1. */
2391 if (pDevice->Flags & UNDI_FIX_FLAG) {
2392 REG_WR(pDevice, Grc.LocalCtrl,
2393 pDevice->GrcLocalCtrl | 0x2);
/* Non-tagged path (do/while, opening not visible in this listing):
 * mask, clear the updated bit, service, then unmask and flush. */
2406 MB_REG_WR(pDevice, Mailbox.Interrupt[0].Low, 1);
2407 pDevice->pStatusBlkVirt->Status &= ~STATUS_BLOCK_UPDATED;
2408 LM_ServiceInterrupts(pDevice);
2409 MB_REG_WR(pDevice, Mailbox.Interrupt[0].Low, 0);
/* Read back to flush the posted mailbox write. */
2410 dummy = MB_REG_RD(pDevice, Mailbox.Interrupt[0].Low);
2413 while ((pDevice->pStatusBlkVirt->Status & STATUS_BLOCK_UPDATED) &&
2414 (i < max_intr_loop));
2416 if (pDevice->Flags & UNDI_FIX_FLAG) {
2417 REG_WR(pDevice, Grc.LocalCtrl,
2418 pDevice->GrcLocalCtrl | 0x2);
2424 /* not my interrupt */
/* Rx buffer replenishment: if the out-of-buffer queue exceeds the panic
 * threshold (or queueing must be retried), refill inline in the ISR;
 * otherwise, above the normal threshold, defer to the tasklet. */
2429 repl_buf_count = QQ_GetEntryCnt(&pUmDevice->rx_out_of_buf_q.Container);
2430 if (((repl_buf_count > pUmDevice->rx_buf_repl_panic_thresh) ||
2431 pDevice->QueueAgain) &&
2432 (!test_and_set_bit(0, &pUmDevice->tasklet_busy))) {
2434 replenish_rx_buffers(pUmDevice, pUmDevice->rx_buf_repl_isr_limit);
2435 clear_bit(0, (void*)&pUmDevice->tasklet_busy);
2437 else if ((repl_buf_count > pUmDevice->rx_buf_repl_thresh) &&
2438 !pUmDevice->tasklet_pending) {
2440 pUmDevice->tasklet_pending = 1;
2441 tasklet_schedule(&pUmDevice->tasklet);
2444 #ifdef BCM_NAPI_RXPOLL
/* NAPI build: kick the rx-poll state machine if buffers are pending. */
2445 if (!pDevice->RxPoll &&
2446 QQ_GetEntryCnt(&pUmDevice->rx_out_of_buf_q.Container)) {
2447 pDevice->RxPoll = 1;
2448 MM_ScheduleRxPoll(pDevice);
2451 if (QQ_GetEntryCnt(&pUmDevice->rx_out_of_buf_q.Container)) {
2452 replenish_rx_buffers(pUmDevice, 0);
2455 if (QQ_GetEntryCnt(&pDevice->RxPacketFreeQ.Container) ||
2456 pDevice->QueueAgain) {
2458 LM_QueueRxPackets(pDevice);
/* Release the re-entry guard and the interrupt lock. */
2463 clear_bit(0, (void*)&pUmDevice->interrupt);
2464 bcm5700_intr_unlock(pUmDevice);
/* Restart transmit if it was stopped while we held the lock. */
2465 if (pUmDevice->tx_queued) {
2466 pUmDevice->tx_queued = 0;
2467 netif_wake_queue(dev);
2469 return IRQ_RETVAL(handled);
/* Deferred rx-buffer replenishment tasklet.
 * Scheduled from the interrupt handler when the out-of-buffer queue
 * exceeds rx_buf_repl_thresh; refills rx buffers outside hard-IRQ
 * context while the device is open and not suspended.
 */
2475 bcm5700_tasklet(unsigned long data)
2477 PUM_DEVICE_BLOCK pUmDevice = (PUM_DEVICE_BLOCK)data;
2478 unsigned long flags = 0;
2480 /* RH 7.2 Beta 3 tasklets are reentrant */
/* Another instance is already running: just drop the pending flag
 * and bail (early return not visible in this listing). */
2481 if (test_and_set_bit(0, &pUmDevice->tasklet_busy)) {
2482 pUmDevice->tasklet_pending = 0;
2486 pUmDevice->tasklet_pending = 0;
2487 if (pUmDevice->opened && !pUmDevice->suspended) {
2488 BCM5700_LOCK(pUmDevice, flags);
2489 replenish_rx_buffers(pUmDevice, 0);
2490 BCM5700_UNLOCK(pUmDevice, flags);
2493 clear_bit(0, &pUmDevice->tasklet_busy);
/* net_device close (ifdown) handler.
 * Stops the queue, shuts down the chip, tears down timers and the IRQ
 * (including MSI), optionally drops to D3 power state, frees DMA memory,
 * and re-initializes the rx free-packet queue for the next open.
 * NOTE(review): several lines (WOL handling under the AsfFlags/enable_wol
 * checks, MOD_DEC_USE_COUNT, return) are missing from this listing.
 */
2498 bcm5700_close(struct net_device *dev)
2501 PUM_DEVICE_BLOCK pUmDevice = (PUM_DEVICE_BLOCK)dev->priv;
2502 PLM_DEVICE_BLOCK pDevice = (PLM_DEVICE_BLOCK) pUmDevice;
2504 #if (LINUX_VERSION_CODE < 0x02032b)
2507 /* Fix for RQM 289636 */
2508 /* netif_stop_queue(dev); */
2509 bcm5700_netif_stop_queue(dev);
2510 pUmDevice->opened = 0;
/* Only announce/act when management firmware (ASF) is not holding the
 * device; body of these conditions is not visible in this listing. */
2513 if( !(pDevice->AsfFlags & ASF_ENABLED) )
2516 if( enable_wol[pUmDevice->index] == 0 )
2518 printk(KERN_INFO "%s: %s NIC Link is DOWN\n", bcm5700_driver, dev->name);
2520 if (tigon3_debug > 1)
2521 printk(KERN_DEBUG "%s: Shutting down Tigon3\n",
2524 LM_MulticastClear(pDevice);
2525 bcm5700_shutdown(pUmDevice);
/* 5705+ chips keep statistics via a host timer; stop it. */
2527 if (T3_ASIC_IS_5705_BEYOND(pDevice->ChipRevId)) {
2528 del_timer_sync(&pUmDevice->statstimer);
2531 del_timer_sync(&pUmDevice->timer);
2533 free_irq(pUmDevice->pdev->irq, dev);
2535 #if defined(CONFIG_PCI_MSI) || defined(CONFIG_PCI_USE_VECTOR)
/* Disable MSI and restore the chip's MSI mode register. */
2537 if(pDevice->Flags & USING_MSI_FLAG) {
2538 pci_disable_msi(pUmDevice->pdev);
2539 REG_WR(pDevice, Msi.Mode, 1 );
2540 pDevice->Flags &= ~USING_MSI_FLAG;
2546 #if (LINUX_VERSION_CODE < 0x020300)
2551 LM_SetPowerState(pDevice, LM_POWER_STATE_D3);
2554 bcm5700_freemem(dev);
/* Reset the rx free-packet queue so a subsequent open starts clean. */
2556 QQ_InitQueue(&pDevice->RxPacketFreeQ.Container,
2557 MAX_RX_PACKET_DESC_COUNT);
/* Frees all driver memory tracked in pUmDevice->mem_list[].
 * Entries with size 0 were kmalloc'd; entries with a nonzero size were
 * allocated with pci_alloc_consistent and are returned via
 * pci_free_consistent with their recorded DMA address.  Also clears the
 * status/stats block pointers and, when the device is closed, frees the
 * ioctl-allocated mem_list2[] entries.
 */
2563 bcm5700_freemem(struct net_device *dev)
2566 PUM_DEVICE_BLOCK pUmDevice = (PUM_DEVICE_BLOCK)dev->priv;
2567 LM_DEVICE_BLOCK *pDevice = &pUmDevice->lm_dev;
2569 for (i = 0; i < pUmDevice->mem_list_num; i++) {
/* size 0 marks a kmalloc allocation (no DMA mapping). */
2570 if (pUmDevice->mem_size_list[i] == 0) {
2571 kfree(pUmDevice->mem_list[i]);
2574 pci_free_consistent(pUmDevice->pdev,
2575 (size_t) pUmDevice->mem_size_list[i],
2576 pUmDevice->mem_list[i],
2577 pUmDevice->dma_list[i]);
/* Invalidate cached block pointers now that backing memory is gone. */
2581 pDevice->pStatusBlkVirt = 0;
2582 pDevice->pStatsBlkVirt = 0;
2583 pUmDevice->mem_list_num = 0;
/* Only free the ioctl-mapped regions once the device is fully closed. */
2586 if (!pUmDevice->opened) {
2587 for (i = 0; i < MAX_MEM2; i++) {
2588 if (pUmDevice->mem_size_list2[i]) {
2589 bcm5700_freemem2(pUmDevice, i);
2598 /* Frees consistent memory allocated through ioctl */
2599 /* The memory to be freed is in mem_list2[index] */
/* Un-reserves every page of the region (pages were presumably marked
 * reserved at allocation time so they could be mmap'd — the loop over
 * pg..last_pg is partially missing from this listing), then releases
 * the DMA-consistent buffer and clears the bookkeeping size. */
2601 bcm5700_freemem2(UM_DEVICE_BLOCK *pUmDevice, int index)
2603 #if (LINUX_VERSION_CODE >= 0x020400)
2605 struct page *pg, *last_pg;
2607 /* Probably won't work on some architectures */
2608 ptr = pUmDevice->mem_list2[index],
2609 pg = virt_to_page(ptr);
2610 last_pg = virt_to_page(ptr + pUmDevice->mem_size_list2[index] - 1);
2612 #if (LINUX_VERSION_CODE > 0x020500)
2613 ClearPageReserved(pg);
2615 mem_map_unreserve(pg);
2620 pci_free_consistent(pUmDevice->pdev,
2621 (size_t) pUmDevice->mem_size_list2[index],
2622 pUmDevice->mem_list2[index],
2623 pUmDevice->dma_list2[index]);
/* Mark the slot free for reuse. */
2624 pUmDevice->mem_size_list2[index] = 0;
/* Returns the accumulated rx CRC error count.
 * On 5700/5701 copper parts the MAC stats block does not carry this
 * counter, so it is read from PHY register 0x14 (after enabling the
 * counter via bit 15 of register 0x1e) and accumulated in software,
 * rate-limited to one MDIO read per ~5 seconds.  All other chips use
 * dot3StatsFCSErrors from the hardware stats block.
 */
2631 bcm5700_crc_count(PUM_DEVICE_BLOCK pUmDevice)
2633 PLM_DEVICE_BLOCK pDevice = &pUmDevice->lm_dev;
2635 PT3_STATS_BLOCK pStats = (PT3_STATS_BLOCK) pDevice->pStatsBlkVirt;
2636 unsigned long flags;
2638 if ((T3_ASIC_REV(pDevice->ChipRevId) == T3_ASIC_REV_5700 ||
2639 T3_ASIC_REV(pDevice->ChipRevId) == T3_ASIC_REV_5701) &&
2640 !(pDevice->TbiFlags & ENABLE_TBI_FLAG)) {
/* PHY not accessible unless the device is up and initialized. */
2642 if (!pUmDevice->opened || !pDevice->InitDone)
2648 /* regulate MDIO access during run time */
2649 if (pUmDevice->crc_counter_expiry > 0)
2650 return pUmDevice->phy_crc_count;
/* Re-arm the rate limiter: ~5 seconds worth of timer ticks. */
2652 pUmDevice->crc_counter_expiry = (5 * HZ) /
2653 pUmDevice->timer_interval;
2655 BCM5700_PHY_LOCK(pUmDevice, flags);
/* Ensure the PHY CRC counter is enabled (bit 15 of reg 0x1e). */
2656 LM_ReadPhy(pDevice, 0x1e, &Value32);
2657 if ((Value32 & 0x8000) == 0)
2658 LM_WritePhy(pDevice, 0x1e, Value32 | 0x8000);
2659 LM_ReadPhy(pDevice, 0x14, &Value32);
2660 BCM5700_PHY_UNLOCK(pUmDevice, flags);
2661 /* Sometimes data on the MDIO bus can be corrupted */
2662 if (Value32 != 0xffff)
2663 pUmDevice->phy_crc_count += Value32;
2664 return pUmDevice->phy_crc_count;
/* No stats block yet (e.g. before allocation): body not visible here. */
2666 else if (pStats == 0) {
2670 return (MM_GETSTATS64(pStats->dot3StatsFCSErrors));
/* Aggregate rx error count: CRC errors (via bcm5700_crc_count, which
 * handles the 5700/5701 PHY-counter special case) plus alignment,
 * undersize, fragment, oversize, and jabber counters from the hardware
 * stats block.  NOTE(review): the listing omits the pStats NULL guard
 * presumably present before the return.
 */
2675 bcm5700_rx_err_count(UM_DEVICE_BLOCK *pUmDevice)
2677 LM_DEVICE_BLOCK *pDevice = &pUmDevice->lm_dev;
2678 T3_STATS_BLOCK *pStats = (T3_STATS_BLOCK *) pDevice->pStatsBlkVirt;
2682 return (bcm5700_crc_count(pUmDevice) +
2683 MM_GETSTATS64(pStats->dot3StatsAlignmentErrors) +
2684 MM_GETSTATS64(pStats->etherStatsUndersizePkts) +
2685 MM_GETSTATS64(pStats->etherStatsFragments) +
2686 MM_GETSTATS64(pStats->dot3StatsFramesTooLong) +
2687 MM_GETSTATS64(pStats->etherStatsJabbers));
/* net_device get_stats handler: translates the chip's hardware
 * statistics block into the kernel's struct net_device_stats.
 * NOTE(review): the listing omits the pStats NULL guard and the
 * final return of p_netstats.
 */
2690 STATIC struct net_device_stats *
2691 bcm5700_get_stats(struct net_device *dev)
2693 PUM_DEVICE_BLOCK pUmDevice = (PUM_DEVICE_BLOCK)dev->priv;
2694 PLM_DEVICE_BLOCK pDevice = (PLM_DEVICE_BLOCK) pUmDevice;
2695 PT3_STATS_BLOCK pStats = (PT3_STATS_BLOCK) pDevice->pStatsBlkVirt;
2696 struct net_device_stats *p_netstats = &pUmDevice->stats;
2701 /* Get stats from LM */
/* rx/tx packets are the sum of unicast + multicast + broadcast. */
2702 p_netstats->rx_packets =
2703 MM_GETSTATS(pStats->ifHCInUcastPkts) +
2704 MM_GETSTATS(pStats->ifHCInMulticastPkts) +
2705 MM_GETSTATS(pStats->ifHCInBroadcastPkts);
2706 p_netstats->tx_packets =
2707 MM_GETSTATS(pStats->ifHCOutUcastPkts) +
2708 MM_GETSTATS(pStats->ifHCOutMulticastPkts) +
2709 MM_GETSTATS(pStats->ifHCOutBroadcastPkts);
2710 p_netstats->rx_bytes = MM_GETSTATS(pStats->ifHCInOctets);
2711 p_netstats->tx_bytes = MM_GETSTATS(pStats->ifHCOutOctets);
2712 p_netstats->tx_errors =
2713 MM_GETSTATS(pStats->dot3StatsInternalMacTransmitErrors) +
2714 MM_GETSTATS(pStats->dot3StatsCarrierSenseErrors) +
2715 MM_GETSTATS(pStats->ifOutDiscards) +
2716 MM_GETSTATS(pStats->ifOutErrors);
2717 p_netstats->multicast = MM_GETSTATS(pStats->ifHCInMulticastPkts);
2718 p_netstats->collisions = MM_GETSTATS(pStats->etherStatsCollisions);
2719 p_netstats->rx_length_errors =
2720 MM_GETSTATS(pStats->dot3StatsFramesTooLong) +
2721 MM_GETSTATS(pStats->etherStatsUndersizePkts);
2722 p_netstats->rx_over_errors = MM_GETSTATS(pStats->nicNoMoreRxBDs);
2723 p_netstats->rx_frame_errors =
2724 MM_GETSTATS(pStats->dot3StatsAlignmentErrors);
/* CRC and total rx errors go through the helpers, which handle the
 * 5700/5701 PHY-based CRC counter. */
2725 p_netstats->rx_crc_errors = (unsigned long)
2726 bcm5700_crc_count(pUmDevice);
2727 p_netstats->rx_errors = (unsigned long)
2728 bcm5700_rx_err_count(pUmDevice);
2730 p_netstats->tx_aborted_errors = MM_GETSTATS(pStats->ifOutDiscards);
2731 p_netstats->tx_carrier_errors =
2732 MM_GETSTATS(pStats->dot3StatsCarrierSenseErrors);
/* Quiesces the chip for suspend/self-test: masks interrupts, marks the
 * link down, stops the tx queue, kills the replenish tasklet, waits for
 * any in-flight poll to finish, then resets the chip in suspend mode.
 */
2738 b57_suspend_chip(UM_DEVICE_BLOCK *pUmDevice)
2740 LM_DEVICE_BLOCK *pDevice = &pUmDevice->lm_dev;
2742 if (pUmDevice->opened) {
2743 bcm5700_intr_off(pUmDevice);
2744 netif_carrier_off(pUmDevice->dev);
2745 netif_stop_queue(pUmDevice->dev);
2747 tasklet_kill(&pUmDevice->tasklet);
/* Wait until no CPU is still inside the ISR/poll paths. */
2749 bcm5700_poll_wait(pUmDevice);
2751 pUmDevice->suspended = 1;
2752 LM_ShutdownChip(pDevice, LM_SUSPEND_RESET);
/* Reverses b57_suspend_chip: clears the suspended flag and either
 * re-initializes the device (if it was open) or leaves the chip in a
 * shutdown-reset state (else arm partially missing from this listing).
 */
2756 b57_resume_chip(UM_DEVICE_BLOCK *pUmDevice)
2758 LM_DEVICE_BLOCK *pDevice = &pUmDevice->lm_dev;
2760 if (pUmDevice->suspended) {
2761 pUmDevice->suspended = 0;
2762 if (pUmDevice->opened) {
2763 bcm5700_reset(pUmDevice->dev);
2766 LM_ShutdownChip(pDevice, LM_SHUTDOWN_RESET);
2771 /* Returns 0 on failure, 1 on success */
/* ethtool interrupt self-test: arms intr_test, forces an interrupt via
 * the host-coalescing NOW bit, and polls up to ~10ms for the ISR to set
 * intr_test_result (see the intr_test branch in bcm5700_interrupt). */
2773 b57_test_intr(UM_DEVICE_BLOCK *pUmDevice)
2775 LM_DEVICE_BLOCK *pDevice = &pUmDevice->lm_dev;
2778 if (!pUmDevice->opened)
2780 pUmDevice->intr_test_result = 0;
2781 pUmDevice->intr_test = 1;
/* Request an immediate interrupt from the coalescing engine. */
2783 REG_WR(pDevice, HostCoalesce.Mode,
2784 pDevice->CoalesceMode | HOST_COALESCE_ENABLE |
/* Poll for up to 10 x 1ms for the handler to confirm delivery,
 * re-triggering the coalescing interrupt each iteration. */
2787 for (j = 0; j < 10; j++) {
2788 if (pUmDevice->intr_test_result){
2792 REG_WR(pDevice, HostCoalesce.Mode,
2793 pDevice->CoalesceMode | HOST_COALESCE_ENABLE |
2796 MM_Sleep(pDevice, 1);
2799 return pUmDevice->intr_test_result;
2805 #ifdef ETHTOOL_GSTRINGS
/* ethtool statistics support: ETH_NUM_STATS named counters.  The two
 * *_IDX constants mark entries that have no hardware stats-block field
 * (offset 0 below) and are computed in software by bcm5700_crc_count()
 * and bcm5700_rx_err_count() in the ETHTOOL_GSTATS handler. */
2807 #define ETH_NUM_STATS 30
2808 #define RX_CRC_IDX 5
2809 #define RX_MAC_ERR_IDX 14
/* Display names reported for ETHTOOL_GSTRINGS; order must match
 * bcm5700_stats_offset_arr below. */
2812 char string[ETH_GSTRING_LEN];
2813 } bcm5700_stats_str_arr[ETH_NUM_STATS] = {
2814 { "rx_unicast_packets" },
2815 { "rx_multicast_packets" },
2816 { "rx_broadcast_packets" },
2819 { "rx_crc_errors" }, /* this needs to be calculated */
2820 { "rx_align_errors" },
2821 { "rx_xon_frames" },
2822 { "rx_xoff_frames" },
2823 { "rx_long_frames" },
2824 { "rx_short_frames" },
2828 { "rx_mac_errors" }, /* this needs to be calculated */
2829 { "tx_unicast_packets" },
2830 { "tx_multicast_packets" },
2831 { "tx_broadcast_packets" },
2834 { "tx_single_collisions" },
2835 { "tx_multi_collisions" },
2836 { "tx_total_collisions" },
2837 { "tx_excess_collisions" },
2838 { "tx_late_collisions" },
2839 { "tx_xon_frames" },
2840 { "tx_xoff_frames" },
2841 { "tx_internal_mac_errors" },
2842 { "tx_carrier_errors" },
/* Offset of a stats-block field, expressed in 64-bit words. */
2846 #define STATS_OFFSET(offset_name) ((OFFSETOF(T3_STATS_BLOCK, offset_name)) / sizeof(uint64_t))
/* Byte-order helper for reading the 64-bit hardware counters; a no-op
 * on one endianness, a 32-bit-halves swap on the other (the #if guard
 * lines are not visible in this listing). */
2849 #define SWAP_DWORD_64(x) (x)
2851 #define SWAP_DWORD_64(x) ((x << 32) | (x >> 32))
/* Per-statistic offsets into T3_STATS_BLOCK; parallel to
 * bcm5700_stats_str_arr above.  Entries at RX_CRC_IDX/RX_MAC_ERR_IDX
 * are absent here (computed in software instead). */
2854 unsigned long bcm5700_stats_offset_arr[ETH_NUM_STATS] = {
2855 STATS_OFFSET(ifHCInUcastPkts),
2856 STATS_OFFSET(ifHCInMulticastPkts),
2857 STATS_OFFSET(ifHCInBroadcastPkts),
2858 STATS_OFFSET(ifHCInOctets),
2859 STATS_OFFSET(etherStatsFragments),
2861 STATS_OFFSET(dot3StatsAlignmentErrors),
2862 STATS_OFFSET(xonPauseFramesReceived),
2863 STATS_OFFSET(xoffPauseFramesReceived),
2864 STATS_OFFSET(dot3StatsFramesTooLong),
2865 STATS_OFFSET(etherStatsUndersizePkts),
2866 STATS_OFFSET(etherStatsJabbers),
2867 STATS_OFFSET(ifInDiscards),
2868 STATS_OFFSET(ifInErrors),
2870 STATS_OFFSET(ifHCOutUcastPkts),
2871 STATS_OFFSET(ifHCOutMulticastPkts),
2872 STATS_OFFSET(ifHCOutBroadcastPkts),
2873 STATS_OFFSET(ifHCOutOctets),
2874 STATS_OFFSET(dot3StatsDeferredTransmissions),
2875 STATS_OFFSET(dot3StatsSingleCollisionFrames),
2876 STATS_OFFSET(dot3StatsMultipleCollisionFrames),
2877 STATS_OFFSET(etherStatsCollisions),
2878 STATS_OFFSET(dot3StatsExcessiveCollisions),
2879 STATS_OFFSET(dot3StatsLateCollisions),
2880 STATS_OFFSET(outXonSent),
2881 STATS_OFFSET(outXoffSent),
2882 STATS_OFFSET(dot3StatsInternalMacTransmitErrors),
2883 STATS_OFFSET(dot3StatsCarrierSenseErrors),
2884 STATS_OFFSET(ifOutErrors),
2887 #endif /* ETHTOOL_GSTRINGS */
/* ethtool self-test support: names reported for the ETHTOOL_TEST
 * string set, in the order the tests are run, plus prototypes for the
 * test implementations (defined in another translation unit). */
2890 #define ETH_NUM_TESTS 6
2892 char string[ETH_GSTRING_LEN];
2893 } bcm5700_tests_str_arr[ETH_NUM_TESTS] = {
2894 { "register test (offline)" },
2895 { "memory test (offline)" },
2896 { "loopback test (offline)" },
2897 { "nvram test (online)" },
2898 { "interrupt test (online)" },
2899 { "link test (online)" },
2902 extern LM_STATUS b57_test_registers(UM_DEVICE_BLOCK *pUmDevice);
2903 extern LM_STATUS b57_test_memory(UM_DEVICE_BLOCK *pUmDevice);
2904 extern LM_STATUS b57_test_nvram(UM_DEVICE_BLOCK *pUmDevice);
2905 extern LM_STATUS b57_test_link(UM_DEVICE_BLOCK *pUmDevice);
2906 extern LM_STATUS b57_test_loopback(UM_DEVICE_BLOCK *pUmDevice, int looptype, int linespeed);
2909 #ifdef ETHTOOL_GREGS
2910 #if (LINUX_VERSION_CODE >= 0x02040f)
/* Copies one register range [start, end) into the caller's buffer for
 * the ETHTOOL_GREGS dump, advancing *buf past the range.  With the
 * (unnamed in this listing) final flag set, the range is a known hole
 * and is zero-filled instead of read.  On 5705+ chips, three additional
 * sub-ranges are unreadable and are skipped (left zero); the zeroing/
 * continue lines for that branch are not visible here. */
2912 bcm5700_get_reg_blk(UM_DEVICE_BLOCK *pUmDevice, u32 **buf, u32 start, u32 end,
2916 LM_DEVICE_BLOCK *pDevice = &pUmDevice->lm_dev;
/* Hole: zero the span and advance the output cursor (4 bytes/reg). */
2919 memset(*buf, 0, end - start);
2920 *buf = *buf + (end - start)/4;
2923 for (offset = start; offset < end; offset+=4, *buf = *buf + 1) {
2924 if (T3_ASIC_IS_5705_BEYOND(pDevice->ChipRevId)){
2925 if (((offset >= 0x3400) && (offset < 0x3c00)) ||
2926 ((offset >= 0x5400) && (offset < 0x5800)) ||
2927 ((offset >= 0x6400) && (offset < 0x6800))) {
2932 **buf = REG_RD_OFFSET(pDevice, offset);
2938 static int netdev_ethtool_ioctl(struct net_device *dev, void *useraddr)
2940 struct ethtool_cmd ethcmd;
2941 PUM_DEVICE_BLOCK pUmDevice = (PUM_DEVICE_BLOCK)dev->priv;
2942 PLM_DEVICE_BLOCK pDevice = (PLM_DEVICE_BLOCK) pUmDevice;
2944 if (mm_copy_from_user(ðcmd, useraddr, sizeof(ethcmd)))
2947 switch (ethcmd.cmd) {
2948 #ifdef ETHTOOL_GDRVINFO
2949 case ETHTOOL_GDRVINFO: {
2950 struct ethtool_drvinfo info = {ETHTOOL_GDRVINFO};
2952 strcpy(info.driver, bcm5700_driver);
2953 #ifdef INCLUDE_5701_AX_FIX
2954 if(pDevice->ChipRevId == T3_CHIP_ID_5701_A0) {
2955 extern int t3FwReleaseMajor;
2956 extern int t3FwReleaseMinor;
2957 extern int t3FwReleaseFix;
2959 sprintf(info.fw_version, "%i.%i.%i",
2960 t3FwReleaseMajor, t3FwReleaseMinor,
2964 strcpy(info.fw_version, pDevice->BootCodeVer);
2965 strcpy(info.version, bcm5700_version);
2966 #if (LINUX_VERSION_CODE <= 0x020422)
2967 strcpy(info.bus_info, pUmDevice->pdev->slot_name);
2969 strcpy(info.bus_info, pci_name(pUmDevice->pdev));
2974 #ifdef ETHTOOL_GEEPROM
2975 BCM_EEDUMP_LEN(&info, pDevice->NvramSize);
2977 #ifdef ETHTOOL_GREGS
2978 /* dump everything, including holes in the register space */
2979 info.regdump_len = 0x6c00;
2981 #ifdef ETHTOOL_GSTATS
2982 info.n_stats = ETH_NUM_STATS;
2985 info.testinfo_len = ETH_NUM_TESTS;
2987 if (mm_copy_to_user(useraddr, &info, sizeof(info)))
2992 case ETHTOOL_GSET: {
2993 if ((pDevice->TbiFlags & ENABLE_TBI_FLAG)||
2994 (pDevice->PhyFlags & PHY_IS_FIBER)) {
2996 (SUPPORTED_1000baseT_Full |
2998 ethcmd.supported |= SUPPORTED_FIBRE;
2999 ethcmd.port = PORT_FIBRE;
3003 (SUPPORTED_10baseT_Half |
3004 SUPPORTED_10baseT_Full |
3005 SUPPORTED_100baseT_Half |
3006 SUPPORTED_100baseT_Full |
3007 SUPPORTED_1000baseT_Half |
3008 SUPPORTED_1000baseT_Full |
3010 ethcmd.supported |= SUPPORTED_TP;
3011 ethcmd.port = PORT_TP;
3014 ethcmd.transceiver = XCVR_INTERNAL;
3015 ethcmd.phy_address = 0;
3017 if (pDevice->LineSpeed == LM_LINE_SPEED_1000MBPS)
3018 ethcmd.speed = SPEED_1000;
3019 else if (pDevice->LineSpeed == LM_LINE_SPEED_100MBPS)
3020 ethcmd.speed = SPEED_100;
3021 else if (pDevice->LineSpeed == LM_LINE_SPEED_10MBPS)
3022 ethcmd.speed = SPEED_10;
3026 if (pDevice->DuplexMode == LM_DUPLEX_MODE_FULL)
3027 ethcmd.duplex = DUPLEX_FULL;
3029 ethcmd.duplex = DUPLEX_HALF;
3031 if (pDevice->DisableAutoNeg == FALSE) {
3032 ethcmd.autoneg = AUTONEG_ENABLE;
3033 ethcmd.advertising = ADVERTISED_Autoneg;
3034 if ((pDevice->TbiFlags & ENABLE_TBI_FLAG) ||
3035 (pDevice->PhyFlags & PHY_IS_FIBER)) {
3036 ethcmd.advertising |=
3037 ADVERTISED_1000baseT_Full |
3041 ethcmd.advertising |=
3043 if (pDevice->advertising &
3044 PHY_AN_AD_10BASET_HALF) {
3046 ethcmd.advertising |=
3047 ADVERTISED_10baseT_Half;
3049 if (pDevice->advertising &
3050 PHY_AN_AD_10BASET_FULL) {
3052 ethcmd.advertising |=
3053 ADVERTISED_10baseT_Full;
3055 if (pDevice->advertising &
3056 PHY_AN_AD_100BASETX_HALF) {
3058 ethcmd.advertising |=
3059 ADVERTISED_100baseT_Half;
3061 if (pDevice->advertising &
3062 PHY_AN_AD_100BASETX_FULL) {
3064 ethcmd.advertising |=
3065 ADVERTISED_100baseT_Full;
3067 if (pDevice->advertising1000 &
3068 BCM540X_AN_AD_1000BASET_HALF) {
3070 ethcmd.advertising |=
3071 ADVERTISED_1000baseT_Half;
3073 if (pDevice->advertising1000 &
3074 BCM540X_AN_AD_1000BASET_FULL) {
3076 ethcmd.advertising |=
3077 ADVERTISED_1000baseT_Full;
3082 ethcmd.autoneg = AUTONEG_DISABLE;
3083 ethcmd.advertising = 0;
3086 ethcmd.maxtxpkt = pDevice->TxMaxCoalescedFrames;
3087 ethcmd.maxrxpkt = pDevice->RxMaxCoalescedFrames;
3089 if(mm_copy_to_user(useraddr, ðcmd, sizeof(ethcmd)))
3093 case ETHTOOL_SSET: {
3094 unsigned long flags;
3096 if(!capable(CAP_NET_ADMIN))
3098 if (ethcmd.autoneg == AUTONEG_ENABLE) {
3099 pDevice->RequestedLineSpeed = LM_LINE_SPEED_AUTO;
3100 pDevice->RequestedDuplexMode = LM_DUPLEX_MODE_UNKNOWN;
3101 pDevice->DisableAutoNeg = FALSE;
3104 if (ethcmd.speed == SPEED_1000 &&
3105 pDevice->PhyFlags & PHY_NO_GIGABIT)
3108 if (ethcmd.speed == SPEED_1000 &&
3109 (pDevice->TbiFlags & ENABLE_TBI_FLAG ||
3110 pDevice->PhyFlags & PHY_IS_FIBER ) ) {
3112 pDevice->RequestedLineSpeed =
3113 LM_LINE_SPEED_1000MBPS;
3115 pDevice->RequestedDuplexMode =
3116 LM_DUPLEX_MODE_FULL;
3118 else if (ethcmd.speed == SPEED_100 &&
3119 !(pDevice->TbiFlags & ENABLE_TBI_FLAG) &&
3120 !(pDevice->PhyFlags & PHY_IS_FIBER)) {
3122 pDevice->RequestedLineSpeed =
3123 LM_LINE_SPEED_100MBPS;
3125 else if (ethcmd.speed == SPEED_10 &&
3126 !(pDevice->TbiFlags & ENABLE_TBI_FLAG) &&
3127 !(pDevice->PhyFlags & PHY_IS_FIBER)) {
3129 pDevice->RequestedLineSpeed =
3130 LM_LINE_SPEED_10MBPS;
3136 pDevice->DisableAutoNeg = TRUE;
3137 if (ethcmd.duplex == DUPLEX_FULL) {
3138 pDevice->RequestedDuplexMode =
3139 LM_DUPLEX_MODE_FULL;
3142 if (!(pDevice->TbiFlags & ENABLE_TBI_FLAG) &&
3143 !(pDevice->PhyFlags & PHY_IS_FIBER) ) {
3145 pDevice->RequestedDuplexMode =
3146 LM_DUPLEX_MODE_HALF;
3150 if (netif_running(dev)) {
3151 BCM5700_PHY_LOCK(pUmDevice, flags);
3152 LM_SetupPhy(pDevice);
3153 BCM5700_PHY_UNLOCK(pUmDevice, flags);
3159 case ETHTOOL_GWOL: {
3160 struct ethtool_wolinfo wol = {ETHTOOL_GWOL};
3162 if (((pDevice->TbiFlags & ENABLE_TBI_FLAG) &&
3163 !(pDevice->Flags & FIBER_WOL_CAPABLE_FLAG)) ||
3164 (pDevice->Flags & DISABLE_D3HOT_FLAG)) {
3169 wol.supported = WAKE_MAGIC;
3170 if (pDevice->WakeUpMode == LM_WAKE_UP_MODE_MAGIC_PACKET)
3172 wol.wolopts = WAKE_MAGIC;
3178 if (mm_copy_to_user(useraddr, &wol, sizeof(wol)))
3182 case ETHTOOL_SWOL: {
3183 struct ethtool_wolinfo wol;
3185 if(!capable(CAP_NET_ADMIN))
3187 if (mm_copy_from_user(&wol, useraddr, sizeof(wol)))
3189 if ((((pDevice->TbiFlags & ENABLE_TBI_FLAG) &&
3190 !(pDevice->Flags & FIBER_WOL_CAPABLE_FLAG)) ||
3191 (pDevice->Flags & DISABLE_D3HOT_FLAG)) &&
3196 if ((wol.wolopts & ~WAKE_MAGIC) != 0) {
3199 if (wol.wolopts & WAKE_MAGIC) {
3200 pDevice->WakeUpModeCap = LM_WAKE_UP_MODE_MAGIC_PACKET;
3201 pDevice->WakeUpMode = LM_WAKE_UP_MODE_MAGIC_PACKET;
3204 pDevice->WakeUpModeCap = LM_WAKE_UP_MODE_NONE;
3205 pDevice->WakeUpMode = LM_WAKE_UP_MODE_NONE;
3211 #ifdef ETHTOOL_GLINK
3212 case ETHTOOL_GLINK: {
3213 struct ethtool_value edata = {ETHTOOL_GLINK};
3215 /* workaround for DHCP using ifup script */
3216 /* ifup only waits for 5 seconds for link up */
3217 /* NIC may take more than 5 seconds to establish link */
3218 if ((pUmDevice->delayed_link_ind > 0) &&
3219 delay_link[pUmDevice->index])
3222 if (pDevice->LinkStatus == LM_STATUS_LINK_ACTIVE) {
3228 if (mm_copy_to_user(useraddr, &edata, sizeof(edata)))
3233 #ifdef ETHTOOL_NWAY_RST
3234 case ETHTOOL_NWAY_RST: {
3236 unsigned long flags;
3238 if(!capable(CAP_NET_ADMIN))
3240 if (pDevice->DisableAutoNeg) {
3243 if (!netif_running(dev))
3245 BCM5700_PHY_LOCK(pUmDevice, flags);
3246 if (pDevice->TbiFlags & ENABLE_TBI_FLAG) {
3247 pDevice->RequestedLineSpeed = LM_LINE_SPEED_1000MBPS;
3248 pDevice->DisableAutoNeg = TRUE;
3249 LM_SetupPhy(pDevice);
3251 pDevice->RequestedLineSpeed = LM_LINE_SPEED_AUTO;
3252 pDevice->DisableAutoNeg = FALSE;
3253 LM_SetupPhy(pDevice);
3256 if ((T3_ASIC_REV(pDevice->ChipRevId) ==
3257 T3_ASIC_REV_5703) ||
3258 (T3_ASIC_REV(pDevice->ChipRevId) ==
3259 T3_ASIC_REV_5704) ||
3260 (T3_ASIC_REV(pDevice->ChipRevId) ==
3263 LM_ResetPhy(pDevice);
3264 LM_SetupPhy(pDevice);
3266 pDevice->PhyFlags &= ~PHY_FIBER_FALLBACK;
3267 LM_ReadPhy(pDevice, PHY_CTRL_REG, &phyctrl);
3268 LM_WritePhy(pDevice, PHY_CTRL_REG, phyctrl |
3269 PHY_CTRL_AUTO_NEG_ENABLE |
3270 PHY_CTRL_RESTART_AUTO_NEG);
3272 BCM5700_PHY_UNLOCK(pUmDevice, flags);
3276 #ifdef ETHTOOL_GEEPROM
3277 case ETHTOOL_GEEPROM: {
3278 struct ethtool_eeprom eeprom;
3280 LM_UINT32 buf1[64/4];
3281 int i, j, offset, len;
3283 if (mm_copy_from_user(&eeprom, useraddr, sizeof(eeprom)))
3286 if (eeprom.offset >= pDevice->NvramSize)
3289 /* maximum data limited */
3290 /* to read more, call again with a different offset */
3291 if (eeprom.len > 0x800) {
3293 if (mm_copy_to_user(useraddr, &eeprom, sizeof(eeprom)))
3297 if (eeprom.len > 64) {
3298 buf = kmalloc(eeprom.len, GFP_KERNEL);
3305 useraddr += offsetof(struct ethtool_eeprom, data);
3307 offset = eeprom.offset;
3310 offset &= 0xfffffffc;
3311 len += (offset & 3);
3313 len = (len + 3) & 0xfffffffc;
3314 for (i = 0, j = 0; j < len; i++, j += 4) {
3315 if (LM_NvramRead(pDevice, offset + j, buf + i) !=
3316 LM_STATUS_SUCCESS) {
3321 buf += (eeprom.offset & 3);
3322 i = mm_copy_to_user(useraddr, buf, eeprom.len);
3324 if (eeprom.len > 64) {
3331 case ETHTOOL_SEEPROM: {
3332 struct ethtool_eeprom eeprom;
3333 LM_UINT32 buf[64/4];
3336 if(!capable(CAP_NET_ADMIN))
3338 if (mm_copy_from_user(&eeprom, useraddr, sizeof(eeprom)))
3341 if ((eeprom.offset & 3) || (eeprom.len & 3) ||
3342 (eeprom.offset >= pDevice->NvramSize)) {
3346 if ((eeprom.offset + eeprom.len) >= pDevice->NvramSize) {
3347 eeprom.len = pDevice->NvramSize - eeprom.offset;
3350 useraddr += offsetof(struct ethtool_eeprom, data);
3353 offset = eeprom.offset;
3359 if (mm_copy_from_user(&buf, useraddr, i))
3362 bcm5700_intr_off(pUmDevice);
3363 /* Prevent race condition on Grc.Mode register */
3364 bcm5700_poll_wait(pUmDevice);
3366 if (LM_NvramWriteBlock(pDevice, offset, buf, i/4) !=
3367 LM_STATUS_SUCCESS) {
3368 bcm5700_intr_on(pUmDevice);
3371 bcm5700_intr_on(pUmDevice);
3379 #ifdef ETHTOOL_GREGS
3380 #if (LINUX_VERSION_CODE >= 0x02040f)
3381 case ETHTOOL_GREGS: {
3382 struct ethtool_regs eregs;
3383 LM_UINT32 *buf, *buf1;
3386 if(!capable(CAP_NET_ADMIN))
3388 if (pDevice->Flags & UNDI_FIX_FLAG)
3390 if (mm_copy_from_user(&eregs, useraddr, sizeof(eregs)))
3392 if (eregs.len > 0x6c00)
3394 eregs.version = 0x0;
3395 if (mm_copy_to_user(useraddr, &eregs, sizeof(eregs)))
3397 buf = buf1 = kmalloc(eregs.len, GFP_KERNEL);
3400 bcm5700_get_reg_blk(pUmDevice, &buf, 0, 0xb0, 0);
3401 bcm5700_get_reg_blk(pUmDevice, &buf, 0xb0, 0x200, 1);
3402 bcm5700_get_reg_blk(pUmDevice, &buf, 0x200, 0x8f0, 0);
3403 bcm5700_get_reg_blk(pUmDevice, &buf, 0x8f0, 0xc00, 1);
3404 bcm5700_get_reg_blk(pUmDevice, &buf, 0xc00, 0xce0, 0);
3405 bcm5700_get_reg_blk(pUmDevice, &buf, 0xce0, 0x1000, 1);
3406 bcm5700_get_reg_blk(pUmDevice, &buf, 0x1000, 0x1004, 0);
3407 bcm5700_get_reg_blk(pUmDevice, &buf, 0x1004, 0x1400, 1);
3408 bcm5700_get_reg_blk(pUmDevice, &buf, 0x1400, 0x1480, 0);
3409 bcm5700_get_reg_blk(pUmDevice, &buf, 0x1480, 0x1800, 1);
3410 bcm5700_get_reg_blk(pUmDevice, &buf, 0x1800, 0x1848, 0);
3411 bcm5700_get_reg_blk(pUmDevice, &buf, 0x1848, 0x1c00, 1);
3412 bcm5700_get_reg_blk(pUmDevice, &buf, 0x1c00, 0x1c04, 0);
3413 bcm5700_get_reg_blk(pUmDevice, &buf, 0x1c04, 0x2000, 1);
3414 bcm5700_get_reg_blk(pUmDevice, &buf, 0x2000, 0x225c, 0);
3415 bcm5700_get_reg_blk(pUmDevice, &buf, 0x225c, 0x2400, 1);
3416 bcm5700_get_reg_blk(pUmDevice, &buf, 0x2400, 0x24c4, 0);
3417 bcm5700_get_reg_blk(pUmDevice, &buf, 0x24c4, 0x2800, 1);
3418 bcm5700_get_reg_blk(pUmDevice, &buf, 0x2800, 0x2804, 0);
3419 bcm5700_get_reg_blk(pUmDevice, &buf, 0x2804, 0x2c00, 1);
3420 bcm5700_get_reg_blk(pUmDevice, &buf, 0x2c00, 0x2c20, 0);
3421 bcm5700_get_reg_blk(pUmDevice, &buf, 0x2c20, 0x3000, 1);
3422 bcm5700_get_reg_blk(pUmDevice, &buf, 0x3000, 0x3014, 0);
3423 bcm5700_get_reg_blk(pUmDevice, &buf, 0x3014, 0x3400, 1);
3424 bcm5700_get_reg_blk(pUmDevice, &buf, 0x3400, 0x3408, 0);
3425 bcm5700_get_reg_blk(pUmDevice, &buf, 0x3408, 0x3800, 1);
3426 bcm5700_get_reg_blk(pUmDevice, &buf, 0x3800, 0x3808, 0);
3427 bcm5700_get_reg_blk(pUmDevice, &buf, 0x3808, 0x3c00, 1);
3428 bcm5700_get_reg_blk(pUmDevice, &buf, 0x3c00, 0x3d00, 0);
3429 bcm5700_get_reg_blk(pUmDevice, &buf, 0x3d00, 0x4000, 1);
3430 bcm5700_get_reg_blk(pUmDevice, &buf, 0x4000, 0x4010, 0);
3431 bcm5700_get_reg_blk(pUmDevice, &buf, 0x4010, 0x4400, 1);
3432 bcm5700_get_reg_blk(pUmDevice, &buf, 0x4400, 0x4458, 0);
3433 bcm5700_get_reg_blk(pUmDevice, &buf, 0x4458, 0x4800, 1);
3434 bcm5700_get_reg_blk(pUmDevice, &buf, 0x4800, 0x4808, 0);
3435 bcm5700_get_reg_blk(pUmDevice, &buf, 0x4808, 0x4c00, 1);
3436 bcm5700_get_reg_blk(pUmDevice, &buf, 0x4c00, 0x4c08, 0);
3437 bcm5700_get_reg_blk(pUmDevice, &buf, 0x4c08, 0x5000, 1);
3438 bcm5700_get_reg_blk(pUmDevice, &buf, 0x5000, 0x5050, 0);
3439 bcm5700_get_reg_blk(pUmDevice, &buf, 0x5050, 0x5400, 1);
3440 bcm5700_get_reg_blk(pUmDevice, &buf, 0x5400, 0x5450, 0);
3441 bcm5700_get_reg_blk(pUmDevice, &buf, 0x5450, 0x5800, 1);
3442 bcm5700_get_reg_blk(pUmDevice, &buf, 0x5800, 0x5a10, 0);
3443 bcm5700_get_reg_blk(pUmDevice, &buf, 0x5a10, 0x6000, 1);
3444 bcm5700_get_reg_blk(pUmDevice, &buf, 0x6000, 0x600c, 0);
3445 bcm5700_get_reg_blk(pUmDevice, &buf, 0x600c, 0x6400, 1);
3446 bcm5700_get_reg_blk(pUmDevice, &buf, 0x6400, 0x6404, 0);
3447 bcm5700_get_reg_blk(pUmDevice, &buf, 0x6404, 0x6800, 1);
3448 bcm5700_get_reg_blk(pUmDevice, &buf, 0x6800, 0x6848, 0);
3449 bcm5700_get_reg_blk(pUmDevice, &buf, 0x6848, 0x6c00, 1);
3451 i = mm_copy_to_user(useraddr + sizeof(eregs), buf1, eregs.len);
3459 #ifdef ETHTOOL_GPAUSEPARAM
3460 case ETHTOOL_GPAUSEPARAM: {
3461 struct ethtool_pauseparam epause = { ETHTOOL_GPAUSEPARAM };
3463 if (!pDevice->DisableAutoNeg) {
3464 epause.autoneg = (pDevice->FlowControlCap &
3465 LM_FLOW_CONTROL_AUTO_PAUSE) != 0;
3471 (pDevice->FlowControl &
3472 LM_FLOW_CONTROL_RECEIVE_PAUSE) != 0;
3474 (pDevice->FlowControl &
3475 LM_FLOW_CONTROL_TRANSMIT_PAUSE) != 0;
3476 if (mm_copy_to_user(useraddr, &epause, sizeof(epause)))
3481 case ETHTOOL_SPAUSEPARAM: {
3482 struct ethtool_pauseparam epause;
3483 unsigned long flags;
3485 if(!capable(CAP_NET_ADMIN))
3487 if (mm_copy_from_user(&epause, useraddr, sizeof(epause)))
3489 pDevice->FlowControlCap = 0;
3490 if (epause.autoneg && !pDevice->DisableAutoNeg) {
3491 pDevice->FlowControlCap |= LM_FLOW_CONTROL_AUTO_PAUSE;
3493 if (epause.rx_pause) {
3494 pDevice->FlowControlCap |=
3495 LM_FLOW_CONTROL_RECEIVE_PAUSE;
3497 if (epause.tx_pause) {
3498 pDevice->FlowControlCap |=
3499 LM_FLOW_CONTROL_TRANSMIT_PAUSE;
3501 if (netif_running(dev)) {
3502 BCM5700_PHY_LOCK(pUmDevice, flags);
3503 LM_SetupPhy(pDevice);
3504 BCM5700_PHY_UNLOCK(pUmDevice, flags);
3510 #ifdef ETHTOOL_GRXCSUM
3511 case ETHTOOL_GRXCSUM: {
3512 struct ethtool_value edata = { ETHTOOL_GRXCSUM };
3515 (pDevice->TaskToOffload &
3516 LM_TASK_OFFLOAD_RX_TCP_CHECKSUM) != 0;
3517 if (mm_copy_to_user(useraddr, &edata, sizeof(edata)))
3522 case ETHTOOL_SRXCSUM: {
3523 struct ethtool_value edata;
3525 if(!capable(CAP_NET_ADMIN))
3527 if (mm_copy_from_user(&edata, useraddr, sizeof(edata)))
3530 if (!(pDevice->TaskOffloadCap &
3531 LM_TASK_OFFLOAD_TX_TCP_CHECKSUM)) {
3535 pDevice->TaskToOffload |=
3536 LM_TASK_OFFLOAD_RX_TCP_CHECKSUM |
3537 LM_TASK_OFFLOAD_RX_UDP_CHECKSUM;
3540 pDevice->TaskToOffload &=
3541 ~(LM_TASK_OFFLOAD_RX_TCP_CHECKSUM |
3542 LM_TASK_OFFLOAD_RX_UDP_CHECKSUM);
3546 case ETHTOOL_GTXCSUM: {
3547 struct ethtool_value edata = { ETHTOOL_GTXCSUM };
3550 (dev->features & get_csum_flag( pDevice->ChipRevId)) != 0;
3551 if (mm_copy_to_user(useraddr, &edata, sizeof(edata)))
3556 case ETHTOOL_STXCSUM: {
3557 struct ethtool_value edata;
3559 if(!capable(CAP_NET_ADMIN))
3561 if (mm_copy_from_user(&edata, useraddr, sizeof(edata)))
3564 if (!(pDevice->TaskOffloadCap &
3565 LM_TASK_OFFLOAD_TX_TCP_CHECKSUM)) {
3569 dev->features |= get_csum_flag( pDevice->ChipRevId);
3570 pDevice->TaskToOffload |=
3571 LM_TASK_OFFLOAD_TX_TCP_CHECKSUM |
3572 LM_TASK_OFFLOAD_TX_UDP_CHECKSUM;
3575 dev->features &= ~get_csum_flag( pDevice->ChipRevId);
3576 pDevice->TaskToOffload &=
3577 ~(LM_TASK_OFFLOAD_TX_TCP_CHECKSUM |
3578 LM_TASK_OFFLOAD_TX_UDP_CHECKSUM);
3583 struct ethtool_value edata = { ETHTOOL_GSG };
3586 (dev->features & NETIF_F_SG) != 0;
3587 if (mm_copy_to_user(useraddr, &edata, sizeof(edata)))
3592 struct ethtool_value edata;
3594 if(!capable(CAP_NET_ADMIN))
3596 if (mm_copy_from_user(&edata, useraddr, sizeof(edata)))
3599 dev->features |= NETIF_F_SG;
3602 dev->features &= ~NETIF_F_SG;
3607 #ifdef ETHTOOL_GRINGPARAM
3608 case ETHTOOL_GRINGPARAM: {
3609 struct ethtool_ringparam ering = { ETHTOOL_GRINGPARAM };
3611 ering.rx_max_pending = T3_STD_RCV_RCB_ENTRY_COUNT - 1;
3612 ering.rx_pending = pDevice->RxStdDescCnt;
3613 ering.rx_mini_max_pending = 0;
3614 ering.rx_mini_pending = 0;
3615 #if T3_JUMBO_RCV_RCB_ENTRY_COUNT
3616 ering.rx_jumbo_max_pending = T3_JUMBO_RCV_RCB_ENTRY_COUNT - 1;
3617 ering.rx_jumbo_pending = pDevice->RxJumboDescCnt;
3619 ering.rx_jumbo_max_pending = 0;
3620 ering.rx_jumbo_pending = 0;
3622 ering.tx_max_pending = MAX_TX_PACKET_DESC_COUNT - 1;
3623 ering.tx_pending = pDevice->TxPacketDescCnt;
3624 if (mm_copy_to_user(useraddr, &ering, sizeof(ering)))
3629 #ifdef ETHTOOL_PHYS_ID
3630 case ETHTOOL_PHYS_ID: {
3631 struct ethtool_value edata;
3633 if(!capable(CAP_NET_ADMIN))
3635 if (mm_copy_from_user(&edata, useraddr, sizeof(edata)))
3637 if (LM_BlinkLED(pDevice, edata.data) == LM_STATUS_SUCCESS)
3642 #ifdef ETHTOOL_GSTRINGS
3643 case ETHTOOL_GSTRINGS: {
3644 struct ethtool_gstrings egstr = { ETHTOOL_GSTRINGS };
3646 if (mm_copy_from_user(&egstr, useraddr, sizeof(egstr)))
3648 switch(egstr.string_set) {
3649 #ifdef ETHTOOL_GSTATS
3651 egstr.len = ETH_NUM_STATS;
3652 if (mm_copy_to_user(useraddr, &egstr, sizeof(egstr)))
3654 if (mm_copy_to_user(useraddr + sizeof(egstr),
3655 bcm5700_stats_str_arr,
3656 sizeof(bcm5700_stats_str_arr)))
3662 egstr.len = ETH_NUM_TESTS;
3663 if (mm_copy_to_user(useraddr, &egstr, sizeof(egstr)))
3665 if (mm_copy_to_user(useraddr + sizeof(egstr),
3666 bcm5700_tests_str_arr,
3667 sizeof(bcm5700_tests_str_arr)))
3676 #ifdef ETHTOOL_GSTATS
3677 case ETHTOOL_GSTATS: {
3678 struct ethtool_stats estats = { ETHTOOL_GSTATS };
3679 uint64_t stats[ETH_NUM_STATS];
3682 (uint64_t *) pDevice->pStatsBlkVirt;
3684 estats.n_stats = ETH_NUM_STATS;
3686 memset(stats, 0, sizeof(stats));
3690 for (i = 0; i < ETH_NUM_STATS; i++) {
3691 if (bcm5700_stats_offset_arr[i] != 0) {
3692 stats[i] = SWAP_DWORD_64(*(pStats +
3693 bcm5700_stats_offset_arr[i]));
3695 else if (i == RX_CRC_IDX) {
3697 bcm5700_crc_count(pUmDevice);
3699 else if (i == RX_MAC_ERR_IDX) {
3701 bcm5700_rx_err_count(pUmDevice);
3705 if (mm_copy_to_user(useraddr, &estats, sizeof(estats))) {
3708 if (mm_copy_to_user(useraddr + sizeof(estats), &stats,
3716 case ETHTOOL_TEST: {
3717 struct ethtool_test etest;
3718 uint64_t tests[ETH_NUM_TESTS] = {0, 0, 0, 0, 0, 0};
3719 LM_POWER_STATE old_power_level;
3721 printk( KERN_ALERT "Performing ethtool test.\n"
3722 "This test will take a few seconds to complete.\n" );
3724 if (mm_copy_from_user(&etest, useraddr, sizeof(etest)))
3727 etest.len = ETH_NUM_TESTS;
3728 old_power_level = pDevice->PowerLevel;
3729 if (old_power_level != LM_POWER_STATE_D0) {
3730 LM_SetPowerState(pDevice, LM_POWER_STATE_D0);
3731 LM_SwitchClocks(pDevice);
3733 MM_Sleep(pDevice, 1000);
3734 if (etest.flags & ETH_TEST_FL_OFFLINE) {
3735 b57_suspend_chip(pUmDevice);
3736 MM_Sleep(pDevice, 1000);
3737 LM_HaltCpu(pDevice,T3_RX_CPU_ID | T3_TX_CPU_ID);
3738 MM_Sleep(pDevice, 1000);
3739 if (b57_test_registers(pUmDevice) == 0) {
3740 etest.flags |= ETH_TEST_FL_FAILED;
3743 MM_Sleep(pDevice, 1000);
3744 if (b57_test_memory(pUmDevice) == 0) {
3745 etest.flags |= ETH_TEST_FL_FAILED;
3748 MM_Sleep(pDevice, 1000);
3749 if (b57_test_loopback(pUmDevice, NICE_LOOPBACK_TESTTYPE_MAC, 0) == 0) {
3750 etest.flags |= ETH_TEST_FL_FAILED;
3753 MM_Sleep(pDevice, 1000);
3754 b57_resume_chip(pUmDevice);
3755 /* wait for link to come up for the link test */
3756 MM_Sleep(pDevice, 4000);
3757 if ((pDevice->LinkStatus != LM_STATUS_LINK_ACTIVE) &&
3758 !(pDevice->TbiFlags & ENABLE_TBI_FLAG)) {
3760 /* wait a little longer for linkup on copper */
3761 MM_Sleep(pDevice, 3000);
3764 if (b57_test_nvram(pUmDevice) == 0) {
3765 etest.flags |= ETH_TEST_FL_FAILED;
3768 MM_Sleep(pDevice, 1000);
3769 if (b57_test_intr(pUmDevice) == 0) {
3770 etest.flags |= ETH_TEST_FL_FAILED;
3773 MM_Sleep(pDevice, 1000);
3774 if (b57_test_link(pUmDevice) == 0) {
3775 etest.flags |= ETH_TEST_FL_FAILED;
3778 MM_Sleep(pDevice, 1000);
3779 if (old_power_level != LM_POWER_STATE_D0) {
3780 LM_SetPowerState(pDevice, old_power_level);
3782 if (mm_copy_to_user(useraddr, &etest, sizeof(etest))) {
3785 if (mm_copy_to_user(useraddr + sizeof(etest), tests,
3793 case ETHTOOL_GTSO: {
3794 struct ethtool_value edata = { ETHTOOL_GTSO };
3798 (dev->features & NETIF_F_TSO) != 0;
3802 if (mm_copy_to_user(useraddr, &edata, sizeof(edata)))
3808 case ETHTOOL_STSO: {
3810 struct ethtool_value edata;
3812 if (!capable(CAP_NET_ADMIN))
3815 if (mm_copy_from_user(&edata, useraddr, sizeof(edata)))
3818 if (!(pDevice->TaskToOffload &
3819 LM_TASK_OFFLOAD_TCP_SEGMENTATION)) {
3823 dev->features &= ~NETIF_F_TSO;
3826 if (T3_ASIC_5714_FAMILY(pDevice->ChipRevId) &&
3827 (dev->mtu > 1500)) {
3828 printk(KERN_ALERT "%s: Jumbo Frames and TSO cannot simultaneously be enabled. Jumbo Frames enabled. TSO disabled.\n", dev->name);
3831 dev->features |= NETIF_F_TSO;
3844 #endif /* #ifdef SIOCETHTOOL */
3846 #if (LINUX_VERSION_CODE >= 0x20400) && (LINUX_VERSION_CODE < 0x20600)
3847 #include <linux/iobuf.h>
3850 /* Provide ioctl() calls to examine the MII xcvr state. */
/*
 * bcm5700_ioctl - private ioctl entry point for the net_device.
 *
 * Handles the legacy MII access SIOCDEVPRIVATE range (get PHY address,
 * read/write a PHY register under the PHY lock) and the Broadcom NICE
 * diagnostic command set (register/memory/PCI-config access, loopback
 * control, LED blink, DMA buffer alloc/free, statistics block access).
 * All privileged operations check capable(CAP_NET_ADMIN) first.
 * Everything else is forwarded to netdev_ethtool_ioctl() at the bottom.
 *
 * Returns 0 on success or a negative errno (set in the elided paths).
 */
3851 STATIC int bcm5700_ioctl(struct net_device *dev, struct ifreq *rq, int cmd)
3853 PUM_DEVICE_BLOCK pUmDevice = (PUM_DEVICE_BLOCK)dev->priv;
3854 PLM_DEVICE_BLOCK pDevice = (PLM_DEVICE_BLOCK) pUmDevice;
3855 u16 *data = (u16 *)&rq->ifr_data;
3857 unsigned long flags;
3863 case SIOCDEVPRIVATE: /* Get the address of the PHY in use. */
3864 data[0] = pDevice->PhyAddr;
3869 case SIOCDEVPRIVATE+1: /* Read the specified MII register. */
3870 if (pDevice->TbiFlags & ENABLE_TBI_FLAG)
3873 /* workaround for DHCP using ifup script */
3874 /* ifup only waits for 5 seconds for link up */
3875 /* NIC may take more than 5 seconds to establish link */
3876 if ((pUmDevice->delayed_link_ind > 0) &&
3877 delay_link[pUmDevice->index]) {
3881 BCM5700_PHY_LOCK(pUmDevice, flags);
3882 LM_ReadPhy(pDevice, data[1] & 0x1f, (LM_UINT32 *) &value);
3883 BCM5700_PHY_UNLOCK(pUmDevice, flags);
3884 data[3] = value & 0xffff;
3890 case SIOCDEVPRIVATE+2: /* Write the specified MII register */
3891 if (!capable(CAP_NET_ADMIN))
3894 if (pDevice->TbiFlags & ENABLE_TBI_FLAG)
3897 BCM5700_PHY_LOCK(pUmDevice, flags);
3898 LM_WritePhy(pDevice, data[1] & 0x1f, data[2]);
3899 BCM5700_PHY_UNLOCK(pUmDevice, flags);
3905 struct nice_req* nrq;
3907 if (!capable(CAP_NET_ADMIN))
3910 nrq = (struct nice_req*)&rq->ifr_ifru;
3911 if( nrq->cmd == NICE_CMD_QUERY_SUPPORT ) {
3912 nrq->nrq_magic = NICE_DEVICE_MAGIC;
3913 nrq->nrq_support_rx = 1;
3914 nrq->nrq_support_vlan = 1;
3915 nrq->nrq_support_get_speed = 1;
3916 #ifdef BCM_NAPI_RXPOLL
3917 nrq->nrq_support_rx_napi = 1;
3921 #ifdef BCM_NAPI_RXPOLL
3922 else if( nrq->cmd == NICE_CMD_SET_RX_NAPI )
3924 else if( nrq->cmd == NICE_CMD_SET_RX )
3927 pUmDevice->nice_rx = nrq->nrq_rx;
3928 pUmDevice->nice_ctx = nrq->nrq_ctx;
3929 bcm5700_set_vlan_mode(pUmDevice);
3932 #ifdef BCM_NAPI_RXPOLL
3933 else if( nrq->cmd == NICE_CMD_GET_RX_NAPI )
3935 else if( nrq->cmd == NICE_CMD_GET_RX )
3938 nrq->nrq_rx = pUmDevice->nice_rx;
3939 nrq->nrq_ctx = pUmDevice->nice_ctx;
3942 else if( nrq->cmd == NICE_CMD_GET_SPEED ) {
3943 if(pDevice->LinkStatus != LM_STATUS_LINK_ACTIVE){
3946 else if (pDevice->LineSpeed == LM_LINE_SPEED_1000MBPS) {
3947 nrq->nrq_speed = SPEED_1000;
3948 } else if (pDevice->LineSpeed == LM_LINE_SPEED_100MBPS) {
3949 nrq->nrq_speed = SPEED_100;
3950 } else if (pDevice->LineSpeed == LM_LINE_SPEED_10MBPS) {
/* BUGFIX: the 10 Mbps branch previously reported SPEED_100 (copy-paste
 * from the branch above); it must report SPEED_10. */
3951 nrq->nrq_speed = SPEED_10;
3958 if (!pUmDevice->opened)
3962 case NICE_CMD_BLINK_LED:
3963 if (LM_BlinkLED(pDevice, nrq->nrq_blink_time) ==
3964 LM_STATUS_SUCCESS) {
3969 case NICE_CMD_DIAG_SUSPEND:
3970 b57_suspend_chip(pUmDevice);
3973 case NICE_CMD_DIAG_RESUME:
3974 b57_resume_chip(pUmDevice);
/* Offsets >= 0x10000 are outside the mapped BAR window and must go
 * through the indirect register access helpers. */
3977 case NICE_CMD_REG_READ:
3978 if (nrq->nrq_offset >= 0x10000) {
3979 nrq->nrq_data = LM_RegRdInd(pDevice,
3983 nrq->nrq_data = LM_RegRd(pDevice,
3988 case NICE_CMD_REG_WRITE:
3989 if (nrq->nrq_offset >= 0x10000) {
3990 LM_RegWrInd(pDevice, nrq->nrq_offset,
3994 LM_RegWr(pDevice, nrq->nrq_offset,
3995 nrq->nrq_data, FALSE);
3999 case NICE_CMD_REG_READ_DIRECT:
4000 case NICE_CMD_REG_WRITE_DIRECT:
4001 if ((nrq->nrq_offset >= 0x10000) ||
4002 (pDevice->Flags & UNDI_FIX_FLAG)) {
4006 if (nrq->cmd == NICE_CMD_REG_READ_DIRECT) {
4007 nrq->nrq_data = REG_RD_OFFSET(pDevice,
4011 REG_WR_OFFSET(pDevice, nrq->nrq_offset,
4016 case NICE_CMD_MEM_READ:
4017 nrq->nrq_data = LM_MemRdInd(pDevice,
4021 case NICE_CMD_MEM_WRITE:
4022 LM_MemWrInd(pDevice, nrq->nrq_offset,
/* PCI configuration space accessors for diagnostics. */
4026 case NICE_CMD_CFG_READ32:
4027 pci_read_config_dword(pUmDevice->pdev,
4028 nrq->nrq_offset, (u32 *)&nrq->nrq_data);
4031 case NICE_CMD_CFG_READ16:
4032 pci_read_config_word(pUmDevice->pdev,
4033 nrq->nrq_offset, (u16 *)&nrq->nrq_data);
4036 case NICE_CMD_CFG_READ8:
4037 pci_read_config_byte(pUmDevice->pdev,
4038 nrq->nrq_offset, (u8 *)&nrq->nrq_data);
4041 case NICE_CMD_CFG_WRITE32:
4042 pci_write_config_dword(pUmDevice->pdev,
4043 nrq->nrq_offset, (u32)nrq->nrq_data);
4046 case NICE_CMD_CFG_WRITE16:
4047 pci_write_config_word(pUmDevice->pdev,
4048 nrq->nrq_offset, (u16)nrq->nrq_data);
4051 case NICE_CMD_CFG_WRITE8:
4052 pci_write_config_byte(pUmDevice->pdev,
4053 nrq->nrq_offset, (u8)nrq->nrq_data);
4056 case NICE_CMD_RESET:
/* Loopback enable/disable pairs: each checks LoopBackMode first so
 * modes cannot be stacked, then flips the mode under the PHY lock. */
4060 case NICE_CMD_ENABLE_MAC_LOOPBACK:
4061 if (pDevice->LoopBackMode != 0) {
4065 BCM5700_PHY_LOCK(pUmDevice, flags);
4066 LM_EnableMacLoopBack(pDevice);
4067 BCM5700_PHY_UNLOCK(pUmDevice, flags);
4070 case NICE_CMD_DISABLE_MAC_LOOPBACK:
4071 if (pDevice->LoopBackMode !=
4072 LM_MAC_LOOP_BACK_MODE) {
4076 BCM5700_PHY_LOCK(pUmDevice, flags);
4077 LM_DisableMacLoopBack(pDevice);
4078 BCM5700_PHY_UNLOCK(pUmDevice, flags);
4081 case NICE_CMD_ENABLE_PHY_LOOPBACK:
4082 if (pDevice->LoopBackMode != 0) {
4086 BCM5700_PHY_LOCK(pUmDevice, flags);
4087 LM_EnablePhyLoopBack(pDevice);
4088 BCM5700_PHY_UNLOCK(pUmDevice, flags);
4091 case NICE_CMD_DISABLE_PHY_LOOPBACK:
4092 if (pDevice->LoopBackMode !=
4093 LM_PHY_LOOP_BACK_MODE) {
4097 BCM5700_PHY_LOCK(pUmDevice, flags);
4098 LM_DisablePhyLoopBack(pDevice);
4099 BCM5700_PHY_UNLOCK(pUmDevice, flags);
4102 case NICE_CMD_ENABLE_EXT_LOOPBACK:
4103 if (pDevice->LoopBackMode != 0) {
4107 if (pDevice->TbiFlags & ENABLE_TBI_FLAG) {
/* Fiber (TBI) only supports 1000 Mbps external loopback. */
4108 if (nrq->nrq_speed != 1000)
4112 if ((nrq->nrq_speed != 1000) &&
4113 (nrq->nrq_speed != 100) &&
4114 (nrq->nrq_speed != 10)) {
4118 BCM5700_PHY_LOCK(pUmDevice, flags);
4119 LM_EnableExtLoopBack(pDevice, nrq->nrq_speed);
4120 BCM5700_PHY_UNLOCK(pUmDevice, flags);
4123 case NICE_CMD_DISABLE_EXT_LOOPBACK:
4124 if (pDevice->LoopBackMode !=
4125 LM_EXT_LOOP_BACK_MODE) {
4129 BCM5700_PHY_LOCK(pUmDevice, flags);
4130 LM_DisableExtLoopBack(pDevice);
4131 BCM5700_PHY_UNLOCK(pUmDevice, flags);
4134 case NICE_CMD_INTERRUPT_TEST:
4135 nrq->nrq_intr_test_result =
4136 b57_test_intr(pUmDevice);
4139 case NICE_CMD_LOOPBACK_TEST:
4141 switch (nrq->nrq_looptype) {
4142 case NICE_LOOPBACK_TESTTYPE_EXT:
4143 if ((nrq->nrq_loopspeed & ~NICE_LOOPBACK_TEST_SPEEDMASK) ||
4144 !(nrq->nrq_loopspeed & NICE_LOOPBACK_TEST_SPEEDMASK))
4146 switch (nrq->nrq_loopspeed) {
4147 case NICE_LOOPBACK_TEST_10MBPS:
4148 value = LM_LINE_SPEED_10MBPS;
4150 case NICE_LOOPBACK_TEST_100MBPS:
4151 value = LM_LINE_SPEED_100MBPS;
4153 case NICE_LOOPBACK_TEST_1000MBPS:
4154 value = LM_LINE_SPEED_1000MBPS;
4159 case NICE_LOOPBACK_TESTTYPE_MAC:
4160 case NICE_LOOPBACK_TESTTYPE_PHY:
4161 b57_suspend_chip(pUmDevice);
4162 value = b57_test_loopback(pUmDevice,
4163 nrq->nrq_looptype, value);
4164 b57_resume_chip(pUmDevice);
4169 /* A '1' indicates success */
/* Allocate a physically-contiguous DMA buffer for user diagnostics,
 * record it in the mem/dma/cpu_pa _list2 bookkeeping arrays, and
 * reserve its pages so they can be mmap'd. */
4177 case NICE_CMD_KMALLOC_PHYS: {
4178 #if (LINUX_VERSION_CODE >= 0x020400)
4183 struct page *pg, *last_pg;
4185 for (i = 0; i < MAX_MEM2; i++) {
4186 if (pUmDevice->mem_size_list2[i] == 0)
4191 ptr = pci_alloc_consistent(pUmDevice->pdev,
4192 nrq->nrq_size, &mapping);
4196 pUmDevice->mem_size_list2[i] = nrq->nrq_size;
4197 pUmDevice->mem_list2[i] = ptr;
4198 pUmDevice->dma_list2[i] = mapping;
4200 /* put pci mapping at the beginning of buffer */
4201 *((__u64 *) ptr) = (__u64) mapping;
4203 /* Probably won't work on some architectures */
4204 /* get CPU mapping */
4205 cpu_pa = (__u64) virt_to_phys(ptr);
4206 pUmDevice->cpu_pa_list2[i] = cpu_pa;
4207 nrq->nrq_phys_addr_lo = (__u32) cpu_pa;
4208 nrq->nrq_phys_addr_hi = (__u32) (cpu_pa >> 32);
4210 pg = virt_to_page(ptr);
4211 last_pg = virt_to_page(ptr + nrq->nrq_size - 1);
4213 #if (LINUX_VERSION_CODE > 0x020500)
4214 SetPageReserved(pg);
4216 mem_map_reserve(pg);
/* Free a buffer previously handed out by NICE_CMD_KMALLOC_PHYS,
 * identified by the CPU physical address returned to the caller. */
4227 case NICE_CMD_KFREE_PHYS: {
4231 cpu_pa = (__u64) nrq->nrq_phys_addr_lo +
4232 ((__u64) nrq->nrq_phys_addr_hi << 32);
4233 for (i = 0; i < MAX_MEM2; i++) {
4234 if (pUmDevice->cpu_pa_list2[i] ==
4243 bcm5700_freemem2(pUmDevice, i);
4247 case NICE_CMD_SET_WRITE_PROTECT:
4248 if (nrq->nrq_write_protect)
4249 pDevice->Flags |= EEPROM_WP_FLAG;
4251 pDevice->Flags &= ~EEPROM_WP_FLAG;
4253 case NICE_CMD_GET_STATS_BLOCK: {
4254 PT3_STATS_BLOCK pStats =
4255 (PT3_STATS_BLOCK)pDevice->pStatsBlkVirt;
4256 if (mm_copy_to_user(nrq->nrq_stats_useraddr,
4257 pStats, nrq->nrq_stats_size)) {
4262 case NICE_CMD_CLR_STATS_BLOCK: {
4264 PT3_STATS_BLOCK pStats =
4265 (PT3_STATS_BLOCK)pDevice->pStatsBlkVirt;
4267 memset(pStats, 0, sizeof(T3_STATS_BLOCK));
4268 if (T3_ASIC_REV(pDevice->ChipRevId) ==
/* Also clear the on-chip statistics area (0x0300-0x0aff). */
4272 for(j = 0x0300; j < 0x0b00; j = j + 4) {
4273 MEM_WR_OFFSET(pDevice, j, 0);
4283 #endif /* NICE_SUPPORT */
4286 return netdev_ethtool_ioctl(dev, (void *) rq->ifr_data);
/*
 * bcm5700_do_rx_mode - program the hardware receive filters from dev state.
 *
 * Rebuilds the multicast table from dev->mc_list and toggles the
 * ALL_MULTICAST / PROMISCUOUS bits of pDevice->ReceiveMask to match
 * IFF_ALLMULTI / IFF_PROMISC, only calling LM_SetReceiveMask when the
 * bit actually needs to change.
 *
 * NOTE(review): unlike bcm5700_set_rx_mode() below, this variant takes no
 * PHY lock — presumably the caller already holds it; confirm at call sites.
 */
4294 STATIC void bcm5700_do_rx_mode(struct net_device *dev)
4296 PUM_DEVICE_BLOCK pUmDevice = (PUM_DEVICE_BLOCK)dev->priv;
4297 PLM_DEVICE_BLOCK pDevice = (PLM_DEVICE_BLOCK) pUmDevice;
4299 struct dev_mc_list *mclist;
4301 LM_MulticastClear(pDevice);
4302 for (i = 0, mclist = dev->mc_list; mclist && i < dev->mc_count;
4303 i++, mclist = mclist->next) {
4304 LM_MulticastAdd(pDevice, (PLM_UINT8) &mclist->dmi_addr);
4306 if (dev->flags & IFF_ALLMULTI) {
4307 if (!(pDevice->ReceiveMask & LM_ACCEPT_ALL_MULTICAST)) {
4308 LM_SetReceiveMask(pDevice,
4309 pDevice->ReceiveMask | LM_ACCEPT_ALL_MULTICAST);
4312 else if (pDevice->ReceiveMask & LM_ACCEPT_ALL_MULTICAST) {
4313 LM_SetReceiveMask(pDevice,
4314 pDevice->ReceiveMask & ~LM_ACCEPT_ALL_MULTICAST);
4316 if (dev->flags & IFF_PROMISC) {
4317 if (!(pDevice->ReceiveMask & LM_PROMISCUOUS_MODE)) {
4318 LM_SetReceiveMask(pDevice,
4319 pDevice->ReceiveMask | LM_PROMISCUOUS_MODE);
4322 else if (pDevice->ReceiveMask & LM_PROMISCUOUS_MODE) {
4323 LM_SetReceiveMask(pDevice,
4324 pDevice->ReceiveMask & ~LM_PROMISCUOUS_MODE);
/*
 * bcm5700_set_rx_mode - net_device set_multicast_list entry point.
 *
 * Same filter-programming logic as bcm5700_do_rx_mode() (multicast table,
 * ALL_MULTICAST and PROMISCUOUS mask bits), but wrapped in the PHY lock
 * since it is invoked directly by the networking core.
 */
4329 STATIC void bcm5700_set_rx_mode(struct net_device *dev)
4331 PUM_DEVICE_BLOCK pUmDevice = (PUM_DEVICE_BLOCK)dev->priv;
4332 PLM_DEVICE_BLOCK pDevice = (PLM_DEVICE_BLOCK) pUmDevice;
4334 struct dev_mc_list *mclist;
4335 unsigned long flags;
4337 BCM5700_PHY_LOCK(pUmDevice, flags);
4339 LM_MulticastClear(pDevice);
4340 for (i = 0, mclist = dev->mc_list; mclist && i < dev->mc_count;
4341 i++, mclist = mclist->next) {
4342 LM_MulticastAdd(pDevice, (PLM_UINT8) &mclist->dmi_addr);
4344 if (dev->flags & IFF_ALLMULTI) {
4345 if (!(pDevice->ReceiveMask & LM_ACCEPT_ALL_MULTICAST)) {
4346 LM_SetReceiveMask(pDevice,
4347 pDevice->ReceiveMask | LM_ACCEPT_ALL_MULTICAST);
4350 else if (pDevice->ReceiveMask & LM_ACCEPT_ALL_MULTICAST) {
4351 LM_SetReceiveMask(pDevice,
4352 pDevice->ReceiveMask & ~LM_ACCEPT_ALL_MULTICAST);
4354 if (dev->flags & IFF_PROMISC) {
4355 if (!(pDevice->ReceiveMask & LM_PROMISCUOUS_MODE)) {
4356 LM_SetReceiveMask(pDevice,
4357 pDevice->ReceiveMask | LM_PROMISCUOUS_MODE);
4360 else if (pDevice->ReceiveMask & LM_PROMISCUOUS_MODE) {
4361 LM_SetReceiveMask(pDevice,
4362 pDevice->ReceiveMask & ~LM_PROMISCUOUS_MODE);
4365 BCM5700_PHY_UNLOCK(pUmDevice, flags);
4369 * Set the hardware MAC address.
/*
 * bcm5700_set_mac_addr - net_device set_mac_address entry point.
 *
 * Validates the requested address, copies it into dev->dev_addr, and —
 * only if the interface is open — pushes it to the hardware via
 * LM_SetMacAddress. (If closed, the address is applied later at open.)
 */
4371 STATIC int bcm5700_set_mac_addr(struct net_device *dev, void *p)
4373 struct sockaddr *addr=p;
4374 PLM_DEVICE_BLOCK pDevice = (PLM_DEVICE_BLOCK) dev->priv;
4375 UM_DEVICE_BLOCK *pUmDevice = (UM_DEVICE_BLOCK *) pDevice;
4377 if(is_valid_ether_addr(addr->sa_data)){
4379 memcpy(dev->dev_addr, addr->sa_data,dev->addr_len);
4380 if (pUmDevice->opened)
4381 LM_SetMacAddress(pDevice, dev->dev_addr);
4387 #if T3_JUMBO_RCV_RCB_ENTRY_COUNT
/*
 * bcm5700_change_mtu - net_device change_mtu entry point (jumbo-capable
 * builds only; compiled under T3_JUMBO_RCV_RCB_ENTRY_COUNT).
 *
 * Validates the resulting packet size against the chip limits (jumbo
 * sizes only on JUMBO_CAPABLE_FLAG hardware), then if the device is open
 * performs a full stop/free/reconfigure/restart cycle under the PHY lock
 * so the new Rx/Tx MTU and jumbo descriptor counts take effect.
 */
4388 STATIC int bcm5700_change_mtu(struct net_device *dev, int new_mtu)
4390 int pkt_size = new_mtu + ETHERNET_PACKET_HEADER_SIZE;
4391 PUM_DEVICE_BLOCK pUmDevice = (PUM_DEVICE_BLOCK)dev->priv;
4392 PLM_DEVICE_BLOCK pDevice = &pUmDevice->lm_dev;
4393 unsigned long flags;
4396 if ((pkt_size < MIN_ETHERNET_PACKET_SIZE_NO_CRC) ||
4397 (pkt_size > MAX_ETHERNET_JUMBO_PACKET_SIZE_NO_CRC)) {
4401 if ( !(pDevice->Flags & JUMBO_CAPABLE_FLAG) &&
4402 (pkt_size > MAX_ETHERNET_PACKET_SIZE_NO_CRC) ) {
4406 if (pUmDevice->suspended)
4409 if (pUmDevice->opened && (new_mtu != dev->mtu) &&
4410 (pDevice->Flags & JUMBO_CAPABLE_FLAG)) {
4414 BCM5700_PHY_LOCK(pUmDevice, flags);
4416 /* Fix for RQM 289636 */
4417 /* netif_stop_queue(dev); */
4418 bcm5700_netif_stop_queue(dev);
4419 bcm5700_shutdown(pUmDevice);
4420 bcm5700_freemem(dev);
/* Never program an Rx/Tx MTU below the standard frame size. */
4424 if (pkt_size < MAX_ETHERNET_PACKET_SIZE_NO_CRC) {
4425 pDevice->RxMtu = pDevice->TxMtu =
4426 MAX_ETHERNET_PACKET_SIZE_NO_CRC;
4429 pDevice->RxMtu = pDevice->TxMtu = pkt_size;
/* Jumbo descriptors are only needed once the MTU exceeds 1514. */
4432 if (dev->mtu <= 1514) {
4433 pDevice->RxJumboDescCnt = 0;
4435 else if (pDevice->Flags & JUMBO_CAPABLE_FLAG){
4436 pDevice->RxJumboDescCnt =
4437 rx_jumbo_desc_cnt[pUmDevice->index];
4439 pDevice->RxPacketDescCnt = pDevice->RxJumboDescCnt +
4440 pDevice->RxStdDescCnt;
/* Round the jumbo buffer size up to a cache-line multiple. */
4442 pDevice->RxJumboBufferSize = (pDevice->RxMtu + 8 /* CRC + VLAN */ +
4443 COMMON_CACHE_LINE_SIZE-1) & ~COMMON_CACHE_LINE_MASK;
/* 5714-family chips cannot do TSO together with jumbo frames. */
4446 if (T3_ASIC_5714_FAMILY(pDevice->ChipRevId) &&
4447 (dev->mtu > 1514) ) {
4448 if (dev->features & NETIF_F_TSO) {
4449 dev->features &= ~NETIF_F_TSO;
4450 printk(KERN_ALERT "%s: TSO previously enabled. Jumbo Frames and TSO cannot simultaneously be enabled. Jumbo Frames enabled. TSO disabled.\n", dev->name);
/* Bring the adapter back up with the new settings. */
4456 LM_InitializeAdapter(pDevice);
4457 bcm5700_do_rx_mode(dev);
4458 bcm5700_set_vlan_mode(pUmDevice);
4459 bcm5700_init_counters(pUmDevice);
4460 if (memcmp(dev->dev_addr, pDevice->NodeAddress, 6)) {
4461 LM_SetMacAddress(pDevice, dev->dev_addr);
4463 netif_start_queue(dev);
4464 bcm5700_intr_on(pUmDevice);
4466 BCM5700_PHY_UNLOCK(pUmDevice, flags);
4473 #if (LINUX_VERSION_CODE < 0x020300)
/*
 * bcm5700_probe - legacy (pre-2.3 kernel) device discovery.
 *
 * Walks every PCI Ethernet-class device, matches vendor/device/subsystem
 * IDs against bcm5700_pci_tbl (honoring PCI_ANY_ID wildcards), and calls
 * bcm5700_init_one() for each match. Returns 0 if at least one card was
 * initialized, -ENODEV otherwise.
 */
4475 bcm5700_probe(struct net_device *dev)
4477 int cards_found = 0;
4478 struct pci_dev *pdev = NULL;
4479 struct pci_device_id *pci_tbl;
4482 if ( ! pci_present())
4485 pci_tbl = bcm5700_pci_tbl;
4486 while ((pdev = pci_find_class(PCI_CLASS_NETWORK_ETHERNET << 8, pdev))) {
4489 pci_read_config_word(pdev, PCI_SUBSYSTEM_VENDOR_ID, &ssvid);
4490 pci_read_config_word(pdev, PCI_SUBSYSTEM_ID, &ssid);
4491 for (idx = 0; pci_tbl[idx].vendor; idx++) {
4492 if ((pci_tbl[idx].vendor == PCI_ANY_ID ||
4493 pci_tbl[idx].vendor == pdev->vendor) &&
4494 (pci_tbl[idx].device == PCI_ANY_ID ||
4495 pci_tbl[idx].device == pdev->device) &&
4496 (pci_tbl[idx].subvendor == PCI_ANY_ID ||
4497 pci_tbl[idx].subvendor == ssvid) &&
4498 (pci_tbl[idx].subdevice == PCI_ANY_ID ||
4499 pci_tbl[idx].subdevice == ssid))
/* vendor == 0 terminates the table: no entry matched this device. */
4505 if (pci_tbl[idx].vendor == 0)
4509 if (bcm5700_init_one(pdev, &pci_tbl[idx]) == 0)
4513 return cards_found ? 0 : -ENODEV;
/* Legacy module entry point (pre-2.3 kernels): probe all adapters. */
4517 int init_module(void)
4519 return bcm5700_probe(NULL);
/*
 * cleanup_module - legacy module exit (pre-2.3 kernels).
 *
 * Walks the root_tigon3_dev linked list, unregistering each net_device,
 * unmapping its register BAR, and freeing it (kfree on <2.6 kernels,
 * free_netdev on 2.6+), then removes the 32-bit ioctl translation.
 */
4522 void cleanup_module(void)
4524 struct net_device *next_dev;
4525 PUM_DEVICE_BLOCK pUmDevice;
4528 bcm5700_proc_remove_notifier();
4530 /* No need to check MOD_IN_USE, as sys_delete_module() checks. */
4531 while (root_tigon3_dev) {
4532 pUmDevice = (PUM_DEVICE_BLOCK)root_tigon3_dev->priv;
4534 bcm5700_proc_remove_dev(root_tigon3_dev);
/* Grab the next pointer before the current device is freed. */
4536 next_dev = pUmDevice->next_module;
4537 unregister_netdev(root_tigon3_dev);
4538 if (pUmDevice->lm_dev.pMappedMemBase)
4539 iounmap(pUmDevice->lm_dev.pMappedMemBase);
4540 #if (LINUX_VERSION_CODE < 0x020600)
4541 kfree(root_tigon3_dev);
4543 free_netdev(root_tigon3_dev);
4545 root_tigon3_dev = next_dev;
4548 unregister_ioctl32_conversion(SIOCNICE);
4553 #else /* LINUX_VERSION_CODE < 0x020300 */
4556 #if (LINUX_VERSION_CODE >= 0x2060b)
/*
 * bcm5700_suspend - PCI power-management suspend hook.
 *
 * Signature varies with kernel version (pm_message_t / u32 state, or no
 * state and void return on very old kernels). If the interface is running,
 * detaches it, shuts the chip down, and drops the device to D3.
 */
4557 static int bcm5700_suspend(struct pci_dev *pdev, pm_message_t state)
4559 #if (LINUX_VERSION_CODE >= 0x020406)
4560 static int bcm5700_suspend (struct pci_dev *pdev, u32 state)
4562 static void bcm5700_suspend (struct pci_dev *pdev)
4566 struct net_device *dev = (struct net_device *) pci_get_drvdata(pdev);
4567 PUM_DEVICE_BLOCK pUmDevice = (PUM_DEVICE_BLOCK) dev->priv;
4568 PLM_DEVICE_BLOCK pDevice = &pUmDevice->lm_dev;
4570 if (!netif_running(dev))
4571 #if (LINUX_VERSION_CODE >= 0x020406)
4577 netif_device_detach (dev);
4578 bcm5700_shutdown(pUmDevice);
4580 LM_SetPowerState(pDevice, LM_POWER_STATE_D3);
4582 /* pci_power_off(pdev, -1);*/
4583 #if (LINUX_VERSION_CODE >= 0x020406)
4589 #if (LINUX_VERSION_CODE >= 0x020406)
/*
 * bcm5700_resume - PCI power-management resume hook.
 *
 * Mirror of bcm5700_suspend: if the interface was running, reattaches
 * the net_device, restores D0 power state, and replenishes the receive
 * buffer pool via MM_InitializeUmPackets.
 */
4590 static int bcm5700_resume(struct pci_dev *pdev)
4592 static void bcm5700_resume(struct pci_dev *pdev)
4595 struct net_device *dev = (struct net_device *) pci_get_drvdata(pdev);
4596 PUM_DEVICE_BLOCK pUmDevice = (PUM_DEVICE_BLOCK) dev->priv;
4597 PLM_DEVICE_BLOCK pDevice = &pUmDevice->lm_dev;
4599 if (!netif_running(dev))
4600 #if (LINUX_VERSION_CODE >= 0x020406)
4605 /* pci_power_on(pdev);*/
4606 netif_device_attach(dev);
4607 LM_SetPowerState(pDevice, LM_POWER_STATE_D0);
4608 MM_InitializeUmPackets(pDevice);
4610 #if (LINUX_VERSION_CODE >= 0x020406)
/* PCI driver registration table (GNU old-style designated initializers),
 * wiring probe/remove/suspend/resume into the PCI core. */
4616 static struct pci_driver bcm5700_pci_driver = {
4617 name: bcm5700_driver,
4618 id_table: bcm5700_pci_tbl,
4619 probe: bcm5700_init_one,
4620 remove: __devexit_p(bcm5700_remove_one),
4621 suspend: bcm5700_suspend,
4622 resume: bcm5700_resume,
/* Modern (2.4+) module init: register the PCI driver with the core. */
4626 static int __init bcm5700_init_module (void)
4628 return pci_module_init(&bcm5700_pci_driver);
/* Modern (2.4+) module exit: drop proc notifier and unregister the driver. */
4632 static void __exit bcm5700_cleanup_module (void)
4635 bcm5700_proc_remove_notifier();
4637 pci_unregister_driver(&bcm5700_pci_driver);
/* Register the module entry/exit points with the kernel. */
4641 module_init(bcm5700_init_module);
4642 module_exit(bcm5700_cleanup_module);
4651 #ifdef BCM_NAPI_RXPOLL
/*
 * MM_ScheduleRxPoll - schedule the NAPI receive poll for this device.
 *
 * Returns LM_STATUS_SUCCESS if the poll was scheduled, or
 * LM_STATUS_FAILURE if it was already scheduled/disallowed
 * (netif_rx_schedule_prep returned false).
 */
4653 MM_ScheduleRxPoll(LM_DEVICE_BLOCK *pDevice)
4655 struct net_device *dev = ((UM_DEVICE_BLOCK *) pDevice)->dev;
4657 if (netif_rx_schedule_prep(dev)) {
4658 __netif_rx_schedule(dev);
4659 return LM_STATUS_SUCCESS;
4661 return LM_STATUS_FAILURE;
/* MM_ReadConfig16 - read a 16-bit value from PCI config space at Offset.
 * Always reports LM_STATUS_SUCCESS (the pci accessor's result is ignored). */
4666 MM_ReadConfig16(PLM_DEVICE_BLOCK pDevice, LM_UINT32 Offset,
4667 LM_UINT16 *pValue16)
4669 UM_DEVICE_BLOCK *pUmDevice;
4671 pUmDevice = (UM_DEVICE_BLOCK *) pDevice;
4672 pci_read_config_word(pUmDevice->pdev, Offset, (u16 *) pValue16);
4673 return LM_STATUS_SUCCESS;
/* MM_ReadConfig32 - read a 32-bit value from PCI config space at Offset. */
4677 MM_ReadConfig32(PLM_DEVICE_BLOCK pDevice, LM_UINT32 Offset,
4678 LM_UINT32 *pValue32)
4680 UM_DEVICE_BLOCK *pUmDevice;
4682 pUmDevice = (UM_DEVICE_BLOCK *) pDevice;
4683 pci_read_config_dword(pUmDevice->pdev, Offset, (u32 *) pValue32);
4684 return LM_STATUS_SUCCESS;
/* MM_WriteConfig16 - write a 16-bit value to PCI config space at Offset. */
4688 MM_WriteConfig16(PLM_DEVICE_BLOCK pDevice, LM_UINT32 Offset,
4691 UM_DEVICE_BLOCK *pUmDevice;
4693 pUmDevice = (UM_DEVICE_BLOCK *) pDevice;
4694 pci_write_config_word(pUmDevice->pdev, Offset, Value16);
4695 return LM_STATUS_SUCCESS;
/* MM_WriteConfig32 - write a 32-bit value to PCI config space at Offset. */
4699 MM_WriteConfig32(PLM_DEVICE_BLOCK pDevice, LM_UINT32 Offset,
4702 UM_DEVICE_BLOCK *pUmDevice;
4704 pUmDevice = (UM_DEVICE_BLOCK *) pDevice;
4705 pci_write_config_dword(pUmDevice->pdev, Offset, Value32);
4706 return LM_STATUS_SUCCESS;
/*
 * MM_AllocateSharedMemory - allocate a zeroed, DMA-coherent block.
 *
 * Uses pci_alloc_consistent; the virtual pointer, DMA handle, and size are
 * recorded in the mem/dma/mem_size bookkeeping lists so the block can be
 * freed on teardown. Returns the virtual address via *pMemoryBlockVirt and
 * the bus address via MM_SetAddr(pMemoryBlockPhy, ...).
 * Returns LM_STATUS_FAILURE if the allocation fails.
 */
4710 MM_AllocateSharedMemory(PLM_DEVICE_BLOCK pDevice, LM_UINT32 BlockSize,
4711 PLM_VOID *pMemoryBlockVirt, PLM_PHYSICAL_ADDRESS pMemoryBlockPhy,
4715 PUM_DEVICE_BLOCK pUmDevice = (PUM_DEVICE_BLOCK) pDevice;
4718 pvirt = pci_alloc_consistent(pUmDevice->pdev, BlockSize,
4721 return LM_STATUS_FAILURE;
4723 pUmDevice->mem_list[pUmDevice->mem_list_num] = pvirt;
4724 pUmDevice->dma_list[pUmDevice->mem_list_num] = mapping;
4725 pUmDevice->mem_size_list[pUmDevice->mem_list_num++] = BlockSize;
4726 memset(pvirt, 0, BlockSize);
4727 *pMemoryBlockVirt = (PLM_VOID) pvirt;
4728 MM_SetAddr(pMemoryBlockPhy, mapping);
4729 return LM_STATUS_SUCCESS;
/*
 * MM_AllocateMemory - allocate a zeroed, non-DMA memory block via kmalloc.
 *
 * Rejects requests above the kmalloc/slab maximum (128 KiB). The block is
 * tracked in mem_list with dma_list/mem_size_list entries of 0, which is
 * how teardown distinguishes kmalloc'd blocks from coherent DMA blocks.
 * GFP_ATOMIC is used, so this is safe from non-sleeping contexts.
 */
4733 MM_AllocateMemory(PLM_DEVICE_BLOCK pDevice, LM_UINT32 BlockSize,
4734 PLM_VOID *pMemoryBlockVirt)
4737 PUM_DEVICE_BLOCK pUmDevice = (PUM_DEVICE_BLOCK) pDevice;
4740 /* Maximum in slab.c */
4741 if (BlockSize > 131072) {
4742 goto MM_Alloc_error;
4745 pvirt = kmalloc(BlockSize,GFP_ATOMIC);
4747 goto MM_Alloc_error;
4749 pUmDevice->mem_list[pUmDevice->mem_list_num] = pvirt;
4750 pUmDevice->dma_list[pUmDevice->mem_list_num] = 0;
4751 pUmDevice->mem_size_list[pUmDevice->mem_list_num++] = 0;
4752 /* mem_size_list[i] == 0 indicates that the memory should be freed */
4754 memset(pvirt, 0, BlockSize);
4755 *pMemoryBlockVirt = pvirt;
4756 return LM_STATUS_SUCCESS;
4759 printk(KERN_WARNING "%s: Memory allocation failed - buffer parameters may be set too high\n", pUmDevice->dev->name);
4760 return LM_STATUS_FAILURE;
/*
 * MM_MapMemBase - ioremap the chip's register BAR (PCI BAR 0).
 *
 * Maps sizeof(T3_STD_MEM_MAP) bytes uncached into kernel virtual space and
 * stores the pointer in pDevice->pMappedMemBase. Returns LM_STATUS_FAILURE
 * if the mapping could not be created.
 */
4764 MM_MapMemBase(PLM_DEVICE_BLOCK pDevice)
4766 PUM_DEVICE_BLOCK pUmDevice = (PUM_DEVICE_BLOCK) pDevice;
4768 pDevice->pMappedMemBase = ioremap_nocache(
4769 pci_resource_start(pUmDevice->pdev, 0), sizeof(T3_STD_MEM_MAP));
4770 if (pDevice->pMappedMemBase == 0)
4771 return LM_STATUS_FAILURE;
4773 return LM_STATUS_SUCCESS;
/*
 * MM_InitializeUmPackets - attach sk_buffs to the receive packet pool and
 * derive the buffer-replenish thresholds.
 *
 * For each descriptor in RxPacketFreeQ that lacks an skb, allocates one
 * (RxBufferSize + 2 for alignment slack); packets whose allocation fails
 * are parked on rx_out_of_buf_q instead of the free queue. Thresholds:
 * 5700-rev ASICs replenish entirely in the ISR (thresholds 0); all others
 * use 1/8 of the descriptor count as the normal threshold, 7/8 as the
 * panic threshold, and 1/8 as the per-ISR replenish limit, clamped below
 * the jumbo/std ring sizes when a jumbo ring is configured.
 */
4777 MM_InitializeUmPackets(PLM_DEVICE_BLOCK pDevice)
4780 struct sk_buff *skb;
4781 PUM_DEVICE_BLOCK pUmDevice = (PUM_DEVICE_BLOCK) pDevice;
4782 PUM_PACKET pUmPacket;
4785 for (i = 0; i < pDevice->RxPacketDescCnt; i++) {
4786 pPacket = QQ_PopHead(&pDevice->RxPacketFreeQ.Container);
4787 pUmPacket = (PUM_PACKET) pPacket;
4789 printk(KERN_DEBUG "Bad RxPacketFreeQ\n");
4791 if (pUmPacket->skbuff == 0) {
4792 skb = dev_alloc_skb(pPacket->u.Rx.RxBufferSize + 2);
4794 pUmPacket->skbuff = 0;
4796 &pUmDevice->rx_out_of_buf_q.Container,
4800 pUmPacket->skbuff = skb;
4801 skb->dev = pUmDevice->dev;
4802 skb_reserve(skb, pUmDevice->rx_buf_align);
4804 QQ_PushTail(&pDevice->RxPacketFreeQ.Container, pPacket);
4806 if (T3_ASIC_REV(pUmDevice->lm_dev.ChipRevId) == T3_ASIC_REV_5700) {
4807 /* reallocate buffers in the ISR */
4808 pUmDevice->rx_buf_repl_thresh = 0;
4809 pUmDevice->rx_buf_repl_panic_thresh = 0;
4810 pUmDevice->rx_buf_repl_isr_limit = 0;
4813 pUmDevice->rx_buf_repl_thresh = pDevice->RxPacketDescCnt / 8;
4814 pUmDevice->rx_buf_repl_panic_thresh =
4815 pDevice->RxPacketDescCnt * 7 / 8;
4817 /* This limits the time spent in the ISR when the receiver */
4818 /* is in a steady state of being overrun. */
4819 pUmDevice->rx_buf_repl_isr_limit = pDevice->RxPacketDescCnt / 8;
4821 #if T3_JUMBO_RCV_RCB_ENTRY_COUNT
4822 if (pDevice->RxJumboDescCnt != 0) {
4823 if (pUmDevice->rx_buf_repl_thresh >=
4824 pDevice->RxJumboDescCnt) {
4826 pUmDevice->rx_buf_repl_thresh =
4827 pUmDevice->rx_buf_repl_panic_thresh =
4828 pDevice->RxJumboDescCnt - 1;
4830 if (pUmDevice->rx_buf_repl_thresh >=
4831 pDevice->RxStdDescCnt) {
4833 pUmDevice->rx_buf_repl_thresh =
4834 pUmDevice->rx_buf_repl_panic_thresh =
4835 pDevice->RxStdDescCnt - 1;
4840 return LM_STATUS_SUCCESS;
4844 MM_GetConfig(PLM_DEVICE_BLOCK pDevice)
4846 PUM_DEVICE_BLOCK pUmDevice = (PUM_DEVICE_BLOCK) pDevice;
4847 int index = pUmDevice->index;
4848 struct net_device *dev = pUmDevice->dev;
4850 if (index >= MAX_UNITS)
4851 return LM_STATUS_SUCCESS;
4853 #if LINUX_KERNEL_VERSION < 0x0020609
4855 bcm5700_validate_param_range(pUmDevice, &auto_speed[index], "auto_speed",
4857 if (auto_speed[index] == 0)
4858 pDevice->DisableAutoNeg = TRUE;
4860 pDevice->DisableAutoNeg = FALSE;
4862 if (line_speed[index] == 0) {
4863 pDevice->RequestedLineSpeed = LM_LINE_SPEED_AUTO;
4864 pDevice->DisableAutoNeg = FALSE;
4867 bcm5700_validate_param_range(pUmDevice, &full_duplex[index],
4868 "full_duplex", 0, 1, 1);
4869 if (full_duplex[index]) {
4870 pDevice->RequestedDuplexMode = LM_DUPLEX_MODE_FULL;
4873 pDevice->RequestedDuplexMode = LM_DUPLEX_MODE_HALF;
4876 if (line_speed[index] == 1000) {
4877 pDevice->RequestedLineSpeed = LM_LINE_SPEED_1000MBPS;
4878 if (pDevice->PhyFlags & PHY_NO_GIGABIT) {
4879 pDevice->RequestedLineSpeed =
4880 LM_LINE_SPEED_100MBPS;
4881 printk(KERN_WARNING "%s-%d: Invalid line_speed parameter (1000), using 100\n", bcm5700_driver, index);
4884 if ((pDevice->TbiFlags & ENABLE_TBI_FLAG) &&
4885 !full_duplex[index]) {
4886 printk(KERN_WARNING "%s-%d: Invalid full_duplex parameter (0) for fiber, using 1\n", bcm5700_driver, index);
4887 pDevice->RequestedDuplexMode =
4888 LM_DUPLEX_MODE_FULL;
4891 if (!(pDevice->TbiFlags & ENABLE_TBI_FLAG) &&
4892 !auto_speed[index] && !(pDevice->PhyFlags & PHY_IS_FIBER) ) {
4893 printk(KERN_WARNING "%s-%d: Invalid auto_speed parameter (0) for copper, using 1\n", bcm5700_driver, index);
4894 pDevice->DisableAutoNeg = FALSE;
4898 else if ((pDevice->TbiFlags & ENABLE_TBI_FLAG) ||
4899 (pDevice->PhyFlags & PHY_IS_FIBER)){
4900 pDevice->RequestedLineSpeed = LM_LINE_SPEED_AUTO;
4901 pDevice->RequestedDuplexMode = LM_DUPLEX_MODE_FULL;
4902 pDevice->DisableAutoNeg = FALSE;
4903 printk(KERN_WARNING "%s-%d: Invalid line_speed parameter (%d), using auto\n", bcm5700_driver, index, line_speed[index]);
4905 else if (line_speed[index] == 100) {
4907 pDevice->RequestedLineSpeed = LM_LINE_SPEED_100MBPS;
4909 else if (line_speed[index] == 10) {
4911 pDevice->RequestedLineSpeed = LM_LINE_SPEED_10MBPS;
4914 pDevice->RequestedLineSpeed = LM_LINE_SPEED_AUTO;
4915 pDevice->DisableAutoNeg = FALSE;
4916 printk(KERN_WARNING "%s-%d: Invalid line_speed parameter (%d), using 0\n", bcm5700_driver, index, line_speed[index]);
4921 #endif /* LINUX_KERNEL_VERSION */
4923 /* This is an unmanageable switch nic and will have link problems if
4926 if(pDevice->SubsystemVendorId==0x103c && pDevice->SubsystemId==0x3226)
4928 if(pDevice->RequestedLineSpeed != LM_LINE_SPEED_AUTO)
4930 printk(KERN_WARNING "%s-%d: Invalid line_speed parameter (%d), using 0\n",
4931 bcm5700_driver, index, line_speed[index]);
4933 pDevice->RequestedLineSpeed = LM_LINE_SPEED_AUTO;
4934 pDevice->DisableAutoNeg = FALSE;
4937 #if LINUX_KERNEL_VERSION < 0x0020609
4939 pDevice->FlowControlCap = 0;
4940 bcm5700_validate_param_range(pUmDevice, &rx_flow_control[index],
4941 "rx_flow_control", 0, 1, 0);
4942 if (rx_flow_control[index] != 0) {
4943 pDevice->FlowControlCap |= LM_FLOW_CONTROL_RECEIVE_PAUSE;
4945 bcm5700_validate_param_range(pUmDevice, &tx_flow_control[index],
4946 "tx_flow_control", 0, 1, 0);
4947 if (tx_flow_control[index] != 0) {
4948 pDevice->FlowControlCap |= LM_FLOW_CONTROL_TRANSMIT_PAUSE;
4950 bcm5700_validate_param_range(pUmDevice, &auto_flow_control[index],
4951 "auto_flow_control", 0, 1, 0);
4952 if (auto_flow_control[index] != 0) {
4953 if (pDevice->DisableAutoNeg == FALSE) {
4955 pDevice->FlowControlCap |= LM_FLOW_CONTROL_AUTO_PAUSE;
4956 if ((tx_flow_control[index] == 0) &&
4957 (rx_flow_control[index] == 0)) {
4959 pDevice->FlowControlCap |=
4960 LM_FLOW_CONTROL_TRANSMIT_PAUSE |
4961 LM_FLOW_CONTROL_RECEIVE_PAUSE;
4966 if (dev->mtu > 1500) {
4968 if (T3_ASIC_5714_FAMILY(pDevice->ChipRevId) &&
4969 (dev->features & NETIF_F_TSO)) {
4970 dev->features &= ~NETIF_F_TSO;
4971 printk(KERN_ALERT "%s: TSO previously enabled. Jumbo Frames and TSO cannot simultaneously be enabled. Jumbo Frames enabled. TSO disabled.\n", dev->name);
4974 pDevice->RxMtu = dev->mtu + 14;
4977 if ((T3_ASIC_REV(pDevice->ChipRevId) != T3_ASIC_REV_5700) &&
4978 !(pDevice->Flags & BCM5788_FLAG)) {
4979 pDevice->Flags |= USE_TAGGED_STATUS_FLAG;
4980 pUmDevice->timer_interval = HZ;
4981 if ((T3_ASIC_REV(pDevice->ChipRevId) == T3_ASIC_REV_5703) &&
4982 (pDevice->TbiFlags & ENABLE_TBI_FLAG)) {
4983 pUmDevice->timer_interval = HZ/4;
4987 pUmDevice->timer_interval = HZ/10;
4990 bcm5700_validate_param_range(pUmDevice, &tx_pkt_desc_cnt[index],
4991 "tx_pkt_desc_cnt", 1, MAX_TX_PACKET_DESC_COUNT-1, TX_DESC_CNT);
4992 pDevice->TxPacketDescCnt = tx_pkt_desc_cnt[index];
4993 bcm5700_validate_param_range(pUmDevice, &rx_std_desc_cnt[index],
4994 "rx_std_desc_cnt", 1, T3_STD_RCV_RCB_ENTRY_COUNT-1,
4996 pDevice->RxStdDescCnt = rx_std_desc_cnt[index];
4998 #if T3_JUMBO_RCV_RCB_ENTRY_COUNT
4999 bcm5700_validate_param_range(pUmDevice, &rx_jumbo_desc_cnt[index],
5000 "rx_jumbo_desc_cnt", 1, T3_JUMBO_RCV_RCB_ENTRY_COUNT-1,
5003 if (mtu[index] <= 1514)
5004 pDevice->RxJumboDescCnt = 0;
5005 else if(!T3_ASIC_IS_5705_BEYOND(pDevice->ChipRevId)){
5006 pDevice->RxJumboDescCnt = rx_jumbo_desc_cnt[index];
5011 bcm5700_validate_param_range(pUmDevice, &adaptive_coalesce[index],
5012 "adaptive_coalesce", 0, 1, 1);
5013 #ifdef BCM_NAPI_RXPOLL
5014 if (adaptive_coalesce[index]) {
5015 printk(KERN_WARNING "%s-%d: adaptive_coalesce not used in NAPI mode\n", bcm5700_driver, index);
5016 adaptive_coalesce[index] = 0;
5020 pUmDevice->adaptive_coalesce = adaptive_coalesce[index];
5021 if (!pUmDevice->adaptive_coalesce) {
5022 bcm5700_validate_param_range(pUmDevice,
5023 &rx_coalesce_ticks[index], "rx_coalesce_ticks", 0,
5024 MAX_RX_COALESCING_TICKS, RX_COAL_TK);
5025 if ((rx_coalesce_ticks[index] == 0) &&
5026 (rx_max_coalesce_frames[index] == 0)) {
5028 printk(KERN_WARNING "%s-%d: Conflicting rx_coalesce_ticks (0) and rx_max_coalesce_frames (0) parameters, using %d and %d respectively\n",
5029 bcm5700_driver, index, RX_COAL_TK, RX_COAL_FM);
5031 rx_coalesce_ticks[index] = RX_COAL_TK;
5032 rx_max_coalesce_frames[index] = RX_COAL_FM;
5034 pDevice->RxCoalescingTicks = pUmDevice->rx_curr_coalesce_ticks =
5035 rx_coalesce_ticks[index];
5036 #ifdef BCM_NAPI_RXPOLL
5037 pDevice->RxCoalescingTicksDuringInt = rx_coalesce_ticks[index];
5040 bcm5700_validate_param_range(pUmDevice,
5041 &rx_max_coalesce_frames[index],
5042 "rx_max_coalesce_frames", 0,
5043 MAX_RX_MAX_COALESCED_FRAMES, RX_COAL_FM);
5045 pDevice->RxMaxCoalescedFrames =
5046 pUmDevice->rx_curr_coalesce_frames =
5047 rx_max_coalesce_frames[index];
5048 #ifdef BCM_NAPI_RXPOLL
5049 pDevice->RxMaxCoalescedFramesDuringInt =
5050 rx_max_coalesce_frames[index];
5053 bcm5700_validate_param_range(pUmDevice,
5054 &tx_coalesce_ticks[index], "tx_coalesce_ticks", 0,
5055 MAX_TX_COALESCING_TICKS, TX_COAL_TK);
5056 if ((tx_coalesce_ticks[index] == 0) &&
5057 (tx_max_coalesce_frames[index] == 0)) {
5059 printk(KERN_WARNING "%s-%d: Conflicting tx_coalesce_ticks (0) and tx_max_coalesce_frames (0) parameters, using %d and %d respectively\n",
5060 bcm5700_driver, index, TX_COAL_TK, TX_COAL_FM);
5062 tx_coalesce_ticks[index] = TX_COAL_TK;
5063 tx_max_coalesce_frames[index] = TX_COAL_FM;
5065 pDevice->TxCoalescingTicks = tx_coalesce_ticks[index];
5066 bcm5700_validate_param_range(pUmDevice,
5067 &tx_max_coalesce_frames[index],
5068 "tx_max_coalesce_frames", 0,
5069 MAX_TX_MAX_COALESCED_FRAMES, TX_COAL_FM);
5070 pDevice->TxMaxCoalescedFrames = tx_max_coalesce_frames[index];
5071 pUmDevice->tx_curr_coalesce_frames =
5072 pDevice->TxMaxCoalescedFrames;
5074 bcm5700_validate_param_range(pUmDevice,
5075 &stats_coalesce_ticks[index], "stats_coalesce_ticks",
5076 0, MAX_STATS_COALESCING_TICKS, ST_COAL_TK);
5077 if (adaptive_coalesce[index]) {
5078 printk(KERN_WARNING "%s-%d: Invalid stats_coalesce_ticks parameter set with with adaptive_coalesce parameter. Using adaptive_coalesce.\n", bcm5700_driver, index);
5080 if ((stats_coalesce_ticks[index] > 0) &&
5081 (stats_coalesce_ticks[index] < 100)) {
5082 printk(KERN_WARNING "%s-%d: Invalid stats_coalesce_ticks parameter (%u), using 100\n", bcm5700_driver, index, (unsigned int) stats_coalesce_ticks[index]);
5083 stats_coalesce_ticks[index] = 100;
5084 pDevice->StatsCoalescingTicks = stats_coalesce_ticks[index];
5085 pDevice->StatsCoalescingTicks = stats_coalesce_ticks[index];
5090 pUmDevice->rx_curr_coalesce_frames = RX_COAL_FM;
5091 pUmDevice->rx_curr_coalesce_ticks = RX_COAL_TK;
5092 pUmDevice->tx_curr_coalesce_frames = TX_COAL_FM;
5096 if (T3_ASIC_IS_5705_BEYOND(pDevice->ChipRevId)) {
5097 unsigned int tmpvar;
5099 tmpvar = pDevice->StatsCoalescingTicks / BCM_TIMER_GRANULARITY;
5102 * If the result is zero, the request is too demanding.
5108 pDevice->StatsCoalescingTicks = tmpvar * BCM_TIMER_GRANULARITY;
5110 pUmDevice->statstimer_interval = tmpvar;
5114 bcm5700_validate_param_range(pUmDevice, &enable_wol[index],
5115 "enable_wol", 0, 1, 0);
5116 if (enable_wol[index]) {
5117 pDevice->WakeUpModeCap = LM_WAKE_UP_MODE_MAGIC_PACKET;
5118 pDevice->WakeUpMode = LM_WAKE_UP_MODE_MAGIC_PACKET;
5121 #ifdef INCLUDE_TBI_SUPPORT
5122 if (pDevice->TbiFlags & ENABLE_TBI_FLAG) {
5123 if ((T3_ASIC_REV(pDevice->ChipRevId) == T3_ASIC_REV_5704) ||
5124 (T3_ASIC_REV(pDevice->ChipRevId) == T3_ASIC_REV_5703)) {
5125 /* just poll since we have hardware autoneg. in 5704 */
5126 pDevice->TbiFlags |= TBI_PURE_POLLING_FLAG;
5129 pDevice->TbiFlags |= TBI_POLLING_INTR_FLAG;
5133 bcm5700_validate_param_range(pUmDevice, &scatter_gather[index],
5134 "scatter_gather", 0, 1, 1);
5135 bcm5700_validate_param_range(pUmDevice, &tx_checksum[index],
5136 "tx_checksum", 0, 1, 1);
5137 bcm5700_validate_param_range(pUmDevice, &rx_checksum[index],
5138 "rx_checksum", 0, 1, 1);
5139 if (!(pDevice->TaskOffloadCap & LM_TASK_OFFLOAD_TX_TCP_CHECKSUM)) {
5140 if (tx_checksum[index] || rx_checksum[index]) {
5142 pDevice->TaskToOffload = LM_TASK_OFFLOAD_NONE;
5143 printk(KERN_WARNING "%s-%d: Checksum offload not available on this NIC\n", bcm5700_driver, index);
5147 if (rx_checksum[index]) {
5148 pDevice->TaskToOffload |=
5149 LM_TASK_OFFLOAD_RX_TCP_CHECKSUM |
5150 LM_TASK_OFFLOAD_RX_UDP_CHECKSUM;
5152 if (tx_checksum[index]) {
5153 pDevice->TaskToOffload |=
5154 LM_TASK_OFFLOAD_TX_TCP_CHECKSUM |
5155 LM_TASK_OFFLOAD_TX_UDP_CHECKSUM;
5156 pDevice->Flags |= NO_TX_PSEUDO_HDR_CSUM_FLAG;
5160 bcm5700_validate_param_range(pUmDevice, &enable_tso[index],
5161 "enable_tso", 0, 1, 1);
5163 /* Always enable TSO firmware if supported */
5164 /* This way we can turn it on or off on the fly */
5165 if (pDevice->TaskOffloadCap & LM_TASK_OFFLOAD_TCP_SEGMENTATION)
5167 pDevice->TaskToOffload |=
5168 LM_TASK_OFFLOAD_TCP_SEGMENTATION;
5170 if (enable_tso[index] &&
5171 !(pDevice->TaskToOffload & LM_TASK_OFFLOAD_TCP_SEGMENTATION))
5173 printk(KERN_WARNING "%s-%d: TSO not available on this NIC\n", bcm5700_driver, index);
5177 bcm5700_validate_param_range(pUmDevice, &vlan_tag_mode[index],
5178 "vlan_strip_mode", 0, 2, 0);
5179 pUmDevice->vlan_tag_mode = vlan_tag_mode[index];
5181 pUmDevice->vlan_tag_mode = VLAN_TAG_MODE_NORMAL_STRIP;
5184 #endif /* LINUX_KERNEL_VERSION */
5186 #ifdef BCM_NIC_SEND_BD
5187 bcm5700_validate_param_range(pUmDevice, &nic_tx_bd[index], "nic_tx_bd",
5189 if (nic_tx_bd[index])
5190 pDevice->Flags |= NIC_SEND_BD_FLAG;
5191 if ((pDevice->Flags & ENABLE_PCIX_FIX_FLAG) ||
5192 (T3_ASIC_REV(pDevice->ChipRevId) == T3_ASIC_REV_5705)) {
5193 if (pDevice->Flags & NIC_SEND_BD_FLAG) {
5194 pDevice->Flags &= ~NIC_SEND_BD_FLAG;
5195 printk(KERN_WARNING "%s-%d: Nic Send BDs not available on this NIC or not possible on this system\n", bcm5700_driver, index);
5199 #if defined(CONFIG_PCI_MSI) || defined(CONFIG_PCI_USE_VECTOR)
5200 bcm5700_validate_param_range(pUmDevice, &disable_msi[pUmDevice->index],
5201 "disable_msi", 0, 1, 0);
5204 bcm5700_validate_param_range(pUmDevice, &delay_link[index],
5205 "delay_link", 0, 1, 0);
5207 bcm5700_validate_param_range(pUmDevice, &disable_d3hot[index],
5208 "disable_d3hot", 0, 1, 0);
5209 if (disable_d3hot[index]) {
5212 if (enable_wol[index]) {
5213 pDevice->WakeUpModeCap = LM_WAKE_UP_MODE_NONE;
5214 pDevice->WakeUpMode = LM_WAKE_UP_MODE_NONE;
5215 printk(KERN_WARNING "%s-%d: Wake-On-Lan disabled because D3Hot is disabled\n", bcm5700_driver, index);
5218 pDevice->Flags |= DISABLE_D3HOT_FLAG;
5221 return LM_STATUS_SUCCESS;
/*
 * MM_IndicateRxPackets - LM-layer callback: drain completed receive packets
 * from pDevice->RxPacketReceivedQ and hand each frame to the Linux stack.
 *
 * NOTE(review): this listing is elided (the per-packet loop header and a
 * number of braces/else arms are not visible); comments below describe only
 * the visible statements — confirm control flow against the full source.
 */
5225 MM_IndicateRxPackets(PLM_DEVICE_BLOCK pDevice)
5227 PUM_DEVICE_BLOCK pUmDevice = (PUM_DEVICE_BLOCK) pDevice;
5229 PUM_PACKET pUmPacket;
5230 struct sk_buff *skb;
5232 int vlan_tag_size = 0;
/* Allow 4 extra bytes over MTU when HW leaves the VLAN tag in the frame
 * (presumably; the assignment line itself is elided here). */
5234 if (pDevice->ReceiveMask & LM_KEEP_VLAN_TAG)
/* Pop the next completed packet from the LM receive queue. */
5238 pPacket = (PLM_PACKET)
5239 QQ_PopHead(&pDevice->RxPacketReceivedQ.Container);
5242 pUmPacket = (PUM_PACKET) pPacket;
5243 #if ! defined(NO_PCI_UNMAP)
/* Release the streaming DMA mapping before the CPU touches the data. */
5244 pci_unmap_single(pUmDevice->pdev,
5245 pci_unmap_addr(pUmPacket, map[0]),
5246 pPacket->u.Rx.RxBufferSize,
5247 PCI_DMA_FROMDEVICE);
/* Drop packets with a bad status or an over-length payload; the buffer is
 * recycled (one of the two queues below — the branch lines are elided). */
5249 if ((pPacket->PacketStatus != LM_STATUS_SUCCESS) ||
5250 ((size = pPacket->PacketSize) >
5251 (pDevice->RxMtu + vlan_tag_size))) {
5255 QQ_PushTail(&pUmDevice->rx_out_of_buf_q.Container, pPacket);
5257 QQ_PushTail(&pDevice->RxPacketFreeQ.Container, pPacket);
5259 pUmDevice->rx_misc_errors++;
5262 skb = pUmPacket->skbuff;
5265 skb->protocol = eth_type_trans(skb, skb->dev);
/* Frame longer than MTU is only legal if it carries a VLAN tag. */
5266 if (size > pDevice->RxMtu) {
5267 /* Make sure we have a valid VLAN tag */
5268 if (htons(skb->protocol) != 0x8100) {
5269 dev_kfree_skb_irq(skb);
5270 pUmDevice->rx_misc_errors++;
/* Propagate the hardware TCP/UDP checksum result to the stack:
 * 0xffff means the checksum verified, so the stack can skip it. */
5274 if ((pPacket->Flags & RCV_BD_FLAG_TCP_UDP_CHKSUM_FIELD) &&
5275 (pDevice->TaskToOffload &
5276 LM_TASK_OFFLOAD_RX_TCP_CHECKSUM)) {
5277 if (pPacket->u.Rx.TcpUdpChecksum == 0xffff) {
5279 skb->ip_summed = CHECKSUM_UNNECESSARY;
5281 pUmDevice->rx_good_chksum_count++;
5285 skb->ip_summed = CHECKSUM_NONE;
5286 pUmDevice->rx_bad_chksum_count++;
5290 skb->ip_summed = CHECKSUM_NONE;
/* NICE (vendor hook) receive path: stash the VLAN tag in skb->cb and hand
 * the skb to the registered callback instead of the normal stack. */
5293 if( pUmDevice->nice_rx ) {
5294 vlan_tag_t *vlan_tag;
5296 vlan_tag = (vlan_tag_t *) &skb->cb[0];
5297 if (pPacket->Flags & RCV_BD_FLAG_VLAN_TAG) {
/* 0x7777 is the NICE in-band signature marking a valid tag in cb[]. */
5298 vlan_tag->signature = 0x7777;
5299 vlan_tag->tag = pPacket->VlanTag;
5302 vlan_tag->signature = 0;
5304 pUmDevice->nice_rx(skb, pUmDevice->nice_ctx);
/* Normal path: VLAN-accelerated receive when a vlan group is registered,
 * otherwise plain (NAPI or legacy) receive. */
5310 if (pUmDevice->vlgrp &&
5311 (pPacket->Flags & RCV_BD_FLAG_VLAN_TAG)) {
5313 #ifdef BCM_NAPI_RXPOLL
5314 vlan_hwaccel_receive_skb(skb, pUmDevice->vlgrp,
5317 vlan_hwaccel_rx(skb, pUmDevice->vlgrp,
5324 #ifdef BCM_NAPI_RXPOLL
5325 netif_receive_skb(skb);
5331 pUmDevice->dev->last_rx = jiffies;
/* The skb was consumed by the stack: detach it from the packet and try to
 * allocate a replacement; on failure the packet waits in rx_out_of_buf_q. */
5335 pUmPacket->skbuff = 0;
5336 QQ_PushTail(&pUmDevice->rx_out_of_buf_q.Container, pPacket);
/* +2 bytes so the IP header can be aligned after skb_reserve(). */
5338 skb = dev_alloc_skb(pPacket->u.Rx.RxBufferSize + 2);
5340 pUmPacket->skbuff = 0;
5341 QQ_PushTail(&pUmDevice->rx_out_of_buf_q.Container, pPacket);
5344 pUmPacket->skbuff = skb;
5345 skb->dev = pUmDevice->dev;
5346 skb_reserve(skb, pUmDevice->rx_buf_align);
5347 QQ_PushTail(&pDevice->RxPacketFreeQ.Container, pPacket);
5351 return LM_STATUS_SUCCESS;
/*
 * MM_CoalesceTxBuffer - collapse a fragmented (nonlinear) TX skb into a
 * single linear copy so it can be described by one buffer descriptor.
 * Returns LM_STATUS_SUCCESS with pUmPacket->skbuff replaced by the copy,
 * or LM_STATUS_FAILURE if the atomic copy allocation fails (skbuff is
 * then cleared; the original skb's fate is in elided lines — confirm).
 *
 * NOTE(review): listing is elided (unmap direction args, #endif lines and
 * some braces not visible).
 */
5355 MM_CoalesceTxBuffer(PLM_DEVICE_BLOCK pDevice, PLM_PACKET pPacket)
5357 PUM_PACKET pUmPacket = (PUM_PACKET) pPacket;
5358 struct sk_buff *skb = pUmPacket->skbuff;
5359 struct sk_buff *nskb;
5360 #if ! defined(NO_PCI_UNMAP)
5361 PUM_DEVICE_BLOCK pUmDevice = (PUM_DEVICE_BLOCK) pDevice;
/* Tear down the existing DMA mappings for the head and every fragment
 * before the skb is replaced. */
5363 pci_unmap_single(pUmDevice->pdev,
5364 pci_unmap_addr(pUmPacket, map[0]),
5365 pci_unmap_len(pUmPacket, map_len[0]),
5371 for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) {
5372 pci_unmap_page(pUmDevice->pdev,
5373 pci_unmap_addr(pUmPacket, map[i + 1]),
5374 pci_unmap_len(pUmPacket, map_len[i + 1]),
/* skb_copy() linearizes: the copy has all data in the head, hence a
 * single fragment from the LM layer's point of view. GFP_ATOMIC because
 * this can run in the transmit path (no sleeping). */
5380 if ((nskb = skb_copy(skb, GFP_ATOMIC))) {
5381 pUmPacket->lm_packet.u.Tx.FragCount = 1;
5383 pUmPacket->skbuff = nskb;
5384 return LM_STATUS_SUCCESS;
5387 pUmPacket->skbuff = 0;
5388 return LM_STATUS_FAILURE;
5391 /* Returns 1 if not all buffers are allocated */
/*
 * replenish_rx_buffers - refill the LM receive free queue from the
 * "out of buffer" queue, allocating fresh skbs where a packet has none.
 * 'max' caps the number of new allocations per call (0 presumably means
 * unlimited — the check below only fires for alloc_cnt >= max; confirm).
 *
 * NOTE(review): elided listing; queue_rx/alloc_cnt declarations and some
 * loop-exit lines are not visible.
 */
5393 replenish_rx_buffers(PUM_DEVICE_BLOCK pUmDevice, int max)
5396 PUM_PACKET pUmPacket;
5397 PLM_DEVICE_BLOCK pDevice = (PLM_DEVICE_BLOCK) pUmDevice;
5398 struct sk_buff *skb;
5403 while ((pUmPacket = (PUM_PACKET)
5404 QQ_PopHead(&pUmDevice->rx_out_of_buf_q.Container)) != 0) {
5405 pPacket = (PLM_PACKET) pUmPacket;
5406 if (pUmPacket->skbuff) {
5407 /* reuse an old skb */
5408 QQ_PushTail(&pDevice->RxPacketFreeQ.Container, pPacket);
/* No skb attached: allocate one (+2 for IP-header alignment headroom).
 * On failure, push the packet back at the HEAD so ordering is kept and
 * a later call retries it first. */
5412 if ((skb = dev_alloc_skb(pPacket->u.Rx.RxBufferSize + 2)) == 0) {
5413 QQ_PushHead(&pUmDevice->rx_out_of_buf_q.Container,
5418 pUmPacket->skbuff = skb;
5419 skb->dev = pUmDevice->dev;
5420 skb_reserve(skb, pUmDevice->rx_buf_align);
5421 QQ_PushTail(&pDevice->RxPacketFreeQ.Container, pPacket);
/* Respect the per-call allocation budget. */
5425 if (alloc_cnt >= max)
/* Hand any replenished packets back to the hardware rings. */
5429 if (queue_rx || pDevice->QueueAgain) {
5430 LM_QueueRxPackets(pDevice);
/*
 * MM_IndicateTxPackets - LM-layer callback: reclaim completed transmit
 * packets from TxPacketXmittedQ, unmap their DMA buffers, free the skbs,
 * and wake the netdev queue once at least half the TX descriptors are
 * free again.
 *
 * NOTE(review): elided listing; the reclaim loop header, unmap direction
 * arguments and several braces are not visible.
 */
5436 MM_IndicateTxPackets(PLM_DEVICE_BLOCK pDevice)
5438 PUM_DEVICE_BLOCK pUmDevice = (PUM_DEVICE_BLOCK) pDevice;
5440 PUM_PACKET pUmPacket;
5441 struct sk_buff *skb;
5442 #if ! defined(NO_PCI_UNMAP) && MAX_SKB_FRAGS
5447 pPacket = (PLM_PACKET)
5448 QQ_PopHead(&pDevice->TxPacketXmittedQ.Container);
5451 pUmPacket = (PUM_PACKET) pPacket;
5452 skb = pUmPacket->skbuff;
5453 #if ! defined(NO_PCI_UNMAP)
/* Unmap the linear head and each page fragment that was mapped at
 * transmit time (map[0] = head, map[i+1] = fragment i). */
5454 pci_unmap_single(pUmDevice->pdev,
5455 pci_unmap_addr(pUmPacket, map[0]),
5456 pci_unmap_len(pUmPacket, map_len[0]),
5459 for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) {
5460 pci_unmap_page(pUmDevice->pdev,
5461 pci_unmap_addr(pUmPacket, map[i + 1]),
5462 pci_unmap_len(pUmPacket, map_len[i + 1]),
/* _irq variant: this runs from interrupt/tasklet context. */
5467 dev_kfree_skb_irq(skb);
5468 pUmPacket->skbuff = 0;
5469 QQ_PushTail(&pDevice->TxPacketFreeQ.Container, pPacket);
/* Hysteresis: only restart the queue when half the descriptors are
 * free, to avoid start/stop thrashing under load. */
5471 if (pUmDevice->tx_full) {
5472 if (QQ_GetEntryCnt(&pDevice->TxPacketFreeQ.Container) >=
5473 (pDevice->TxPacketDescCnt >> 1)) {
5475 pUmDevice->tx_full = 0;
5476 netif_wake_queue(pUmDevice->dev);
5479 return LM_STATUS_SUCCESS;
/*
 * MM_IndicateStatus - LM-layer callback reporting a link state change.
 * Updates the netdev carrier state and logs link up/down, and for link-up
 * also logs speed, duplex and pause (flow-control) configuration.
 * No-op (success) when the interface has not been opened.
 *
 * NOTE(review): elided listing; the 'speed' variable declaration and its
 * per-speed assignments, plus several else/brace lines, are not visible.
 */
5483 MM_IndicateStatus(PLM_DEVICE_BLOCK pDevice, LM_STATUS Status)
5485 PUM_DEVICE_BLOCK pUmDevice = (PUM_DEVICE_BLOCK) pDevice;
5486 struct net_device *dev = pUmDevice->dev;
5487 LM_FLOW_CONTROL flow_control;
5490 if (!pUmDevice->opened)
5491 return LM_STATUS_SUCCESS;
/* Only toggle carrier while running; during suspend the stack must not
 * see transient link flaps. */
5493 if (!pUmDevice->suspended) {
5494 if (Status == LM_STATUS_LINK_DOWN) {
5495 netif_carrier_off(dev);
5497 else if (Status == LM_STATUS_LINK_ACTIVE) {
5498 netif_carrier_on(dev);
/* delayed_link_ind: a pending deferred indication is flushed here; the
 * message casing ("DOWN"/"UP") differs from the immediate path below,
 * apparently to distinguish the two in the log. */
5499 if (pUmDevice->delayed_link_ind > 0) {
5503 pUmDevice->delayed_link_ind = 0;
5504 if (Status == LM_STATUS_LINK_DOWN) {
5505 printk(KERN_ERR "%s: %s NIC Link is DOWN\n", bcm5700_driver, dev->name);
5507 else if (Status == LM_STATUS_LINK_ACTIVE) {
5508 printk(KERN_INFO "%s: %s NIC Link is UP, ", bcm5700_driver, dev->name);
5512 if (Status == LM_STATUS_LINK_DOWN) {
5513 printk(KERN_ERR "%s: %s NIC Link is Down\n", bcm5700_driver, dev->name);
5515 else if (Status == LM_STATUS_LINK_ACTIVE) {
5516 printk(KERN_INFO "%s: %s NIC Link is Up, ", bcm5700_driver, dev->name);
/* Link up: continue the KERN_INFO line with speed/duplex/pause info. */
5520 if (Status == LM_STATUS_LINK_ACTIVE) {
5521 if (pDevice->LineSpeed == LM_LINE_SPEED_1000MBPS)
5523 else if (pDevice->LineSpeed == LM_LINE_SPEED_100MBPS)
5525 else if (pDevice->LineSpeed == LM_LINE_SPEED_10MBPS)
5528 printk("%d Mbps ", speed);
5530 if (pDevice->DuplexMode == LM_DUPLEX_MODE_FULL)
5531 printk("full duplex");
5533 printk("half duplex");
/* Report which pause directions were negotiated, if any. */
5535 flow_control = pDevice->FlowControl &
5536 (LM_FLOW_CONTROL_RECEIVE_PAUSE |
5537 LM_FLOW_CONTROL_TRANSMIT_PAUSE);
5539 if (flow_control & LM_FLOW_CONTROL_RECEIVE_PAUSE) {
5540 printk(", receive ");
5541 if (flow_control & LM_FLOW_CONTROL_TRANSMIT_PAUSE)
5542 printk("& transmit ");
5545 printk(", transmit ");
5547 printk("flow control ON");
5551 return LM_STATUS_SUCCESS;
/*
 * MM_UnmapRxDma - release the streaming DMA mapping of a receive buffer.
 * Compiled away entirely when NO_PCI_UNMAP is defined; a no-op when the
 * packet has no skb attached (nothing was mapped).
 */
5555 MM_UnmapRxDma(LM_DEVICE_BLOCK *pDevice, LM_PACKET *pPacket)
5557 #if ! defined(NO_PCI_UNMAP)
5558 UM_DEVICE_BLOCK *pUmDevice = (UM_DEVICE_BLOCK *) pDevice;
5559 UM_PACKET *pUmPacket = (UM_PACKET *) pPacket;
5561 if (!pUmPacket->skbuff)
/* map[0] holds the mapping of the (single) receive buffer. */
5564 pci_unmap_single(pUmDevice->pdev,
5565 pci_unmap_addr(pUmPacket, map[0]),
5566 pPacket->u.Rx.RxBufferSize,
5567 PCI_DMA_FROMDEVICE);
/*
 * MM_FreeRxBuffer - free the skb attached to a receive packet (the DMA
 * mapping must already have been torn down, e.g. via MM_UnmapRxDma) and
 * clear the packet's skbuff pointer. Always returns LM_STATUS_SUCCESS.
 *
 * NOTE(review): elided listing; the early-return guard condition and the
 * actual skb-free call inside the if-body are not visible — confirm.
 */
5572 MM_FreeRxBuffer(PLM_DEVICE_BLOCK pDevice, PLM_PACKET pPacket)
5574 PUM_PACKET pUmPacket;
5575 struct sk_buff *skb;
5578 return LM_STATUS_SUCCESS;
5579 pUmPacket = (PUM_PACKET) pPacket;
5580 if ((skb = pUmPacket->skbuff)) {
5581 /* DMA address already unmapped */
5584 pUmPacket->skbuff = 0;
5585 return LM_STATUS_SUCCESS;
/*
 * MM_Sleep - sleep interruptibly for approximately 'msec' milliseconds.
 * Returns LM_STATUS_FAILURE if the sleep was cut short (schedule_timeout
 * returned remaining ticks) or a signal is pending afterwards, otherwise
 * LM_STATUS_SUCCESS. Must be called from process context (it schedules).
 * Note: HZ * msec / 1000 rounds down; msec < 1000/HZ yields 0 ticks.
 */
5589 MM_Sleep(LM_DEVICE_BLOCK *pDevice, LM_UINT32 msec)
5591 current->state = TASK_INTERRUPTIBLE;
5592 if (schedule_timeout(HZ * msec / 1000) != 0) {
5593 return LM_STATUS_FAILURE;
5595 if (signal_pending(current))
5596 return LM_STATUS_FAILURE;
5598 return LM_STATUS_SUCCESS;
/*
 * bcm5700_shutdown - quiesce the device: mask interrupts, drop carrier,
 * kill the RX tasklet, wait for any in-flight poll to finish, then mark
 * the LM device uninitialized and release remaining receive buffers.
 * (LM_Halt/LM_Shutdown calls are presumably in the elided lines between
 * poll-wait and InitDone = 0 — confirm against the full source.)
 */
5602 bcm5700_shutdown(UM_DEVICE_BLOCK *pUmDevice)
5604 LM_DEVICE_BLOCK *pDevice = (LM_DEVICE_BLOCK *) pUmDevice;
5606 bcm5700_intr_off(pUmDevice);
5607 netif_carrier_off(pUmDevice->dev);
5609 tasklet_kill(&pUmDevice->tasklet);
5611 bcm5700_poll_wait(pUmDevice);
5615 pDevice->InitDone = 0;
5616 bcm5700_free_remaining_rx_bufs(pUmDevice);
/*
 * bcm5700_free_remaining_rx_bufs - drain rx_out_of_buf_q at shutdown:
 * for each parked packet, unmap its DMA buffer, free its skb, and return
 * the packet descriptor to the LM receive free queue.
 *
 * NOTE(review): elided listing; cnt/i declarations and the pop's
 * assignment target line are not visible.
 */
5620 bcm5700_free_remaining_rx_bufs(UM_DEVICE_BLOCK *pUmDevice)
5622 LM_DEVICE_BLOCK *pDevice = &pUmDevice->lm_dev;
5623 UM_PACKET *pUmPacket;
/* Snapshot the count first; the loop pops exactly that many entries. */
5626 cnt = QQ_GetEntryCnt(&pUmDevice->rx_out_of_buf_q.Container);
5627 for (i = 0; i < cnt; i++) {
5629 QQ_PopHead(&pUmDevice->rx_out_of_buf_q.Container))
5632 MM_UnmapRxDma(pDevice, (LM_PACKET *) pUmPacket);
5633 MM_FreeRxBuffer(pDevice, &pUmPacket->lm_packet);
5634 QQ_PushTail(&pDevice->RxPacketFreeQ.Container,
/*
 * bcm5700_validate_param_range - range-check a module parameter in place.
 * If *param lies outside [min, max] (compared as unsigned, so negative
 * values also fail), warn and presumably reset *param to deflt — the
 * assignment line is elided in this listing; confirm against full source.
 */
5641 bcm5700_validate_param_range(UM_DEVICE_BLOCK *pUmDevice, int *param,
5642 char *param_name, int min, int max, int deflt)
5644 if (((unsigned int) *param < (unsigned int) min) ||
5645 ((unsigned int) *param > (unsigned int) max)) {
5647 printk(KERN_WARNING "%s-%d: Invalid %s parameter (%u), using %u\n", bcm5700_driver, pUmDevice->index, param_name, (unsigned int) *param, (unsigned int) deflt);
/*
 * bcm5700_find_peer - for a dual-port 5704 device, walk the driver's
 * module-global device list (root_tigon3_dev / next_module) looking for
 * the other netdev that shares this device's PCI bus and slot (the twin
 * port of the same chip). Return value assignment/return lines are
 * elided; presumably returns the peer netdev or NULL — confirm.
 */
5653 bcm5700_find_peer(struct net_device *dev)
5655 struct net_device *tmp_dev;
5656 UM_DEVICE_BLOCK *pUmDevice, *pUmTmp;
5657 LM_DEVICE_BLOCK *pDevice;
5660 pUmDevice = (UM_DEVICE_BLOCK *) dev->priv;
5661 pDevice = &pUmDevice->lm_dev;
/* Only the 5704 is dual-port; other chips have no peer. */
5662 if (T3_ASIC_REV(pDevice->ChipRevId) == T3_ASIC_REV_5704) {
5663 tmp_dev = root_tigon3_dev;
5665 pUmTmp = (PUM_DEVICE_BLOCK) tmp_dev->priv;
/* Same bus + same slot but a different netdev => the twin function. */
5666 if ((tmp_dev != dev) &&
5667 (pUmDevice->pdev->bus->number ==
5668 pUmTmp->pdev->bus->number) &&
5669 PCI_SLOT(pUmDevice->pdev->devfn) ==
5670 PCI_SLOT(pUmTmp->pdev->devfn)) {
5674 tmp_dev = pUmTmp->next_module;
/*
 * MM_FindPeerDev - LM-layer wrapper around bcm5700_find_peer(): return
 * the LM device block of the other port on a dual-port NIC. The NULL
 * check on peer_dev is elided in this listing — presumably NULL is
 * returned when there is no peer; confirm against the full source.
 */
5681 MM_FindPeerDev(LM_DEVICE_BLOCK *pDevice)
5683 UM_DEVICE_BLOCK *pUmDevice = (UM_DEVICE_BLOCK *) pDevice;
5684 struct net_device *dev = pUmDevice->dev;
5685 struct net_device *peer_dev;
5687 peer_dev = bcm5700_find_peer(dev);
/* dev->priv of a peer netdev is its UM/LM device block. */
5690 return ((LM_DEVICE_BLOCK *) peer_dev->priv);
/*
 * MM_FindCapability - LM-layer wrapper for pci_find_capability(): return
 * the config-space offset of the given PCI capability, or 0 if absent.
 */
5693 int MM_FindCapability(LM_DEVICE_BLOCK *pDevice, int capability)
5695 UM_DEVICE_BLOCK *pUmDevice = (UM_DEVICE_BLOCK *) pDevice;
5696 return (pci_find_capability(pUmDevice->pdev, capability));
5699 #if defined(HAVE_POLL_CONTROLLER)||defined(CONFIG_NET_POLL_CONTROLLER)
5701 poll_bcm5700(struct net_device *dev)
5703 UM_DEVICE_BLOCK *pUmDevice = dev->priv;
5705 #if defined(RED_HAT_LINUX_KERNEL) && (LINUX_VERSION_CODE < 0x020605)
5707 bcm5700_interrupt(pUmDevice->pdev->irq, dev, NULL);
5708 #ifdef BCM_NAPI_RXPOLL
5709 if (dev->poll_list.prev) {
5712 bcm5700_poll(dev, &budget);
5719 disable_irq(pUmDevice->pdev->irq);
5720 bcm5700_interrupt(pUmDevice->pdev->irq, dev, NULL);
5721 enable_irq(pUmDevice->pdev->irq);