1 /******************************************************************************/
3 /* Broadcom BCM5700 Linux Network Driver, Copyright (c) 2000 - 2004 Broadcom */
5 /* All rights reserved. */
7 /* This program is free software; you can redistribute it and/or modify */
8 /* it under the terms of the GNU General Public License as published by */
9 /* the Free Software Foundation, located in the file LICENSE. */
11 /******************************************************************************/
16 #include <linux/config.h>
18 #if defined(CONFIG_SMP) && ! defined(__SMP__)
22 #if defined(CONFIG_MODVERSIONS) && defined(MODULE) && ! defined(MODVERSIONS)
29 #define __NO_VERSION__
31 #include <linux/version.h>
35 #if defined(MODVERSIONS) && (LINUX_VERSION_CODE < 0x020500)
37 #include <linux/modversions.h>
41 #if (LINUX_VERSION_CODE < 0x020605)
42 #include <linux/module.h>
44 #include <linux/moduleparam.h>
49 #define MOD_INC_USE_COUNT
50 #define MOD_DEC_USE_COUNT
51 #define SET_MODULE_OWNER(dev)
52 #define MODULE_DEVICE_TABLE(pci, pci_tbl)
56 #include <linux/kernel.h>
57 #include <linux/sched.h>
58 #include <linux/string.h>
59 #include <linux/timer.h>
60 #include <linux/errno.h>
61 #include <linux/ioport.h>
62 #include <linux/slab.h>
63 #include <linux/interrupt.h>
64 #include <linux/pci.h>
65 #include <linux/init.h>
66 #include <linux/netdevice.h>
67 #include <linux/etherdevice.h>
68 #include <linux/skbuff.h>
69 #include <asm/processor.h> /* Processor type for cache alignment. */
70 #include <asm/bitops.h>
72 #include <asm/unaligned.h>
73 #include <linux/delay.h>
74 #include <asm/byteorder.h>
75 #include <linux/time.h>
76 #include <asm/uaccess.h>
77 #if (LINUX_VERSION_CODE >= 0x020400)
78 #if (LINUX_VERSION_CODE < 0x020500)
79 #include <linux/wrapper.h>
81 #include <linux/ethtool.h>
84 #include <linux/smp_lock.h>
85 #include <linux/proc_fs.h>
88 #ifdef NETIF_F_HW_VLAN_TX
89 #include <linux/if_vlan.h>
94 #define INCLUDE_TCP_SEG_SUPPORT 1
97 #include <net/checksum.h>
/* Fallback definitions so the compatibility tests below still
 * preprocess when the kernel headers do not supply these symbols. */
100 #ifndef LINUX_KERNEL_VERSION
101 #define LINUX_KERNEL_VERSION 0
104 #ifndef MAX_SKB_FRAGS
105 #define MAX_SKB_FRAGS 0
/* 2.4+ kernels: supply the EEPROM ethtool command codes and argument
 * struct locally when the installed ethtool.h predates them. */
108 #if (LINUX_VERSION_CODE >= 0x020400)
109 #ifndef ETHTOOL_GEEPROM
111 #define ETHTOOL_GEEPROM 0x0000000b /* Get EEPROM data */
112 #define ETHTOOL_SEEPROM 0x0000000c /* Set EEPROM data */
114 /* for passing EEPROM chunks */
115 struct ethtool_eeprom {
118 u32 offset; /* in bytes */
119 u32 len; /* in bytes */
/* Record the EEPROM dump length in ethtool_drvinfo.  The first variant
 * writes into the reserved1[] area — presumably because that struct
 * revision has no eedump_len field (NOTE(review): verify offset 24
 * against the matching ethtool.h); the second uses eedump_len directly. */
122 #define BCM_EEDUMP_LEN(info_p, size) *((u32 *) &((info_p)->reserved1[24]))=size
126 #define BCM_EEDUMP_LEN(info_p, size) (info_p)->eedump_len=size
/* Driver feature switches: interrupt coalescing, NIC-memory send
 * buffer descriptors, and tasklet-based deferred processing. */
131 #define BCM_INT_COAL 1
132 #define BCM_NIC_SEND_BD 1
135 #define BCM_TASKLET 1
/* Use NAPI-style RX polling when netif_receive_skb() is available. */
137 #if HAVE_NETIF_RECEIVE_SKB
138 #define BCM_NAPI_RXPOLL 1
142 #if defined(CONFIG_PPC64)
143 #define BCM_DISCONNECT_AT_CACHELINE 1
/* Drop ethtool operations this configuration cannot support. */
148 #undef ETHTOOL_GEEPROM
149 #undef ETHTOOL_SEEPROM
151 #undef ETHTOOL_GPAUSEPARAM
152 #undef ETHTOOL_GRXCSUM
155 #undef BCM_NIC_SEND_BD
163 #define BIG_ENDIAN_HOST 1
/* Byte-order helpers layered over the kernel's cpu_to_*32 primitives. */
166 #define MM_SWAP_LE32(x) cpu_to_le32(x)
167 #define MM_SWAP_BE32(x) cpu_to_be32(x)
/* Kernels before 2.3.39 lack the __raw_* MMIO accessors; alias them to
 * the byte-swapping versions. */
169 #if (LINUX_VERSION_CODE < 0x020327)
170 #define __raw_readl readl
171 #define __raw_writel writel
/* MMIO helpers used by the OS-independent (LM) code. */
174 #define MM_MEMWRITEL(ptr, val) __raw_writel(val, ptr)
175 #define MM_MEMREADL(ptr) __raw_readl(ptr)
/* OS-abstraction atomic type and operations, mapped onto Linux
 * atomic_t.  Note the argument order is (ptr, val), the reverse of the
 * underlying atomic_add/atomic_sub. */
177 typedef atomic_t MM_ATOMIC_T;
179 #define MM_ATOMIC_SET(ptr, val) atomic_set(ptr, val)
180 #define MM_ATOMIC_READ(ptr) atomic_read(ptr)
181 #define MM_ATOMIC_INC(ptr) atomic_inc(ptr)
182 #define MM_ATOMIC_ADD(ptr, val) atomic_add(val, ptr)
183 #define MM_ATOMIC_DEC(ptr) atomic_dec(ptr)
184 #define MM_ATOMIC_SUB(ptr, val) atomic_sub(val, ptr)
/* Memory-barrier wrappers for the LM code. */
193 #define MM_WMB() wmb()
194 #define MM_RMB() rmb()
195 #define MM_MMIOWB() mmiowb()
204 #define STATIC static
/* Size of the UM per-packet descriptor, defined in the UM .c file. */
207 extern int MM_Packet_Desc_Size;
209 #define MM_PACKET_DESC_SIZE MM_Packet_Desc_Size
/* Queue type for RX packets waiting for replacement buffers; used by
 * rx_out_of_buf_q in UM_DEVICE_BLOCK below. */
211 DECLARE_QUEUE_TYPE(UM_RX_PACKET_Q, MAX_RX_PACKET_DESC_COUNT+1);
/* DMA-mapping compatibility layer spanning 2.2/2.3/2.4 kernels. */
216 #if (LINUX_VERSION_CODE < 0x020211)
217 typedef u32 dma_addr_t;
/* Very old kernels: emulate pci_map_single() with virt_to_bus() and
 * make unmap a no-op. */
220 #if (LINUX_VERSION_CODE < 0x02032a)
221 #define pci_map_single(dev, address, size, dir) virt_to_bus(address)
222 #define pci_unmap_single(dev, dma_addr, size, dir)
/* 2.4.13+ already provides pci_map_page(); dma_addr_t is wide enough. */
226 #if (LINUX_VERSION_CODE >= 0x02040d)
228 typedef dma_addr_t dmaaddr_high_t;
/* Older kernels on 32-bit x86 with highmem: under HIGHMEM64G (PAE) a
 * page's physical address can exceed 32 bits, so a wider address type
 * and a local pci_map_page substitute are needed. */
232 #if defined(CONFIG_HIGHMEM) && defined(CONFIG_X86) && ! defined(CONFIG_X86_64)
234 #if defined(CONFIG_HIGHMEM64G)
235 typedef unsigned long long dmaaddr_high_t;
237 typedef dma_addr_t dmaaddr_high_t;
241 #define pci_map_page bcm_pci_map_page
/* Compute the page's bus address directly from its mem_map index
 * (assumes bus == physical address on x86 — TODO(review) confirm for
 * this platform). */
244 static inline dmaaddr_high_t
245 bcm_pci_map_page(struct pci_dev *dev, struct page *page,
246 int offset, size_t size, int dir)
250 phys = (page-mem_map) * (dmaaddr_high_t) PAGE_SIZE + offset;
255 #ifndef pci_unmap_page
256 #define pci_unmap_page(dev, map, size, dir)
259 #else /* #if defined(CONFIG_HIGHMEM) && defined(CONFIG_X86) && ! defined(CONFIG_X86_64)*/
261 typedef dma_addr_t dmaaddr_high_t;
263 /* Warning - This may not work for all architectures if HIGHMEM is defined */
266 #define pci_map_page(dev, page, offset, size, dir) \
267 pci_map_single(dev, page_address(page) + (offset), size, dir)
269 #ifndef pci_unmap_page
270 #define pci_unmap_page(dev, map, size, dir) \
271 pci_unmap_single(dev, map, size, dir)
274 #endif /* #if defined(CONFIG_HIGHMEM) && defined(CONFIG_X86) && ! defined(CONFIG_X86_64)*/
276 #endif /* #if (LINUX_VERSION_CODE >= 0x02040d)*/
277 #endif /* #if MAX_SKB_FRAGS*/
/* On 32-bit x86 no address translation is done at unmap time, so the
 * per-packet unmap bookkeeping can be compiled out entirely. */
279 #if defined (CONFIG_X86) && ! defined(CONFIG_X86_64)
280 #define NO_PCI_UNMAP 1
/* Pre-2.4.18 kernels lack the pci_unmap_* state helpers: provide real
 * storage, or no-op/zero stubs when NO_PCI_UNMAP is set. */
283 #if (LINUX_VERSION_CODE < 0x020412)
284 #if ! defined (NO_PCI_UNMAP)
285 #define DECLARE_PCI_UNMAP_ADDR(ADDR_NAME) dma_addr_t ADDR_NAME;
286 #define DECLARE_PCI_UNMAP_LEN(LEN_NAME) __u32 LEN_NAME;
288 #define pci_unmap_addr(PTR, ADDR_NAME) \
291 #define pci_unmap_len(PTR, LEN_NAME) \
294 #define pci_unmap_addr_set(PTR, ADDR_NAME, VAL) \
295 (((PTR)->ADDR_NAME) = (VAL))
297 #define pci_unmap_len_set(PTR, LEN_NAME, VAL) \
298 (((PTR)->LEN_NAME) = (VAL))
300 #define DECLARE_PCI_UNMAP_ADDR(ADDR_NAME)
301 #define DECLARE_PCI_UNMAP_LEN(ADDR_NAME)
303 #define pci_unmap_addr(PTR, ADDR_NAME) 0
304 #define pci_unmap_len(PTR, LEN_NAME) 0
305 #define pci_unmap_addr_set(PTR, ADDR_NAME, VAL) do { } while (0)
306 #define pci_unmap_len_set(PTR, LEN_NAME, VAL) do { } while (0)
/* 2.3.14 renamed struct device to struct net_device and introduced
 * carrier notification; stub these on older kernels. */
310 #if (LINUX_VERSION_CODE < 0x02030e)
311 #define net_device device
312 #define netif_carrier_on(dev)
313 #define netif_carrier_off(dev)
/* Pre-2.3.43: no tasklets; fall back to task queues. */
316 #if (LINUX_VERSION_CODE < 0x02032b)
317 #define tasklet_struct tq_struct
/*
 * Per-NIC state kept by the Linux ("upper module", UM) side of the
 * driver, wrapping the OS-independent LM_DEVICE_BLOCK.  lm_dev must
 * remain the first member: LM callbacks receive a PLM_DEVICE_BLOCK and
 * the UM code casts it directly to a UM/_UM_DEVICE_BLOCK pointer (see
 * the casts in MM_MapRxDma/MM_MapTxDma below).
 */
320 typedef struct _UM_DEVICE_BLOCK {
321 LM_DEVICE_BLOCK lm_dev;
322 struct net_device *dev;
323 struct pci_dev *pdev;
324 struct net_device *next_module;
327 struct proc_dir_entry *pfs_entry;
/* DMA-consistent allocations made on behalf of the LM layer. */
330 void *mem_list[MAX_MEM];
331 dma_addr_t dma_list[MAX_MEM];
332 int mem_size_list[MAX_MEM];
336 void *mem_list2[MAX_MEM2]; /* for diagnostics ioctl */
337 dma_addr_t dma_list2[MAX_MEM2];
338 __u64 cpu_pa_list2[MAX_MEM2];
339 int mem_size_list2[MAX_MEM2];
344 int using_dac; /* dual address cycle */
345 int delayed_link_ind; /* Delay link status during initial load */
346 int adapter_just_inited; /* the first few seconds after init. */
/* Periodic-timer bookkeeping (tick intervals/expiries). */
348 int statstimer_interval;
350 int crc_counter_expiry;
351 int poll_tbi_interval;
356 int line_speed; /* in Mbps, 0 if link is down */
/* RX packets parked here while no replacement buffers are available. */
357 UM_RX_PACKET_Q rx_out_of_buf_q;
359 int rx_buf_repl_thresh;
360 int rx_buf_repl_panic_thresh;
361 int rx_buf_repl_isr_limit;
363 struct timer_list timer;
364 struct timer_list statstimer;
/* Locking: global_lock serializes everything when do_global_lock is
 * set (see the BCM5700_PHY_LOCK/MM_*_LOCK macros below); undi_flags
 * stores the irqsave flags between MM_ACQUIRE/RELEASE_UNDI_LOCK. */
366 spinlock_t global_lock;
367 spinlock_t undi_lock;
369 unsigned long undi_flags;
370 volatile unsigned long interrupt;
373 volatile unsigned long tasklet_busy;
374 struct tasklet_struct tasklet;
375 struct net_device_stats stats;
/* Optional NICE receive intercept callback. */
377 void (*nice_rx)( struct sk_buff*, void* );
379 #endif /* NICE_SUPPORT */
381 int intr_test_result;
382 #ifdef NETIF_F_HW_VLAN_TX
383 struct vlan_group *vlgrp;
385 int vlan_tag_mode; /* Setting to allow ASF to work properly with */
387 #define VLAN_TAG_MODE_AUTO_STRIP 0
388 #define VLAN_TAG_MODE_NORMAL_STRIP 1
389 #define VLAN_TAG_MODE_FORCED_STRIP 2
391 /* Auto mode - VLAN TAGs are always stripped if ASF is enabled, */
392 /* If ASF is not enabled, it will be in normal mode. */
393 /* Normal mode - VLAN TAGs are stripped when VLANs are registered */
394 /* Forced mode - VLAN TAGs are always stripped. */
/* Adaptive interrupt-coalescing state (current frame/tick settings). */
396 int adaptive_coalesce;
399 uint rx_curr_coalesce_frames;
400 uint rx_curr_coalesce_frames_intr;
401 uint rx_curr_coalesce_ticks;
402 uint tx_curr_coalesce_frames;
/* Software event counters (zero-copy TX, checksum offload hits/misses,
 * highmem TX buffers, TSO packets, misc RX errors, spurious IRQs). */
404 unsigned long tx_zc_count;
405 unsigned long tx_chksum_count;
406 unsigned long tx_himem_count;
407 unsigned long rx_good_chksum_count;
409 unsigned long rx_bad_chksum_count;
411 unsigned long tso_pkt_count;
413 unsigned long rx_misc_errors;
414 uint64_t phy_crc_count;
415 unsigned int spurious_int;
416 } UM_DEVICE_BLOCK, *PUM_DEVICE_BLOCK;
/* Per-packet UM state: the skb backing an LM packet plus pci_unmap
 * bookkeeping — slot 0 for the linear data area and, when scatter-
 * gather is available, one slot per possible page fragment (the two
 * DECLARE_* variants are alternatives selected by MAX_SKB_FRAGS —
 * NOTE(review): the selecting #if lines are not visible here). */
418 typedef struct _UM_PACKET {
420 struct sk_buff *skbuff;
422 DECLARE_PCI_UNMAP_ADDR(map[MAX_SKB_FRAGS + 1])
423 DECLARE_PCI_UNMAP_LEN(map_len[MAX_SKB_FRAGS + 1])
425 DECLARE_PCI_UNMAP_ADDR(map[1])
426 DECLARE_PCI_UNMAP_LEN(map_len[1])
428 } UM_PACKET, *PUM_PACKET;
/* Store a dma_addr_t into the controller's {High, Low} 32-bit halves. */
430 static inline void MM_SetAddr(LM_PHYSICAL_ADDRESS *paddr, dma_addr_t addr)
432 #if (BITS_PER_LONG == 64)
433 paddr->High = ((unsigned long) addr) >> 32;
434 paddr->Low = ((unsigned long) addr) & 0xffffffff;
/* 32-bit hosts: the whole address fits in Low. */
437 paddr->Low = (unsigned long) addr;
/* Store a dma_addr_t into a T3 (BCM570x) 64-bit host address, splitting
 * into High/Low on 64-bit hosts. */
441 static inline void MM_SetT3Addr(T3_64BIT_HOST_ADDR *paddr, dma_addr_t addr)
443 #if (BITS_PER_LONG == 64)
444 paddr->High = ((unsigned long) addr) >> 32;
445 paddr->Low = ((unsigned long) addr) & 0xffffffff;
/* 32-bit hosts: the whole address fits in Low. */
448 paddr->Low = (unsigned long) addr;
/* Store a possibly >32-bit (dmaaddr_high_t) DMA address.  Only the
 * 32-bit x86 PAE build (HIGHMEM64G) needs the explicit 64-bit split;
 * elsewhere dmaaddr_high_t is dma_addr_t and MM_SetT3Addr() suffices. */
453 static inline void MM_SetT3AddrHigh(T3_64BIT_HOST_ADDR *paddr,
456 #if defined(CONFIG_HIGHMEM64G) && defined(CONFIG_X86) && ! defined(CONFIG_X86_64)
457 paddr->High = (unsigned long) (addr >> 32);
458 paddr->Low = (unsigned long) (addr & 0xffffffff);
460 MM_SetT3Addr(paddr, (dma_addr_t) addr);
/* Map an RX packet's skb buffer for device DMA.  The resulting bus
 * address is written both into the descriptor (*paddr) and into the
 * packet's unmap slot map[0] for the eventual pci_unmap_single(). */
465 static inline void MM_MapRxDma(PLM_DEVICE_BLOCK pDevice,
466 struct _LM_PACKET *pPacket,
467 T3_64BIT_HOST_ADDR *paddr)
470 struct sk_buff *skb = ((struct _UM_PACKET *) pPacket)->skbuff;
472 map = pci_map_single(((struct _UM_DEVICE_BLOCK *)pDevice)->pdev,
474 pPacket->u.Rx.RxBufferSize,
476 pci_unmap_addr_set(((struct _UM_PACKET *) pPacket), map[0], map);
477 MM_SetT3Addr(paddr, map);
/* Map one piece of a TX skb for device DMA.  Fragment index 0 maps the
 * linear skb->data area (its length excludes paged data when page
 * fragments exist); index >= 1 maps page fragment frag-1 via
 * pci_map_page().  The bus address is stored in *paddr and in the
 * packet's unmap slot map[frag]; *len receives the mapped byte count.
 * NOTE(review): the frag/len parameters and the frag==0/else split are
 * partially outside this view — confirm against the full source. */
480 static inline void MM_MapTxDma(PLM_DEVICE_BLOCK pDevice,
481 struct _LM_PACKET *pPacket,
482 T3_64BIT_HOST_ADDR *paddr,
487 struct sk_buff *skb = ((struct _UM_PACKET *) pPacket)->skbuff;
/* Linear part only: subtract the paged (fragmented) byte count. */
492 if (skb_shinfo(skb)->nr_frags)
493 length = skb->len - skb->data_len;
497 map = pci_map_single(((struct _UM_DEVICE_BLOCK *)pDevice)->pdev,
498 skb->data, length, PCI_DMA_TODEVICE);
499 MM_SetT3Addr(paddr, map);
500 pci_unmap_addr_set(((struct _UM_PACKET *)pPacket), map[0], map);
501 pci_unmap_len_set(((struct _UM_PACKET *) pPacket), map_len[0],
/* Page fragment: may yield a >32-bit address on PAE, hence the
 * dmaaddr_high_t type and MM_SetT3AddrHigh(). */
508 dmaaddr_high_t hi_map;
510 sk_frag = &skb_shinfo(skb)->frags[frag - 1];
512 hi_map = pci_map_page(
513 ((struct _UM_DEVICE_BLOCK *)pDevice)->pdev,
515 sk_frag->page_offset,
516 sk_frag->size, PCI_DMA_TODEVICE);
518 MM_SetT3AddrHigh(paddr, hi_map);
519 pci_unmap_addr_set(((struct _UM_PACKET *) pPacket), map[frag],
521 pci_unmap_len_set(((struct _UM_PACKET *) pPacket),
522 map_len[frag], sk_frag->size);
523 *len = sk_frag->size;
/* PHY/UNDI locking helpers.  When do_global_lock is set, every path
 * serializes on global_lock; otherwise the dedicated phy_lock/undi_lock
 * is taken.  MM_ACQUIRE_UNDI_LOCK stashes the saved irq flags in
 * undi_flags so MM_RELEASE_UNDI_LOCK can restore them.  The *_IN_IRQ
 * variants take phy_lock without irqsave — callers presumably already
 * run with interrupts disabled (NOTE(review): confirm at call sites). */
528 #define BCM5700_PHY_LOCK(pUmDevice, flags) { \
530 if ((pUmDevice)->do_global_lock) { \
531 lock = &(pUmDevice)->global_lock; \
534 lock = &(pUmDevice)->phy_lock; \
536 spin_lock_irqsave(lock, flags); \
539 #define BCM5700_PHY_UNLOCK(pUmDevice, flags) { \
541 if ((pUmDevice)->do_global_lock) { \
542 lock = &(pUmDevice)->global_lock; \
545 lock = &(pUmDevice)->phy_lock; \
547 spin_unlock_irqrestore(lock, flags); \
551 #define MM_ACQUIRE_UNDI_LOCK(_pDevice) \
552 if (!(((PUM_DEVICE_BLOCK)(_pDevice))->do_global_lock)) { \
553 unsigned long flags; \
554 spin_lock_irqsave(&((PUM_DEVICE_BLOCK)(_pDevice))->undi_lock, flags); \
555 ((PUM_DEVICE_BLOCK)(_pDevice))->undi_flags = flags; \
558 #define MM_RELEASE_UNDI_LOCK(_pDevice) \
559 if (!(((PUM_DEVICE_BLOCK)(_pDevice))->do_global_lock)) { \
560 unsigned long flags = ((PUM_DEVICE_BLOCK) (_pDevice))->undi_flags; \
561 spin_unlock_irqrestore(&((PUM_DEVICE_BLOCK)(_pDevice))->undi_lock, flags); \
564 #define MM_ACQUIRE_PHY_LOCK_IN_IRQ(_pDevice) \
565 if (!(((PUM_DEVICE_BLOCK)(_pDevice))->do_global_lock)) { \
566 spin_lock(&((PUM_DEVICE_BLOCK)(_pDevice))->phy_lock); \
569 #define MM_RELEASE_PHY_LOCK_IN_IRQ(_pDevice) \
570 if (!(((PUM_DEVICE_BLOCK)(_pDevice))->do_global_lock)) { \
571 spin_unlock(&((PUM_DEVICE_BLOCK)(_pDevice))->phy_lock); \
574 #define MM_UINT_PTR(_ptr) ((unsigned long) (_ptr))
/* Read a 64-bit hardware statistics counter kept as {High, Low} 32-bit
 * halves.  The entire expansion is parenthesized so the macro composes
 * safely inside larger expressions — the original unparenthesized form
 * made a prefix cast such as "(unsigned long) MM_GETSTATS64(c)" (used
 * by MM_GETSTATS below) bind to the .Low term only. */
#define MM_GETSTATS64(_Ctr) \
	((uint64_t) (_Ctr).Low + ((uint64_t) (_Ctr).High << 32))

/* Read only the low 32 bits of a statistics counter. */
#define MM_GETSTATS32(_Ctr) \
	((uint32_t) (_Ctr).Low)
/* MM_GETSTATS returns what fits in an unsigned long: the full 64-bit
 * counter on 64-bit hosts, the low 32 bits otherwise. */
582 #if (BITS_PER_LONG == 64)
583 #define MM_GETSTATS(_Ctr) (unsigned long) MM_GETSTATS64(_Ctr)
585 #define MM_GETSTATS(_Ctr) (unsigned long) MM_GETSTATS32(_Ctr)
/* copy_to/from_user wrappers: on 2.6 kernels, fall back to memcpy when
 * in atomic context since copy_*_user may sleep.  NOTE(review): memcpy
 * on a user pointer is only safe if the buffer is known to be
 * kernel-accessible at that point — verify the atomic-context callers. */
588 #if (LINUX_VERSION_CODE >= 0x020600)
589 #define mm_copy_to_user( to, from, size ) \
590 (in_atomic() ? (memcpy((to),(from),(size)), 0) : copy_to_user((to),(from),(size)))
591 #define mm_copy_from_user( to, from, size ) \
592 (in_atomic() ? (memcpy((to),(from),(size)), 0) : copy_from_user((to),(from),(size)))
594 #define mm_copy_to_user( to, from, size ) \
595 copy_to_user((to),(from),(size) )
596 #define mm_copy_from_user( to, from, size ) \
597 copy_from_user((to),(from),(size))
/* Route the LM code's printf/DbgPrint output to the kernel log. */
601 #define printf(fmt, args...) printk(KERN_WARNING fmt, ##args)
603 #define DbgPrint(fmt, arg...) printk(KERN_DEBUG fmt, ##arg)
/* Debug trap: x86 raises software interrupt vector 0x81; no-op
 * elsewhere.  NOTE(review): vector 0x81 needs a matching handler. */
604 #if defined(CONFIG_X86)
605 #define DbgBreakPoint() __asm__("int $129")
607 #define DbgBreakPoint()
/* Busy-wait for 'time' microseconds. */
609 #define MM_Wait(time) udelay(time)