1 /* typhoon.c: A Linux Ethernet device driver for 3Com 3CR990 family of NICs */
3 Written 2002-2003 by David Dillow <dave@thedillows.org>
4 Based on code written 1998-2000 by Donald Becker <becker@scyld.com> and
5 Linux 2.2.x driver by David P. McLean <davidpmclean@yahoo.com>.
7 This software may be used and distributed according to the terms of
8 the GNU General Public License (GPL), incorporated herein by reference.
9 Drivers based on or derived from this code fall under the GPL and must
10 retain the authorship, copyright and license notice. This file is not
11 a complete program and may only be used when the entire operating
12 system is licensed under the GPL.
14 This software is available on a public web site. It may enable
15 cryptographic capabilities of the 3Com hardware, and may be
16 exported from the United States under License Exception "TSU"
17 pursuant to 15 C.F.R. Section 740.13(e).
19 This work was funded by the National Library of Medicine under
20 the Department of Energy project number 0274DD06D1 and NLM project
23 This driver is designed for the 3Com 3CR990 Family of cards with the
24 3XP Processor. It has been tested on x86 and sparc64.
27 *) The current firmware always strips the VLAN tag off, even if
28 we tell it not to. You should filter VLANs at the switch
29 as a workaround (good practice in any event) until we can
31 *) Cannot DMA Rx packets to a 2 byte aligned address. Also firmware
32 issue. Hopefully 3Com will fix it.
33 *) Waiting for a command response takes 8ms due to non-preemptable
34 polling. Only significant for getting stats and creating
35 SAs, but an ugly wart nevertheless.
36 *) I've not tested multicast. I think it works, but reports welcome.
37 *) Doesn't do IPSEC offloading. Yet. Keep yer pants on, it's coming.
/* User-tunable module parameters (exported via MODULE_PARM further down). */
40 /* Set the copy breakpoint for the copy-only-tiny-frames scheme.
41 * Setting to > 1518 effectively disables this feature.
/* Frames shorter than this are copied into a freshly allocated skb on Rx
 * so the large ring buffer can be recycled immediately. */
43 static int rx_copybreak = 200;
45 /* end user-configurable values */
47 /* Maximum number of multicast addresses to filter (vs. rx-all-multicast).
/* Above this count the driver falls back to TYPHOON_RX_FILTER_ALL_MCAST
 * instead of programming a hash (see typhoon_set_rx_mode()). */
49 static const int multicast_filter_limit = 32;
51 /* Operational parameters that are set at compile time. */
53 /* Keep the ring sizes a power of two for compile efficiency.
54 * The compiler will convert <unsigned>'%'<2^N> into a bit mask.
55 * Making the Tx ring too large decreases the effectiveness of channel
56 * bonding and packet priority.
57 * There are no ill effects from too-large receive rings.
59 * We don't currently use the Hi Tx ring so, don't make it very big.
61 * Beware that if we start using the Hi Tx ring, we will need to change
62 * typhoon_num_free_tx() and typhoon_tx_complete() to account for that.
64 #define TXHI_ENTRIES 2
65 #define TXLO_ENTRIES 128
67 #define COMMAND_ENTRIES 16
68 #define RESPONSE_ENTRIES 32
/* Ring byte sizes -- indices into these rings are byte offsets, not
 * entry counts (see typhoon_inc_index()). */
70 #define COMMAND_RING_SIZE (COMMAND_ENTRIES * sizeof(struct cmd_desc))
71 #define RESPONSE_RING_SIZE (RESPONSE_ENTRIES * sizeof(struct resp_desc))
73 /* The 3XP will preload and remove 64 entries from the free buffer
74 * list, and we need one entry to keep the ring from wrapping, so
75 * to keep this a power of two, we use 128 entries.
77 #define RXFREE_ENTRIES 128
78 #define RXENT_ENTRIES (RXFREE_ENTRIES - 1)
80 /* Operational parameters that usually are not changed. */
82 /* Time in jiffies before concluding the transmitter is hung. */
83 #define TX_TIMEOUT (2*HZ)
/* Rx buffer size; presumably max Ethernet frame plus slack -- TODO confirm */
85 #define PKT_BUF_SZ 1536
87 #define DRV_MODULE_NAME "typhoon"
88 #define DRV_MODULE_VERSION "1.5.3"
89 #define DRV_MODULE_RELDATE "03/12/15"
90 #define PFX DRV_MODULE_NAME ": "
91 #define ERR_PFX KERN_ERR PFX
93 #include <linux/module.h>
94 #include <linux/kernel.h>
95 #include <linux/string.h>
96 #include <linux/timer.h>
97 #include <linux/errno.h>
98 #include <linux/ioport.h>
99 #include <linux/slab.h>
100 #include <linux/interrupt.h>
101 #include <linux/pci.h>
102 #include <linux/netdevice.h>
103 #include <linux/etherdevice.h>
104 #include <linux/skbuff.h>
105 #include <linux/init.h>
106 #include <linux/delay.h>
107 #include <linux/ethtool.h>
108 #include <linux/if_vlan.h>
109 #include <linux/crc32.h>
110 #include <asm/processor.h>
111 #include <asm/bitops.h>
113 #include <asm/uaccess.h>
114 #include <linux/in6.h>
115 #include <asm/checksum.h>
116 #include <linux/version.h>
119 #include "typhoon-firmware.h"
/* Version banner printed at probe time. */
121 static char version[] __devinitdata =
122 "typhoon.c: version " DRV_MODULE_VERSION " (" DRV_MODULE_RELDATE ")\n";
124 MODULE_AUTHOR("David Dillow <dave@thedillows.org>");
125 MODULE_LICENSE("GPL");
126 MODULE_DESCRIPTION("3Com Typhoon Family (3C990, 3CR990, and variants)");
127 MODULE_PARM(rx_copybreak, "i");
/* Compile-time sanity checks: the 3XP TSO engine handles at most 32 SG
 * entries, and the Tx ring must hold at least one maximally-fragmented skb. */
129 #if defined(NETIF_F_TSO) && MAX_SKB_FRAGS > 32
130 #warning Typhoon only supports 32 entries in its SG list for TSO, disabling TSO
134 #if TXLO_ENTRIES <= (2 * MAX_SKB_FRAGS)
135 #error TX ring too small!
/* Per-card capability record; instances live in typhoon_card_info[] below.
 * NOTE(review): this listing elides the struct body and the enum header --
 * the enumerators below index typhoon_card_info[] (see comment at its
 * definition); verify against the full source. */
138 struct typhoon_card_info {
/* Capability flag bits stored in the card info entries: */
143 #define TYPHOON_CRYPTO_NONE 0x00
144 #define TYPHOON_CRYPTO_DES 0x01
145 #define TYPHOON_CRYPTO_3DES 0x02
146 #define TYPHOON_CRYPTO_VARIABLE 0x04
147 #define TYPHOON_FIBER 0x08
148 #define TYPHOON_WAKEUP_NEEDS_RESET 0x10
151 TYPHOON_TX = 0, TYPHOON_TX95, TYPHOON_TX97, TYPHOON_SVR,
152 TYPHOON_SVR95, TYPHOON_SVR97, TYPHOON_TXM, TYPHOON_BSVR,
153 TYPHOON_FX95, TYPHOON_FX97, TYPHOON_FX95SVR, TYPHOON_FX97SVR,
157 /* directly indexed by enum typhoon_cards, above */
/* NOTE(review): the listing drops the flags line of some entries
 * (e.g. 3CR990-TX-95, 3CR990SVR95) -- verify against the full source. */
158 static struct typhoon_card_info typhoon_card_info[] __devinitdata = {
159 { "3Com Typhoon (3C990-TX)",
160 TYPHOON_CRYPTO_NONE},
161 { "3Com Typhoon (3CR990-TX-95)",
163 { "3Com Typhoon (3CR990-TX-97)",
164 TYPHOON_CRYPTO_DES | TYPHOON_CRYPTO_3DES},
165 { "3Com Typhoon (3C990SVR)",
166 TYPHOON_CRYPTO_NONE},
167 { "3Com Typhoon (3CR990SVR95)",
169 { "3Com Typhoon (3CR990SVR97)",
170 TYPHOON_CRYPTO_DES | TYPHOON_CRYPTO_3DES},
171 { "3Com Typhoon2 (3C990B-TX-M)",
172 TYPHOON_CRYPTO_VARIABLE},
173 { "3Com Typhoon2 (3C990BSVR)",
174 TYPHOON_CRYPTO_VARIABLE},
175 { "3Com Typhoon (3CR990-FX-95)",
176 TYPHOON_CRYPTO_DES | TYPHOON_FIBER},
177 { "3Com Typhoon (3CR990-FX-97)",
178 TYPHOON_CRYPTO_DES | TYPHOON_CRYPTO_3DES | TYPHOON_FIBER},
179 { "3Com Typhoon (3CR990-FX-95 Server)",
180 TYPHOON_CRYPTO_DES | TYPHOON_FIBER},
181 { "3Com Typhoon (3CR990-FX-97 Server)",
182 TYPHOON_CRYPTO_DES | TYPHOON_CRYPTO_3DES | TYPHOON_FIBER},
183 { "3Com Typhoon2 (3C990B-FX-97)",
184 TYPHOON_CRYPTO_VARIABLE | TYPHOON_FIBER},
187 /* Notes on the new subsystem numbering scheme:
188 * bits 0-1 indicate crypto capabilities: (0) variable, (1) DES, or (2) 3DES
189 * bit 4 indicates if this card has secured firmware (we don't support it)
190 * bit 8 indicates if this is a (0) copper or (1) fiber card
191 * bits 12-16 indicate card type: (0) client and (1) server
/* PCI ID match table; driver_data carries the enum typhoon_cards index
 * used to look up typhoon_card_info[] at probe time. Newer (3CR990B/FX)
 * devices share a device ID and are distinguished by subsystem ID. */
193 static struct pci_device_id typhoon_pci_tbl[] = {
194 { PCI_VENDOR_ID_3COM, PCI_DEVICE_ID_3COM_3CR990,
195 PCI_ANY_ID, PCI_ANY_ID, 0, 0,TYPHOON_TX },
196 { PCI_VENDOR_ID_3COM, PCI_DEVICE_ID_3COM_3CR990_TX_95,
197 PCI_ANY_ID, PCI_ANY_ID, 0, 0, TYPHOON_TX95 },
198 { PCI_VENDOR_ID_3COM, PCI_DEVICE_ID_3COM_3CR990_TX_97,
199 PCI_ANY_ID, PCI_ANY_ID, 0, 0, TYPHOON_TX97 },
200 { PCI_VENDOR_ID_3COM, PCI_DEVICE_ID_3COM_3CR990B,
201 PCI_ANY_ID, 0x1000, 0, 0, TYPHOON_TXM },
202 { PCI_VENDOR_ID_3COM, PCI_DEVICE_ID_3COM_3CR990B,
203 PCI_ANY_ID, 0x1102, 0, 0, TYPHOON_FXM },
204 { PCI_VENDOR_ID_3COM, PCI_DEVICE_ID_3COM_3CR990B,
205 PCI_ANY_ID, 0x2000, 0, 0, TYPHOON_BSVR },
206 { PCI_VENDOR_ID_3COM, PCI_DEVICE_ID_3COM_3CR990_FX,
207 PCI_ANY_ID, 0x1101, 0, 0, TYPHOON_FX95 },
208 { PCI_VENDOR_ID_3COM, PCI_DEVICE_ID_3COM_3CR990_FX,
209 PCI_ANY_ID, 0x1102, 0, 0, TYPHOON_FX97 },
210 { PCI_VENDOR_ID_3COM, PCI_DEVICE_ID_3COM_3CR990_FX,
211 PCI_ANY_ID, 0x2101, 0, 0, TYPHOON_FX95SVR },
212 { PCI_VENDOR_ID_3COM, PCI_DEVICE_ID_3COM_3CR990_FX,
213 PCI_ANY_ID, 0x2102, 0, 0, TYPHOON_FX97SVR },
214 { PCI_VENDOR_ID_3COM, PCI_DEVICE_ID_3COM_3CR990SVR95,
215 PCI_ANY_ID, PCI_ANY_ID, 0, 0, TYPHOON_SVR95 },
216 { PCI_VENDOR_ID_3COM, PCI_DEVICE_ID_3COM_3CR990SVR97,
217 PCI_ANY_ID, PCI_ANY_ID, 0, 0, TYPHOON_SVR97 },
218 { PCI_VENDOR_ID_3COM, PCI_DEVICE_ID_3COM_3CR990SVR,
219 PCI_ANY_ID, PCI_ANY_ID, 0, 0, TYPHOON_SVR },
222 MODULE_DEVICE_TABLE(pci, typhoon_pci_tbl);
224 /* Define the shared memory area
225 * Align everything the 3XP will normally be using.
226 * We'll need to move/align txHi if we start using that ring.
228 #define __3xp_aligned ____cacheline_aligned
/* Host/NIC shared DMA area: the 3XP reads and writes these rings and
 * indexes directly, so the layout is part of the hardware contract
 * (hence the packed attribute and explicit cacheline alignment). */
229 struct typhoon_shared {
230 struct typhoon_interface iface;
231 struct typhoon_indexes indexes __3xp_aligned;
232 struct tx_desc txLo[TXLO_ENTRIES] __3xp_aligned;
233 struct rx_desc rxLo[RX_ENTRIES] __3xp_aligned;
234 struct rx_desc rxHi[RX_ENTRIES] __3xp_aligned;
235 struct cmd_desc cmd[COMMAND_ENTRIES] __3xp_aligned;
236 struct resp_desc resp[RESPONSE_ENTRIES] __3xp_aligned;
237 struct rx_free rxBuff[RXFREE_ENTRIES] __3xp_aligned;
/* txHi is currently unused; see the ring-size comment near TXHI_ENTRIES. */
239 struct tx_desc txHi[TXHI_ENTRIES];
240 } __attribute__ ((packed));
/* Per-adapter private state (dev->priv). Fields are grouped by cacheline
 * so the Tx path, the Irq/Rx path, and the slow command path don't
 * false-share. NOTE(review): the listing elides the struct header and
 * several fields -- verify against the full source. */
248 /* Tx cache line section */
249 struct transmit_ring txLoRing ____cacheline_aligned;
250 struct pci_dev * tx_pdev;
251 void __iomem *tx_ioaddr;
254 /* Irq/Rx cache line section */
255 void __iomem *ioaddr ____cacheline_aligned;
256 struct typhoon_indexes *indexes;
261 struct basic_ring rxLoRing;
262 struct pci_dev * pdev;
263 struct net_device * dev;
264 spinlock_t state_lock;
265 struct vlan_group * vlgrp;
266 struct basic_ring rxHiRing;
267 struct basic_ring rxBuffRing;
268 struct rxbuff_ent rxbuffers[RXENT_ENTRIES];
270 /* general section */
/* command_lock serializes writers of the command ring (see
 * typhoon_issue_command() and typhoon_hello()). */
271 spinlock_t command_lock ____cacheline_aligned;
272 struct basic_ring cmdRing;
273 struct basic_ring respRing;
274 struct net_device_stats stats;
/* stats accumulated before the last runtime stop; folded back in by
 * typhoon_do_get_stats(). */
275 struct net_device_stats stats_saved;
277 struct typhoon_shared * shared;
278 dma_addr_t shared_dma;
284 /* unused stuff (future use) */
286 struct transmit_ring txHiRing;
/* How typhoon_reset()/command paths wait for the 3XP: not at all, spin
 * with udelay(), or allow scheduling between polls. */
289 enum completion_wait_values {
290 NoWait = 0, WaitNoSleep, WaitSleep,
293 /* These are the values for the typhoon.card_state variable.
294 * These determine where the statistics will come from in get_stats().
295 * The sleep image does not support the statistics we need.
298 Sleeping = 0, Running,
301 /* PCI writes are not guaranteed to be posted in order, but outstanding writes
302 * cannot pass a read, so this forces current writes to post.
304 #define typhoon_post_pci_writes(x) \
305 do { readl(x + TYPHOON_REG_HEARTBEAT); } while(0)
307 /* We'll wait up to six seconds for a reset, and half a second normally.
/* Polling granularity in microseconds; the timeouts below are expressed
 * as iteration counts of this delay. */
309 #define TYPHOON_UDELAY 50
310 #define TYPHOON_RESET_TIMEOUT_SLEEP (6 * HZ)
311 #define TYPHOON_RESET_TIMEOUT_NOSLEEP ((6 * 1000000) / TYPHOON_UDELAY)
312 #define TYPHOON_WAIT_TIMEOUT ((1000000 / 2) / TYPHOON_UDELAY)
/* Compat: synchronize_irq() only grew an irq argument in 2.5.28. */
314 #if LINUX_VERSION_CODE < KERNEL_VERSION(2, 5, 28)
315 #define typhoon_synchronize_irq(x) synchronize_irq()
317 #define typhoon_synchronize_irq(x) synchronize_irq(x)
/* TSO compat shims: stub everything out when the kernel lacks NETIF_F_TSO
 * so the Tx path can use skb_tso_size() unconditionally. */
320 #if defined(NETIF_F_TSO)
321 #define skb_tso_size(x) (skb_shinfo(x)->tso_size)
322 #define TSO_NUM_DESCRIPTORS 2
323 #define TSO_OFFLOAD_ON TYPHOON_OFFLOAD_TCP_SEGMENT
325 #define NETIF_F_TSO 0
326 #define skb_tso_size(x) 0
327 #define TSO_NUM_DESCRIPTORS 0
328 #define TSO_OFFLOAD_ON 0
/* Advance a byte-offset ring index by 'count' entries, wrapping at the
 * ring size. Indices are byte offsets, not entry counts. */
332 typhoon_inc_index(u32 *index, const int count, const int num_entries)
334 /* Increment a ring index -- we can use this for all rings except
335 * the Rx rings, as they use different size descriptors
336 * otherwise, everything is the same size as a cmd_desc
338 *index += count * sizeof(struct cmd_desc);
339 *index %= num_entries * sizeof(struct cmd_desc);
/* Per-ring wrappers around typhoon_inc_index(): */
343 typhoon_inc_cmd_index(u32 *index, const int count)
345 typhoon_inc_index(index, count, COMMAND_ENTRIES);
349 typhoon_inc_resp_index(u32 *index, const int count)
351 typhoon_inc_index(index, count, RESPONSE_ENTRIES);
355 typhoon_inc_rxfree_index(u32 *index, const int count)
357 typhoon_inc_index(index, count, RXFREE_ENTRIES);
361 typhoon_inc_tx_index(u32 *index, const int count)
363 /* if we start using the Hi Tx ring, this needs updating */
364 typhoon_inc_index(index, count, TXLO_ENTRIES);
368 typhoon_inc_rx_index(u32 *index, const int count)
370 /* sizeof(struct rx_desc) != sizeof(struct cmd_desc) */
371 *index += count * sizeof(struct rx_desc);
372 *index %= RX_ENTRIES * sizeof(struct rx_desc);
/* Soft-reset the 3XP. Masks and acks all interrupts, pulses the soft
 * reset register, then (unless NoWait) polls for the card to report
 * WAITING_FOR_HOST, sleeping between polls when wait_type == WaitSleep. */
376 typhoon_reset(void __iomem *ioaddr, int wait_type)
381 if(wait_type == WaitNoSleep)
382 timeout = TYPHOON_RESET_TIMEOUT_NOSLEEP;
384 timeout = TYPHOON_RESET_TIMEOUT_SLEEP;
/* Quiesce: mask all interrupt sources and clear anything pending. */
386 writel(TYPHOON_INTR_ALL, ioaddr + TYPHOON_REG_INTR_MASK);
387 writel(TYPHOON_INTR_ALL, ioaddr + TYPHOON_REG_INTR_STATUS);
389 writel(TYPHOON_RESET_ALL, ioaddr + TYPHOON_REG_SOFT_RESET);
/* Read back to force the posted write out before releasing reset. */
390 typhoon_post_pci_writes(ioaddr);
392 writel(TYPHOON_RESET_NONE, ioaddr + TYPHOON_REG_SOFT_RESET);
394 if(wait_type != NoWait) {
395 for(i = 0; i < timeout; i++) {
396 if(readl(ioaddr + TYPHOON_REG_STATUS) ==
397 TYPHOON_STATUS_WAITING_FOR_HOST)
400 if(wait_type == WaitSleep) {
401 set_current_state(TASK_UNINTERRUPTIBLE);
404 udelay(TYPHOON_UDELAY);
/* Leave interrupts masked and acked for the caller to re-enable. */
411 writel(TYPHOON_INTR_ALL, ioaddr + TYPHOON_REG_INTR_MASK);
412 writel(TYPHOON_INTR_ALL, ioaddr + TYPHOON_REG_INTR_STATUS);
416 /* The 3XP seems to need a little extra time to complete the load
417 * of the sleep image before we can reliably boot it. Failure to
418 * do this occasionally results in a hung adapter after boot in
419 * typhoon_init_one() while trying to read the MAC address or
420 * putting the card to sleep. 3Com's driver waits 5ms, but
421 * that seems to be overkill -- with a 50usec delay, it survives
422 * 35000 typhoon_init_one() calls, where it only makes it 25-100
425 * As it turns out, still occasionally getting a hung adapter,
426 * so I'm bumping it to 100us.
/* Busy-poll the status register until it equals wait_value, giving up
 * after TYPHOON_WAIT_TIMEOUT iterations of TYPHOON_UDELAY each. */
431 typhoon_wait_status(void __iomem *ioaddr, u32 wait_value)
435 for(i = 0; i < TYPHOON_WAIT_TIMEOUT; i++) {
436 if(readl(ioaddr + TYPHOON_REG_STATUS) == wait_value)
438 udelay(TYPHOON_UDELAY);
/* Translate a READ_MEDIA_STATUS response into netif carrier state. */
448 typhoon_media_status(struct net_device *dev, struct resp_desc *resp)
450 if(resp->parm1 & TYPHOON_MEDIA_STAT_NO_LINK)
451 netif_carrier_off(dev);
453 netif_carrier_on(dev);
/* Answer the card's keep-alive "hello" by posting a HELLO_RESP command.
 * Best-effort: if command_lock is contended, a command is already in
 * flight and will serve as the response, so we simply skip. */
457 typhoon_hello(struct typhoon *tp)
459 struct basic_ring *ring = &tp->cmdRing;
460 struct cmd_desc *cmd;
462 /* We only get a hello request if we've not sent anything to the
463 * card in a long while. If the lock is held, then we're in the
464 * process of issuing a command, so we don't need to respond.
466 if(spin_trylock(&tp->command_lock)) {
467 cmd = (struct cmd_desc *)(ring->ringBase + ring->lastWrite);
468 typhoon_inc_cmd_index(&ring->lastWrite, 1);
470 INIT_COMMAND_NO_RESPONSE(cmd, TYPHOON_CMD_HELLO_RESP);
/* Kick the card by publishing the new write index. */
472 writel(ring->lastWrite, tp->ioaddr + TYPHOON_REG_CMD_READY);
473 spin_unlock(&tp->command_lock);
/* Drain the response ring. Responses matching an outstanding command
 * (resp->seqNo set) are copied into resp_save (up to resp_size entries,
 * handling ring wrap); media-status and hello responses are handled
 * inline; anything else is logged and dropped. Returns nonzero once the
 * awaited response has been captured (i.e. resp_save consumed or NULL). */
478 typhoon_process_response(struct typhoon *tp, int resp_size,
479 struct resp_desc *resp_save)
481 struct typhoon_indexes *indexes = tp->indexes;
482 struct resp_desc *resp;
483 u8 *base = tp->respRing.ringBase;
484 int count, len, wrap_len;
488 cleared = le32_to_cpu(indexes->respCleared);
489 ready = le32_to_cpu(indexes->respReady);
490 while(cleared != ready) {
491 resp = (struct resp_desc *)(base + cleared);
/* numDesc is the count of extra descriptors following this one. */
492 count = resp->numDesc + 1;
493 if(resp_save && resp->seqNo) {
494 if(count > resp_size) {
495 resp_save->flags = TYPHOON_RESP_ERROR;
500 len = count * sizeof(*resp);
/* A multi-descriptor response may straddle the ring end; copy in
 * two pieces when it wraps. */
501 if(unlikely(cleared + len > RESPONSE_RING_SIZE)) {
502 wrap_len = cleared + len - RESPONSE_RING_SIZE;
503 len = RESPONSE_RING_SIZE - cleared;
506 memcpy(resp_save, resp, len);
507 if(unlikely(wrap_len)) {
508 resp_save += len / sizeof(*resp);
509 memcpy(resp_save, base, wrap_len);
513 } else if(resp->cmd == TYPHOON_CMD_READ_MEDIA_STATUS) {
514 typhoon_media_status(tp->dev, resp);
515 } else if(resp->cmd == TYPHOON_CMD_HELLO_RESP) {
518 printk(KERN_ERR "%s: dumping unexpected response "
519 "0x%04x:%d:0x%02x:0x%04x:%08x:%08x\n",
520 tp->name, le16_to_cpu(resp->cmd),
521 resp->numDesc, resp->flags,
522 le16_to_cpu(resp->parm1),
523 le32_to_cpu(resp->parm2),
524 le32_to_cpu(resp->parm3));
528 typhoon_inc_resp_index(&cleared, count);
/* Tell the card how far we've consumed. */
531 indexes->respCleared = cpu_to_le32(cleared);
533 return (resp_save == NULL);
/* Number of free entries in a cmd_desc-sized ring, given byte-offset
 * read/write indices. One slot is always kept unused so a full ring is
 * distinguishable from an empty one. */
537 typhoon_num_free(int lastWrite, int lastRead, int ringSize)
539 /* this works for all descriptors but rx_desc, as they are a
540 * different size than the cmd_desc -- everyone else is the same
542 lastWrite /= sizeof(struct cmd_desc);
543 lastRead /= sizeof(struct cmd_desc);
544 return (ringSize + lastRead - lastWrite - 1) % ringSize;
/* Free slots in the command ring (card consumes via cmdCleared). */
548 typhoon_num_free_cmd(struct typhoon *tp)
550 int lastWrite = tp->cmdRing.lastWrite;
551 int cmdCleared = le32_to_cpu(tp->indexes->cmdCleared);
553 return typhoon_num_free(lastWrite, cmdCleared, COMMAND_ENTRIES);
/* Free slots in the response ring (card produces via respReady). */
557 typhoon_num_free_resp(struct typhoon *tp)
559 int respReady = le32_to_cpu(tp->indexes->respReady);
560 int respCleared = le32_to_cpu(tp->indexes->respCleared);
562 return typhoon_num_free(respReady, respCleared, RESPONSE_ENTRIES);
566 typhoon_num_free_tx(struct transmit_ring *ring)
568 /* if we start using the Hi Tx ring, this needs updating */
569 return typhoon_num_free(ring->lastWrite, ring->lastRead, TXLO_ENTRIES);
/* Post num_cmd command descriptors to the 3XP and, if the command
 * requests a response, spin (up to ~8ms) until typhoon_process_response()
 * captures num_resp descriptors into resp. Serialized by command_lock;
 * must not sleep (called from many non-sleeping contexts). */
573 typhoon_issue_command(struct typhoon *tp, int num_cmd, struct cmd_desc *cmd,
574 int num_resp, struct resp_desc *resp)
576 struct typhoon_indexes *indexes = tp->indexes;
577 struct basic_ring *ring = &tp->cmdRing;
578 struct resp_desc local_resp;
581 int freeCmd, freeResp;
584 spin_lock(&tp->command_lock);
586 freeCmd = typhoon_num_free_cmd(tp);
587 freeResp = typhoon_num_free_resp(tp);
589 if(freeCmd < num_cmd || freeResp < num_resp) {
590 printk("%s: no descs for cmd, had (needed) %d (%d) cmd, "
591 "%d (%d) resp\n", tp->name, freeCmd, num_cmd,
597 if(cmd->flags & TYPHOON_CMD_RESPOND) {
598 /* If we're expecting a response, but the caller hasn't given
599 * us a place to put it, we'll provide one.
601 tp->awaiting_resp = 1;
/* Copy the command(s) into the ring, splitting the memcpy when the
 * block wraps past the end of the ring. */
609 len = num_cmd * sizeof(*cmd);
610 if(unlikely(ring->lastWrite + len > COMMAND_RING_SIZE)) {
611 wrap_len = ring->lastWrite + len - COMMAND_RING_SIZE;
612 len = COMMAND_RING_SIZE - ring->lastWrite;
615 memcpy(ring->ringBase + ring->lastWrite, cmd, len);
616 if(unlikely(wrap_len)) {
617 struct cmd_desc *wrap_ptr = cmd;
618 wrap_ptr += len / sizeof(*cmd);
619 memcpy(ring->ringBase, wrap_ptr, wrap_len);
622 typhoon_inc_cmd_index(&ring->lastWrite, num_cmd);
624 /* "I feel a presence... another warrior is on the mesa."
/* Publish the new write index and flush the posted write. */
627 writel(ring->lastWrite, tp->ioaddr + TYPHOON_REG_CMD_READY);
628 typhoon_post_pci_writes(tp->ioaddr);
630 if((cmd->flags & TYPHOON_CMD_RESPOND) == 0)
633 /* Ugh. We'll be here about 8ms, spinning our thumbs, unable to
634 * preempt or do anything other than take interrupts. So, don't
635 * wait for a response unless you have to.
637 * I've thought about trying to sleep here, but we're called
638 * from many contexts that don't allow that. Also, given the way
639 * 3Com has implemented irq coalescing, we would likely timeout --
640 * this has been observed in real life!
642 * The big killer is we have to wait to get stats from the card,
643 * though we could go to a periodic refresh of those if we don't
644 * mind them getting somewhat stale. The rest of the waiting
645 * commands occur during open/close/suspend/resume, so they aren't
646 * time critical. Creating SAs in the future will also have to
650 for(i = 0; i < TYPHOON_WAIT_TIMEOUT && !got_resp; i++) {
651 if(indexes->respCleared != indexes->respReady)
652 got_resp = typhoon_process_response(tp, num_resp,
654 udelay(TYPHOON_UDELAY);
662 /* Collect the error response even if we don't care about the
663 * rest of the response
665 if(resp->flags & TYPHOON_RESP_ERROR)
669 if(tp->awaiting_resp) {
670 tp->awaiting_resp = 0;
673 /* Ugh. If a response was added to the ring between
674 * the call to typhoon_process_response() and the clearing
675 * of tp->awaiting_resp, we could have missed the interrupt
676 * and it could hang in the ring an indeterminate amount of
677 * time. So, check for it, and interrupt ourselves if this
680 if(indexes->respCleared != indexes->respReady)
681 writel(1, tp->ioaddr + TYPHOON_REG_SELF_INTERRUPT);
684 spin_unlock(&tp->command_lock);
/* VLAN core callback: attach/detach a vlan_group. Toggles the 3XP's
 * VLAN offload bit via SET_OFFLOAD_TASKS when the enabled/disabled state
 * actually changes; tp->vlgrp itself is protected by state_lock. */
689 typhoon_vlan_rx_register(struct net_device *dev, struct vlan_group *grp)
691 struct typhoon *tp = (struct typhoon *) dev->priv;
692 struct cmd_desc xp_cmd;
695 spin_lock_bh(&tp->state_lock);
/* !old != !new: true only on an off->on or on->off transition. */
696 if(!tp->vlgrp != !grp) {
697 /* We've either been turned on for the first time, or we've
698 * been turned off. Update the 3XP.
701 tp->offload |= TYPHOON_OFFLOAD_VLAN;
703 tp->offload &= ~TYPHOON_OFFLOAD_VLAN;
705 /* If the interface is up, the runtime is running -- and we
706 * must be up for the vlan core to call us.
708 * Do the command outside of the spin lock, as it is slow.
710 INIT_COMMAND_WITH_RESPONSE(&xp_cmd,
711 TYPHOON_CMD_SET_OFFLOAD_TASKS);
712 xp_cmd.parm2 = tp->offload;
713 xp_cmd.parm3 = tp->offload;
714 spin_unlock_bh(&tp->state_lock);
715 err = typhoon_issue_command(tp, 1, &xp_cmd, 0, NULL);
717 printk("%s: vlan offload error %d\n", tp->name, -err);
718 spin_lock_bh(&tp->state_lock);
721 /* now make the change visible */
723 spin_unlock_bh(&tp->state_lock);
/* VLAN core callback: forget a vid. Only clears the group's slot under
 * state_lock; the hardware strips tags regardless (see header comment). */
727 typhoon_vlan_rx_kill_vid(struct net_device *dev, unsigned short vid)
729 struct typhoon *tp = (struct typhoon *) dev->priv;
730 spin_lock_bh(&tp->state_lock);
732 tp->vlgrp->vlan_devices[vid] = NULL;
733 spin_unlock_bh(&tp->state_lock);
/* Write a TCP-segmentation option descriptor into the Tx ring for skb.
 * ring_dma is the bus address of the ring base, used to point the card's
 * respAddrLo back at this descriptor's bytesTx field. */
737 typhoon_tso_fill(struct sk_buff *skb, struct transmit_ring *txRing,
740 struct tcpopt_desc *tcpd;
741 u32 tcpd_offset = ring_dma;
743 tcpd = (struct tcpopt_desc *) (txRing->ringBase + txRing->lastWrite);
/* Bus address of this descriptor's bytesTx member. */
744 tcpd_offset += txRing->lastWrite;
745 tcpd_offset += offsetof(struct tcpopt_desc, bytesTx);
746 typhoon_inc_tx_index(&txRing->lastWrite, 1);
748 tcpd->flags = TYPHOON_OPT_DESC | TYPHOON_OPT_TCP_SEG;
750 tcpd->mss_flags = cpu_to_le16(skb_tso_size(skb));
/* Whole skb in one option descriptor: both first and last segment. */
751 tcpd->mss_flags |= TYPHOON_TSO_FIRST | TYPHOON_TSO_LAST;
752 tcpd->respAddrLo = cpu_to_le32(tcpd_offset);
753 tcpd->bytesTx = cpu_to_le32(skb->len);
/* hard_start_xmit: queue skb on the low-priority Tx ring. Builds one
 * packet descriptor (stashing the skb pointer in addr/addrHi for Tx
 * completion), an optional TSO option descriptor, and one fragment
 * descriptor per DMA-mapped piece, then kicks the card and stops the
 * queue if a worst-case packet would no longer fit. */
758 typhoon_start_tx(struct sk_buff *skb, struct net_device *dev)
760 struct typhoon *tp = (struct typhoon *) dev->priv;
761 struct transmit_ring *txRing;
762 struct tx_desc *txd, *first_txd;
766 /* we have two rings to choose from, but we only use txLo for now
767 * If we start using the Hi ring as well, we'll need to update
768 * typhoon_stop_runtime(), typhoon_interrupt(), typhoon_num_free_tx(),
769 * and TXHI_ENTRIES to match, as well as update the TSO code below
770 * to get the right DMA address
772 txRing = &tp->txLoRing;
774 /* We need one descriptor for each fragment of the sk_buff, plus the
775 * one for the ->data area of it.
777 * The docs say a maximum of 16 fragment descriptors per TCP option
778 * descriptor, then make a new packet descriptor and option descriptor
779 * for the next 16 fragments. The engineers say just an option
780 * descriptor is needed. I've tested up to 26 fragments with a single
781 * packet descriptor/option descriptor combo, so I use that for now.
783 * If problems develop with TSO, check this first.
785 numDesc = skb_shinfo(skb)->nr_frags + 1;
786 if(skb_tso_size(skb))
789 /* When checking for free space in the ring, we need to also
790 * account for the initial Tx descriptor, and we always must leave
791 * at least one descriptor unused in the ring so that it doesn't
792 * wrap and look empty.
794 * The only time we should loop here is when we hit the race
795 * between marking the queue awake and updating the cleared index.
796 * Just loop and it will appear. This comes from the acenic driver.
798 while(unlikely(typhoon_num_free_tx(txRing) < (numDesc + 2)))
801 first_txd = (struct tx_desc *) (txRing->ringBase + txRing->lastWrite);
802 typhoon_inc_tx_index(&txRing->lastWrite, 1);
804 first_txd->flags = TYPHOON_TX_DESC | TYPHOON_DESC_VALID;
805 first_txd->numDesc = 0;
/* Stash the skb pointer in the (otherwise unused) addr fields so the
 * Tx-complete path can find and free it. */
807 first_txd->addr = (u64)((unsigned long) skb) & 0xffffffff;
808 first_txd->addrHi = (u64)((unsigned long) skb) >> 32;
809 first_txd->processFlags = 0;
811 if(skb->ip_summed == CHECKSUM_HW) {
812 /* The 3XP will figure out if this is UDP/TCP */
813 first_txd->processFlags |= TYPHOON_TX_PF_TCP_CHKSUM;
814 first_txd->processFlags |= TYPHOON_TX_PF_UDP_CHKSUM;
815 first_txd->processFlags |= TYPHOON_TX_PF_IP_CHKSUM;
818 if(vlan_tx_tag_present(skb)) {
819 first_txd->processFlags |=
820 TYPHOON_TX_PF_INSERT_VLAN | TYPHOON_TX_PF_VLAN_PRIORITY;
/* Tag goes to the card in big-endian within the flags word. */
821 first_txd->processFlags |=
822 cpu_to_le32(htons(vlan_tx_tag_get(skb)) <<
823 TYPHOON_TX_PF_VLAN_TAG_SHIFT);
826 if(skb_tso_size(skb)) {
827 first_txd->processFlags |= TYPHOON_TX_PF_TCP_SEGMENT;
828 first_txd->numDesc++;
830 typhoon_tso_fill(skb, txRing, tp->txlo_dma_addr);
833 txd = (struct tx_desc *) (txRing->ringBase + txRing->lastWrite);
834 typhoon_inc_tx_index(&txRing->lastWrite, 1);
836 /* No need to worry about padding packet -- the firmware pads
837 * it with zeros to ETH_ZLEN for us.
839 if(skb_shinfo(skb)->nr_frags == 0) {
/* Linear skb: single fragment descriptor covers the whole payload. */
840 skb_dma = pci_map_single(tp->tx_pdev, skb->data, skb->len,
842 txd->flags = TYPHOON_FRAG_DESC | TYPHOON_DESC_VALID;
843 txd->len = cpu_to_le16(skb->len);
844 txd->addr = cpu_to_le32(skb_dma);
846 first_txd->numDesc++;
/* Nonlinear skb: map the head, then one descriptor per page frag. */
850 len = skb_headlen(skb);
851 skb_dma = pci_map_single(tp->tx_pdev, skb->data, len,
853 txd->flags = TYPHOON_FRAG_DESC | TYPHOON_DESC_VALID;
854 txd->len = cpu_to_le16(len);
855 txd->addr = cpu_to_le32(skb_dma);
857 first_txd->numDesc++;
859 for(i = 0; i < skb_shinfo(skb)->nr_frags; i++) {
860 skb_frag_t *frag = &skb_shinfo(skb)->frags[i];
863 txd = (struct tx_desc *) (txRing->ringBase +
865 typhoon_inc_tx_index(&txRing->lastWrite, 1);
868 frag_addr = (void *) page_address(frag->page) +
870 skb_dma = pci_map_single(tp->tx_pdev, frag_addr, len,
872 txd->flags = TYPHOON_FRAG_DESC | TYPHOON_DESC_VALID;
873 txd->len = cpu_to_le16(len);
874 txd->addr = cpu_to_le32(skb_dma);
876 first_txd->numDesc++;
/* Publish the descriptors to the card. */
883 writel(txRing->lastWrite, tp->tx_ioaddr + txRing->writeRegister);
885 dev->trans_start = jiffies;
887 /* If we don't have room to put the worst case packet on the
888 * queue, then we must stop the queue. We need 2 extra
889 * descriptors -- one to prevent ring wrap, and one for the
892 numDesc = MAX_SKB_FRAGS + TSO_NUM_DESCRIPTORS + 1;
894 if(typhoon_num_free_tx(txRing) < (numDesc + 2)) {
895 netif_stop_queue(dev);
897 /* A Tx complete IRQ could have gotten in between, making
898 * the ring free again. Only need to recheck here, since
901 if(typhoon_num_free_tx(txRing) >= (numDesc + 2))
902 netif_wake_queue(dev);
/* Program the 3XP Rx filter from dev->flags and the multicast list:
 * promiscuous, all-multicast (when over multicast_filter_limit), or a
 * 64-bit CRC hash of the multicast addresses. Commands are fire-and-
 * forget (no response requested). */
909 typhoon_set_rx_mode(struct net_device *dev)
911 struct typhoon *tp = (struct typhoon *) dev->priv;
912 struct cmd_desc xp_cmd;
916 filter = TYPHOON_RX_FILTER_DIRECTED | TYPHOON_RX_FILTER_BROADCAST;
917 if(dev->flags & IFF_PROMISC) {
918 printk(KERN_NOTICE "%s: Promiscuous mode enabled.\n",
920 filter |= TYPHOON_RX_FILTER_PROMISCOUS;
921 } else if((dev->mc_count > multicast_filter_limit) ||
922 (dev->flags & IFF_ALLMULTI)) {
923 /* Too many to match, or accept all multicasts. */
924 filter |= TYPHOON_RX_FILTER_ALL_MCAST;
925 } else if(dev->mc_count) {
926 struct dev_mc_list *mclist;
929 memset(mc_filter, 0, sizeof(mc_filter));
930 for(i = 0, mclist = dev->mc_list; mclist && i < dev->mc_count;
931 i++, mclist = mclist->next) {
/* Hash on the low 6 bits of the Ethernet CRC of the address. */
932 int bit = ether_crc(ETH_ALEN, mclist->dmi_addr) & 0x3f;
933 mc_filter[bit >> 5] |= 1 << (bit & 0x1f);
936 INIT_COMMAND_NO_RESPONSE(&xp_cmd,
937 TYPHOON_CMD_SET_MULTICAST_HASH);
938 xp_cmd.parm1 = TYPHOON_MCAST_HASH_SET;
939 xp_cmd.parm2 = cpu_to_le32(mc_filter[0]);
940 xp_cmd.parm3 = cpu_to_le32(mc_filter[1]);
941 typhoon_issue_command(tp, 1, &xp_cmd, 0, NULL);
943 filter |= TYPHOON_RX_FILTER_MCAST_HASH;
946 INIT_COMMAND_NO_RESPONSE(&xp_cmd, TYPHOON_CMD_SET_RX_FILTER);
947 xp_cmd.parm1 = filter;
948 typhoon_issue_command(tp, 1, &xp_cmd, 0, NULL);
/* Fetch statistics from the card (READ_STATS, 7 response descriptors),
 * translate them into tp->stats, update cached link speed/duplex, and
 * fold in tp->stats_saved (counters accumulated before the last runtime
 * stop). Slow: blocks in typhoon_issue_command() for the response. */
952 typhoon_do_get_stats(struct typhoon *tp)
954 struct net_device_stats *stats = &tp->stats;
955 struct net_device_stats *saved = &tp->stats_saved;
956 struct cmd_desc xp_cmd;
957 struct resp_desc xp_resp[7];
958 struct stats_resp *s = (struct stats_resp *) xp_resp;
961 INIT_COMMAND_WITH_RESPONSE(&xp_cmd, TYPHOON_CMD_READ_STATS);
962 err = typhoon_issue_command(tp, 1, &xp_cmd, 7, xp_resp);
966 /* 3Com's Linux driver uses txMultipleCollisions as its
967 * collisions value, but there is some other collision info as well...
969 stats->tx_packets = le32_to_cpu(s->txPackets);
970 stats->tx_bytes = le32_to_cpu(s->txBytes);
971 stats->tx_errors = le32_to_cpu(s->txCarrierLost);
972 stats->tx_carrier_errors = le32_to_cpu(s->txCarrierLost);
973 stats->collisions = le32_to_cpu(s->txMultipleCollisions);
974 stats->rx_packets = le32_to_cpu(s->rxPacketsGood);
975 stats->rx_bytes = le32_to_cpu(s->rxBytesGood);
976 stats->rx_fifo_errors = le32_to_cpu(s->rxFifoOverruns);
977 stats->rx_errors = le32_to_cpu(s->rxFifoOverruns) +
978 le32_to_cpu(s->BadSSD) + le32_to_cpu(s->rxCrcErrors);
979 stats->rx_crc_errors = le32_to_cpu(s->rxCrcErrors);
980 stats->rx_length_errors = le32_to_cpu(s->rxOversized);
/* Link state piggybacks on the stats response; cache for ethtool. */
981 tp->speed = (s->linkStatus & TYPHOON_LINK_100MBPS) ?
982 SPEED_100 : SPEED_10;
983 tp->duplex = (s->linkStatus & TYPHOON_LINK_FULL_DUPLEX) ?
984 DUPLEX_FULL : DUPLEX_HALF;
986 /* add in the saved statistics
988 stats->tx_packets += saved->tx_packets;
989 stats->tx_bytes += saved->tx_bytes;
990 stats->tx_errors += saved->tx_errors;
991 stats->collisions += saved->collisions;
992 stats->rx_packets += saved->rx_packets;
993 stats->rx_bytes += saved->rx_bytes;
994 stats->rx_fifo_errors += saved->rx_fifo_errors;
995 stats->rx_errors += saved->rx_errors;
996 stats->rx_crc_errors += saved->rx_crc_errors;
997 stats->rx_length_errors += saved->rx_length_errors;
/* net_device get_stats hook. When the card is asleep the runtime image
 * can't report stats, so the saved snapshot is used (per the card_state
 * comment above); otherwise refresh from the hardware. */
1002 static struct net_device_stats *
1003 typhoon_get_stats(struct net_device *dev)
1005 struct typhoon *tp = (struct typhoon *) dev->priv;
1006 struct net_device_stats *stats = &tp->stats;
1007 struct net_device_stats *saved = &tp->stats_saved;
1010 if(tp->card_state == Sleeping)
1013 if(typhoon_do_get_stats(tp) < 0) {
1014 printk(KERN_ERR "%s: error getting stats\n", dev->name);
/* net_device set_mac_address hook: only allowed while the interface is
 * down; copies the new address into dev->dev_addr. */
1022 typhoon_set_mac_address(struct net_device *dev, void *addr)
1024 struct sockaddr *saddr = (struct sockaddr *) addr;
1026 if(netif_running(dev))
1029 memcpy(dev->dev_addr, saddr->sa_data, dev->addr_len);
/* ETHTOOL_GDRVINFO: fill driver name/version/bus info; firmware version
 * comes from a READ_VERSIONS command unless the card is asleep. */
1034 typhoon_ethtool_gdrvinfo(struct typhoon *tp, struct ethtool_drvinfo *info)
1036 struct pci_dev *pci_dev = tp->pdev;
1037 struct cmd_desc xp_cmd;
1038 struct resp_desc xp_resp[3];
1041 if(tp->card_state == Sleeping) {
1042 strcpy(info->fw_version, "Sleep image");
1044 INIT_COMMAND_WITH_RESPONSE(&xp_cmd, TYPHOON_CMD_READ_VERSIONS);
1045 if(typhoon_issue_command(tp, 1, &xp_cmd, 3, xp_resp) < 0) {
1046 strcpy(info->fw_version, "Unknown runtime");
/* The version string is returned raw in the second response desc. */
1048 strncpy(info->fw_version, (char *) &xp_resp[1], 32);
1049 info->fw_version[31] = 0;
1053 strcpy(info->driver, DRV_MODULE_NAME);
1054 strcpy(info->version, DRV_MODULE_VERSION);
1055 strcpy(info->bus_info, pci_name(pci_dev));
/* ETHTOOL_GSET: report supported/advertised modes from the configured
 * transceiver selection, fiber vs copper port type from capabilities,
 * and current speed/duplex (refreshed via typhoon_do_get_stats()). */
1059 typhoon_ethtool_gset(struct typhoon *tp, struct ethtool_cmd *cmd)
1061 cmd->supported = SUPPORTED_100baseT_Half | SUPPORTED_100baseT_Full |
1064 switch (tp->xcvr_select) {
1065 case TYPHOON_XCVR_10HALF:
1066 cmd->advertising = ADVERTISED_10baseT_Half;
1068 case TYPHOON_XCVR_10FULL:
1069 cmd->advertising = ADVERTISED_10baseT_Full;
1071 case TYPHOON_XCVR_100HALF:
1072 cmd->advertising = ADVERTISED_100baseT_Half;
1074 case TYPHOON_XCVR_100FULL:
1075 cmd->advertising = ADVERTISED_100baseT_Full;
1077 case TYPHOON_XCVR_AUTONEG:
1078 cmd->advertising = ADVERTISED_10baseT_Half |
1079 ADVERTISED_10baseT_Full |
1080 ADVERTISED_100baseT_Half |
1081 ADVERTISED_100baseT_Full |
1086 if(tp->capabilities & TYPHOON_FIBER) {
1087 cmd->supported |= SUPPORTED_FIBRE;
1088 cmd->advertising |= ADVERTISED_FIBRE;
1089 cmd->port = PORT_FIBRE;
1091 cmd->supported |= SUPPORTED_10baseT_Half |
1092 SUPPORTED_10baseT_Full |
1094 cmd->advertising |= ADVERTISED_TP;
1095 cmd->port = PORT_TP;
1098 /* need to get stats to make these link speed/duplex valid */
1099 typhoon_do_get_stats(tp);
1100 cmd->speed = tp->speed;
1101 cmd->duplex = tp->duplex;
1102 cmd->phy_address = 0;
1103 cmd->transceiver = XCVR_INTERNAL;
1104 if(tp->xcvr_select == TYPHOON_XCVR_AUTONEG)
1105 cmd->autoneg = AUTONEG_ENABLE;
1107 cmd->autoneg = AUTONEG_DISABLE;
1113 typhoon_ethtool_sset(struct typhoon *tp, struct ethtool_cmd *cmd)
1115 struct cmd_desc xp_cmd;
1119 if(cmd->autoneg == AUTONEG_ENABLE) {
1120 xcvr = TYPHOON_XCVR_AUTONEG;
1122 if(cmd->duplex == DUPLEX_HALF) {
1123 if(cmd->speed == SPEED_10)
1124 xcvr = TYPHOON_XCVR_10HALF;
1125 else if(cmd->speed == SPEED_100)
1126 xcvr = TYPHOON_XCVR_100HALF;
1129 } else if(cmd->duplex == DUPLEX_FULL) {
1130 if(cmd->speed == SPEED_10)
1131 xcvr = TYPHOON_XCVR_10FULL;
1132 else if(cmd->speed == SPEED_100)
1133 xcvr = TYPHOON_XCVR_100FULL;
1140 INIT_COMMAND_NO_RESPONSE(&xp_cmd, TYPHOON_CMD_XCVR_SELECT);
1141 xp_cmd.parm1 = cpu_to_le16(xcvr);
1142 err = typhoon_issue_command(tp, 1, &xp_cmd, 0, NULL);
1146 tp->xcvr_select = xcvr;
1147 if(cmd->autoneg == AUTONEG_ENABLE) {
1148 tp->speed = 0xff; /* invalid */
1149 tp->duplex = 0xff; /* invalid */
1151 tp->speed = cmd->speed;
1152 tp->duplex = cmd->duplex;
1159 typhoon_ethtool_ioctl(struct net_device *dev, void __user *useraddr)
1161 struct typhoon *tp = (struct typhoon *) dev->priv;
1164 if(copy_from_user(ðcmd, useraddr, sizeof(ethcmd)))
1168 case ETHTOOL_GDRVINFO: {
1169 struct ethtool_drvinfo info = { ETHTOOL_GDRVINFO };
1171 typhoon_ethtool_gdrvinfo(tp, &info);
1172 if(copy_to_user(useraddr, &info, sizeof(info)))
1176 case ETHTOOL_GSET: {
1177 struct ethtool_cmd cmd = { ETHTOOL_GSET };
1179 typhoon_ethtool_gset(tp, &cmd);
1180 if(copy_to_user(useraddr, &cmd, sizeof(cmd)))
1184 case ETHTOOL_SSET: {
1185 struct ethtool_cmd cmd;
1186 if(copy_from_user(&cmd, useraddr, sizeof(cmd)))
1189 return typhoon_ethtool_sset(tp, &cmd);
1191 case ETHTOOL_GLINK:{
1192 struct ethtool_value edata = { ETHTOOL_GLINK };
1194 edata.data = netif_carrier_ok(dev) ? 1 : 0;
1195 if(copy_to_user(useraddr, &edata, sizeof(edata)))
1199 case ETHTOOL_GWOL: {
1200 struct ethtool_wolinfo wol = { ETHTOOL_GWOL };
1202 if(tp->wol_events & TYPHOON_WAKE_LINK_EVENT)
1203 wol.wolopts |= WAKE_PHY;
1204 if(tp->wol_events & TYPHOON_WAKE_MAGIC_PKT)
1205 wol.wolopts |= WAKE_MAGIC;
1206 if(copy_to_user(useraddr, &wol, sizeof(wol)))
1210 case ETHTOOL_SWOL: {
1211 struct ethtool_wolinfo wol;
1213 if(copy_from_user(&wol, useraddr, sizeof(wol)))
1216 if(wol.wolopts & WAKE_PHY)
1217 tp->wol_events |= TYPHOON_WAKE_LINK_EVENT;
1218 if(wol.wolopts & WAKE_MAGIC)
1219 tp->wol_events |= TYPHOON_WAKE_MAGIC_PKT;
1230 typhoon_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd)
1234 return typhoon_ethtool_ioctl(dev, ifr->ifr_data);
1243 typhoon_wait_interrupt(void __iomem *ioaddr)
1247 for(i = 0; i < TYPHOON_WAIT_TIMEOUT; i++) {
1248 if(readl(ioaddr + TYPHOON_REG_INTR_STATUS) &
1249 TYPHOON_INTR_BOOTCMD)
1251 udelay(TYPHOON_UDELAY);
1257 writel(TYPHOON_INTR_BOOTCMD, ioaddr + TYPHOON_REG_INTR_STATUS);
1261 #define shared_offset(x) offsetof(struct typhoon_shared, x)
1264 typhoon_init_interface(struct typhoon *tp)
1266 struct typhoon_interface *iface = &tp->shared->iface;
1267 dma_addr_t shared_dma;
1269 memset(tp->shared, 0, sizeof(struct typhoon_shared));
1271 /* The *Hi members of iface are all init'd to zero by the memset().
1273 shared_dma = tp->shared_dma + shared_offset(indexes);
1274 iface->ringIndex = cpu_to_le32(shared_dma);
1276 shared_dma = tp->shared_dma + shared_offset(txLo);
1277 iface->txLoAddr = cpu_to_le32(shared_dma);
1278 iface->txLoSize = cpu_to_le32(TXLO_ENTRIES * sizeof(struct tx_desc));
1280 shared_dma = tp->shared_dma + shared_offset(txHi);
1281 iface->txHiAddr = cpu_to_le32(shared_dma);
1282 iface->txHiSize = cpu_to_le32(TXHI_ENTRIES * sizeof(struct tx_desc));
1284 shared_dma = tp->shared_dma + shared_offset(rxBuff);
1285 iface->rxBuffAddr = cpu_to_le32(shared_dma);
1286 iface->rxBuffSize = cpu_to_le32(RXFREE_ENTRIES *
1287 sizeof(struct rx_free));
1289 shared_dma = tp->shared_dma + shared_offset(rxLo);
1290 iface->rxLoAddr = cpu_to_le32(shared_dma);
1291 iface->rxLoSize = cpu_to_le32(RX_ENTRIES * sizeof(struct rx_desc));
1293 shared_dma = tp->shared_dma + shared_offset(rxHi);
1294 iface->rxHiAddr = cpu_to_le32(shared_dma);
1295 iface->rxHiSize = cpu_to_le32(RX_ENTRIES * sizeof(struct rx_desc));
1297 shared_dma = tp->shared_dma + shared_offset(cmd);
1298 iface->cmdAddr = cpu_to_le32(shared_dma);
1299 iface->cmdSize = cpu_to_le32(COMMAND_RING_SIZE);
1301 shared_dma = tp->shared_dma + shared_offset(resp);
1302 iface->respAddr = cpu_to_le32(shared_dma);
1303 iface->respSize = cpu_to_le32(RESPONSE_RING_SIZE);
1305 shared_dma = tp->shared_dma + shared_offset(zeroWord);
1306 iface->zeroAddr = cpu_to_le32(shared_dma);
1308 tp->indexes = &tp->shared->indexes;
1309 tp->txLoRing.ringBase = (u8 *) tp->shared->txLo;
1310 tp->txHiRing.ringBase = (u8 *) tp->shared->txHi;
1311 tp->rxLoRing.ringBase = (u8 *) tp->shared->rxLo;
1312 tp->rxHiRing.ringBase = (u8 *) tp->shared->rxHi;
1313 tp->rxBuffRing.ringBase = (u8 *) tp->shared->rxBuff;
1314 tp->cmdRing.ringBase = (u8 *) tp->shared->cmd;
1315 tp->respRing.ringBase = (u8 *) tp->shared->resp;
1317 tp->txLoRing.writeRegister = TYPHOON_REG_TX_LO_READY;
1318 tp->txHiRing.writeRegister = TYPHOON_REG_TX_HI_READY;
1320 tp->txlo_dma_addr = iface->txLoAddr;
1321 tp->card_state = Sleeping;
1324 tp->offload = TYPHOON_OFFLOAD_IP_CHKSUM | TYPHOON_OFFLOAD_TCP_CHKSUM;
1325 tp->offload |= TYPHOON_OFFLOAD_UDP_CHKSUM | TSO_OFFLOAD_ON;
1327 spin_lock_init(&tp->command_lock);
1328 spin_lock_init(&tp->state_lock);
1332 typhoon_init_rings(struct typhoon *tp)
1334 memset(tp->indexes, 0, sizeof(struct typhoon_indexes));
1336 tp->txLoRing.lastWrite = 0;
1337 tp->txHiRing.lastWrite = 0;
1338 tp->rxLoRing.lastWrite = 0;
1339 tp->rxHiRing.lastWrite = 0;
1340 tp->rxBuffRing.lastWrite = 0;
1341 tp->cmdRing.lastWrite = 0;
1342 tp->cmdRing.lastWrite = 0;
1344 tp->txLoRing.lastRead = 0;
1345 tp->txHiRing.lastRead = 0;
1349 typhoon_download_firmware(struct typhoon *tp)
1351 void __iomem *ioaddr = tp->ioaddr;
1352 struct pci_dev *pdev = tp->pdev;
1353 struct typhoon_file_header *fHdr;
1354 struct typhoon_section_header *sHdr;
1357 dma_addr_t dpage_dma;
1370 fHdr = (struct typhoon_file_header *) typhoon_firmware_image;
1371 image_data = (u8 *) fHdr;
1373 if(memcmp(fHdr->tag, "TYPHOON", 8)) {
1374 printk(KERN_ERR "%s: Invalid firmware image!\n", tp->name);
1378 /* Cannot just map the firmware image using pci_map_single() as
1379 * the firmware is part of the kernel/module image, so we allocate
1380 * some consistent memory to copy the sections into, as it is simpler,
1381 * and short-lived. If we ever split out and require a userland
1382 * firmware loader, then we can revisit this.
1385 dpage = pci_alloc_consistent(pdev, PAGE_SIZE, &dpage_dma);
1387 printk(KERN_ERR "%s: no DMA mem for firmware\n", tp->name);
1391 irqEnabled = readl(ioaddr + TYPHOON_REG_INTR_ENABLE);
1392 writel(irqEnabled | TYPHOON_INTR_BOOTCMD,
1393 ioaddr + TYPHOON_REG_INTR_ENABLE);
1394 irqMasked = readl(ioaddr + TYPHOON_REG_INTR_MASK);
1395 writel(irqMasked | TYPHOON_INTR_BOOTCMD,
1396 ioaddr + TYPHOON_REG_INTR_MASK);
1399 if(typhoon_wait_status(ioaddr, TYPHOON_STATUS_WAITING_FOR_HOST) < 0) {
1400 printk(KERN_ERR "%s: card ready timeout\n", tp->name);
1404 numSections = le32_to_cpu(fHdr->numSections);
1405 load_addr = le32_to_cpu(fHdr->startAddr);
1407 writel(TYPHOON_INTR_BOOTCMD, ioaddr + TYPHOON_REG_INTR_STATUS);
1408 writel(load_addr, ioaddr + TYPHOON_REG_DOWNLOAD_BOOT_ADDR);
1409 hmac = le32_to_cpu(fHdr->hmacDigest[0]);
1410 writel(hmac, ioaddr + TYPHOON_REG_DOWNLOAD_HMAC_0);
1411 hmac = le32_to_cpu(fHdr->hmacDigest[1]);
1412 writel(hmac, ioaddr + TYPHOON_REG_DOWNLOAD_HMAC_1);
1413 hmac = le32_to_cpu(fHdr->hmacDigest[2]);
1414 writel(hmac, ioaddr + TYPHOON_REG_DOWNLOAD_HMAC_2);
1415 hmac = le32_to_cpu(fHdr->hmacDigest[3]);
1416 writel(hmac, ioaddr + TYPHOON_REG_DOWNLOAD_HMAC_3);
1417 hmac = le32_to_cpu(fHdr->hmacDigest[4]);
1418 writel(hmac, ioaddr + TYPHOON_REG_DOWNLOAD_HMAC_4);
1419 typhoon_post_pci_writes(ioaddr);
1420 writel(TYPHOON_BOOTCMD_RUNTIME_IMAGE, ioaddr + TYPHOON_REG_COMMAND);
1422 image_data += sizeof(struct typhoon_file_header);
1424 /* The readl() in typhoon_wait_interrupt() will force the
1425 * last write to the command register to post, so
1426 * we don't need a typhoon_post_pci_writes() after it.
1428 for(i = 0; i < numSections; i++) {
1429 sHdr = (struct typhoon_section_header *) image_data;
1430 image_data += sizeof(struct typhoon_section_header);
1431 load_addr = le32_to_cpu(sHdr->startAddr);
1432 section_len = le32_to_cpu(sHdr->len);
1434 while(section_len) {
1435 len = min_t(u32, section_len, PAGE_SIZE);
1437 if(typhoon_wait_interrupt(ioaddr) < 0 ||
1438 readl(ioaddr + TYPHOON_REG_STATUS) !=
1439 TYPHOON_STATUS_WAITING_FOR_SEGMENT) {
1440 printk(KERN_ERR "%s: segment ready timeout\n",
1445 /* Do an pseudo IPv4 checksum on the data -- first
1446 * need to convert each u16 to cpu order before
1447 * summing. Fortunately, due to the properties of
1448 * the checksum, we can do this once, at the end.
1450 csum = csum_partial_copy_nocheck(image_data, dpage,
1452 csum = csum_fold(csum);
1453 csum = le16_to_cpu(csum);
1455 writel(len, ioaddr + TYPHOON_REG_BOOT_LENGTH);
1456 writel(csum, ioaddr + TYPHOON_REG_BOOT_CHECKSUM);
1457 writel(load_addr, ioaddr + TYPHOON_REG_BOOT_DEST_ADDR);
1458 writel(0, ioaddr + TYPHOON_REG_BOOT_DATA_HI);
1459 writel(dpage_dma, ioaddr + TYPHOON_REG_BOOT_DATA_LO);
1460 typhoon_post_pci_writes(ioaddr);
1461 writel(TYPHOON_BOOTCMD_SEG_AVAILABLE,
1462 ioaddr + TYPHOON_REG_COMMAND);
1470 if(typhoon_wait_interrupt(ioaddr) < 0 ||
1471 readl(ioaddr + TYPHOON_REG_STATUS) !=
1472 TYPHOON_STATUS_WAITING_FOR_SEGMENT) {
1473 printk(KERN_ERR "%s: final segment ready timeout\n", tp->name);
1477 writel(TYPHOON_BOOTCMD_DNLD_COMPLETE, ioaddr + TYPHOON_REG_COMMAND);
1479 if(typhoon_wait_status(ioaddr, TYPHOON_STATUS_WAITING_FOR_BOOT) < 0) {
1480 printk(KERN_ERR "%s: boot ready timeout, status 0x%0x\n",
1481 tp->name, readl(ioaddr + TYPHOON_REG_STATUS));
1488 writel(irqMasked, ioaddr + TYPHOON_REG_INTR_MASK);
1489 writel(irqEnabled, ioaddr + TYPHOON_REG_INTR_ENABLE);
1491 pci_free_consistent(pdev, PAGE_SIZE, dpage, dpage_dma);
1498 typhoon_boot_3XP(struct typhoon *tp, u32 initial_status)
1500 void __iomem *ioaddr = tp->ioaddr;
1502 if(typhoon_wait_status(ioaddr, initial_status) < 0) {
1503 printk(KERN_ERR "%s: boot ready timeout\n", tp->name);
1507 writel(0, ioaddr + TYPHOON_REG_BOOT_RECORD_ADDR_HI);
1508 writel(tp->shared_dma, ioaddr + TYPHOON_REG_BOOT_RECORD_ADDR_LO);
1509 typhoon_post_pci_writes(ioaddr);
1510 writel(TYPHOON_BOOTCMD_REG_BOOT_RECORD, ioaddr + TYPHOON_REG_COMMAND);
1512 if(typhoon_wait_status(ioaddr, TYPHOON_STATUS_RUNNING) < 0) {
1513 printk(KERN_ERR "%s: boot finish timeout (status 0x%x)\n",
1514 tp->name, readl(ioaddr + TYPHOON_REG_STATUS));
1518 /* Clear the Transmit and Command ready registers
1520 writel(0, ioaddr + TYPHOON_REG_TX_HI_READY);
1521 writel(0, ioaddr + TYPHOON_REG_CMD_READY);
1522 writel(0, ioaddr + TYPHOON_REG_TX_LO_READY);
1523 typhoon_post_pci_writes(ioaddr);
1524 writel(TYPHOON_BOOTCMD_BOOT, ioaddr + TYPHOON_REG_COMMAND);
1533 typhoon_clean_tx(struct typhoon *tp, struct transmit_ring *txRing,
1534 volatile u32 * index)
1536 u32 lastRead = txRing->lastRead;
1542 while(lastRead != le32_to_cpu(*index)) {
1543 tx = (struct tx_desc *) (txRing->ringBase + lastRead);
1544 type = tx->flags & TYPHOON_TYPE_MASK;
1546 if(type == TYPHOON_TX_DESC) {
1547 /* This tx_desc describes a packet.
1549 unsigned long ptr = tx->addr | ((u64)tx->addrHi << 32);
1550 struct sk_buff *skb = (struct sk_buff *) ptr;
1551 dev_kfree_skb_irq(skb);
1552 } else if(type == TYPHOON_FRAG_DESC) {
1553 /* This tx_desc describes a memory mapping. Free it.
1555 skb_dma = (dma_addr_t) le32_to_cpu(tx->addr);
1556 dma_len = le16_to_cpu(tx->len);
1557 pci_unmap_single(tp->pdev, skb_dma, dma_len,
1562 typhoon_inc_tx_index(&lastRead, 1);
1569 typhoon_tx_complete(struct typhoon *tp, struct transmit_ring *txRing,
1570 volatile u32 * index)
1573 int numDesc = MAX_SKB_FRAGS + 1;
1575 /* This will need changing if we start to use the Hi Tx ring. */
1576 lastRead = typhoon_clean_tx(tp, txRing, index);
1577 if(netif_queue_stopped(tp->dev) && typhoon_num_free(txRing->lastWrite,
1578 lastRead, TXLO_ENTRIES) > (numDesc + 2))
1579 netif_wake_queue(tp->dev);
1581 txRing->lastRead = lastRead;
1586 typhoon_recycle_rx_skb(struct typhoon *tp, u32 idx)
1588 struct typhoon_indexes *indexes = tp->indexes;
1589 struct rxbuff_ent *rxb = &tp->rxbuffers[idx];
1590 struct basic_ring *ring = &tp->rxBuffRing;
1593 if((ring->lastWrite + sizeof(*r)) % (RXFREE_ENTRIES * sizeof(*r)) ==
1594 indexes->rxBuffCleared) {
1595 /* no room in ring, just drop the skb
1597 dev_kfree_skb_any(rxb->skb);
1602 r = (struct rx_free *) (ring->ringBase + ring->lastWrite);
1603 typhoon_inc_rxfree_index(&ring->lastWrite, 1);
1605 r->physAddr = cpu_to_le32(rxb->dma_addr);
1607 /* Tell the card about it */
1609 indexes->rxBuffReady = cpu_to_le32(ring->lastWrite);
1613 typhoon_alloc_rx_skb(struct typhoon *tp, u32 idx)
1615 struct typhoon_indexes *indexes = tp->indexes;
1616 struct rxbuff_ent *rxb = &tp->rxbuffers[idx];
1617 struct basic_ring *ring = &tp->rxBuffRing;
1619 struct sk_buff *skb;
1620 dma_addr_t dma_addr;
1624 if((ring->lastWrite + sizeof(*r)) % (RXFREE_ENTRIES * sizeof(*r)) ==
1625 indexes->rxBuffCleared)
1628 skb = dev_alloc_skb(PKT_BUF_SZ);
1633 /* Please, 3com, fix the firmware to allow DMA to a unaligned
1634 * address! Pretty please?
1636 skb_reserve(skb, 2);
1640 dma_addr = pci_map_single(tp->pdev, skb->tail,
1641 PKT_BUF_SZ, PCI_DMA_FROMDEVICE);
1643 /* Since no card does 64 bit DAC, the high bits will never
1646 r = (struct rx_free *) (ring->ringBase + ring->lastWrite);
1647 typhoon_inc_rxfree_index(&ring->lastWrite, 1);
1649 r->physAddr = cpu_to_le32(dma_addr);
1651 rxb->dma_addr = dma_addr;
1653 /* Tell the card about it */
1655 indexes->rxBuffReady = cpu_to_le32(ring->lastWrite);
1660 typhoon_rx(struct typhoon *tp, struct basic_ring *rxRing, volatile u32 * ready,
1661 volatile u32 * cleared, int budget)
1664 struct sk_buff *skb, *new_skb;
1665 struct rxbuff_ent *rxb;
1666 dma_addr_t dma_addr;
1675 local_ready = le32_to_cpu(*ready);
1676 rxaddr = le32_to_cpu(*cleared);
1677 while(rxaddr != local_ready && budget > 0) {
1678 rx = (struct rx_desc *) (rxRing->ringBase + rxaddr);
1680 rxb = &tp->rxbuffers[idx];
1682 dma_addr = rxb->dma_addr;
1684 rxaddr += sizeof(struct rx_desc);
1685 rxaddr %= RX_ENTRIES * sizeof(struct rx_desc);
1687 if(rx->flags & TYPHOON_RX_ERROR) {
1688 typhoon_recycle_rx_skb(tp, idx);
1692 pkt_len = le16_to_cpu(rx->frameLen);
1694 if(pkt_len < rx_copybreak &&
1695 (new_skb = dev_alloc_skb(pkt_len + 2)) != NULL) {
1696 new_skb->dev = tp->dev;
1697 skb_reserve(new_skb, 2);
1698 pci_dma_sync_single_for_cpu(tp->pdev, dma_addr,
1700 PCI_DMA_FROMDEVICE);
1701 eth_copy_and_sum(new_skb, skb->tail, pkt_len, 0);
1702 pci_dma_sync_single_for_device(tp->pdev, dma_addr,
1704 PCI_DMA_FROMDEVICE);
1705 skb_put(new_skb, pkt_len);
1706 typhoon_recycle_rx_skb(tp, idx);
1709 skb_put(new_skb, pkt_len);
1710 pci_unmap_single(tp->pdev, dma_addr, PKT_BUF_SZ,
1711 PCI_DMA_FROMDEVICE);
1712 typhoon_alloc_rx_skb(tp, idx);
1714 new_skb->protocol = eth_type_trans(new_skb, tp->dev);
1715 csum_bits = rx->rxStatus & (TYPHOON_RX_IP_CHK_GOOD |
1716 TYPHOON_RX_UDP_CHK_GOOD | TYPHOON_RX_TCP_CHK_GOOD);
1718 (TYPHOON_RX_IP_CHK_GOOD | TYPHOON_RX_TCP_CHK_GOOD)
1720 (TYPHOON_RX_IP_CHK_GOOD | TYPHOON_RX_UDP_CHK_GOOD)) {
1721 new_skb->ip_summed = CHECKSUM_UNNECESSARY;
1723 new_skb->ip_summed = CHECKSUM_NONE;
1725 spin_lock(&tp->state_lock);
1726 if(tp->vlgrp != NULL && rx->rxStatus & TYPHOON_RX_VLAN)
1727 vlan_hwaccel_receive_skb(new_skb, tp->vlgrp,
1728 ntohl(rx->vlanTag) & 0xffff);
1730 netif_receive_skb(new_skb);
1731 spin_unlock(&tp->state_lock);
1733 tp->dev->last_rx = jiffies;
1737 *cleared = cpu_to_le32(rxaddr);
1743 typhoon_fill_free_ring(struct typhoon *tp)
1747 for(i = 0; i < RXENT_ENTRIES; i++) {
1748 struct rxbuff_ent *rxb = &tp->rxbuffers[i];
1751 if(typhoon_alloc_rx_skb(tp, i) < 0)
1757 typhoon_poll(struct net_device *dev, int *total_budget)
1759 struct typhoon *tp = (struct typhoon *) dev->priv;
1760 struct typhoon_indexes *indexes = tp->indexes;
1761 int orig_budget = *total_budget;
1762 int budget, work_done, done;
1765 if(!tp->awaiting_resp && indexes->respReady != indexes->respCleared)
1766 typhoon_process_response(tp, 0, NULL);
1768 if(le32_to_cpu(indexes->txLoCleared) != tp->txLoRing.lastRead)
1769 typhoon_tx_complete(tp, &tp->txLoRing, &indexes->txLoCleared);
1771 if(orig_budget > dev->quota)
1772 orig_budget = dev->quota;
1774 budget = orig_budget;
1778 if(indexes->rxHiCleared != indexes->rxHiReady) {
1779 work_done = typhoon_rx(tp, &tp->rxHiRing, &indexes->rxHiReady,
1780 &indexes->rxHiCleared, budget);
1781 budget -= work_done;
1784 if(indexes->rxLoCleared != indexes->rxLoReady) {
1785 work_done += typhoon_rx(tp, &tp->rxLoRing, &indexes->rxLoReady,
1786 &indexes->rxLoCleared, budget);
1790 *total_budget -= work_done;
1791 dev->quota -= work_done;
1793 if(work_done >= orig_budget)
1797 if(le32_to_cpu(indexes->rxBuffCleared) == tp->rxBuffRing.lastWrite) {
1798 /* rxBuff ring is empty, try to fill it. */
1799 typhoon_fill_free_ring(tp);
1803 netif_rx_complete(dev);
1804 writel(TYPHOON_INTR_NONE, tp->ioaddr + TYPHOON_REG_INTR_MASK);
1805 typhoon_post_pci_writes(tp->ioaddr);
1808 return (done ? 0 : 1);
1812 typhoon_interrupt(int irq, void *dev_instance, struct pt_regs *rgs)
1814 struct net_device *dev = (struct net_device *) dev_instance;
1815 struct typhoon *tp = dev->priv;
1816 void __iomem *ioaddr = tp->ioaddr;
1819 intr_status = readl(ioaddr + TYPHOON_REG_INTR_STATUS);
1820 if(!(intr_status & TYPHOON_INTR_HOST_INT))
1823 writel(intr_status, ioaddr + TYPHOON_REG_INTR_STATUS);
1825 if(netif_rx_schedule_prep(dev)) {
1826 writel(TYPHOON_INTR_ALL, ioaddr + TYPHOON_REG_INTR_MASK);
1827 typhoon_post_pci_writes(ioaddr);
1828 __netif_rx_schedule(dev);
1830 printk(KERN_ERR "%s: Error, poll already scheduled\n",
1837 typhoon_free_rx_rings(struct typhoon *tp)
1841 for(i = 0; i < RXENT_ENTRIES; i++) {
1842 struct rxbuff_ent *rxb = &tp->rxbuffers[i];
1844 pci_unmap_single(tp->pdev, rxb->dma_addr, PKT_BUF_SZ,
1845 PCI_DMA_FROMDEVICE);
1846 dev_kfree_skb(rxb->skb);
1853 typhoon_sleep(struct typhoon *tp, int state, u16 events)
1855 struct pci_dev *pdev = tp->pdev;
1856 void __iomem *ioaddr = tp->ioaddr;
1857 struct cmd_desc xp_cmd;
1860 INIT_COMMAND_WITH_RESPONSE(&xp_cmd, TYPHOON_CMD_ENABLE_WAKE_EVENTS);
1861 xp_cmd.parm1 = events;
1862 err = typhoon_issue_command(tp, 1, &xp_cmd, 0, NULL);
1864 printk(KERN_ERR "%s: typhoon_sleep(): wake events cmd err %d\n",
1869 INIT_COMMAND_NO_RESPONSE(&xp_cmd, TYPHOON_CMD_GOTO_SLEEP);
1870 err = typhoon_issue_command(tp, 1, &xp_cmd, 0, NULL);
1872 printk(KERN_ERR "%s: typhoon_sleep(): sleep cmd err %d\n",
1877 if(typhoon_wait_status(ioaddr, TYPHOON_STATUS_SLEEPING) < 0)
1880 /* Since we cannot monitor the status of the link while sleeping,
1881 * tell the world it went away.
1883 netif_carrier_off(tp->dev);
1885 pci_enable_wake(tp->pdev, state, 1);
1886 pci_disable_device(pdev);
1887 return pci_set_power_state(pdev, state);
1891 typhoon_wakeup(struct typhoon *tp, int wait_type)
1893 struct pci_dev *pdev = tp->pdev;
1894 void __iomem *ioaddr = tp->ioaddr;
1896 pci_set_power_state(pdev, 0);
1897 pci_restore_state(pdev, tp->pci_state);
1899 /* Post 2.x.x versions of the Sleep Image require a reset before
1900 * we can download the Runtime Image. But let's not make users of
1901 * the old firmware pay for the reset.
1903 writel(TYPHOON_BOOTCMD_WAKEUP, ioaddr + TYPHOON_REG_COMMAND);
1904 if(typhoon_wait_status(ioaddr, TYPHOON_STATUS_WAITING_FOR_HOST) < 0 ||
1905 (tp->capabilities & TYPHOON_WAKEUP_NEEDS_RESET))
1906 return typhoon_reset(ioaddr, wait_type);
1912 typhoon_start_runtime(struct typhoon *tp)
1914 struct net_device *dev = tp->dev;
1915 void __iomem *ioaddr = tp->ioaddr;
1916 struct cmd_desc xp_cmd;
1919 typhoon_init_rings(tp);
1920 typhoon_fill_free_ring(tp);
1922 err = typhoon_download_firmware(tp);
1924 printk("%s: cannot load runtime on 3XP\n", tp->name);
1928 if(typhoon_boot_3XP(tp, TYPHOON_STATUS_WAITING_FOR_BOOT) < 0) {
1929 printk("%s: cannot boot 3XP\n", tp->name);
1934 INIT_COMMAND_NO_RESPONSE(&xp_cmd, TYPHOON_CMD_SET_MAX_PKT_SIZE);
1935 xp_cmd.parm1 = cpu_to_le16(PKT_BUF_SZ);
1936 err = typhoon_issue_command(tp, 1, &xp_cmd, 0, NULL);
1940 INIT_COMMAND_NO_RESPONSE(&xp_cmd, TYPHOON_CMD_SET_MAC_ADDRESS);
1941 xp_cmd.parm1 = cpu_to_le16(ntohs(*(u16 *)&dev->dev_addr[0]));
1942 xp_cmd.parm2 = cpu_to_le32(ntohl(*(u32 *)&dev->dev_addr[2]));
1943 err = typhoon_issue_command(tp, 1, &xp_cmd, 0, NULL);
1947 /* Disable IRQ coalescing -- we can reenable it when 3Com gives
1948 * us some more information on how to control it.
1950 INIT_COMMAND_WITH_RESPONSE(&xp_cmd, TYPHOON_CMD_IRQ_COALESCE_CTRL);
1952 err = typhoon_issue_command(tp, 1, &xp_cmd, 0, NULL);
1956 INIT_COMMAND_NO_RESPONSE(&xp_cmd, TYPHOON_CMD_XCVR_SELECT);
1957 xp_cmd.parm1 = tp->xcvr_select;
1958 err = typhoon_issue_command(tp, 1, &xp_cmd, 0, NULL);
1962 INIT_COMMAND_NO_RESPONSE(&xp_cmd, TYPHOON_CMD_VLAN_TYPE_WRITE);
1963 xp_cmd.parm1 = __constant_cpu_to_le16(ETH_P_8021Q);
1964 err = typhoon_issue_command(tp, 1, &xp_cmd, 0, NULL);
1968 INIT_COMMAND_NO_RESPONSE(&xp_cmd, TYPHOON_CMD_SET_OFFLOAD_TASKS);
1969 spin_lock_bh(&tp->state_lock);
1970 xp_cmd.parm2 = tp->offload;
1971 xp_cmd.parm3 = tp->offload;
1972 err = typhoon_issue_command(tp, 1, &xp_cmd, 0, NULL);
1973 spin_unlock_bh(&tp->state_lock);
1977 typhoon_set_rx_mode(dev);
1979 INIT_COMMAND_NO_RESPONSE(&xp_cmd, TYPHOON_CMD_TX_ENABLE);
1980 err = typhoon_issue_command(tp, 1, &xp_cmd, 0, NULL);
1984 INIT_COMMAND_WITH_RESPONSE(&xp_cmd, TYPHOON_CMD_RX_ENABLE);
1985 err = typhoon_issue_command(tp, 1, &xp_cmd, 0, NULL);
1989 tp->card_state = Running;
1992 writel(TYPHOON_INTR_ENABLE_ALL, ioaddr + TYPHOON_REG_INTR_ENABLE);
1993 writel(TYPHOON_INTR_NONE, ioaddr + TYPHOON_REG_INTR_MASK);
1994 typhoon_post_pci_writes(ioaddr);
1999 typhoon_reset(ioaddr, WaitNoSleep);
2000 typhoon_free_rx_rings(tp);
2001 typhoon_init_rings(tp);
2006 typhoon_stop_runtime(struct typhoon *tp, int wait_type)
2008 struct typhoon_indexes *indexes = tp->indexes;
2009 struct transmit_ring *txLo = &tp->txLoRing;
2010 void __iomem *ioaddr = tp->ioaddr;
2011 struct cmd_desc xp_cmd;
2014 /* Disable interrupts early, since we can't schedule a poll
2015 * when called with !netif_running(). This will be posted
2016 * when we force the posting of the command.
2018 writel(TYPHOON_INTR_NONE, ioaddr + TYPHOON_REG_INTR_ENABLE);
2020 INIT_COMMAND_NO_RESPONSE(&xp_cmd, TYPHOON_CMD_RX_DISABLE);
2021 typhoon_issue_command(tp, 1, &xp_cmd, 0, NULL);
2023 /* Wait 1/2 sec for any outstanding transmits to occur
2024 * We'll cleanup after the reset if this times out.
2026 for(i = 0; i < TYPHOON_WAIT_TIMEOUT; i++) {
2027 if(indexes->txLoCleared == cpu_to_le32(txLo->lastWrite))
2029 udelay(TYPHOON_UDELAY);
2032 if(i == TYPHOON_WAIT_TIMEOUT)
2034 "%s: halt timed out waiting for Tx to complete\n",
2037 INIT_COMMAND_NO_RESPONSE(&xp_cmd, TYPHOON_CMD_TX_DISABLE);
2038 typhoon_issue_command(tp, 1, &xp_cmd, 0, NULL);
2040 /* save the statistics so when we bring the interface up again,
2041 * the values reported to userspace are correct.
2043 tp->card_state = Sleeping;
2045 typhoon_do_get_stats(tp);
2046 memcpy(&tp->stats_saved, &tp->stats, sizeof(struct net_device_stats));
2048 INIT_COMMAND_NO_RESPONSE(&xp_cmd, TYPHOON_CMD_HALT);
2049 typhoon_issue_command(tp, 1, &xp_cmd, 0, NULL);
2051 if(typhoon_wait_status(ioaddr, TYPHOON_STATUS_HALTED) < 0)
2052 printk(KERN_ERR "%s: timed out waiting for 3XP to halt\n",
2055 if(typhoon_reset(ioaddr, wait_type) < 0) {
2056 printk(KERN_ERR "%s: unable to reset 3XP\n", tp->name);
2060 /* cleanup any outstanding Tx packets */
2061 if(indexes->txLoCleared != cpu_to_le32(txLo->lastWrite)) {
2062 indexes->txLoCleared = cpu_to_le32(txLo->lastWrite);
2063 typhoon_clean_tx(tp, &tp->txLoRing, &indexes->txLoCleared);
2070 typhoon_tx_timeout(struct net_device *dev)
2072 struct typhoon *tp = (struct typhoon *) dev->priv;
2074 if(typhoon_reset(tp->ioaddr, WaitNoSleep) < 0) {
2075 printk(KERN_WARNING "%s: could not reset in tx timeout\n",
2080 /* If we ever start using the Hi ring, it will need cleaning too */
2081 typhoon_clean_tx(tp, &tp->txLoRing, &tp->indexes->txLoCleared);
2082 typhoon_free_rx_rings(tp);
2084 if(typhoon_start_runtime(tp) < 0) {
2085 printk(KERN_ERR "%s: could not start runtime in tx timeout\n",
2090 netif_wake_queue(dev);
2094 /* Reset the hardware, and turn off carrier to avoid more timeouts */
2095 typhoon_reset(tp->ioaddr, NoWait);
2096 netif_carrier_off(dev);
2100 typhoon_open(struct net_device *dev)
2102 struct typhoon *tp = (struct typhoon *) dev->priv;
2105 err = typhoon_wakeup(tp, WaitSleep);
2107 printk(KERN_ERR "%s: unable to wakeup device\n", dev->name);
2111 err = request_irq(dev->irq, &typhoon_interrupt, SA_SHIRQ,
2116 err = typhoon_start_runtime(tp);
2120 netif_start_queue(dev);
2124 free_irq(dev->irq, dev);
2127 if(typhoon_boot_3XP(tp, TYPHOON_STATUS_WAITING_FOR_HOST) < 0) {
2128 printk(KERN_ERR "%s: unable to reboot into sleep img\n",
2130 typhoon_reset(tp->ioaddr, NoWait);
2134 if(typhoon_sleep(tp, 3, 0) < 0)
2135 printk(KERN_ERR "%s: unable to go back to sleep\n", dev->name);
2142 typhoon_close(struct net_device *dev)
2144 struct typhoon *tp = (struct typhoon *) dev->priv;
2146 netif_stop_queue(dev);
2148 if(typhoon_stop_runtime(tp, WaitSleep) < 0)
2149 printk(KERN_ERR "%s: unable to stop runtime\n", dev->name);
2151 /* Make sure there is no irq handler running on a different CPU. */
2152 typhoon_synchronize_irq(dev->irq);
2153 free_irq(dev->irq, dev);
2155 typhoon_free_rx_rings(tp);
2156 typhoon_init_rings(tp);
2158 if(typhoon_boot_3XP(tp, TYPHOON_STATUS_WAITING_FOR_HOST) < 0)
2159 printk(KERN_ERR "%s: unable to boot sleep image\n", dev->name);
2161 if(typhoon_sleep(tp, 3, 0) < 0)
2162 printk(KERN_ERR "%s: unable to put card to sleep\n", dev->name);
2169 typhoon_resume(struct pci_dev *pdev)
2171 struct net_device *dev = pci_get_drvdata(pdev);
2172 struct typhoon *tp = (struct typhoon *) dev->priv;
2174 /* If we're down, resume when we are upped.
2176 if(!netif_running(dev))
2179 if(typhoon_wakeup(tp, WaitNoSleep) < 0) {
2180 printk(KERN_ERR "%s: critical: could not wake up in resume\n",
2185 if(typhoon_start_runtime(tp) < 0) {
2186 printk(KERN_ERR "%s: critical: could not start runtime in "
2187 "resume\n", dev->name);
2191 netif_device_attach(dev);
2192 netif_start_queue(dev);
2196 typhoon_reset(tp->ioaddr, NoWait);
2201 typhoon_suspend(struct pci_dev *pdev, u32 state)
2203 struct net_device *dev = pci_get_drvdata(pdev);
2204 struct typhoon *tp = (struct typhoon *) dev->priv;
2205 struct cmd_desc xp_cmd;
2207 /* If we're down, we're already suspended.
2209 if(!netif_running(dev))
2212 spin_lock_bh(&tp->state_lock);
2213 if(tp->vlgrp && tp->wol_events & TYPHOON_WAKE_MAGIC_PKT) {
2214 spin_unlock_bh(&tp->state_lock);
2215 printk(KERN_ERR "%s: cannot do WAKE_MAGIC with VLANS\n",
2219 spin_unlock_bh(&tp->state_lock);
2221 netif_device_detach(dev);
2223 if(typhoon_stop_runtime(tp, WaitNoSleep) < 0) {
2224 printk(KERN_ERR "%s: unable to stop runtime\n", dev->name);
2228 typhoon_free_rx_rings(tp);
2229 typhoon_init_rings(tp);
2231 if(typhoon_boot_3XP(tp, TYPHOON_STATUS_WAITING_FOR_HOST) < 0) {
2232 printk(KERN_ERR "%s: unable to boot sleep image\n", dev->name);
2236 INIT_COMMAND_NO_RESPONSE(&xp_cmd, TYPHOON_CMD_SET_MAC_ADDRESS);
2237 xp_cmd.parm1 = cpu_to_le16(ntohs(*(u16 *)&dev->dev_addr[0]));
2238 xp_cmd.parm2 = cpu_to_le32(ntohl(*(u32 *)&dev->dev_addr[2]));
2239 if(typhoon_issue_command(tp, 1, &xp_cmd, 0, NULL) < 0) {
2240 printk(KERN_ERR "%s: unable to set mac address in suspend\n",
2245 INIT_COMMAND_NO_RESPONSE(&xp_cmd, TYPHOON_CMD_SET_RX_FILTER);
2246 xp_cmd.parm1 = TYPHOON_RX_FILTER_DIRECTED | TYPHOON_RX_FILTER_BROADCAST;
2247 if(typhoon_issue_command(tp, 1, &xp_cmd, 0, NULL) < 0) {
2248 printk(KERN_ERR "%s: unable to set rx filter in suspend\n",
2253 if(typhoon_sleep(tp, state, tp->wol_events) < 0) {
2254 printk(KERN_ERR "%s: unable to put card to sleep\n", dev->name);
2261 typhoon_resume(pdev);
2266 typhoon_enable_wake(struct pci_dev *pdev, u32 state, int enable)
2268 return pci_enable_wake(pdev, state, enable);
2272 static int __devinit
2273 typhoon_init_one(struct pci_dev *pdev, const struct pci_device_id *ent)
2275 static int did_version = 0;
2276 struct net_device *dev;
2278 int card_id = (int) ent->driver_data;
2279 unsigned long ioaddr;
2280 void __iomem *ioaddr_mapped;
2282 dma_addr_t shared_dma;
2283 struct cmd_desc xp_cmd;
2284 struct resp_desc xp_resp[3];
2289 printk(KERN_INFO "%s", version);
2291 dev = alloc_etherdev(sizeof(*tp));
2293 printk(ERR_PFX "%s: unable to alloc new net device\n",
2298 SET_MODULE_OWNER(dev);
2299 SET_NETDEV_DEV(dev, &pdev->dev);
2301 err = pci_enable_device(pdev);
2303 printk(ERR_PFX "%s: unable to enable device\n",
2308 /* If we transitioned from D3->D0 in pci_enable_device(),
2309 * we lost our configuration and need to restore it to the
2310 * conditions at boot.
2312 pci_restore_state(pdev, NULL);
2314 err = pci_set_dma_mask(pdev, 0xffffffffULL);
2316 printk(ERR_PFX "%s: No usable DMA configuration\n",
2321 /* sanity checks, resource #1 is our mmio area
2323 if(!(pci_resource_flags(pdev, 1) & IORESOURCE_MEM)) {
2325 "%s: region #1 not a PCI MMIO resource, aborting\n",
2330 if(pci_resource_len(pdev, 1) < 128) {
2331 printk(ERR_PFX "%s: Invalid PCI MMIO region size, aborting\n",
2337 err = pci_request_regions(pdev, "typhoon");
2339 printk(ERR_PFX "%s: could not request regions\n",
2344 pci_set_master(pdev);
2347 /* map our MMIO region
2349 ioaddr = pci_resource_start(pdev, 1);
2350 ioaddr_mapped = ioremap(ioaddr, 128);
2351 if (!ioaddr_mapped) {
2352 printk(ERR_PFX "%s: cannot remap MMIO, aborting\n",
2355 goto error_out_regions;
2358 /* allocate pci dma space for rx and tx descriptor rings
2360 shared = pci_alloc_consistent(pdev, sizeof(struct typhoon_shared),
2363 printk(ERR_PFX "%s: could not allocate DMA memory\n",
2366 goto error_out_remap;
2369 dev->irq = pdev->irq;
2371 tp->shared = (struct typhoon_shared *) shared;
2372 tp->shared_dma = shared_dma;
2375 tp->ioaddr = ioaddr_mapped;
2376 tp->tx_ioaddr = ioaddr_mapped;
2379 /* need to be able to restore PCI state after a suspend */
2380 pci_save_state(pdev, tp->pci_state);
2383 * 1) Reset the adapter to clear any bad juju
2384 * 2) Reload the sleep image
2385 * 3) Boot the sleep image
2386 * 4) Get the hardware address.
2387 * 5) Put the card to sleep.
2389 if (typhoon_reset(ioaddr_mapped, WaitSleep) < 0) {
2390 printk(ERR_PFX "%s: could not reset 3XP\n", pci_name(pdev));
2395 /* dev->name is not valid until we register, but we need to
2396 * use some common routines to initialize the card. So that those
2397 * routines print the right name, we keep our oun pointer to the name
2399 tp->name = pci_name(pdev);
2401 typhoon_init_interface(tp);
2402 typhoon_init_rings(tp);
2404 if(typhoon_boot_3XP(tp, TYPHOON_STATUS_WAITING_FOR_HOST) < 0) {
2405 printk(ERR_PFX "%s: cannot boot 3XP sleep image\n",
2408 goto error_out_reset;
2411 INIT_COMMAND_WITH_RESPONSE(&xp_cmd, TYPHOON_CMD_READ_MAC_ADDRESS);
2412 if(typhoon_issue_command(tp, 1, &xp_cmd, 1, xp_resp) < 0) {
2413 printk(ERR_PFX "%s: cannot read MAC address\n",
2416 goto error_out_reset;
2419 *(u16 *)&dev->dev_addr[0] = htons(le16_to_cpu(xp_resp[0].parm1));
2420 *(u32 *)&dev->dev_addr[2] = htonl(le32_to_cpu(xp_resp[0].parm2));
2422 if(!is_valid_ether_addr(dev->dev_addr)) {
2423 printk(ERR_PFX "%s: Could not obtain valid ethernet address, "
2424 "aborting\n", pci_name(pdev));
2425 goto error_out_reset;
2428 /* Read the Sleep Image version last, so the response is valid
2429 * later when we print out the version reported.
2431 INIT_COMMAND_WITH_RESPONSE(&xp_cmd, TYPHOON_CMD_READ_VERSIONS);
2432 if(typhoon_issue_command(tp, 1, &xp_cmd, 3, xp_resp) < 0) {
2433 printk(ERR_PFX "%s: Could not get Sleep Image version\n",
2435 goto error_out_reset;
2438 tp->capabilities = typhoon_card_info[card_id].capabilities;
2439 tp->xcvr_select = TYPHOON_XCVR_AUTONEG;
	/* Typhoon 1.0 Sleep Images return one response descriptor to the
	 * READ_VERSIONS command. Those versions are OK after waking up
	 * from sleep without needing a reset. Typhoon 1.1+ Sleep Images
	 * seem to need a little extra help to get started. Since we don't
	 * know how to nudge it along, just kick it.
	 */
2447 if(xp_resp[0].numDesc != 0)
2448 tp->capabilities |= TYPHOON_WAKEUP_NEEDS_RESET;
2450 if(typhoon_sleep(tp, 3, 0) < 0) {
2451 printk(ERR_PFX "%s: cannot put adapter to sleep\n",
2454 goto error_out_reset;
2457 /* The chip-specific entries in the device structure. */
2458 dev->open = typhoon_open;
2459 dev->hard_start_xmit = typhoon_start_tx;
2460 dev->stop = typhoon_close;
2461 dev->set_multicast_list = typhoon_set_rx_mode;
2462 dev->tx_timeout = typhoon_tx_timeout;
2463 dev->poll = typhoon_poll;
2465 dev->watchdog_timeo = TX_TIMEOUT;
2466 dev->get_stats = typhoon_get_stats;
2467 dev->set_mac_address = typhoon_set_mac_address;
2468 dev->do_ioctl = typhoon_ioctl;
2469 dev->vlan_rx_register = typhoon_vlan_rx_register;
2470 dev->vlan_rx_kill_vid = typhoon_vlan_rx_kill_vid;
	/* We can handle scatter gather, up to 16 entries, and
	 * we can do IP checksumming (only version 4, doh...)
	 */
2475 dev->features |= NETIF_F_SG | NETIF_F_IP_CSUM;
2476 dev->features |= NETIF_F_HW_VLAN_TX | NETIF_F_HW_VLAN_RX;
2477 dev->features |= NETIF_F_TSO;
2479 if(register_netdev(dev) < 0)
2480 goto error_out_reset;
2482 /* fixup our local name */
2483 tp->name = dev->name;
2485 pci_set_drvdata(pdev, dev);
2487 printk(KERN_INFO "%s: %s at 0x%lx, ",
2488 dev->name, typhoon_card_info[card_id].name, ioaddr);
2489 for(i = 0; i < 5; i++)
2490 printk("%2.2x:", dev->dev_addr[i]);
2491 printk("%2.2x\n", dev->dev_addr[i]);
	/* xp_resp still contains the response to the READ_VERSIONS command.
	 * For debugging, let the user know what version he has.
	 */
2496 if(xp_resp[0].numDesc == 0) {
		/* This is the Typhoon 1.0 type Sleep Image, last 16 bits
		 * of version is Month/Day of build.
		 */
2500 u16 monthday = le32_to_cpu(xp_resp[0].parm2) & 0xffff;
2501 printk(KERN_INFO "%s: Typhoon 1.0 Sleep Image built "
2502 "%02u/%02u/2000\n", dev->name, monthday >> 8,
2504 } else if(xp_resp[0].numDesc == 2) {
		/* This is the Typhoon 1.1+ type Sleep Image */
2507 u32 sleep_ver = le32_to_cpu(xp_resp[0].parm2);
2508 u8 *ver_string = (u8 *) &xp_resp[1];
2510 printk(KERN_INFO "%s: Typhoon 1.1+ Sleep Image version "
2511 "%u.%u.%u.%u %s\n", dev->name, HIPQUAD(sleep_ver),
2514 printk(KERN_WARNING "%s: Unknown Sleep Image version "
2515 "(%u:%04x)\n", dev->name, xp_resp[0].numDesc,
2516 le32_to_cpu(xp_resp[0].parm2));
2522 typhoon_reset(ioaddr_mapped, NoWait);
2525 pci_free_consistent(pdev, sizeof(struct typhoon_shared),
2526 shared, shared_dma);
2528 iounmap(ioaddr_mapped);
2530 pci_release_regions(pdev);
2537 static void __devexit
2538 typhoon_remove_one(struct pci_dev *pdev)
2540 struct net_device *dev = pci_get_drvdata(pdev);
2541 struct typhoon *tp = (struct typhoon *) (dev->priv);
2543 unregister_netdev(dev);
2544 pci_set_power_state(pdev, 0);
2545 pci_restore_state(pdev, tp->pci_state);
2546 typhoon_reset(tp->ioaddr, NoWait);
2547 iounmap(tp->ioaddr);
2548 pci_free_consistent(pdev, sizeof(struct typhoon_shared),
2549 tp->shared, tp->shared_dma);
2550 pci_release_regions(pdev);
2551 pci_disable_device(pdev);
2552 pci_set_drvdata(pdev, NULL);
2556 static struct pci_driver typhoon_driver = {
2557 .name = DRV_MODULE_NAME,
2558 .id_table = typhoon_pci_tbl,
2559 .probe = typhoon_init_one,
2560 .remove = __devexit_p(typhoon_remove_one),
2562 .suspend = typhoon_suspend,
2563 .resume = typhoon_resume,
2564 .enable_wake = typhoon_enable_wake,
2571 return pci_module_init(&typhoon_driver);
2575 typhoon_cleanup(void)
2577 pci_unregister_driver(&typhoon_driver);
/* Register the module's load/unload handlers with the kernel. */
module_init(typhoon_init);
module_exit(typhoon_cleanup);