VServer 1.9.2 (patch-2.6.8.1-vs1.9.2.diff)
[linux-2.6.git] / drivers / net / typhoon.c
1 /* typhoon.c: A Linux Ethernet device driver for 3Com 3CR990 family of NICs */
2 /*
3         Written 2002-2003 by David Dillow <dave@thedillows.org>
4         Based on code written 1998-2000 by Donald Becker <becker@scyld.com> and
5         Linux 2.2.x driver by David P. McLean <davidpmclean@yahoo.com>.
6
7         This software may be used and distributed according to the terms of
8         the GNU General Public License (GPL), incorporated herein by reference.
9         Drivers based on or derived from this code fall under the GPL and must
10         retain the authorship, copyright and license notice.  This file is not
11         a complete program and may only be used when the entire operating
12         system is licensed under the GPL.
13
14         This software is available on a public web site. It may enable
15         cryptographic capabilities of the 3Com hardware, and may be
16         exported from the United States under License Exception "TSU"
17         pursuant to 15 C.F.R. Section 740.13(e).
18
19         This work was funded by the National Library of Medicine under
20         the Department of Energy project number 0274DD06D1 and NLM project
21         number Y1-LM-2015-01.
22
23         This driver is designed for the 3Com 3CR990 Family of cards with the
24         3XP Processor. It has been tested on x86 and sparc64.
25
26         KNOWN ISSUES:
27         *) The current firmware always strips the VLAN tag off, even if
28                 we tell it not to. You should filter VLANs at the switch
29                 as a workaround (good practice in any event) until we can
30                 get this fixed.
31         *) Cannot DMA Rx packets to a 2 byte aligned address. Also firmware
32                 issue. Hopefully 3Com will fix it.
33         *) Waiting for a command response takes 8ms due to non-preemptable
34                 polling. Only significant for getting stats and creating
35         SAs, but an ugly wart nevertheless.
36         *) I've not tested multicast. I think it works, but reports welcome.
37         *) Doesn't do IPSEC offloading. Yet. Keep yer pants on, it's coming.
38 */
39
40 /* Set the copy breakpoint for the copy-only-tiny-frames scheme.
41  * Setting to > 1518 effectively disables this feature.
42  */
43 static int rx_copybreak = 200;
44
45 /* end user-configurable values */
46
47 /* Maximum number of multicast addresses to filter (vs. rx-all-multicast).
48  */
49 static const int multicast_filter_limit = 32;
50
51 /* Operational parameters that are set at compile time. */
52
53 /* Keep the ring sizes a power of two for compile efficiency.
54  * The compiler will convert <unsigned>'%'<2^N> into a bit mask.
55  * Making the Tx ring too large decreases the effectiveness of channel
56  * bonding and packet priority.
57  * There are no ill effects from too-large receive rings.
58  *
59  * We don't currently use the Hi Tx ring so, don't make it very big.
60  *
61  * Beware that if we start using the Hi Tx ring, we will need to change
62  * typhoon_num_free_tx() and typhoon_tx_complete() to account for that.
63  */
64 #define TXHI_ENTRIES            2
65 #define TXLO_ENTRIES            128
66 #define RX_ENTRIES              32
67 #define COMMAND_ENTRIES         16
68 #define RESPONSE_ENTRIES        32
69
70 #define COMMAND_RING_SIZE       (COMMAND_ENTRIES * sizeof(struct cmd_desc))
71 #define RESPONSE_RING_SIZE      (RESPONSE_ENTRIES * sizeof(struct resp_desc))
72
73 /* The 3XP will preload and remove 64 entries from the free buffer
74  * list, and we need one entry to keep the ring from wrapping, so 
75  * to keep this a power of two, we use 128 entries.
76  */
77 #define RXFREE_ENTRIES          128
78 #define RXENT_ENTRIES           (RXFREE_ENTRIES - 1)
79
80 /* Operational parameters that usually are not changed. */
81
82 /* Time in jiffies before concluding the transmitter is hung. */
83 #define TX_TIMEOUT  (2*HZ)
84
85 #define PKT_BUF_SZ              1536
86
87 #define DRV_MODULE_NAME         "typhoon"
88 #define DRV_MODULE_VERSION      "1.5.3"
89 #define DRV_MODULE_RELDATE      "03/12/15"
90 #define PFX                     DRV_MODULE_NAME ": "
91 #define ERR_PFX                 KERN_ERR PFX
92
93 #include <linux/module.h>
94 #include <linux/kernel.h>
95 #include <linux/string.h>
96 #include <linux/timer.h>
97 #include <linux/errno.h>
98 #include <linux/ioport.h>
99 #include <linux/slab.h>
100 #include <linux/interrupt.h>
101 #include <linux/pci.h>
102 #include <linux/netdevice.h>
103 #include <linux/etherdevice.h>
104 #include <linux/skbuff.h>
105 #include <linux/init.h>
106 #include <linux/delay.h>
107 #include <linux/ethtool.h>
108 #include <linux/if_vlan.h>
109 #include <linux/crc32.h>
110 #include <asm/processor.h>
111 #include <asm/bitops.h>
112 #include <asm/io.h>
113 #include <asm/uaccess.h>
114 #include <linux/in6.h>
115 #include <asm/checksum.h>
116 #include <linux/version.h>
117
118 #include "typhoon.h"
119 #include "typhoon-firmware.h"
120
121 static char version[] __devinitdata =
122     "typhoon.c: version " DRV_MODULE_VERSION " (" DRV_MODULE_RELDATE ")\n";
123
124 MODULE_AUTHOR("David Dillow <dave@thedillows.org>");
125 MODULE_LICENSE("GPL");
126 MODULE_DESCRIPTION("3Com Typhoon Family (3C990, 3CR990, and variants)");
127 MODULE_PARM(rx_copybreak, "i");
128
129 #if defined(NETIF_F_TSO) && MAX_SKB_FRAGS > 32
130 #warning Typhoon only supports 32 entries in its SG list for TSO, disabling TSO
131 #undef NETIF_F_TSO
132 #endif
133
134 #if TXLO_ENTRIES <= (2 * MAX_SKB_FRAGS)
135 #error TX ring too small!
136 #endif
137
138 struct typhoon_card_info {
139         char *name;
140         int capabilities;
141 };
142
143 #define TYPHOON_CRYPTO_NONE             0x00
144 #define TYPHOON_CRYPTO_DES              0x01
145 #define TYPHOON_CRYPTO_3DES             0x02
146 #define TYPHOON_CRYPTO_VARIABLE         0x04
147 #define TYPHOON_FIBER                   0x08
148 #define TYPHOON_WAKEUP_NEEDS_RESET      0x10
149
150 enum typhoon_cards {
151         TYPHOON_TX = 0, TYPHOON_TX95, TYPHOON_TX97, TYPHOON_SVR,
152         TYPHOON_SVR95, TYPHOON_SVR97, TYPHOON_TXM, TYPHOON_BSVR,
153         TYPHOON_FX95, TYPHOON_FX97, TYPHOON_FX95SVR, TYPHOON_FX97SVR,
154         TYPHOON_FXM,
155 };
156
157 /* directly indexed by enum typhoon_cards, above */
158 static struct typhoon_card_info typhoon_card_info[] __devinitdata = {
159         { "3Com Typhoon (3C990-TX)",
160                 TYPHOON_CRYPTO_NONE},
161         { "3Com Typhoon (3CR990-TX-95)",
162                 TYPHOON_CRYPTO_DES},
163         { "3Com Typhoon (3CR990-TX-97)",
164                 TYPHOON_CRYPTO_DES | TYPHOON_CRYPTO_3DES},
165         { "3Com Typhoon (3C990SVR)",
166                 TYPHOON_CRYPTO_NONE},
167         { "3Com Typhoon (3CR990SVR95)",
168                 TYPHOON_CRYPTO_DES},
169         { "3Com Typhoon (3CR990SVR97)",
170                 TYPHOON_CRYPTO_DES | TYPHOON_CRYPTO_3DES},
171         { "3Com Typhoon2 (3C990B-TX-M)",
172                 TYPHOON_CRYPTO_VARIABLE},
173         { "3Com Typhoon2 (3C990BSVR)",
174                 TYPHOON_CRYPTO_VARIABLE},
175         { "3Com Typhoon (3CR990-FX-95)",
176                 TYPHOON_CRYPTO_DES | TYPHOON_FIBER},
177         { "3Com Typhoon (3CR990-FX-97)",
178                 TYPHOON_CRYPTO_DES | TYPHOON_CRYPTO_3DES | TYPHOON_FIBER},
179         { "3Com Typhoon (3CR990-FX-95 Server)",
180                 TYPHOON_CRYPTO_DES | TYPHOON_FIBER},
181         { "3Com Typhoon (3CR990-FX-97 Server)",
182                 TYPHOON_CRYPTO_DES | TYPHOON_CRYPTO_3DES | TYPHOON_FIBER},
183         { "3Com Typhoon2 (3C990B-FX-97)",
184                 TYPHOON_CRYPTO_VARIABLE | TYPHOON_FIBER},
185 };
186
187 /* Notes on the new subsystem numbering scheme:
188  * bits 0-1 indicate crypto capabilities: (0) variable, (1) DES, or (2) 3DES
189  * bit 4 indicates if this card has secured firmware (we don't support it)
190  * bit 8 indicates if this is a (0) copper or (1) fiber card
191  * bits 12-16 indicate card type: (0) client and (1) server
192  */
193 static struct pci_device_id typhoon_pci_tbl[] = {
194         { PCI_VENDOR_ID_3COM, PCI_DEVICE_ID_3COM_3CR990,
195           PCI_ANY_ID, PCI_ANY_ID, 0, 0,TYPHOON_TX },
196         { PCI_VENDOR_ID_3COM, PCI_DEVICE_ID_3COM_3CR990_TX_95,
197           PCI_ANY_ID, PCI_ANY_ID, 0, 0, TYPHOON_TX95 },
198         { PCI_VENDOR_ID_3COM, PCI_DEVICE_ID_3COM_3CR990_TX_97,
199           PCI_ANY_ID, PCI_ANY_ID, 0, 0, TYPHOON_TX97 },
200         { PCI_VENDOR_ID_3COM, PCI_DEVICE_ID_3COM_3CR990B,
201           PCI_ANY_ID, 0x1000, 0, 0, TYPHOON_TXM },
202         { PCI_VENDOR_ID_3COM, PCI_DEVICE_ID_3COM_3CR990B,
203           PCI_ANY_ID, 0x1102, 0, 0, TYPHOON_FXM },
204         { PCI_VENDOR_ID_3COM, PCI_DEVICE_ID_3COM_3CR990B,
205           PCI_ANY_ID, 0x2000, 0, 0, TYPHOON_BSVR },
206         { PCI_VENDOR_ID_3COM, PCI_DEVICE_ID_3COM_3CR990_FX,
207           PCI_ANY_ID, 0x1101, 0, 0, TYPHOON_FX95 },
208         { PCI_VENDOR_ID_3COM, PCI_DEVICE_ID_3COM_3CR990_FX,
209           PCI_ANY_ID, 0x1102, 0, 0, TYPHOON_FX97 },
210         { PCI_VENDOR_ID_3COM, PCI_DEVICE_ID_3COM_3CR990_FX,
211           PCI_ANY_ID, 0x2101, 0, 0, TYPHOON_FX95SVR },
212         { PCI_VENDOR_ID_3COM, PCI_DEVICE_ID_3COM_3CR990_FX,
213           PCI_ANY_ID, 0x2102, 0, 0, TYPHOON_FX97SVR },
214         { PCI_VENDOR_ID_3COM, PCI_DEVICE_ID_3COM_3CR990SVR95,
215           PCI_ANY_ID, PCI_ANY_ID, 0, 0, TYPHOON_SVR95 },
216         { PCI_VENDOR_ID_3COM, PCI_DEVICE_ID_3COM_3CR990SVR97,
217           PCI_ANY_ID, PCI_ANY_ID, 0, 0, TYPHOON_SVR97 },
218         { PCI_VENDOR_ID_3COM, PCI_DEVICE_ID_3COM_3CR990SVR,
219           PCI_ANY_ID, PCI_ANY_ID, 0, 0, TYPHOON_SVR },
220         { 0, }
221 };
222 MODULE_DEVICE_TABLE(pci, typhoon_pci_tbl);
223
224 /* Define the shared memory area
225  * Align everything the 3XP will normally be using.
226  * We'll need to move/align txHi if we start using that ring.
227  */
228 #define __3xp_aligned   ____cacheline_aligned
229 struct typhoon_shared {
230         struct typhoon_interface        iface;
231         struct typhoon_indexes          indexes                 __3xp_aligned;
232         struct tx_desc                  txLo[TXLO_ENTRIES]      __3xp_aligned;
233         struct rx_desc                  rxLo[RX_ENTRIES]        __3xp_aligned;
234         struct rx_desc                  rxHi[RX_ENTRIES]        __3xp_aligned;
235         struct cmd_desc                 cmd[COMMAND_ENTRIES]    __3xp_aligned;
236         struct resp_desc                resp[RESPONSE_ENTRIES]  __3xp_aligned;
237         struct rx_free                  rxBuff[RXFREE_ENTRIES]  __3xp_aligned;
238         u32                             zeroWord;
239         struct tx_desc                  txHi[TXHI_ENTRIES];
240 } __attribute__ ((packed));
241
242 struct rxbuff_ent {
243         struct sk_buff *skb;
244         dma_addr_t      dma_addr;
245 };
246
247 struct typhoon {
248         /* Tx cache line section */
249         struct transmit_ring    txLoRing        ____cacheline_aligned;  
250         struct pci_dev *        tx_pdev;
251         unsigned long           tx_ioaddr;
252         u32                     txlo_dma_addr;
253
254         /* Irq/Rx cache line section */
255         unsigned long           ioaddr          ____cacheline_aligned;
256         struct typhoon_indexes *indexes;
257         u8                      awaiting_resp;
258         u8                      duplex;
259         u8                      speed;
260         u8                      card_state;
261         struct basic_ring       rxLoRing;
262         struct pci_dev *        pdev;
263         struct net_device *     dev;
264         spinlock_t              state_lock;
265         struct vlan_group *     vlgrp;
266         struct basic_ring       rxHiRing;
267         struct basic_ring       rxBuffRing;
268         struct rxbuff_ent       rxbuffers[RXENT_ENTRIES];
269
270         /* general section */
271         spinlock_t              command_lock    ____cacheline_aligned;
272         struct basic_ring       cmdRing;
273         struct basic_ring       respRing;
274         struct net_device_stats stats;
275         struct net_device_stats stats_saved;
276         const char *            name;
277         struct typhoon_shared * shared;
278         dma_addr_t              shared_dma;
279         u16                     xcvr_select;
280         u16                     wol_events;
281         u32                     offload;
282         u32                     pci_state[16];
283
284         /* unused stuff (future use) */
285         int                     capabilities;
286         struct transmit_ring    txHiRing;
287 };
288
289 enum completion_wait_values {
290         NoWait = 0, WaitNoSleep, WaitSleep,
291 };
292
293 /* These are the values for the typhoon.card_state variable.
294  * These determine where the statistics will come from in get_stats().
295  * The sleep image does not support the statistics we need.
296  */
297 enum state_values {
298         Sleeping = 0, Running,
299 };
300
301 /* PCI writes are not guaranteed to be posted in order, but outstanding writes
302  * cannot pass a read, so this forces current writes to post.
303  */
304 #define typhoon_post_pci_writes(x) \
305         do { readl(x + TYPHOON_REG_HEARTBEAT); } while(0)
306
307 /* We'll wait up to six seconds for a reset, and half a second normally.
308  */
309 #define TYPHOON_UDELAY                  50
310 #define TYPHOON_RESET_TIMEOUT_SLEEP     (6 * HZ)
311 #define TYPHOON_RESET_TIMEOUT_NOSLEEP   ((6 * 1000000) / TYPHOON_UDELAY)
312 #define TYPHOON_WAIT_TIMEOUT            ((1000000 / 2) / TYPHOON_UDELAY)
313
314 #if LINUX_VERSION_CODE < KERNEL_VERSION(2, 5, 28)
315 #define typhoon_synchronize_irq(x) synchronize_irq()
316 #else
317 #define typhoon_synchronize_irq(x) synchronize_irq(x)
318 #endif
319
320 #if defined(NETIF_F_TSO)
321 #define skb_tso_size(x)         (skb_shinfo(x)->tso_size)
322 #define TSO_NUM_DESCRIPTORS     2
323 #define TSO_OFFLOAD_ON          TYPHOON_OFFLOAD_TCP_SEGMENT
324 #else
325 #define NETIF_F_TSO             0
326 #define skb_tso_size(x)         0
327 #define TSO_NUM_DESCRIPTORS     0
328 #define TSO_OFFLOAD_ON          0
329 #endif
330
331 static inline void
332 typhoon_inc_index(u32 *index, const int count, const int num_entries)
333 {
334         /* Increment a ring index -- we can use this for all rings execept
335          * the Rx rings, as they use different size descriptors
336          * otherwise, everything is the same size as a cmd_desc
337          */
338         *index += count * sizeof(struct cmd_desc);
339         *index %= num_entries * sizeof(struct cmd_desc);
340 }
341
342 static inline void
343 typhoon_inc_cmd_index(u32 *index, const int count)
344 {
345         typhoon_inc_index(index, count, COMMAND_ENTRIES);
346 }
347
348 static inline void
349 typhoon_inc_resp_index(u32 *index, const int count)
350 {
351         typhoon_inc_index(index, count, RESPONSE_ENTRIES);
352 }
353
354 static inline void
355 typhoon_inc_rxfree_index(u32 *index, const int count)
356 {
357         typhoon_inc_index(index, count, RXFREE_ENTRIES);
358 }
359
360 static inline void
361 typhoon_inc_tx_index(u32 *index, const int count)
362 {
363         /* if we start using the Hi Tx ring, this needs updateing */
364         typhoon_inc_index(index, count, TXLO_ENTRIES);
365 }
366
367 static inline void
368 typhoon_inc_rx_index(u32 *index, const int count)
369 {
370         /* sizeof(struct rx_desc) != sizeof(struct cmd_desc) */
371         *index += count * sizeof(struct rx_desc);
372         *index %= RX_ENTRIES * sizeof(struct rx_desc);
373 }
374
375 static int
376 typhoon_reset(unsigned long ioaddr, int wait_type)
377 {
378         int i, err = 0;
379         int timeout;
380
381         if(wait_type == WaitNoSleep)
382                 timeout = TYPHOON_RESET_TIMEOUT_NOSLEEP;
383         else
384                 timeout = TYPHOON_RESET_TIMEOUT_SLEEP;
385
386         writel(TYPHOON_INTR_ALL, ioaddr + TYPHOON_REG_INTR_MASK);
387         writel(TYPHOON_INTR_ALL, ioaddr + TYPHOON_REG_INTR_STATUS);
388
389         writel(TYPHOON_RESET_ALL, ioaddr + TYPHOON_REG_SOFT_RESET);
390         typhoon_post_pci_writes(ioaddr);
391         udelay(1);
392         writel(TYPHOON_RESET_NONE, ioaddr + TYPHOON_REG_SOFT_RESET);
393
394         if(wait_type != NoWait) {
395                 for(i = 0; i < timeout; i++) {
396                         if(readl(ioaddr + TYPHOON_REG_STATUS) ==
397                            TYPHOON_STATUS_WAITING_FOR_HOST)
398                                 goto out;
399
400                         if(wait_type == WaitSleep) {
401                                 set_current_state(TASK_UNINTERRUPTIBLE);
402                                 schedule_timeout(1);
403                         } else
404                                 udelay(TYPHOON_UDELAY);
405                 }
406
407                 err = -ETIMEDOUT;
408         }
409
410 out:
411         writel(TYPHOON_INTR_ALL, ioaddr + TYPHOON_REG_INTR_MASK);
412         writel(TYPHOON_INTR_ALL, ioaddr + TYPHOON_REG_INTR_STATUS);
413         udelay(100);
414         return err;
415
416         /* The 3XP seems to need a little extra time to complete the load
417          * of the sleep image before we can reliably boot it. Failure to
418          * do this occasionally results in a hung adapter after boot in
419          * typhoon_init_one() while trying to read the MAC address or
420          * putting the card to sleep. 3Com's driver waits 5ms, but
421          * that seems to be overkill -- with a 50usec delay, it survives
422          * 35000 typhoon_init_one() calls, where it only make it 25-100
423          * without it.
424          *
425          * As it turns out, still occasionally getting a hung adapter,
426          * so I'm bumping it to 100us.
427          */
428 }
429
430 static int
431 typhoon_wait_status(unsigned long ioaddr, u32 wait_value)
432 {
433         int i, err = 0;
434
435         for(i = 0; i < TYPHOON_WAIT_TIMEOUT; i++) {
436                 if(readl(ioaddr + TYPHOON_REG_STATUS) == wait_value)
437                         goto out;
438                 udelay(TYPHOON_UDELAY);
439         }
440
441         err = -ETIMEDOUT;
442
443 out:
444         return err;
445 }
446
447 static inline void
448 typhoon_media_status(struct net_device *dev, struct resp_desc *resp)
449 {
450         if(resp->parm1 & TYPHOON_MEDIA_STAT_NO_LINK)
451                 netif_carrier_off(dev);
452         else
453                 netif_carrier_on(dev);
454 }
455
456 static inline void
457 typhoon_hello(struct typhoon *tp)
458 {
459         struct basic_ring *ring = &tp->cmdRing;
460         struct cmd_desc *cmd;
461
462         /* We only get a hello request if we've not sent anything to the
463          * card in a long while. If the lock is held, then we're in the
464          * process of issuing a command, so we don't need to respond.
465          */
466         if(spin_trylock(&tp->command_lock)) {
467                 cmd = (struct cmd_desc *)(ring->ringBase + ring->lastWrite);
468                 typhoon_inc_cmd_index(&ring->lastWrite, 1);
469
470                 INIT_COMMAND_NO_RESPONSE(cmd, TYPHOON_CMD_HELLO_RESP);
471                 smp_wmb();
472                 writel(ring->lastWrite, tp->ioaddr + TYPHOON_REG_CMD_READY);
473                 spin_unlock(&tp->command_lock);
474         }
475 }
476
477 static int
478 typhoon_process_response(struct typhoon *tp, int resp_size,
479                                 struct resp_desc *resp_save)
480 {
481         struct typhoon_indexes *indexes = tp->indexes;
482         struct resp_desc *resp;
483         u8 *base = tp->respRing.ringBase;
484         int count, len, wrap_len;
485         u32 cleared;
486         u32 ready;
487
488         cleared = le32_to_cpu(indexes->respCleared);
489         ready = le32_to_cpu(indexes->respReady);
490         while(cleared != ready) {
491                 resp = (struct resp_desc *)(base + cleared);
492                 count = resp->numDesc + 1;
493                 if(resp_save && resp->seqNo) {
494                         if(count > resp_size) {
495                                 resp_save->flags = TYPHOON_RESP_ERROR;
496                                 goto cleanup;
497                         }
498
499                         wrap_len = 0;
500                         len = count * sizeof(*resp);
501                         if(unlikely(cleared + len > RESPONSE_RING_SIZE)) {
502                                 wrap_len = cleared + len - RESPONSE_RING_SIZE;
503                                 len = RESPONSE_RING_SIZE - cleared;
504                         }
505
506                         memcpy(resp_save, resp, len);
507                         if(unlikely(wrap_len)) {
508                                 resp_save += len / sizeof(*resp);
509                                 memcpy(resp_save, base, wrap_len);
510                         }
511
512                         resp_save = NULL;
513                 } else if(resp->cmd == TYPHOON_CMD_READ_MEDIA_STATUS) {
514                         typhoon_media_status(tp->dev, resp);
515                 } else if(resp->cmd == TYPHOON_CMD_HELLO_RESP) {
516                         typhoon_hello(tp);
517                 } else {
518                         printk(KERN_ERR "%s: dumping unexpected response "
519                                "0x%04x:%d:0x%02x:0x%04x:%08x:%08x\n",
520                                tp->name, le16_to_cpu(resp->cmd),
521                                resp->numDesc, resp->flags,
522                                le16_to_cpu(resp->parm1),
523                                le32_to_cpu(resp->parm2),
524                                le32_to_cpu(resp->parm3));
525                 }
526
527 cleanup:
528                 typhoon_inc_resp_index(&cleared, count);
529         }
530
531         indexes->respCleared = cpu_to_le32(cleared);
532         wmb();
533         return (resp_save == NULL);
534 }
535
536 static inline int
537 typhoon_num_free(int lastWrite, int lastRead, int ringSize)
538 {
539         /* this works for all descriptors but rx_desc, as they are a
540          * different size than the cmd_desc -- everyone else is the same
541          */
542         lastWrite /= sizeof(struct cmd_desc);
543         lastRead /= sizeof(struct cmd_desc);
544         return (ringSize + lastRead - lastWrite - 1) % ringSize;
545 }
546
547 static inline int
548 typhoon_num_free_cmd(struct typhoon *tp)
549 {
550         int lastWrite = tp->cmdRing.lastWrite;
551         int cmdCleared = le32_to_cpu(tp->indexes->cmdCleared);
552
553         return typhoon_num_free(lastWrite, cmdCleared, COMMAND_ENTRIES);
554 }
555
556 static inline int
557 typhoon_num_free_resp(struct typhoon *tp)
558 {
559         int respReady = le32_to_cpu(tp->indexes->respReady);
560         int respCleared = le32_to_cpu(tp->indexes->respCleared);
561
562         return typhoon_num_free(respReady, respCleared, RESPONSE_ENTRIES);
563 }
564
565 static inline int
566 typhoon_num_free_tx(struct transmit_ring *ring)
567 {
568         /* if we start using the Hi Tx ring, this needs updating */
569         return typhoon_num_free(ring->lastWrite, ring->lastRead, TXLO_ENTRIES);
570 }
571
572 static int
573 typhoon_issue_command(struct typhoon *tp, int num_cmd, struct cmd_desc *cmd,
574                       int num_resp, struct resp_desc *resp)
575 {
576         struct typhoon_indexes *indexes = tp->indexes;
577         struct basic_ring *ring = &tp->cmdRing;
578         struct resp_desc local_resp;
579         int i, err = 0;
580         int got_resp;
581         int freeCmd, freeResp;
582         int len, wrap_len;
583
584         spin_lock(&tp->command_lock);
585
586         freeCmd = typhoon_num_free_cmd(tp);
587         freeResp = typhoon_num_free_resp(tp);
588
589         if(freeCmd < num_cmd || freeResp < num_resp) {
590                 printk("%s: no descs for cmd, had (needed) %d (%d) cmd, "
591                         "%d (%d) resp\n", tp->name, freeCmd, num_cmd,
592                         freeResp, num_resp);
593                 err = -ENOMEM;
594                 goto out;
595         }
596
597         if(cmd->flags & TYPHOON_CMD_RESPOND) {
598                 /* If we're expecting a response, but the caller hasn't given
599                  * us a place to put it, we'll provide one.
600                  */
601                 tp->awaiting_resp = 1;
602                 if(resp == NULL) {
603                         resp = &local_resp;
604                         num_resp = 1;
605                 }
606         }
607
608         wrap_len = 0;
609         len = num_cmd * sizeof(*cmd);
610         if(unlikely(ring->lastWrite + len > COMMAND_RING_SIZE)) {
611                 wrap_len = ring->lastWrite + len - COMMAND_RING_SIZE;
612                 len = COMMAND_RING_SIZE - ring->lastWrite;
613         }
614
615         memcpy(ring->ringBase + ring->lastWrite, cmd, len);
616         if(unlikely(wrap_len)) {
617                 struct cmd_desc *wrap_ptr = cmd;
618                 wrap_ptr += len / sizeof(*cmd);
619                 memcpy(ring->ringBase, wrap_ptr, wrap_len);
620         }
621
622         typhoon_inc_cmd_index(&ring->lastWrite, num_cmd);
623
624         /* "I feel a presence... another warrior is on the the mesa."
625          */
626         wmb();
627         writel(ring->lastWrite, tp->ioaddr + TYPHOON_REG_CMD_READY);
628         typhoon_post_pci_writes(tp->ioaddr);
629
630         if((cmd->flags & TYPHOON_CMD_RESPOND) == 0)
631                 goto out;
632
633         /* Ugh. We'll be here about 8ms, spinning our thumbs, unable to
634          * preempt or do anything other than take interrupts. So, don't
635          * wait for a response unless you have to.
636          *
637          * I've thought about trying to sleep here, but we're called
638          * from many contexts that don't allow that. Also, given the way
639          * 3Com has implemented irq coalescing, we would likely timeout --
640          * this has been observed in real life!
641          *
642          * The big killer is we have to wait to get stats from the card,
643          * though we could go to a periodic refresh of those if we don't
644          * mind them getting somewhat stale. The rest of the waiting
645          * commands occur during open/close/suspend/resume, so they aren't
646          * time critical. Creating SAs in the future will also have to
647          * wait here.
648          */
649         got_resp = 0;
650         for(i = 0; i < TYPHOON_WAIT_TIMEOUT && !got_resp; i++) {
651                 if(indexes->respCleared != indexes->respReady)
652                         got_resp = typhoon_process_response(tp, num_resp,
653                                                                 resp);
654                 udelay(TYPHOON_UDELAY);
655         }
656
657         if(!got_resp) {
658                 err = -ETIMEDOUT;
659                 goto out;
660         }
661
662         /* Collect the error response even if we don't care about the
663          * rest of the response
664          */
665         if(resp->flags & TYPHOON_RESP_ERROR)
666                 err = -EIO;
667
668 out:
669         if(tp->awaiting_resp) {
670                 tp->awaiting_resp = 0;
671                 smp_wmb();
672
673                 /* Ugh. If a response was added to the ring between
674                  * the call to typhoon_process_response() and the clearing
675                  * of tp->awaiting_resp, we could have missed the interrupt
676                  * and it could hang in the ring an indeterminate amount of
677                  * time. So, check for it, and interrupt ourselves if this
678                  * is the case.
679                  */
680                 if(indexes->respCleared != indexes->respReady)
681                         writel(1, tp->ioaddr + TYPHOON_REG_SELF_INTERRUPT);
682         }
683
684         spin_unlock(&tp->command_lock);
685         return err;
686 }
687
688 static void
689 typhoon_vlan_rx_register(struct net_device *dev, struct vlan_group *grp)
690 {
691         struct typhoon *tp = (struct typhoon *) dev->priv;
692         struct cmd_desc xp_cmd;
693         int err;
694
695         spin_lock_bh(&tp->state_lock);
696         if(!tp->vlgrp != !grp) {
697                 /* We've either been turned on for the first time, or we've
698                  * been turned off. Update the 3XP.
699                  */
700                 if(grp)
701                         tp->offload |= TYPHOON_OFFLOAD_VLAN;
702                 else
703                         tp->offload &= ~TYPHOON_OFFLOAD_VLAN;
704
705                 /* If the interface is up, the runtime is running -- and we
706                  * must be up for the vlan core to call us.
707                  *
708                  * Do the command outside of the spin lock, as it is slow.
709                  */
710                 INIT_COMMAND_WITH_RESPONSE(&xp_cmd,
711                                         TYPHOON_CMD_SET_OFFLOAD_TASKS);
712                 xp_cmd.parm2 = tp->offload;
713                 xp_cmd.parm3 = tp->offload;
714                 spin_unlock_bh(&tp->state_lock);
715                 err = typhoon_issue_command(tp, 1, &xp_cmd, 0, NULL);
716                 if(err < 0)
717                         printk("%s: vlan offload error %d\n", tp->name, -err);
718                 spin_lock_bh(&tp->state_lock);
719         }
720
721         /* now make the change visible */
722         tp->vlgrp = grp;
723         spin_unlock_bh(&tp->state_lock);
724 }
725
726 static void
727 typhoon_vlan_rx_kill_vid(struct net_device *dev, unsigned short vid)
728 {
729         struct typhoon *tp = (struct typhoon *) dev->priv;
730         spin_lock_bh(&tp->state_lock);
731         if(tp->vlgrp)
732                 tp->vlgrp->vlan_devices[vid] = NULL;
733         spin_unlock_bh(&tp->state_lock);
734 }
735
736 static inline void
737 typhoon_tso_fill(struct sk_buff *skb, struct transmit_ring *txRing,
738                         u32 ring_dma)
739 {
740         struct tcpopt_desc *tcpd;
741         u32 tcpd_offset = ring_dma;
742
743         tcpd = (struct tcpopt_desc *) (txRing->ringBase + txRing->lastWrite);
744         tcpd_offset += txRing->lastWrite;
745         tcpd_offset += offsetof(struct tcpopt_desc, bytesTx);
746         typhoon_inc_tx_index(&txRing->lastWrite, 1);
747
748         tcpd->flags = TYPHOON_OPT_DESC | TYPHOON_OPT_TCP_SEG;
749         tcpd->numDesc = 1;
750         tcpd->mss_flags = cpu_to_le16(skb_tso_size(skb));
751         tcpd->mss_flags |= TYPHOON_TSO_FIRST | TYPHOON_TSO_LAST;
752         tcpd->respAddrLo = cpu_to_le32(tcpd_offset);
753         tcpd->bytesTx = cpu_to_le32(skb->len);
754         tcpd->status = 0;
755 }
756
/* Queue a packet for transmission on the Lo Tx ring.
 *
 * Layout written to the ring: one header tx_desc (stashing the skb pointer
 * in its addr/addrHi fields so typhoon_clean_tx() can free it), optionally
 * one TSO option descriptor, then one fragment descriptor per DMA-mapped
 * piece of the packet (linear head + each page fragment).
 *
 * Always returns 0; flow control is done by stopping the queue when the
 * ring cannot hold a worst-case packet.
 */
static int
typhoon_start_tx(struct sk_buff *skb, struct net_device *dev)
{
	struct typhoon *tp = (struct typhoon *) dev->priv;
	struct transmit_ring *txRing;
	struct tx_desc *txd, *first_txd;
	dma_addr_t skb_dma;
	int numDesc;

	/* we have two rings to choose from, but we only use txLo for now
	 * If we start using the Hi ring as well, we'll need to update
	 * typhoon_stop_runtime(), typhoon_interrupt(), typhoon_num_free_tx(),
	 * and TXHI_ENTIRES to match, as well as update the TSO code below
	 * to get the right DMA address
	 */
	txRing = &tp->txLoRing;

	/* We need one descriptor for each fragment of the sk_buff, plus the
	 * one for the ->data area of it.
	 *
	 * The docs say a maximum of 16 fragment descriptors per TCP option
	 * descriptor, then make a new packet descriptor and option descriptor
	 * for the next 16 fragments. The engineers say just an option
	 * descriptor is needed. I've tested up to 26 fragments with a single
	 * packet descriptor/option descriptor combo, so I use that for now.
	 *
	 * If problems develop with TSO, check this first.
	 */
	numDesc = skb_shinfo(skb)->nr_frags + 1;
	if(skb_tso_size(skb))
		numDesc++;

	/* When checking for free space in the ring, we need to also
	 * account for the initial Tx descriptor, and we always must leave
	 * at least one descriptor unused in the ring so that it doesn't
	 * wrap and look empty.
	 *
	 * The only time we should loop here is when we hit the race
	 * between marking the queue awake and updating the cleared index.
	 * Just loop and it will appear. This comes from the acenic driver.
	 */
	while(unlikely(typhoon_num_free_tx(txRing) < (numDesc + 2)))
		smp_rmb();

	first_txd = (struct tx_desc *) (txRing->ringBase + txRing->lastWrite);
	typhoon_inc_tx_index(&txRing->lastWrite, 1);

	/* The header descriptor carries no data; its addr/addrHi fields are
	 * reused to remember the skb pointer for the completion path.
	 */
	first_txd->flags = TYPHOON_TX_DESC | TYPHOON_DESC_VALID;
	first_txd->numDesc = 0;
	first_txd->len = 0;
	first_txd->addr = (u64)((unsigned long) skb) & 0xffffffff;
	first_txd->addrHi = (u64)((unsigned long) skb) >> 32;
	first_txd->processFlags = 0;

	if(skb->ip_summed == CHECKSUM_HW) {
		/* The 3XP will figure out if this is UDP/TCP */
		first_txd->processFlags |= TYPHOON_TX_PF_TCP_CHKSUM;
		first_txd->processFlags |= TYPHOON_TX_PF_UDP_CHKSUM;
		first_txd->processFlags |= TYPHOON_TX_PF_IP_CHKSUM;
	}

	if(vlan_tx_tag_present(skb)) {
		/* ask the firmware to insert the VLAN tag on the wire */
		first_txd->processFlags |=
		    TYPHOON_TX_PF_INSERT_VLAN | TYPHOON_TX_PF_VLAN_PRIORITY;
		first_txd->processFlags |=
		    cpu_to_le32(htons(vlan_tx_tag_get(skb)) <<
				TYPHOON_TX_PF_VLAN_TAG_SHIFT);
	}

	if(skb_tso_size(skb)) {
		first_txd->processFlags |= TYPHOON_TX_PF_TCP_SEGMENT;
		first_txd->numDesc++;

		typhoon_tso_fill(skb, txRing, tp->txlo_dma_addr);
	}

	txd = (struct tx_desc *) (txRing->ringBase + txRing->lastWrite);
	typhoon_inc_tx_index(&txRing->lastWrite, 1);

	/* No need to worry about padding packet -- the firmware pads
	 * it with zeros to ETH_ZLEN for us.
	 */
	if(skb_shinfo(skb)->nr_frags == 0) {
		/* linear skb: one fragment descriptor covers everything */
		skb_dma = pci_map_single(tp->tx_pdev, skb->data, skb->len,
				       PCI_DMA_TODEVICE);
		txd->flags = TYPHOON_FRAG_DESC | TYPHOON_DESC_VALID;
		txd->len = cpu_to_le16(skb->len);
		txd->addr = cpu_to_le32(skb_dma);
		txd->addrHi = 0;
		first_txd->numDesc++;
	} else {
		int i, len;

		/* map the linear head first, then each page fragment */
		len = skb_headlen(skb);
		skb_dma = pci_map_single(tp->tx_pdev, skb->data, len,
					 PCI_DMA_TODEVICE);
		txd->flags = TYPHOON_FRAG_DESC | TYPHOON_DESC_VALID;
		txd->len = cpu_to_le16(len);
		txd->addr = cpu_to_le32(skb_dma);
		txd->addrHi = 0;
		first_txd->numDesc++;

		for(i = 0; i < skb_shinfo(skb)->nr_frags; i++) {
			skb_frag_t *frag = &skb_shinfo(skb)->frags[i];
			void *frag_addr;

			txd = (struct tx_desc *) (txRing->ringBase +
						txRing->lastWrite);
			typhoon_inc_tx_index(&txRing->lastWrite, 1);

			len = frag->size;
			frag_addr = (void *) page_address(frag->page) +
						frag->page_offset;
			skb_dma = pci_map_single(tp->tx_pdev, frag_addr, len,
					 PCI_DMA_TODEVICE);
			txd->flags = TYPHOON_FRAG_DESC | TYPHOON_DESC_VALID;
			txd->len = cpu_to_le16(len);
			txd->addr = cpu_to_le32(skb_dma);
			txd->addrHi = 0;
			first_txd->numDesc++;
		}
	}

	/* Kick the 3XP
	 */
	wmb();
	writel(txRing->lastWrite, tp->tx_ioaddr + txRing->writeRegister);

	dev->trans_start = jiffies;

	/* If we don't have room to put the worst case packet on the
	 * queue, then we must stop the queue. We need 2 extra
	 * descriptors -- one to prevent ring wrap, and one for the
	 * Tx header.
	 */
	numDesc = MAX_SKB_FRAGS + TSO_NUM_DESCRIPTORS + 1;

	if(typhoon_num_free_tx(txRing) < (numDesc + 2)) {
		netif_stop_queue(dev);

		/* A Tx complete IRQ could have gotten inbetween, making
		 * the ring free again. Only need to recheck here, since
		 * Tx is serialized.
		 */
		if(typhoon_num_free_tx(txRing) >= (numDesc + 2))
			netif_wake_queue(dev);
	}

	return 0;
}
907
908 static void
909 typhoon_set_rx_mode(struct net_device *dev)
910 {
911         struct typhoon *tp = (struct typhoon *) dev->priv;
912         struct cmd_desc xp_cmd;
913         u32 mc_filter[2];
914         u16 filter;
915
916         filter = TYPHOON_RX_FILTER_DIRECTED | TYPHOON_RX_FILTER_BROADCAST;
917         if(dev->flags & IFF_PROMISC) {
918                 printk(KERN_NOTICE "%s: Promiscuous mode enabled.\n",
919                        dev->name);
920                 filter |= TYPHOON_RX_FILTER_PROMISCOUS;
921         } else if((dev->mc_count > multicast_filter_limit) ||
922                   (dev->flags & IFF_ALLMULTI)) {
923                 /* Too many to match, or accept all multicasts. */
924                 filter |= TYPHOON_RX_FILTER_ALL_MCAST;
925         } else if(dev->mc_count) {
926                 struct dev_mc_list *mclist;
927                 int i;
928
929                 memset(mc_filter, 0, sizeof(mc_filter));
930                 for(i = 0, mclist = dev->mc_list; mclist && i < dev->mc_count;
931                     i++, mclist = mclist->next) {
932                         int bit = ether_crc(ETH_ALEN, mclist->dmi_addr) & 0x3f;
933                         mc_filter[bit >> 5] |= 1 << (bit & 0x1f);
934                 }
935
936                 INIT_COMMAND_NO_RESPONSE(&xp_cmd,
937                                          TYPHOON_CMD_SET_MULTICAST_HASH);
938                 xp_cmd.parm1 = TYPHOON_MCAST_HASH_SET;
939                 xp_cmd.parm2 = cpu_to_le32(mc_filter[0]);
940                 xp_cmd.parm3 = cpu_to_le32(mc_filter[1]);
941                 typhoon_issue_command(tp, 1, &xp_cmd, 0, NULL);
942
943                 filter |= TYPHOON_RX_FILTER_MCAST_HASH;
944         }
945
946         INIT_COMMAND_NO_RESPONSE(&xp_cmd, TYPHOON_CMD_SET_RX_FILTER);
947         xp_cmd.parm1 = filter;
948         typhoon_issue_command(tp, 1, &xp_cmd, 0, NULL);
949 }
950
951 static int
952 typhoon_do_get_stats(struct typhoon *tp)
953 {
954         struct net_device_stats *stats = &tp->stats;
955         struct net_device_stats *saved = &tp->stats_saved;
956         struct cmd_desc xp_cmd;
957         struct resp_desc xp_resp[7];
958         struct stats_resp *s = (struct stats_resp *) xp_resp;
959         int err;
960
961         INIT_COMMAND_WITH_RESPONSE(&xp_cmd, TYPHOON_CMD_READ_STATS);
962         err = typhoon_issue_command(tp, 1, &xp_cmd, 7, xp_resp);
963         if(err < 0)
964                 return err;
965
966         /* 3Com's Linux driver uses txMultipleCollisions as it's
967          * collisions value, but there is some other collision info as well...
968          */
969         stats->tx_packets = le32_to_cpu(s->txPackets);
970         stats->tx_bytes = le32_to_cpu(s->txBytes);
971         stats->tx_errors = le32_to_cpu(s->txCarrierLost);
972         stats->tx_carrier_errors = le32_to_cpu(s->txCarrierLost);
973         stats->collisions = le32_to_cpu(s->txMultipleCollisions);
974         stats->rx_packets = le32_to_cpu(s->rxPacketsGood);
975         stats->rx_bytes = le32_to_cpu(s->rxBytesGood);
976         stats->rx_fifo_errors = le32_to_cpu(s->rxFifoOverruns);
977         stats->rx_errors = le32_to_cpu(s->rxFifoOverruns) +
978                         le32_to_cpu(s->BadSSD) + le32_to_cpu(s->rxCrcErrors);
979         stats->rx_crc_errors = le32_to_cpu(s->rxCrcErrors);
980         stats->rx_length_errors = le32_to_cpu(s->rxOversized);
981         tp->speed = (s->linkStatus & TYPHOON_LINK_100MBPS) ?
982                         SPEED_100 : SPEED_10;
983         tp->duplex = (s->linkStatus & TYPHOON_LINK_FULL_DUPLEX) ?
984                         DUPLEX_FULL : DUPLEX_HALF;
985
986         /* add in the saved statistics
987          */
988         stats->tx_packets += saved->tx_packets;
989         stats->tx_bytes += saved->tx_bytes;
990         stats->tx_errors += saved->tx_errors;
991         stats->collisions += saved->collisions;
992         stats->rx_packets += saved->rx_packets;
993         stats->rx_bytes += saved->rx_bytes;
994         stats->rx_fifo_errors += saved->rx_fifo_errors;
995         stats->rx_errors += saved->rx_errors;
996         stats->rx_crc_errors += saved->rx_crc_errors;
997         stats->rx_length_errors += saved->rx_length_errors;
998
999         return 0;
1000 }
1001
1002 static struct net_device_stats *
1003 typhoon_get_stats(struct net_device *dev)
1004 {
1005         struct typhoon *tp = (struct typhoon *) dev->priv;
1006         struct net_device_stats *stats = &tp->stats;
1007         struct net_device_stats *saved = &tp->stats_saved;
1008
1009         smp_rmb();
1010         if(tp->card_state == Sleeping)
1011                 return saved;
1012
1013         if(typhoon_do_get_stats(tp) < 0) {
1014                 printk(KERN_ERR "%s: error getting stats\n", dev->name);
1015                 return saved;
1016         }
1017
1018         return stats;
1019 }
1020
1021 static int
1022 typhoon_set_mac_address(struct net_device *dev, void *addr)
1023 {
1024         struct sockaddr *saddr = (struct sockaddr *) addr;
1025
1026         if(netif_running(dev))
1027                 return -EBUSY;
1028
1029         memcpy(dev->dev_addr, saddr->sa_data, dev->addr_len);
1030         return 0;
1031 }
1032
1033 static inline void
1034 typhoon_ethtool_gdrvinfo(struct typhoon *tp, struct ethtool_drvinfo *info)
1035 {
1036         struct pci_dev *pci_dev = tp->pdev;
1037         struct cmd_desc xp_cmd;
1038         struct resp_desc xp_resp[3];
1039
1040         smp_rmb();
1041         if(tp->card_state == Sleeping) {
1042                 strcpy(info->fw_version, "Sleep image");
1043         } else {
1044                 INIT_COMMAND_WITH_RESPONSE(&xp_cmd, TYPHOON_CMD_READ_VERSIONS);
1045                 if(typhoon_issue_command(tp, 1, &xp_cmd, 3, xp_resp) < 0) {
1046                         strcpy(info->fw_version, "Unknown runtime");
1047                 } else {
1048                         strncpy(info->fw_version, (char *) &xp_resp[1], 32);
1049                         info->fw_version[31] = 0;
1050                 }
1051         }
1052
1053         strcpy(info->driver, DRV_MODULE_NAME);
1054         strcpy(info->version, DRV_MODULE_VERSION);
1055         strcpy(info->bus_info, pci_name(pci_dev));
1056 }
1057
1058 static inline void
1059 typhoon_ethtool_gset(struct typhoon *tp, struct ethtool_cmd *cmd)
1060 {
1061         cmd->supported = SUPPORTED_100baseT_Half | SUPPORTED_100baseT_Full |
1062                                 SUPPORTED_Autoneg;
1063
1064         switch (tp->xcvr_select) {
1065         case TYPHOON_XCVR_10HALF:
1066                 cmd->advertising = ADVERTISED_10baseT_Half;
1067                 break;
1068         case TYPHOON_XCVR_10FULL:
1069                 cmd->advertising = ADVERTISED_10baseT_Full;
1070                 break;
1071         case TYPHOON_XCVR_100HALF:
1072                 cmd->advertising = ADVERTISED_100baseT_Half;
1073                 break;
1074         case TYPHOON_XCVR_100FULL:
1075                 cmd->advertising = ADVERTISED_100baseT_Full;
1076                 break;
1077         case TYPHOON_XCVR_AUTONEG:
1078                 cmd->advertising = ADVERTISED_10baseT_Half |
1079                                             ADVERTISED_10baseT_Full |
1080                                             ADVERTISED_100baseT_Half |
1081                                             ADVERTISED_100baseT_Full |
1082                                             ADVERTISED_Autoneg;
1083                 break;
1084         }
1085
1086         if(tp->capabilities & TYPHOON_FIBER) {
1087                 cmd->supported |= SUPPORTED_FIBRE;
1088                 cmd->advertising |= ADVERTISED_FIBRE;
1089                 cmd->port = PORT_FIBRE;
1090         } else {
1091                 cmd->supported |= SUPPORTED_10baseT_Half |
1092                                         SUPPORTED_10baseT_Full |
1093                                         SUPPORTED_TP;
1094                 cmd->advertising |= ADVERTISED_TP;
1095                 cmd->port = PORT_TP;
1096         }
1097
1098         /* need to get stats to make these link speed/duplex valid */
1099         typhoon_do_get_stats(tp);
1100         cmd->speed = tp->speed;
1101         cmd->duplex = tp->duplex;
1102         cmd->phy_address = 0;
1103         cmd->transceiver = XCVR_INTERNAL;
1104         if(tp->xcvr_select == TYPHOON_XCVR_AUTONEG)
1105                 cmd->autoneg = AUTONEG_ENABLE;
1106         else
1107                 cmd->autoneg = AUTONEG_DISABLE;
1108         cmd->maxtxpkt = 1;
1109         cmd->maxrxpkt = 1;
1110 }
1111
1112 static inline int
1113 typhoon_ethtool_sset(struct typhoon *tp, struct ethtool_cmd *cmd)
1114 {
1115         struct cmd_desc xp_cmd;
1116         int xcvr;
1117         int err;
1118
1119         if(cmd->autoneg == AUTONEG_ENABLE) {
1120                 xcvr = TYPHOON_XCVR_AUTONEG;
1121         } else {
1122                 if(cmd->duplex == DUPLEX_HALF) {
1123                         if(cmd->speed == SPEED_10)
1124                                 xcvr = TYPHOON_XCVR_10HALF;
1125                         else if(cmd->speed == SPEED_100)
1126                                 xcvr = TYPHOON_XCVR_100HALF;
1127                         else
1128                                 return -EINVAL;
1129                 } else if(cmd->duplex == DUPLEX_FULL) {
1130                         if(cmd->speed == SPEED_10)
1131                                 xcvr = TYPHOON_XCVR_10FULL;
1132                         else if(cmd->speed == SPEED_100)
1133                                 xcvr = TYPHOON_XCVR_100FULL;
1134                         else
1135                                 return -EINVAL;
1136                 } else
1137                         return -EINVAL;
1138         }
1139
1140         INIT_COMMAND_NO_RESPONSE(&xp_cmd, TYPHOON_CMD_XCVR_SELECT);
1141         xp_cmd.parm1 = cpu_to_le16(xcvr);
1142         err = typhoon_issue_command(tp, 1, &xp_cmd, 0, NULL);
1143         if(err < 0)
1144                 return err;
1145
1146         tp->xcvr_select = xcvr;
1147         if(cmd->autoneg == AUTONEG_ENABLE) {
1148                 tp->speed = 0xff;       /* invalid */
1149                 tp->duplex = 0xff;      /* invalid */
1150         } else {
1151                 tp->speed = cmd->speed;
1152                 tp->duplex = cmd->duplex;
1153         }
1154
1155         return 0;
1156 }
1157
/* Dispatch the SIOCETHTOOL sub-commands this driver supports.
 *
 * useraddr points at an ethtool_* struct in user space whose first u32 is
 * the sub-command. Each case copies the struct in/out as appropriate.
 * Returns 0, -EFAULT on a bad user pointer, or -EOPNOTSUPP for commands
 * not handled here.
 */
static inline int
typhoon_ethtool_ioctl(struct net_device *dev, void __user *useraddr)
{
	struct typhoon *tp = (struct typhoon *) dev->priv;
	u32 ethcmd;

	if(copy_from_user(&ethcmd, useraddr, sizeof(ethcmd)))
		return -EFAULT;

	switch (ethcmd) {
	case ETHTOOL_GDRVINFO: {
			struct ethtool_drvinfo info = { ETHTOOL_GDRVINFO };

			typhoon_ethtool_gdrvinfo(tp, &info);
			if(copy_to_user(useraddr, &info, sizeof(info)))
				return -EFAULT;
			return 0;
		}
	case ETHTOOL_GSET: {
			struct ethtool_cmd cmd = { ETHTOOL_GSET };

			typhoon_ethtool_gset(tp, &cmd);
			if(copy_to_user(useraddr, &cmd, sizeof(cmd)))
				return -EFAULT;
			return 0;
		}
	case ETHTOOL_SSET: {
			struct ethtool_cmd cmd;
			if(copy_from_user(&cmd, useraddr, sizeof(cmd)))
				return -EFAULT;

			return typhoon_ethtool_sset(tp, &cmd);
		}
	case ETHTOOL_GLINK:{
			struct ethtool_value edata = { ETHTOOL_GLINK };

			/* link state as seen by the net core */
			edata.data = netif_carrier_ok(dev) ? 1 : 0;
			if(copy_to_user(useraddr, &edata, sizeof(edata)))
				return -EFAULT;
			return 0;
		}
	case ETHTOOL_GWOL: {
			struct ethtool_wolinfo wol = { ETHTOOL_GWOL };

			/* translate driver wake events to ethtool WAKE_* bits */
			if(tp->wol_events & TYPHOON_WAKE_LINK_EVENT)
				wol.wolopts |= WAKE_PHY;
			if(tp->wol_events & TYPHOON_WAKE_MAGIC_PKT)
				wol.wolopts |= WAKE_MAGIC;
			if(copy_to_user(useraddr, &wol, sizeof(wol)))
				return -EFAULT;
			return 0;
	}
	case ETHTOOL_SWOL: {
			struct ethtool_wolinfo wol;

			if(copy_from_user(&wol, useraddr, sizeof(wol)))
				return -EFAULT;
			/* replace (not merge) the configured wake events */
			tp->wol_events = 0;
			if(wol.wolopts & WAKE_PHY)
				tp->wol_events |= TYPHOON_WAKE_LINK_EVENT;
			if(wol.wolopts & WAKE_MAGIC)
				tp->wol_events |= TYPHOON_WAKE_MAGIC_PKT;
			return 0;
	}
	default:
		break;
	}

	return -EOPNOTSUPP;
}
1228
1229 static int
1230 typhoon_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd)
1231 {
1232         switch (cmd) {
1233         case SIOCETHTOOL:
1234                 return typhoon_ethtool_ioctl(dev, ifr->ifr_data);
1235         default:
1236                 break;
1237         }
1238
1239         return -EOPNOTSUPP;
1240 }
1241
1242 static int
1243 typhoon_wait_interrupt(unsigned long ioaddr)
1244 {
1245         int i, err = 0;
1246
1247         for(i = 0; i < TYPHOON_WAIT_TIMEOUT; i++) {
1248                 if(readl(ioaddr + TYPHOON_REG_INTR_STATUS) &
1249                    TYPHOON_INTR_BOOTCMD)
1250                         goto out;
1251                 udelay(TYPHOON_UDELAY);
1252         }
1253
1254         err = -ETIMEDOUT;
1255
1256 out:
1257         writel(TYPHOON_INTR_BOOTCMD, ioaddr + TYPHOON_REG_INTR_STATUS);
1258         return err;
1259 }
1260
/* Byte offset of member x within the shared host/card memory block */
#define shared_offset(x)	offsetof(struct typhoon_shared, x)

/* Initialize the shared-memory interface structure the 3XP reads at boot.
 *
 * Zeroes the whole shared block, then fills in the bus address and size of
 * every ring (the shared block was allocated as one consistent DMA region,
 * so each ring's address is tp->shared_dma plus its offset). Also caches
 * host-side ring base pointers and initial state in tp.
 */
static void
typhoon_init_interface(struct typhoon *tp)
{
	struct typhoon_interface *iface = &tp->shared->iface;
	dma_addr_t shared_dma;

	memset(tp->shared, 0, sizeof(struct typhoon_shared));

	/* The *Hi members of iface are all init'd to zero by the memset().
	 */
	shared_dma = tp->shared_dma + shared_offset(indexes);
	iface->ringIndex = cpu_to_le32(shared_dma);

	shared_dma = tp->shared_dma + shared_offset(txLo);
	iface->txLoAddr = cpu_to_le32(shared_dma);
	iface->txLoSize = cpu_to_le32(TXLO_ENTRIES * sizeof(struct tx_desc));

	shared_dma = tp->shared_dma + shared_offset(txHi);
	iface->txHiAddr = cpu_to_le32(shared_dma);
	iface->txHiSize = cpu_to_le32(TXHI_ENTRIES * sizeof(struct tx_desc));

	shared_dma = tp->shared_dma + shared_offset(rxBuff);
	iface->rxBuffAddr = cpu_to_le32(shared_dma);
	iface->rxBuffSize = cpu_to_le32(RXFREE_ENTRIES *
					sizeof(struct rx_free));

	shared_dma = tp->shared_dma + shared_offset(rxLo);
	iface->rxLoAddr = cpu_to_le32(shared_dma);
	iface->rxLoSize = cpu_to_le32(RX_ENTRIES * sizeof(struct rx_desc));

	shared_dma = tp->shared_dma + shared_offset(rxHi);
	iface->rxHiAddr = cpu_to_le32(shared_dma);
	iface->rxHiSize = cpu_to_le32(RX_ENTRIES * sizeof(struct rx_desc));

	shared_dma = tp->shared_dma + shared_offset(cmd);
	iface->cmdAddr = cpu_to_le32(shared_dma);
	iface->cmdSize = cpu_to_le32(COMMAND_RING_SIZE);

	shared_dma = tp->shared_dma + shared_offset(resp);
	iface->respAddr = cpu_to_le32(shared_dma);
	iface->respSize = cpu_to_le32(RESPONSE_RING_SIZE);

	shared_dma = tp->shared_dma + shared_offset(zeroWord);
	iface->zeroAddr = cpu_to_le32(shared_dma);

	/* host-side (virtual) views of the same rings */
	tp->indexes = &tp->shared->indexes;
	tp->txLoRing.ringBase = (u8 *) tp->shared->txLo;
	tp->txHiRing.ringBase = (u8 *) tp->shared->txHi;
	tp->rxLoRing.ringBase = (u8 *) tp->shared->rxLo;
	tp->rxHiRing.ringBase = (u8 *) tp->shared->rxHi;
	tp->rxBuffRing.ringBase = (u8 *) tp->shared->rxBuff;
	tp->cmdRing.ringBase = (u8 *) tp->shared->cmd;
	tp->respRing.ringBase = (u8 *) tp->shared->resp;

	tp->txLoRing.writeRegister = TYPHOON_REG_TX_LO_READY;
	tp->txHiRing.writeRegister = TYPHOON_REG_TX_HI_READY;

	tp->txlo_dma_addr = iface->txLoAddr;
	/* publish card_state before anyone reads it (paired with smp_rmb) */
	tp->card_state = Sleeping;
	smp_wmb();

	tp->offload = TYPHOON_OFFLOAD_IP_CHKSUM | TYPHOON_OFFLOAD_TCP_CHKSUM;
	tp->offload |= TYPHOON_OFFLOAD_UDP_CHKSUM | TSO_OFFLOAD_ON;

	spin_lock_init(&tp->command_lock);
	spin_lock_init(&tp->state_lock);
}
1330
1331 static void
1332 typhoon_init_rings(struct typhoon *tp)
1333 {
1334         memset(tp->indexes, 0, sizeof(struct typhoon_indexes));
1335
1336         tp->txLoRing.lastWrite = 0;
1337         tp->txHiRing.lastWrite = 0;
1338         tp->rxLoRing.lastWrite = 0;
1339         tp->rxHiRing.lastWrite = 0;
1340         tp->rxBuffRing.lastWrite = 0;
1341         tp->cmdRing.lastWrite = 0;
1342         tp->cmdRing.lastWrite = 0;
1343
1344         tp->txLoRing.lastRead = 0;
1345         tp->txHiRing.lastRead = 0;
1346 }
1347
/* Download the runtime firmware image to the 3XP.
 *
 * The built-in image is a typhoon_file_header followed by one or more
 * sections. Each section is streamed to the card in PAGE_SIZE chunks
 * through a consistent DMA bounce page, with a command/interrupt handshake
 * per chunk. Returns 0 on success, -EINVAL for a bad image, -ENOMEM if the
 * bounce page cannot be allocated, or -ETIMEDOUT if the card stalls.
 */
static int
typhoon_download_firmware(struct typhoon *tp)
{
	unsigned long ioaddr = tp->ioaddr;
	struct pci_dev *pdev = tp->pdev;
	struct typhoon_file_header *fHdr;
	struct typhoon_section_header *sHdr;
	u8 *image_data;
	void *dpage;
	dma_addr_t dpage_dma;
	unsigned int csum;
	u32 irqEnabled;
	u32 irqMasked;
	u32 numSections;
	u32 section_len;
	u32 len;
	u32 load_addr;
	u32 hmac;
	int i;
	int err;

	err = -EINVAL;
	fHdr = (struct typhoon_file_header *) typhoon_firmware_image;
	image_data = (u8 *) fHdr;

	/* sanity-check the magic tag before touching the hardware */
	if(memcmp(fHdr->tag, "TYPHOON", 8)) {
		printk(KERN_ERR "%s: Invalid firmware image!\n", tp->name);
		goto err_out;
	}

	/* Cannot just map the firmware image using pci_map_single() as
	 * the firmware is part of the kernel/module image, so we allocate
	 * some consistent memory to copy the sections into, as it is simpler,
	 * and short-lived. If we ever split out and require a userland
	 * firmware loader, then we can revisit this.
	 */
	err = -ENOMEM;
	dpage = pci_alloc_consistent(pdev, PAGE_SIZE, &dpage_dma);
	if(!dpage) {
		printk(KERN_ERR "%s: no DMA mem for firmware\n", tp->name);
		goto err_out;
	}

	/* enable and unmask the boot-command interrupt for the duration;
	 * the original settings are restored on exit
	 */
	irqEnabled = readl(ioaddr + TYPHOON_REG_INTR_ENABLE);
	writel(irqEnabled | TYPHOON_INTR_BOOTCMD,
	       ioaddr + TYPHOON_REG_INTR_ENABLE);
	irqMasked = readl(ioaddr + TYPHOON_REG_INTR_MASK);
	writel(irqMasked | TYPHOON_INTR_BOOTCMD,
	       ioaddr + TYPHOON_REG_INTR_MASK);

	err = -ETIMEDOUT;
	if(typhoon_wait_status(ioaddr, TYPHOON_STATUS_WAITING_FOR_HOST) < 0) {
		printk(KERN_ERR "%s: card ready timeout\n", tp->name);
		goto err_out_irq;
	}

	numSections = le32_to_cpu(fHdr->numSections);
	load_addr = le32_to_cpu(fHdr->startAddr);

	/* hand the card the image start address and HMAC digest, then
	 * tell it a runtime image is coming
	 */
	writel(TYPHOON_INTR_BOOTCMD, ioaddr + TYPHOON_REG_INTR_STATUS);
	writel(load_addr, ioaddr + TYPHOON_REG_DOWNLOAD_BOOT_ADDR);
	hmac = le32_to_cpu(fHdr->hmacDigest[0]);
	writel(hmac, ioaddr + TYPHOON_REG_DOWNLOAD_HMAC_0);
	hmac = le32_to_cpu(fHdr->hmacDigest[1]);
	writel(hmac, ioaddr + TYPHOON_REG_DOWNLOAD_HMAC_1);
	hmac = le32_to_cpu(fHdr->hmacDigest[2]);
	writel(hmac, ioaddr + TYPHOON_REG_DOWNLOAD_HMAC_2);
	hmac = le32_to_cpu(fHdr->hmacDigest[3]);
	writel(hmac, ioaddr + TYPHOON_REG_DOWNLOAD_HMAC_3);
	hmac = le32_to_cpu(fHdr->hmacDigest[4]);
	writel(hmac, ioaddr + TYPHOON_REG_DOWNLOAD_HMAC_4);
	typhoon_post_pci_writes(ioaddr);
	writel(TYPHOON_BOOTCMD_RUNTIME_IMAGE, ioaddr + TYPHOON_REG_COMMAND);

	image_data += sizeof(struct typhoon_file_header);

	/* The readl() in typhoon_wait_interrupt() will force the
	 * last write to the command register to post, so
	 * we don't need a typhoon_post_pci_writes() after it.
	 */
	for(i = 0; i < numSections; i++) {
		sHdr = (struct typhoon_section_header *) image_data;
		image_data += sizeof(struct typhoon_section_header);
		load_addr = le32_to_cpu(sHdr->startAddr);
		section_len = le32_to_cpu(sHdr->len);

		while(section_len) {
			len = min_t(u32, section_len, PAGE_SIZE);

			/* wait until the card is ready for the next chunk */
			if(typhoon_wait_interrupt(ioaddr) < 0 ||
			   readl(ioaddr + TYPHOON_REG_STATUS) !=
			   TYPHOON_STATUS_WAITING_FOR_SEGMENT) {
				printk(KERN_ERR "%s: segment ready timeout\n",
				       tp->name);
				goto err_out_irq;
			}

			/* Do an pseudo IPv4 checksum on the data -- first
			 * need to convert each u16 to cpu order before
			 * summing. Fortunately, due to the properties of
			 * the checksum, we can do this once, at the end.
			 */
			csum = csum_partial_copy_nocheck(image_data, dpage,
							 len, 0);
			csum = csum_fold(csum);
			csum = le16_to_cpu(csum);

			/* describe the chunk (length, checksum, card-side
			 * destination, bounce-page bus address) and kick it
			 */
			writel(len, ioaddr + TYPHOON_REG_BOOT_LENGTH);
			writel(csum, ioaddr + TYPHOON_REG_BOOT_CHECKSUM);
			writel(load_addr, ioaddr + TYPHOON_REG_BOOT_DEST_ADDR);
			writel(0, ioaddr + TYPHOON_REG_BOOT_DATA_HI);
			writel(dpage_dma, ioaddr + TYPHOON_REG_BOOT_DATA_LO);
			typhoon_post_pci_writes(ioaddr);
			writel(TYPHOON_BOOTCMD_SEG_AVAILABLE,
			       ioaddr + TYPHOON_REG_COMMAND);

			image_data += len;
			load_addr += len;
			section_len -= len;
		}
	}

	/* wait for the last chunk to be consumed before finishing up */
	if(typhoon_wait_interrupt(ioaddr) < 0 ||
	   readl(ioaddr + TYPHOON_REG_STATUS) !=
	   TYPHOON_STATUS_WAITING_FOR_SEGMENT) {
		printk(KERN_ERR "%s: final segment ready timeout\n", tp->name);
		goto err_out_irq;
	}

	writel(TYPHOON_BOOTCMD_DNLD_COMPLETE, ioaddr + TYPHOON_REG_COMMAND);

	if(typhoon_wait_status(ioaddr, TYPHOON_STATUS_WAITING_FOR_BOOT) < 0) {
		printk(KERN_ERR "%s: boot ready timeout, status 0x%0x\n",
		       tp->name, readl(ioaddr + TYPHOON_REG_STATUS));
		goto err_out_irq;
	}

	err = 0;

err_out_irq:
	/* restore the interrupt enable/mask registers we modified above */
	writel(irqMasked, ioaddr + TYPHOON_REG_INTR_MASK);
	writel(irqEnabled, ioaddr + TYPHOON_REG_INTR_ENABLE);

	pci_free_consistent(pdev, PAGE_SIZE, dpage, dpage_dma);

err_out:
	return err;
}
1496
/* Boot the 3XP processor once firmware is in place.
 *
 * Waits for the card to report initial_status, hands it the bus address of
 * the shared boot record, waits for TYPHOON_STATUS_RUNNING, then clears the
 * doorbell registers and issues the final boot command. Returns 0 on
 * success or -ETIMEDOUT if either status wait fails.
 */
static int
typhoon_boot_3XP(struct typhoon *tp, u32 initial_status)
{
	unsigned long ioaddr = tp->ioaddr;

	if(typhoon_wait_status(ioaddr, initial_status) < 0) {
		printk(KERN_ERR "%s: boot ready timeout\n", tp->name);
		goto out_timeout;
	}

	/* point the card at the shared boot record (32-bit bus address) */
	writel(0, ioaddr + TYPHOON_REG_BOOT_RECORD_ADDR_HI);
	writel(tp->shared_dma, ioaddr + TYPHOON_REG_BOOT_RECORD_ADDR_LO);
	typhoon_post_pci_writes(ioaddr);
	writel(TYPHOON_BOOTCMD_REG_BOOT_RECORD, ioaddr + TYPHOON_REG_COMMAND);

	if(typhoon_wait_status(ioaddr, TYPHOON_STATUS_RUNNING) < 0) {
		printk(KERN_ERR "%s: boot finish timeout (status 0x%x)\n",
		       tp->name, readl(ioaddr + TYPHOON_REG_STATUS));
		goto out_timeout;
	}

	/* Clear the Transmit and Command ready registers
	 */
	writel(0, ioaddr + TYPHOON_REG_TX_HI_READY);
	writel(0, ioaddr + TYPHOON_REG_CMD_READY);
	writel(0, ioaddr + TYPHOON_REG_TX_LO_READY);
	typhoon_post_pci_writes(ioaddr);
	writel(TYPHOON_BOOTCMD_BOOT, ioaddr + TYPHOON_REG_COMMAND);

	return 0;

out_timeout:
	return -ETIMEDOUT;
}
1531
/* Reclaim Tx descriptors the card has finished with, walking from
 * txRing->lastRead up to (but not including) *index, the card's cleared
 * index in the shared area.
 *
 * A TYPHOON_TX_DESC entry carries the skb pointer itself, split across
 * the addr/addrHi fields, so the skb is simply reconstituted and freed.
 * A TYPHOON_FRAG_DESC entry carries a DMA mapping, which is unmapped.
 *
 * Returns the new lastRead value; the caller is responsible for storing
 * it back into the ring.
 */
static u32
typhoon_clean_tx(struct typhoon *tp, struct transmit_ring *txRing,
			volatile u32 * index)
{
	u32 lastRead = txRing->lastRead;
	struct tx_desc *tx;
	dma_addr_t skb_dma;
	int dma_len;
	int type;

	while(lastRead != le32_to_cpu(*index)) {
		tx = (struct tx_desc *) (txRing->ringBase + lastRead);
		type = tx->flags & TYPHOON_TYPE_MASK;

		if(type == TYPHOON_TX_DESC) {
			/* This tx_desc describes a packet.
			 */
			unsigned long ptr = tx->addr | ((u64)tx->addrHi << 32);
			struct sk_buff *skb = (struct sk_buff *) ptr;
			dev_kfree_skb_irq(skb);
		} else if(type == TYPHOON_FRAG_DESC) {
			/* This tx_desc describes a memory mapping. Free it.
			 */
			skb_dma = (dma_addr_t) le32_to_cpu(tx->addr);
			dma_len = le16_to_cpu(tx->len);
			pci_unmap_single(tp->pdev, skb_dma, dma_len,
				       PCI_DMA_TODEVICE);
		}

		/* Mark the slot free for the transmit path to reuse */
		tx->flags = 0;
		typhoon_inc_tx_index(&lastRead, 1);
	}

	return lastRead;
}
1567
1568 static void
1569 typhoon_tx_complete(struct typhoon *tp, struct transmit_ring *txRing,
1570                         volatile u32 * index)
1571 {
1572         u32 lastRead;
1573         int numDesc = MAX_SKB_FRAGS + 1;
1574
1575         /* This will need changing if we start to use the Hi Tx ring. */
1576         lastRead = typhoon_clean_tx(tp, txRing, index);
1577         if(netif_queue_stopped(tp->dev) && typhoon_num_free(txRing->lastWrite,
1578                                 lastRead, TXLO_ENTRIES) > (numDesc + 2))
1579                 netif_wake_queue(tp->dev);
1580
1581         txRing->lastRead = lastRead;
1582         smp_wmb();
1583 }
1584
/* Return the (still DMA-mapped) receive buffer at rxbuffers[idx] to the
 * RX free ring so the card can fill it again.  If the free ring is full,
 * the skb is dropped instead; typhoon_fill_free_ring() can repopulate
 * the slot later.
 */
static void
typhoon_recycle_rx_skb(struct typhoon *tp, u32 idx)
{
	struct typhoon_indexes *indexes = tp->indexes;
	struct rxbuff_ent *rxb = &tp->rxbuffers[idx];
	struct basic_ring *ring = &tp->rxBuffRing;
	struct rx_free *r;

	/* Ring is full when advancing lastWrite by one entry would land
	 * on the card's cleared index.
	 */
	if((ring->lastWrite + sizeof(*r)) % (RXFREE_ENTRIES * sizeof(*r)) ==
				indexes->rxBuffCleared) {
		/* no room in ring, just drop the skb
		 */
		dev_kfree_skb_any(rxb->skb);
		rxb->skb = NULL;
		return;
	}

	r = (struct rx_free *) (ring->ringBase + ring->lastWrite);
	typhoon_inc_rxfree_index(&ring->lastWrite, 1);
	r->virtAddr = idx;
	r->physAddr = cpu_to_le32(rxb->dma_addr);

	/* Tell the card about it -- the descriptor contents must be
	 * visible in memory before the index update that publishes them.
	 */
	wmb();
	indexes->rxBuffReady = cpu_to_le32(ring->lastWrite);
}
1611
/* Allocate a fresh skb for rx buffer slot @idx, DMA map it, and post it
 * on the RX free ring for the card to fill.
 *
 * Returns 0 on success, -ENOMEM if the free ring is full or the skb
 * allocation fails; in either case the slot is left empty.
 */
static int
typhoon_alloc_rx_skb(struct typhoon *tp, u32 idx)
{
	struct typhoon_indexes *indexes = tp->indexes;
	struct rxbuff_ent *rxb = &tp->rxbuffers[idx];
	struct basic_ring *ring = &tp->rxBuffRing;
	struct rx_free *r;
	struct sk_buff *skb;
	dma_addr_t dma_addr;

	rxb->skb = NULL;

	/* Full when one more entry would land on the card's cleared index */
	if((ring->lastWrite + sizeof(*r)) % (RXFREE_ENTRIES * sizeof(*r)) ==
				indexes->rxBuffCleared)
		return -ENOMEM;

	skb = dev_alloc_skb(PKT_BUF_SZ);
	if(!skb)
		return -ENOMEM;

#if 0
	/* Disabled: the firmware cannot DMA to a 2-byte-aligned address
	 * (see KNOWN ISSUES at the top of this file).
	 *
	 * Please, 3com, fix the firmware to allow DMA to a unaligned
	 * address! Pretty please?
	 */
	skb_reserve(skb, 2);
#endif

	skb->dev = tp->dev;
	dma_addr = pci_map_single(tp->pdev, skb->tail,
				  PKT_BUF_SZ, PCI_DMA_FROMDEVICE);

	/* Since no card does 64 bit DAC, the high bits will never
	 * change from zero.
	 */
	r = (struct rx_free *) (ring->ringBase + ring->lastWrite);
	typhoon_inc_rxfree_index(&ring->lastWrite, 1);
	r->virtAddr = idx;
	r->physAddr = cpu_to_le32(dma_addr);
	rxb->skb = skb;
	rxb->dma_addr = dma_addr;

	/* Tell the card about it -- descriptor contents must be visible
	 * before the index update that publishes them.
	 */
	wmb();
	indexes->rxBuffReady = cpu_to_le32(ring->lastWrite);
	return 0;
}
1658
/* Process up to @budget received frames from one RX ring (Hi or Lo).
 *
 * @ready and @cleared point into the shared index area for this ring:
 * the card advances *ready as it completes frames, and we report
 * consumption by writing *cleared back.  Frames shorter than
 * rx_copybreak are copied into a small fresh skb and the original
 * buffer is recycled; larger frames are passed up directly and the slot
 * gets a newly allocated buffer.
 *
 * Returns the number of frames handed to the network stack.
 */
static int
typhoon_rx(struct typhoon *tp, struct basic_ring *rxRing, volatile u32 * ready,
	   volatile u32 * cleared, int budget)
{
	struct rx_desc *rx;
	struct sk_buff *skb, *new_skb;
	struct rxbuff_ent *rxb;
	dma_addr_t dma_addr;
	u32 local_ready;
	u32 rxaddr;
	int pkt_len;
	u32 idx;
	u32 csum_bits;
	int received;

	received = 0;
	local_ready = le32_to_cpu(*ready);
	rxaddr = le32_to_cpu(*cleared);
	while(rxaddr != local_ready && budget > 0) {
		rx = (struct rx_desc *) (rxRing->ringBase + rxaddr);
		/* rx->addr is the rxbuffers[] slot we posted in
		 * typhoon_alloc_rx_skb()
		 */
		idx = rx->addr;
		rxb = &tp->rxbuffers[idx];
		skb = rxb->skb;
		dma_addr = rxb->dma_addr;

		/* advance, wrapping at the end of the ring */
		rxaddr += sizeof(struct rx_desc);
		rxaddr %= RX_ENTRIES * sizeof(struct rx_desc);

		/* Errored frames are not counted against the budget; the
		 * buffer just goes back on the free ring.
		 */
		if(rx->flags & TYPHOON_RX_ERROR) {
			typhoon_recycle_rx_skb(tp, idx);
			continue;
		}

		pkt_len = le16_to_cpu(rx->frameLen);

		if(pkt_len < rx_copybreak &&
		   (new_skb = dev_alloc_skb(pkt_len + 2)) != NULL) {
			/* Copybreak: copy into a right-sized skb (reserving
			 * 2 bytes so the IP header is aligned) and keep the
			 * big mapped buffer for reuse.
			 */
			new_skb->dev = tp->dev;
			skb_reserve(new_skb, 2);
			pci_dma_sync_single_for_cpu(tp->pdev, dma_addr,
						    PKT_BUF_SZ,
						    PCI_DMA_FROMDEVICE);
			eth_copy_and_sum(new_skb, skb->tail, pkt_len, 0);
			pci_dma_sync_single_for_device(tp->pdev, dma_addr,
						       PKT_BUF_SZ,
						       PCI_DMA_FROMDEVICE);
			skb_put(new_skb, pkt_len);
			typhoon_recycle_rx_skb(tp, idx);
		} else {
			/* Hand the mapped buffer itself up the stack and
			 * refill the slot with a fresh one.
			 */
			new_skb = skb;
			skb_put(new_skb, pkt_len);
			pci_unmap_single(tp->pdev, dma_addr, PKT_BUF_SZ,
				       PCI_DMA_FROMDEVICE);
			typhoon_alloc_rx_skb(tp, idx);
		}
		new_skb->protocol = eth_type_trans(new_skb, tp->dev);
		/* Only trust the hardware checksum if both the IP check and
		 * the TCP or UDP check passed.
		 */
		csum_bits = rx->rxStatus & (TYPHOON_RX_IP_CHK_GOOD |
			TYPHOON_RX_UDP_CHK_GOOD | TYPHOON_RX_TCP_CHK_GOOD);
		if(csum_bits ==
		   (TYPHOON_RX_IP_CHK_GOOD | TYPHOON_RX_TCP_CHK_GOOD)
		   || csum_bits ==
		   (TYPHOON_RX_IP_CHK_GOOD | TYPHOON_RX_UDP_CHK_GOOD)) {
			new_skb->ip_summed = CHECKSUM_UNNECESSARY;
		} else
			new_skb->ip_summed = CHECKSUM_NONE;

		/* state_lock guards tp->vlgrp (cf. typhoon_suspend()) */
		spin_lock(&tp->state_lock);
		if(tp->vlgrp != NULL && rx->rxStatus & TYPHOON_RX_VLAN)
			vlan_hwaccel_receive_skb(new_skb, tp->vlgrp,
						 ntohl(rx->vlanTag) & 0xffff);
		else
			netif_receive_skb(new_skb);
		spin_unlock(&tp->state_lock);

		tp->dev->last_rx = jiffies;
		received++;
		budget--;
	}
	/* publish how far we got for the card and later calls */
	*cleared = cpu_to_le32(rxaddr);

	return received;
}
1741
1742 static void
1743 typhoon_fill_free_ring(struct typhoon *tp)
1744 {
1745         u32 i;
1746
1747         for(i = 0; i < RXENT_ENTRIES; i++) {
1748                 struct rxbuff_ent *rxb = &tp->rxbuffers[i];
1749                 if(rxb->skb)
1750                         continue;
1751                 if(typhoon_alloc_rx_skb(tp, i) < 0)
1752                         break;
1753         }
1754 }
1755
/* NAPI poll routine.  The interrupt handler defers all work here, so
 * besides RX this also picks up command responses and Tx completions.
 *
 * Returns 0 (after re-enabling card interrupts) when all work is done,
 * or 1 to be polled again.
 */
static int
typhoon_poll(struct net_device *dev, int *total_budget)
{
	struct typhoon *tp = (struct typhoon *) dev->priv;
	struct typhoon_indexes *indexes = tp->indexes;
	int orig_budget = *total_budget;
	int budget, work_done, done;

	/* make sure we see the card's latest index updates */
	rmb();
	/* Pick up pending command responses, unless another context is
	 * synchronously waiting for one.
	 */
	if(!tp->awaiting_resp && indexes->respReady != indexes->respCleared)
			typhoon_process_response(tp, 0, NULL);

	if(le32_to_cpu(indexes->txLoCleared) != tp->txLoRing.lastRead)
		typhoon_tx_complete(tp, &tp->txLoRing, &indexes->txLoCleared);

	if(orig_budget > dev->quota)
		orig_budget = dev->quota;

	budget = orig_budget;
	work_done = 0;
	done = 1;

	/* Hi ring first, then the Lo ring with whatever budget is left */
	if(indexes->rxHiCleared != indexes->rxHiReady) {
		work_done = typhoon_rx(tp, &tp->rxHiRing, &indexes->rxHiReady,
					&indexes->rxHiCleared, budget);
		budget -= work_done;
	}

	if(indexes->rxLoCleared != indexes->rxLoReady) {
		work_done += typhoon_rx(tp, &tp->rxLoRing, &indexes->rxLoReady,
					&indexes->rxLoCleared, budget);
	}

	if(work_done) {
		*total_budget -= work_done;
		dev->quota -= work_done;

		/* exhausted our budget -- ask to be polled again */
		if(work_done >= orig_budget)
			done = 0;
	}

	if(le32_to_cpu(indexes->rxBuffCleared) == tp->rxBuffRing.lastWrite) {
		/* rxBuff ring is empty, try to fill it. */
		typhoon_fill_free_ring(tp);
	}

	if(done) {
		/* leave polling mode and unmask card interrupts */
		netif_rx_complete(dev);
		writel(TYPHOON_INTR_NONE, tp->ioaddr + TYPHOON_REG_INTR_MASK);
		typhoon_post_pci_writes(tp->ioaddr);
	}

	return (done ? 0 : 1);
}
1810
/* Interrupt handler: acknowledge the interrupt, mask further card
 * interrupts, and defer all real work to typhoon_poll() via NAPI.
 */
static irqreturn_t
typhoon_interrupt(int irq, void *dev_instance, struct pt_regs *rgs)
{
	struct net_device *dev = (struct net_device *) dev_instance;
	unsigned long ioaddr = dev->base_addr;
	u32 intr_status;

	/* shared IRQ line -- bail if this interrupt was not ours */
	intr_status = readl(ioaddr + TYPHOON_REG_INTR_STATUS);
	if(!(intr_status & TYPHOON_INTR_HOST_INT))
		return IRQ_NONE;

	/* ack by writing the status bits back */
	writel(intr_status, ioaddr + TYPHOON_REG_INTR_STATUS);

	if(netif_rx_schedule_prep(dev)) {
		/* mask everything until the poll routine unmasks it */
		writel(TYPHOON_INTR_ALL, ioaddr + TYPHOON_REG_INTR_MASK);
		typhoon_post_pci_writes(ioaddr);
		__netif_rx_schedule(dev);
	} else {
		printk(KERN_ERR "%s: Error, poll already scheduled\n",
		       dev->name);
	}
	return IRQ_HANDLED;
}
1834
1835 static void
1836 typhoon_free_rx_rings(struct typhoon *tp)
1837 {
1838         u32 i;
1839
1840         for(i = 0; i < RXENT_ENTRIES; i++) {
1841                 struct rxbuff_ent *rxb = &tp->rxbuffers[i];
1842                 if(rxb->skb) {
1843                         pci_unmap_single(tp->pdev, rxb->dma_addr, PKT_BUF_SZ,
1844                                        PCI_DMA_FROMDEVICE);
1845                         dev_kfree_skb(rxb->skb);
1846                         rxb->skb = NULL;
1847                 }
1848         }
1849 }
1850
/* Put a booted 3XP to sleep: program which @events may wake it, issue
 * the sleep command, wait for the card to report SLEEPING, then drop
 * carrier and move the PCI device into power state @state with wake
 * enabled.
 *
 * Returns 0 on success, a negative errno if a command fails, or
 * -ETIMEDOUT if the card never reports SLEEPING.
 */
static int
typhoon_sleep(struct typhoon *tp, int state, u16 events)
{
	struct pci_dev *pdev = tp->pdev;
	unsigned long ioaddr = tp->ioaddr;
	struct cmd_desc xp_cmd;
	int err;

	INIT_COMMAND_WITH_RESPONSE(&xp_cmd, TYPHOON_CMD_ENABLE_WAKE_EVENTS);
	xp_cmd.parm1 = events;
	err = typhoon_issue_command(tp, 1, &xp_cmd, 0, NULL);
	if(err < 0) {
		printk(KERN_ERR "%s: typhoon_sleep(): wake events cmd err %d\n",
				tp->name, err);
		return err;
	}

	INIT_COMMAND_NO_RESPONSE(&xp_cmd, TYPHOON_CMD_GOTO_SLEEP);
	err = typhoon_issue_command(tp, 1, &xp_cmd, 0, NULL);
	if(err < 0) {
		printk(KERN_ERR "%s: typhoon_sleep(): sleep cmd err %d\n",
				tp->name, err);
		return err;
	}

	if(typhoon_wait_status(ioaddr, TYPHOON_STATUS_SLEEPING) < 0)
		return -ETIMEDOUT;

	/* Since we cannot monitor the status of the link while sleeping,
	 * tell the world it went away.
	 */
	netif_carrier_off(tp->dev);

	pci_enable_wake(tp->pdev, state, 1);
	pci_disable_device(pdev);
	return pci_set_power_state(pdev, state);
}
1888
/* Bring the card back from sleep: restore PCI power state and config,
 * then ask the sleep image to wake up and wait for it to be ready for
 * the host.  Falls back to a full reset (using @wait_type) if the card
 * does not respond or its firmware requires a reset after wakeup.
 */
static int
typhoon_wakeup(struct typhoon *tp, int wait_type)
{
	struct pci_dev *pdev = tp->pdev;
	unsigned long ioaddr = tp->ioaddr;

	pci_set_power_state(pdev, 0);
	pci_restore_state(pdev, tp->pci_state);

	/* Post 2.x.x versions of the Sleep Image require a reset before
	 * we can download the Runtime Image. But let's not make users of
	 * the old firmware pay for the reset.
	 */
	writel(TYPHOON_BOOTCMD_WAKEUP, ioaddr + TYPHOON_REG_COMMAND);
	if(typhoon_wait_status(ioaddr, TYPHOON_STATUS_WAITING_FOR_HOST) < 0 ||
			(tp->capabilities & TYPHOON_WAKEUP_NEEDS_RESET))
		return typhoon_reset(ioaddr, wait_type);

	return 0;
}
1909
1910 static int
1911 typhoon_start_runtime(struct typhoon *tp)
1912 {
1913         struct net_device *dev = tp->dev;
1914         unsigned long ioaddr = tp->ioaddr;
1915         struct cmd_desc xp_cmd;
1916         int err;
1917
1918         typhoon_init_rings(tp);
1919         typhoon_fill_free_ring(tp);
1920
1921         err = typhoon_download_firmware(tp);
1922         if(err < 0) {
1923                 printk("%s: cannot load runtime on 3XP\n", tp->name);
1924                 goto error_out;
1925         }
1926
1927         if(typhoon_boot_3XP(tp, TYPHOON_STATUS_WAITING_FOR_BOOT) < 0) {
1928                 printk("%s: cannot boot 3XP\n", tp->name);
1929                 err = -EIO;
1930                 goto error_out;
1931         }
1932
1933         INIT_COMMAND_NO_RESPONSE(&xp_cmd, TYPHOON_CMD_SET_MAX_PKT_SIZE);
1934         xp_cmd.parm1 = cpu_to_le16(PKT_BUF_SZ);
1935         err = typhoon_issue_command(tp, 1, &xp_cmd, 0, NULL);
1936         if(err < 0)
1937                 goto error_out;
1938
1939         INIT_COMMAND_NO_RESPONSE(&xp_cmd, TYPHOON_CMD_SET_MAC_ADDRESS);
1940         xp_cmd.parm1 = cpu_to_le16(ntohs(*(u16 *)&dev->dev_addr[0]));
1941         xp_cmd.parm2 = cpu_to_le32(ntohl(*(u32 *)&dev->dev_addr[2]));
1942         err = typhoon_issue_command(tp, 1, &xp_cmd, 0, NULL);
1943         if(err < 0)
1944                 goto error_out;
1945
1946         /* Disable IRQ coalescing -- we can reenable it when 3Com gives
1947          * us some more information on how to control it.
1948          */
1949         INIT_COMMAND_WITH_RESPONSE(&xp_cmd, TYPHOON_CMD_IRQ_COALESCE_CTRL);
1950         xp_cmd.parm1 = 0;
1951         err = typhoon_issue_command(tp, 1, &xp_cmd, 0, NULL);
1952         if(err < 0)
1953                 goto error_out;
1954
1955         INIT_COMMAND_NO_RESPONSE(&xp_cmd, TYPHOON_CMD_XCVR_SELECT);
1956         xp_cmd.parm1 = tp->xcvr_select;
1957         err = typhoon_issue_command(tp, 1, &xp_cmd, 0, NULL);
1958         if(err < 0)
1959                 goto error_out;
1960
1961         INIT_COMMAND_NO_RESPONSE(&xp_cmd, TYPHOON_CMD_VLAN_TYPE_WRITE);
1962         xp_cmd.parm1 = __constant_cpu_to_le16(ETH_P_8021Q);
1963         err = typhoon_issue_command(tp, 1, &xp_cmd, 0, NULL);
1964         if(err < 0)
1965                 goto error_out;
1966
1967         INIT_COMMAND_NO_RESPONSE(&xp_cmd, TYPHOON_CMD_SET_OFFLOAD_TASKS);
1968         spin_lock_bh(&tp->state_lock);
1969         xp_cmd.parm2 = tp->offload;
1970         xp_cmd.parm3 = tp->offload;
1971         err = typhoon_issue_command(tp, 1, &xp_cmd, 0, NULL);
1972         spin_unlock_bh(&tp->state_lock);
1973         if(err < 0)
1974                 goto error_out;
1975
1976         typhoon_set_rx_mode(dev);
1977
1978         INIT_COMMAND_NO_RESPONSE(&xp_cmd, TYPHOON_CMD_TX_ENABLE);
1979         err = typhoon_issue_command(tp, 1, &xp_cmd, 0, NULL);
1980         if(err < 0)
1981                 goto error_out;
1982
1983         INIT_COMMAND_WITH_RESPONSE(&xp_cmd, TYPHOON_CMD_RX_ENABLE);
1984         err = typhoon_issue_command(tp, 1, &xp_cmd, 0, NULL);
1985         if(err < 0)
1986                 goto error_out;
1987
1988         tp->card_state = Running;
1989         smp_wmb();
1990
1991         writel(TYPHOON_INTR_ENABLE_ALL, ioaddr + TYPHOON_REG_INTR_ENABLE);
1992         writel(TYPHOON_INTR_NONE, ioaddr + TYPHOON_REG_INTR_MASK);
1993         typhoon_post_pci_writes(ioaddr);
1994
1995         return 0;
1996
1997 error_out:
1998         typhoon_reset(ioaddr, WaitNoSleep);
1999         typhoon_free_rx_rings(tp);
2000         typhoon_init_rings(tp);
2001         return err;
2002 }
2003
/* Take a running 3XP down to a reset, quiescent state: disable Rx, give
 * outstanding transmits up to half a second to drain, disable Tx, save
 * the statistics (so they survive the next start), halt the firmware,
 * and reset the card.  Any Tx descriptors still outstanding after the
 * reset are cleaned up by hand.
 *
 * Returns 0 on success, -ETIMEDOUT if the reset itself fails.
 */
static int
typhoon_stop_runtime(struct typhoon *tp, int wait_type)
{
	struct typhoon_indexes *indexes = tp->indexes;
	struct transmit_ring *txLo = &tp->txLoRing;
	unsigned long ioaddr = tp->ioaddr;
	struct cmd_desc xp_cmd;
	int i;

	/* Disable interrupts early, since we can't schedule a poll
	 * when called with !netif_running(). This will be posted
	 * when we force the posting of the command.
	 */
	writel(TYPHOON_INTR_NONE, ioaddr + TYPHOON_REG_INTR_ENABLE);

	INIT_COMMAND_NO_RESPONSE(&xp_cmd, TYPHOON_CMD_RX_DISABLE);
	typhoon_issue_command(tp, 1, &xp_cmd, 0, NULL);

	/* Wait 1/2 sec for any outstanding transmits to occur
	 * We'll cleanup after the reset if this times out.
	 */
	for(i = 0; i < TYPHOON_WAIT_TIMEOUT; i++) {
		if(indexes->txLoCleared == cpu_to_le32(txLo->lastWrite))
			break;
		udelay(TYPHOON_UDELAY);
	}

	if(i == TYPHOON_WAIT_TIMEOUT)
		printk(KERN_ERR
		       "%s: halt timed out waiting for Tx to complete\n",
		       tp->name);

	INIT_COMMAND_NO_RESPONSE(&xp_cmd, TYPHOON_CMD_TX_DISABLE);
	typhoon_issue_command(tp, 1, &xp_cmd, 0, NULL);

	/* save the statistics so when we bring the interface up again,
	 * the values reported to userspace are correct.
	 */
	tp->card_state = Sleeping;
	smp_wmb();
	typhoon_do_get_stats(tp);
	memcpy(&tp->stats_saved, &tp->stats, sizeof(struct net_device_stats));

	INIT_COMMAND_NO_RESPONSE(&xp_cmd, TYPHOON_CMD_HALT);
	typhoon_issue_command(tp, 1, &xp_cmd, 0, NULL);

	if(typhoon_wait_status(ioaddr, TYPHOON_STATUS_HALTED) < 0)
		printk(KERN_ERR "%s: timed out waiting for 3XP to halt\n",
		       tp->name);

	if(typhoon_reset(ioaddr, wait_type) < 0) {
		printk(KERN_ERR "%s: unable to reset 3XP\n", tp->name);
		return -ETIMEDOUT;
	}

	/* cleanup any outstanding Tx packets */
	if(indexes->txLoCleared != cpu_to_le32(txLo->lastWrite)) {
		indexes->txLoCleared = cpu_to_le32(txLo->lastWrite);
		typhoon_clean_tx(tp, &tp->txLoRing, &indexes->txLoCleared);
	}

	return 0;
}
2067
2068 static void
2069 typhoon_tx_timeout(struct net_device *dev)
2070 {
2071         struct typhoon *tp = (struct typhoon *) dev->priv;
2072
2073         if(typhoon_reset(dev->base_addr, WaitNoSleep) < 0) {
2074                 printk(KERN_WARNING "%s: could not reset in tx timeout\n",
2075                                         dev->name);
2076                 goto truely_dead;
2077         }
2078
2079         /* If we ever start using the Hi ring, it will need cleaning too */
2080         typhoon_clean_tx(tp, &tp->txLoRing, &tp->indexes->txLoCleared);
2081         typhoon_free_rx_rings(tp);
2082
2083         if(typhoon_start_runtime(tp) < 0) {
2084                 printk(KERN_ERR "%s: could not start runtime in tx timeout\n",
2085                                         dev->name);
2086                 goto truely_dead;
2087         }
2088
2089         netif_wake_queue(dev);
2090         return;
2091
2092 truely_dead:
2093         /* Reset the hardware, and turn off carrier to avoid more timeouts */
2094         typhoon_reset(dev->base_addr, NoWait);
2095         netif_carrier_off(dev);
2096 }
2097
/* net_device open: wake the card, grab the (shared) IRQ, and start the
 * runtime image.  On failure, try to unwind all the way back to the
 * sleep image and power state 3 so the card is left as we found it.
 */
static int
typhoon_open(struct net_device *dev)
{
	struct typhoon *tp = (struct typhoon *) dev->priv;
	int err;

	err = typhoon_wakeup(tp, WaitSleep);
	if(err < 0) {
		printk(KERN_ERR "%s: unable to wakeup device\n", dev->name);
		goto out_sleep;
	}

	err = request_irq(dev->irq, &typhoon_interrupt, SA_SHIRQ,
				dev->name, dev);
	if(err < 0)
		goto out_sleep;

	err = typhoon_start_runtime(tp);
	if(err < 0)
		goto out_irq;

	netif_start_queue(dev);
	return 0;

out_irq:
	free_irq(dev->irq, dev);

out_sleep:
	/* reboot into the sleep image before going back to sleep */
	if(typhoon_boot_3XP(tp, TYPHOON_STATUS_WAITING_FOR_HOST) < 0) {
		printk(KERN_ERR "%s: unable to reboot into sleep img\n",
				dev->name);
		typhoon_reset(dev->base_addr, NoWait);
		goto out;
	}

	/* power state 3, no wake events */
	if(typhoon_sleep(tp, 3, 0) < 0)
		printk(KERN_ERR "%s: unable to go back to sleep\n", dev->name);

out:
	return err;
}
2139
/* net_device stop: shut down the runtime, release the IRQ and receive
 * buffers, then reboot the sleep image and put the card back to sleep.
 * Failures along the way are logged but do not abort the teardown.
 */
static int
typhoon_close(struct net_device *dev)
{
	struct typhoon *tp = (struct typhoon *) dev->priv;

	netif_stop_queue(dev);

	if(typhoon_stop_runtime(tp, WaitSleep) < 0)
		printk(KERN_ERR "%s: unable to stop runtime\n", dev->name);

	/* Make sure there is no irq handler running on a different CPU. */
	typhoon_synchronize_irq(dev->irq);
	free_irq(dev->irq, dev);

	typhoon_free_rx_rings(tp);
	typhoon_init_rings(tp);

	if(typhoon_boot_3XP(tp, TYPHOON_STATUS_WAITING_FOR_HOST) < 0)
		printk(KERN_ERR "%s: unable to boot sleep image\n", dev->name);

	/* power state 3, no wake events */
	if(typhoon_sleep(tp, 3, 0) < 0)
		printk(KERN_ERR "%s: unable to put card to sleep\n", dev->name);

	return 0;
}
2165
2166 #ifdef CONFIG_PM
/* PM resume hook: wake the card and restart the runtime image.  If the
 * interface was down at suspend time there is nothing to do -- it will
 * come up normally via typhoon_open().  On failure the card is reset
 * and -EBUSY returned.
 */
static int
typhoon_resume(struct pci_dev *pdev)
{
	struct net_device *dev = pci_get_drvdata(pdev);
	struct typhoon *tp = (struct typhoon *) dev->priv;

	/* If we're down, resume when we are upped.
	 */
	if(!netif_running(dev))
		return 0;

	if(typhoon_wakeup(tp, WaitNoSleep) < 0) {
		printk(KERN_ERR "%s: critical: could not wake up in resume\n",
				dev->name);
		goto reset;
	}

	if(typhoon_start_runtime(tp) < 0) {
		printk(KERN_ERR "%s: critical: could not start runtime in "
				"resume\n", dev->name);
		goto reset;
	}

	netif_device_attach(dev);
	netif_start_queue(dev);
	return 0;

reset:
	typhoon_reset(dev->base_addr, NoWait);
	return -EBUSY;
}
2198
/* PM suspend hook: stop the runtime, reboot the sleep image, program it
 * with the MAC address and a minimal rx filter (directed + broadcast)
 * so wake-on-LAN can match packets, and put the card to sleep with the
 * configured wake events.  Any failure resumes the device and returns
 * -EBUSY.
 */
static int
typhoon_suspend(struct pci_dev *pdev, u32 state)
{
	struct net_device *dev = pci_get_drvdata(pdev);
	struct typhoon *tp = (struct typhoon *) dev->priv;
	struct cmd_desc xp_cmd;

	/* If we're down, we're already suspended.
	 */
	if(!netif_running(dev))
		return 0;

	/* Magic-packet wake cannot be used while VLANs are registered --
	 * refuse to suspend in that combination.  state_lock guards vlgrp.
	 */
	spin_lock_bh(&tp->state_lock);
	if(tp->vlgrp && tp->wol_events & TYPHOON_WAKE_MAGIC_PKT) {
		spin_unlock_bh(&tp->state_lock);
		printk(KERN_ERR "%s: cannot do WAKE_MAGIC with VLANS\n",
				dev->name);
		return -EBUSY;
	}
	spin_unlock_bh(&tp->state_lock);

	netif_device_detach(dev);

	if(typhoon_stop_runtime(tp, WaitNoSleep) < 0) {
		printk(KERN_ERR "%s: unable to stop runtime\n", dev->name);
		goto need_resume;
	}

	typhoon_free_rx_rings(tp);
	typhoon_init_rings(tp);

	if(typhoon_boot_3XP(tp, TYPHOON_STATUS_WAITING_FOR_HOST) < 0) {
		printk(KERN_ERR "%s: unable to boot sleep image\n", dev->name);
		goto need_resume;
	}

	INIT_COMMAND_NO_RESPONSE(&xp_cmd, TYPHOON_CMD_SET_MAC_ADDRESS);
	xp_cmd.parm1 = cpu_to_le16(ntohs(*(u16 *)&dev->dev_addr[0]));
	xp_cmd.parm2 = cpu_to_le32(ntohl(*(u32 *)&dev->dev_addr[2]));
	if(typhoon_issue_command(tp, 1, &xp_cmd, 0, NULL) < 0) {
		printk(KERN_ERR "%s: unable to set mac address in suspend\n",
				dev->name);
		goto need_resume;
	}

	INIT_COMMAND_NO_RESPONSE(&xp_cmd, TYPHOON_CMD_SET_RX_FILTER);
	xp_cmd.parm1 = TYPHOON_RX_FILTER_DIRECTED | TYPHOON_RX_FILTER_BROADCAST;
	if(typhoon_issue_command(tp, 1, &xp_cmd, 0, NULL) < 0) {
		printk(KERN_ERR "%s: unable to set rx filter in suspend\n",
				dev->name);
		goto need_resume;
	}

	if(typhoon_sleep(tp, state, tp->wol_events) < 0) {
		printk(KERN_ERR "%s: unable to put card to sleep\n", dev->name);
		goto need_resume;
	}

	return 0;

need_resume:
	typhoon_resume(pdev);
	return -EBUSY;
}
2263
2264 static int
2265 typhoon_enable_wake(struct pci_dev *pdev, u32 state, int enable)
2266 {
2267         return pci_enable_wake(pdev, state, enable);
2268 }
2269 #endif
2270
2271 static int __devinit
2272 typhoon_init_one(struct pci_dev *pdev, const struct pci_device_id *ent)
2273 {
2274         static int did_version = 0;
2275         struct net_device *dev;
2276         struct typhoon *tp;
2277         int card_id = (int) ent->driver_data;
2278         unsigned long ioaddr;
2279         void *shared;
2280         dma_addr_t shared_dma;
2281         struct cmd_desc xp_cmd;
2282         struct resp_desc xp_resp[3];
2283         int i;
2284         int err = 0;
2285
2286         if(!did_version++)
2287                 printk(KERN_INFO "%s", version);
2288
2289         dev = alloc_etherdev(sizeof(*tp));
2290         if(dev == NULL) {
2291                 printk(ERR_PFX "%s: unable to alloc new net device\n",
2292                        pci_name(pdev));
2293                 err = -ENOMEM;
2294                 goto error_out;
2295         }
2296         SET_MODULE_OWNER(dev);
2297         SET_NETDEV_DEV(dev, &pdev->dev);
2298
2299         err = pci_enable_device(pdev);
2300         if(err < 0) {
2301                 printk(ERR_PFX "%s: unable to enable device\n",
2302                        pci_name(pdev));
2303                 goto error_out_dev;
2304         }
2305
2306         /* If we transitioned from D3->D0 in pci_enable_device(),
2307          * we lost our configuration and need to restore it to the
2308          * conditions at boot.
2309          */
2310         pci_restore_state(pdev, NULL);
2311
2312         err = pci_set_dma_mask(pdev, 0xffffffffULL);
2313         if(err < 0) {
2314                 printk(ERR_PFX "%s: No usable DMA configuration\n",
2315                        pci_name(pdev));
2316                 goto error_out_dev;
2317         }
2318
2319         /* sanity checks, resource #1 is our mmio area
2320          */
2321         if(!(pci_resource_flags(pdev, 1) & IORESOURCE_MEM)) {
2322                 printk(ERR_PFX
2323                        "%s: region #1 not a PCI MMIO resource, aborting\n",
2324                        pci_name(pdev));
2325                 err = -ENODEV;
2326                 goto error_out_dev;
2327         }
2328         if(pci_resource_len(pdev, 1) < 128) {
2329                 printk(ERR_PFX "%s: Invalid PCI MMIO region size, aborting\n",
2330                        pci_name(pdev));
2331                 err = -ENODEV;
2332                 goto error_out_dev;
2333         }
2334
2335         err = pci_request_regions(pdev, "typhoon");
2336         if(err < 0) {
2337                 printk(ERR_PFX "%s: could not request regions\n",
2338                        pci_name(pdev));
2339                 goto error_out_dev;
2340         }
2341
2342         pci_set_master(pdev);
2343         pci_set_mwi(pdev);
2344
2345         /* map our MMIO region
2346          */
2347         ioaddr = pci_resource_start(pdev, 1);
2348         ioaddr = (unsigned long) ioremap(ioaddr, 128);
2349         if(!ioaddr) {
2350                 printk(ERR_PFX "%s: cannot remap MMIO, aborting\n",
2351                        pci_name(pdev));
2352                 err = -EIO;
2353                 goto error_out_regions;
2354         }
2355         dev->base_addr = ioaddr;
2356
2357         /* allocate pci dma space for rx and tx descriptor rings
2358          */
2359         shared = pci_alloc_consistent(pdev, sizeof(struct typhoon_shared),
2360                                       &shared_dma);
2361         if(!shared) {
2362                 printk(ERR_PFX "%s: could not allocate DMA memory\n",
2363                        pci_name(pdev));
2364                 err = -ENOMEM;
2365                 goto error_out_remap;
2366         }
2367
2368         dev->irq = pdev->irq;
2369         tp = dev->priv;
2370         tp->shared = (struct typhoon_shared *) shared;
2371         tp->shared_dma = shared_dma;
2372         tp->pdev = pdev;
2373         tp->tx_pdev = pdev;
2374         tp->ioaddr = dev->base_addr;
2375         tp->tx_ioaddr = dev->base_addr;
2376         tp->dev = dev;
2377
2378         /* need to be able to restore PCI state after a suspend */
2379         pci_save_state(pdev, tp->pci_state);
2380
2381         /* Init sequence:
2382          * 1) Reset the adapter to clear any bad juju
2383          * 2) Reload the sleep image
2384          * 3) Boot the sleep image
2385          * 4) Get the hardware address.
2386          * 5) Put the card to sleep.
2387          */
2388         if(typhoon_reset(ioaddr, WaitSleep) < 0) {
2389                 printk(ERR_PFX "%s: could not reset 3XP\n", pci_name(pdev));
2390                 err = -EIO;
2391                 goto error_out_dma;
2392         }
2393
2394         /* dev->name is not valid until we register, but we need to
2395          * use some common routines to initialize the card. So that those
2396          * routines print the right name, we keep our oun pointer to the name
2397          */
2398         tp->name = pci_name(pdev);
2399
2400         typhoon_init_interface(tp);
2401         typhoon_init_rings(tp);
2402
2403         if(typhoon_boot_3XP(tp, TYPHOON_STATUS_WAITING_FOR_HOST) < 0) {
2404                 printk(ERR_PFX "%s: cannot boot 3XP sleep image\n",
2405                        pci_name(pdev));
2406                 err = -EIO;
2407                 goto error_out_reset;
2408         }
2409
2410         INIT_COMMAND_WITH_RESPONSE(&xp_cmd, TYPHOON_CMD_READ_MAC_ADDRESS);
2411         if(typhoon_issue_command(tp, 1, &xp_cmd, 1, xp_resp) < 0) {
2412                 printk(ERR_PFX "%s: cannot read MAC address\n",
2413                        pci_name(pdev));
2414                 err = -EIO;
2415                 goto error_out_reset;
2416         }
2417
2418         *(u16 *)&dev->dev_addr[0] = htons(le16_to_cpu(xp_resp[0].parm1));
2419         *(u32 *)&dev->dev_addr[2] = htonl(le32_to_cpu(xp_resp[0].parm2));
2420
2421         if(!is_valid_ether_addr(dev->dev_addr)) {
2422                 printk(ERR_PFX "%s: Could not obtain valid ethernet address, "
2423                        "aborting\n", pci_name(pdev));
2424                 goto error_out_reset;
2425         }
2426
2427         /* Read the Sleep Image version last, so the response is valid
2428          * later when we print out the version reported.
2429          */
2430         INIT_COMMAND_WITH_RESPONSE(&xp_cmd, TYPHOON_CMD_READ_VERSIONS);
2431         if(typhoon_issue_command(tp, 1, &xp_cmd, 3, xp_resp) < 0) {
2432                 printk(ERR_PFX "%s: Could not get Sleep Image version\n",
2433                         pdev->slot_name);
2434                 goto error_out_reset;
2435         }
2436
2437         tp->capabilities = typhoon_card_info[card_id].capabilities;
2438         tp->xcvr_select = TYPHOON_XCVR_AUTONEG;
2439
2440         /* Typhoon 1.0 Sleep Images return one response descriptor to the
2441          * READ_VERSIONS command. Those versions are OK after waking up
2442          * from sleep without needing a reset. Typhoon 1.1+ Sleep Images
2443          * seem to need a little extra help to get started. Since we don't
2444          * know how to nudge it along, just kick it.
2445          */
2446         if(xp_resp[0].numDesc != 0)
2447                 tp->capabilities |= TYPHOON_WAKEUP_NEEDS_RESET;
2448
2449         if(typhoon_sleep(tp, 3, 0) < 0) {
2450                 printk(ERR_PFX "%s: cannot put adapter to sleep\n",
2451                        pci_name(pdev));
2452                 err = -EIO;
2453                 goto error_out_reset;
2454         }
2455
2456         /* The chip-specific entries in the device structure. */
2457         dev->open               = typhoon_open;
2458         dev->hard_start_xmit    = typhoon_start_tx;
2459         dev->stop               = typhoon_close;
2460         dev->set_multicast_list = typhoon_set_rx_mode;
2461         dev->tx_timeout         = typhoon_tx_timeout;
2462         dev->poll               = typhoon_poll;
2463         dev->weight             = 16;
2464         dev->watchdog_timeo     = TX_TIMEOUT;
2465         dev->get_stats          = typhoon_get_stats;
2466         dev->set_mac_address    = typhoon_set_mac_address;
2467         dev->do_ioctl           = typhoon_ioctl;
2468         dev->vlan_rx_register   = typhoon_vlan_rx_register;
2469         dev->vlan_rx_kill_vid   = typhoon_vlan_rx_kill_vid;
2470
2471         /* We can handle scatter gather, up to 16 entries, and
2472          * we can do IP checksumming (only version 4, doh...)
2473          */
2474         dev->features |= NETIF_F_SG | NETIF_F_IP_CSUM;
2475         dev->features |= NETIF_F_HW_VLAN_TX | NETIF_F_HW_VLAN_RX;
2476         dev->features |= NETIF_F_TSO;
2477
2478         if(register_netdev(dev) < 0)
2479                 goto error_out_reset;
2480
2481         /* fixup our local name */
2482         tp->name = dev->name;
2483
2484         pci_set_drvdata(pdev, dev);
2485
2486         printk(KERN_INFO "%s: %s at 0x%lx, ",
2487                dev->name, typhoon_card_info[card_id].name, ioaddr);
2488         for(i = 0; i < 5; i++)
2489                 printk("%2.2x:", dev->dev_addr[i]);
2490         printk("%2.2x\n", dev->dev_addr[i]);
2491
2492         /* xp_resp still contains the response to the READ_VERSIONS command.
2493          * For debugging, let the user know what version he has.
2494          */
2495         if(xp_resp[0].numDesc == 0) {
2496                 /* This is the Typhoon 1.0 type Sleep Image, last 16 bits
2497                  * of version is Month/Day of build.
2498                  */
2499                 u16 monthday = le32_to_cpu(xp_resp[0].parm2) & 0xffff;
2500                 printk(KERN_INFO "%s: Typhoon 1.0 Sleep Image built "
2501                         "%02u/%02u/2000\n", dev->name, monthday >> 8,
2502                         monthday & 0xff);
2503         } else if(xp_resp[0].numDesc == 2) {
2504                 /* This is the Typhoon 1.1+ type Sleep Image
2505                  */
2506                 u32 sleep_ver = le32_to_cpu(xp_resp[0].parm2);
2507                 u8 *ver_string = (u8 *) &xp_resp[1];
2508                 ver_string[25] = 0;
2509                 printk(KERN_INFO "%s: Typhoon 1.1+ Sleep Image version "
2510                         "%u.%u.%u.%u %s\n", dev->name, HIPQUAD(sleep_ver),
2511                         ver_string);
2512         } else {
2513                 printk(KERN_WARNING "%s: Unknown Sleep Image version "
2514                         "(%u:%04x)\n", dev->name, xp_resp[0].numDesc,
2515                         le32_to_cpu(xp_resp[0].parm2));
2516         }
2517                 
2518         return 0;
2519
2520 error_out_reset:
2521         typhoon_reset(ioaddr, NoWait);
2522
2523 error_out_dma:
2524         pci_free_consistent(pdev, sizeof(struct typhoon_shared),
2525                             shared, shared_dma);
2526 error_out_remap:
2527         iounmap((void *) ioaddr);
2528 error_out_regions:
2529         pci_release_regions(pdev);
2530 error_out_dev:
2531         free_netdev(dev);
2532 error_out:
2533         return err;
2534 }
2535
2536 static void __devexit
2537 typhoon_remove_one(struct pci_dev *pdev)
2538 {
2539         struct net_device *dev = pci_get_drvdata(pdev);
2540         struct typhoon *tp = (struct typhoon *) (dev->priv);
2541
2542         unregister_netdev(dev);
2543         pci_set_power_state(pdev, 0);
2544         pci_restore_state(pdev, tp->pci_state);
2545         typhoon_reset(dev->base_addr, NoWait);
2546         iounmap((char *) (dev->base_addr));
2547         pci_free_consistent(pdev, sizeof(struct typhoon_shared),
2548                             tp->shared, tp->shared_dma);
2549         pci_release_regions(pdev);
2550         pci_disable_device(pdev);
2551         pci_set_drvdata(pdev, NULL);
2552         free_netdev(dev);
2553 }
2554
2555 static struct pci_driver typhoon_driver = {
2556         .name           = DRV_MODULE_NAME,
2557         .id_table       = typhoon_pci_tbl,
2558         .probe          = typhoon_init_one,
2559         .remove         = __devexit_p(typhoon_remove_one),
2560 #ifdef CONFIG_PM
2561         .suspend        = typhoon_suspend,
2562         .resume         = typhoon_resume,
2563         .enable_wake    = typhoon_enable_wake,
2564 #endif
2565 };
2566
2567 static int __init
2568 typhoon_init(void)
2569 {
2570         return pci_module_init(&typhoon_driver);
2571 }
2572
2573 static void __exit
2574 typhoon_cleanup(void)
2575 {
2576         pci_unregister_driver(&typhoon_driver);
2577 }
2578
/* Module entry/exit: register and unregister the PCI driver on load/unload. */
module_init(typhoon_init);
module_exit(typhoon_cleanup);