patch-2_6_7-vs1_9_1_12
[linux-2.6.git] / drivers / net / typhoon.c
1 /* typhoon.c: A Linux Ethernet device driver for 3Com 3CR990 family of NICs */
2 /*
3         Written 2002-2003 by David Dillow <dave@thedillows.org>
4         Based on code written 1998-2000 by Donald Becker <becker@scyld.com> and
5         Linux 2.2.x driver by David P. McLean <davidpmclean@yahoo.com>.
6
7         This software may be used and distributed according to the terms of
8         the GNU General Public License (GPL), incorporated herein by reference.
9         Drivers based on or derived from this code fall under the GPL and must
10         retain the authorship, copyright and license notice.  This file is not
11         a complete program and may only be used when the entire operating
12         system is licensed under the GPL.
13
14         This software is available on a public web site. It may enable
15         cryptographic capabilities of the 3Com hardware, and may be
16         exported from the United States under License Exception "TSU"
17         pursuant to 15 C.F.R. Section 740.13(e).
18
19         This work was funded by the National Library of Medicine under
20         the Department of Energy project number 0274DD06D1 and NLM project
21         number Y1-LM-2015-01.
22
23         This driver is designed for the 3Com 3CR990 Family of cards with the
24         3XP Processor. It has been tested on x86 and sparc64.
25
26         KNOWN ISSUES:
27         *) The current firmware always strips the VLAN tag off, even if
28                 we tell it not to. You should filter VLANs at the switch
29                 as a workaround (good practice in any event) until we can
30                 get this fixed.
31         *) Cannot DMA Rx packets to a 2 byte aligned address. Also firmware
32                 issue. Hopefully 3Com will fix it.
33         *) Waiting for a command response takes 8ms due to non-preemptable
34                 polling. Only significant for getting stats and creating
35                 SAs, but an ugly wart nevertheless.
36         *) I've not tested multicast. I think it works, but reports welcome.
37         *) Doesn't do IPSEC offloading. Yet. Keep yer pants on, it's coming.
38 */
39
40 /* Set the copy breakpoint for the copy-only-tiny-frames scheme.
41  * Setting to > 1518 effectively disables this feature.
42  */
43 static int rx_copybreak = 200; /* runtime-settable via MODULE_PARM below */
44
45 /* end user-configurable values */
46
47 /* Maximum number of multicast addresses to filter (vs. rx-all-multicast).
48  */
49 static const int multicast_filter_limit = 32;
50
51 /* Operational parameters that are set at compile time. */
52
53 /* Keep the ring sizes a power of two for compile efficiency.
54  * The compiler will convert <unsigned>'%'<2^N> into a bit mask.
55  * Making the Tx ring too large decreases the effectiveness of channel
56  * bonding and packet priority.
57  * There are no ill effects from too-large receive rings.
58  *
59  * We don't currently use the Hi Tx ring, so don't make it very big.
60  *
61  * Beware that if we start using the Hi Tx ring, we will need to change
62  * typhoon_num_free_tx() and typhoon_tx_complete() to account for that.
63  */
64 #define TXHI_ENTRIES            2
65 #define TXLO_ENTRIES            128
66 #define RX_ENTRIES              32
67 #define COMMAND_ENTRIES         16
68 #define RESPONSE_ENTRIES        32
69
70 #define COMMAND_RING_SIZE       (COMMAND_ENTRIES * sizeof(struct cmd_desc))
71 #define RESPONSE_RING_SIZE      (RESPONSE_ENTRIES * sizeof(struct resp_desc))
72
73 /* The 3XP will preload and remove 64 entries from the free buffer
74  * list, and we need one entry to keep the ring from wrapping, so 
75  * to keep this a power of two, we use 128 entries.
76  */
77 #define RXFREE_ENTRIES          128
78 #define RXENT_ENTRIES           (RXFREE_ENTRIES - 1)
79
80 /* Operational parameters that usually are not changed. */
81
82 /* Time in jiffies before concluding the transmitter is hung. */
83 #define TX_TIMEOUT  (2*HZ)
84
85 #define PKT_BUF_SZ              1536
86
87 #define DRV_MODULE_NAME         "typhoon"
88 #define DRV_MODULE_VERSION      "1.5.3"
89 #define DRV_MODULE_RELDATE      "03/12/15"
90 #define PFX                     DRV_MODULE_NAME ": "
91 #define ERR_PFX                 KERN_ERR PFX
92
93 #if !defined(__OPTIMIZE__)  ||  !defined(__KERNEL__)
94 #warning  You must compile this file with the correct options!
95 #warning  See the last lines of the source file.
96 #error  You must compile this driver with "-O".
97 #endif
98
99 #include <linux/module.h>
100 #include <linux/kernel.h>
101 #include <linux/string.h>
102 #include <linux/timer.h>
103 #include <linux/errno.h>
104 #include <linux/ioport.h>
105 #include <linux/slab.h>
106 #include <linux/interrupt.h>
107 #include <linux/pci.h>
108 #include <linux/netdevice.h>
109 #include <linux/etherdevice.h>
110 #include <linux/skbuff.h>
111 #include <linux/init.h>
112 #include <linux/delay.h>
113 #include <linux/ethtool.h>
114 #include <linux/if_vlan.h>
115 #include <linux/crc32.h>
116 #include <asm/processor.h>
117 #include <asm/bitops.h>
118 #include <asm/io.h>
119 #include <asm/uaccess.h>
120 #include <linux/in6.h>
121 #include <asm/checksum.h>
122 #include <linux/version.h>
123
124 #include "typhoon.h"
125 #include "typhoon-firmware.h"
126
127 static char version[] __devinitdata =
128     "typhoon.c: version " DRV_MODULE_VERSION " (" DRV_MODULE_RELDATE ")\n";
129
130 MODULE_AUTHOR("David Dillow <dave@thedillows.org>");
131 MODULE_LICENSE("GPL");
132 MODULE_DESCRIPTION("3Com Typhoon Family (3C990, 3CR990, and variants)");
133 MODULE_PARM(rx_copybreak, "i");
134
135 #if defined(NETIF_F_TSO) && MAX_SKB_FRAGS > 32
136 #warning Typhoon only supports 32 entries in its SG list for TSO, disabling TSO
137 #undef NETIF_F_TSO
138 #endif
139
140 #if TXLO_ENTRIES <= (2 * MAX_SKB_FRAGS)
141 #error TX ring too small!
142 #endif
143
/* Board identification: printable model name plus a capability bitmask
 * built from the TYPHOON_* flags defined below.
 */
144 struct typhoon_card_info {
145         char *name;             /* printable model name */
146         int capabilities;       /* OR of the TYPHOON_* capability flags */
147 };
148
149 #define TYPHOON_CRYPTO_NONE             0x00
150 #define TYPHOON_CRYPTO_DES              0x01
151 #define TYPHOON_CRYPTO_3DES             0x02
152 #define TYPHOON_CRYPTO_VARIABLE         0x04
153 #define TYPHOON_FIBER                   0x08
154 #define TYPHOON_WAKEUP_NEEDS_RESET      0x10
155
/* Indexes into typhoon_card_info[] below; also stored as driver_data
 * in each typhoon_pci_tbl entry.
 */
156 enum typhoon_cards {
157         TYPHOON_TX = 0, TYPHOON_TX95, TYPHOON_TX97, TYPHOON_SVR,
158         TYPHOON_SVR95, TYPHOON_SVR97, TYPHOON_TXM, TYPHOON_BSVR,
159         TYPHOON_FX95, TYPHOON_FX97, TYPHOON_FX95SVR, TYPHOON_FX97SVR,
160         TYPHOON_FXM,
161 };
162
163 /* directly indexed by enum typhoon_cards, above */
/* Entry order must match enum typhoon_cards exactly. */
164 static struct typhoon_card_info typhoon_card_info[] __devinitdata = {
165         { "3Com Typhoon (3C990-TX)",
166                 TYPHOON_CRYPTO_NONE},
167         { "3Com Typhoon (3CR990-TX-95)",
168                 TYPHOON_CRYPTO_DES},
169         { "3Com Typhoon (3CR990-TX-97)",
170                 TYPHOON_CRYPTO_DES | TYPHOON_CRYPTO_3DES},
171         { "3Com Typhoon (3C990SVR)",
172                 TYPHOON_CRYPTO_NONE},
173         { "3Com Typhoon (3CR990SVR95)",
174                 TYPHOON_CRYPTO_DES},
175         { "3Com Typhoon (3CR990SVR97)",
176                 TYPHOON_CRYPTO_DES | TYPHOON_CRYPTO_3DES},
177         { "3Com Typhoon2 (3C990B-TX-M)",
178                 TYPHOON_CRYPTO_VARIABLE},
179         { "3Com Typhoon2 (3C990BSVR)",
180                 TYPHOON_CRYPTO_VARIABLE},
181         { "3Com Typhoon (3CR990-FX-95)",
182                 TYPHOON_CRYPTO_DES | TYPHOON_FIBER},
183         { "3Com Typhoon (3CR990-FX-97)",
184                 TYPHOON_CRYPTO_DES | TYPHOON_CRYPTO_3DES | TYPHOON_FIBER},
185         { "3Com Typhoon (3CR990-FX-95 Server)",
186                 TYPHOON_CRYPTO_DES | TYPHOON_FIBER},
187         { "3Com Typhoon (3CR990-FX-97 Server)",
188                 TYPHOON_CRYPTO_DES | TYPHOON_CRYPTO_3DES | TYPHOON_FIBER},
189         { "3Com Typhoon2 (3C990B-FX-97)",
190                 TYPHOON_CRYPTO_VARIABLE | TYPHOON_FIBER},
191 };
192
193 /* Notes on the new subsystem numbering scheme:
194  * bits 0-1 indicate crypto capabilities: (0) variable, (1) DES, or (2) 3DES
195  * bit 4 indicates if this card has secured firmware (we don't support it)
196  * bit 8 indicates if this is a (0) copper or (1) fiber card
197  * bits 12-16 indicate card type: (0) client and (1) server
198  */
/* PCI IDs this driver claims. The final (driver_data) field of each
 * entry is an enum typhoon_cards value selecting the matching
 * typhoon_card_info[] row.
 */
199 static struct pci_device_id typhoon_pci_tbl[] = {
200         { PCI_VENDOR_ID_3COM, PCI_DEVICE_ID_3COM_3CR990,
201           PCI_ANY_ID, PCI_ANY_ID, 0, 0,TYPHOON_TX },
202         { PCI_VENDOR_ID_3COM, PCI_DEVICE_ID_3COM_3CR990_TX_95,
203           PCI_ANY_ID, PCI_ANY_ID, 0, 0, TYPHOON_TX95 },
204         { PCI_VENDOR_ID_3COM, PCI_DEVICE_ID_3COM_3CR990_TX_97,
205           PCI_ANY_ID, PCI_ANY_ID, 0, 0, TYPHOON_TX97 },
206         { PCI_VENDOR_ID_3COM, PCI_DEVICE_ID_3COM_3CR990B,
207           PCI_ANY_ID, 0x1000, 0, 0, TYPHOON_TXM },
208         { PCI_VENDOR_ID_3COM, PCI_DEVICE_ID_3COM_3CR990B,
209           PCI_ANY_ID, 0x1102, 0, 0, TYPHOON_FXM },
210         { PCI_VENDOR_ID_3COM, PCI_DEVICE_ID_3COM_3CR990B,
211           PCI_ANY_ID, 0x2000, 0, 0, TYPHOON_BSVR },
212         { PCI_VENDOR_ID_3COM, PCI_DEVICE_ID_3COM_3CR990_FX,
213           PCI_ANY_ID, 0x1101, 0, 0, TYPHOON_FX95 },
214         { PCI_VENDOR_ID_3COM, PCI_DEVICE_ID_3COM_3CR990_FX,
215           PCI_ANY_ID, 0x1102, 0, 0, TYPHOON_FX97 },
216         { PCI_VENDOR_ID_3COM, PCI_DEVICE_ID_3COM_3CR990_FX,
217           PCI_ANY_ID, 0x2101, 0, 0, TYPHOON_FX95SVR },
218         { PCI_VENDOR_ID_3COM, PCI_DEVICE_ID_3COM_3CR990_FX,
219           PCI_ANY_ID, 0x2102, 0, 0, TYPHOON_FX97SVR },
220         { PCI_VENDOR_ID_3COM, PCI_DEVICE_ID_3COM_3CR990SVR95,
221           PCI_ANY_ID, PCI_ANY_ID, 0, 0, TYPHOON_SVR95 },
222         { PCI_VENDOR_ID_3COM, PCI_DEVICE_ID_3COM_3CR990SVR97,
223           PCI_ANY_ID, PCI_ANY_ID, 0, 0, TYPHOON_SVR97 },
224         { PCI_VENDOR_ID_3COM, PCI_DEVICE_ID_3COM_3CR990SVR,
225           PCI_ANY_ID, PCI_ANY_ID, 0, 0, TYPHOON_SVR },
226         { 0, }
227 };
228 MODULE_DEVICE_TABLE(pci, typhoon_pci_tbl);
229
230 /* Define the shared memory area
231  * Align everything the 3XP will normally be using.
232  * We'll need to move/align txHi if we start using that ring.
233  */
234 #define __3xp_aligned   ____cacheline_aligned
/* One shared-memory allocation holding every ring the 3XP touches via
 * DMA. Field order and alignment are part of the host<->card layout --
 * do not reorder. txHi is deliberately left unaligned at the end; it
 * must be moved/aligned if that ring is ever used (see note above).
 */
235 struct typhoon_shared {
236         struct typhoon_interface        iface;
237         struct typhoon_indexes          indexes                 __3xp_aligned;
238         struct tx_desc                  txLo[TXLO_ENTRIES]      __3xp_aligned;
239         struct rx_desc                  rxLo[RX_ENTRIES]        __3xp_aligned;
240         struct rx_desc                  rxHi[RX_ENTRIES]        __3xp_aligned;
241         struct cmd_desc                 cmd[COMMAND_ENTRIES]    __3xp_aligned;
242         struct resp_desc                resp[RESPONSE_ENTRIES]  __3xp_aligned;
243         struct rx_free                  rxBuff[RXFREE_ENTRIES]  __3xp_aligned;
244         u32                             zeroWord;
245         struct tx_desc                  txHi[TXHI_ENTRIES];
246 } __attribute__ ((packed));
247
/* Host-side bookkeeping for one Rx buffer: the skb and the DMA address
 * it was mapped to.
 */
248 struct rxbuff_ent {
249         struct sk_buff *skb;
250         dma_addr_t      dma_addr;
251 };
252
/* Per-adapter private state (hung off dev->priv). Members are grouped
 * by the code path that touches them (Tx, Irq/Rx, general), and each
 * group is cacheline aligned to limit false sharing between paths.
 */
253 struct typhoon {
254         /* Tx cache line section */
255         struct transmit_ring    txLoRing        ____cacheline_aligned;  
256         struct pci_dev *        tx_pdev;
257         unsigned long           tx_ioaddr;
258         u32                     txlo_dma_addr;
259
260         /* Irq/Rx cache line section */
261         unsigned long           ioaddr          ____cacheline_aligned;
262         struct typhoon_indexes *indexes;
263         u8                      awaiting_resp;
264         u8                      duplex;
265         u8                      speed;
266         u8                      card_state;     /* enum state_values */
267         struct basic_ring       rxLoRing;
268         struct pci_dev *        pdev;
269         struct net_device *     dev;
270         spinlock_t              state_lock;     /* guards vlgrp/offload state */
271         struct vlan_group *     vlgrp;
272         struct basic_ring       rxHiRing;
273         struct basic_ring       rxBuffRing;
274         struct rxbuff_ent       rxbuffers[RXENT_ENTRIES];
275
276         /* general section */
277         spinlock_t              command_lock    ____cacheline_aligned;
278         struct basic_ring       cmdRing;
279         struct basic_ring       respRing;
280         struct net_device_stats stats;
281         struct net_device_stats stats_saved;
282         const char *            name;
283         struct typhoon_shared * shared;
284         dma_addr_t              shared_dma;
285         u16                     xcvr_select;
286         u16                     wol_events;
287         u32                     offload;
288         u32                     pci_state[16];
289
290         /* unused stuff (future use) */
291         int                     capabilities;
292         struct transmit_ring    txHiRing;
293 };
294
/* Wait styles for typhoon_reset(): no wait, busy-poll, or sleep. */
295 enum completion_wait_values {
296         NoWait = 0, WaitNoSleep, WaitSleep,
297 };
298
299 /* These are the values for the typhoon.card_state variable.
300  * These determine where the statistics will come from in get_stats().
301  * The sleep image does not support the statistics we need.
302  */
303 enum state_values {
304         Sleeping = 0, Running, /* selects the stats source, see comment above */
305 };
306
307 /* PCI writes are not guaranteed to be posted in order, but outstanding writes
308  * cannot pass a read, so this forces current writes to post.
309  */
310 #define typhoon_post_pci_writes(x) \
311         do { readl(x + TYPHOON_REG_HEARTBEAT); } while(0)
312
313 /* We'll wait up to six seconds for a reset, and half a second normally.
314  */
315 #define TYPHOON_UDELAY                  50
316 #define TYPHOON_RESET_TIMEOUT_SLEEP     (6 * HZ)
317 #define TYPHOON_RESET_TIMEOUT_NOSLEEP   ((6 * 1000000) / TYPHOON_UDELAY)
318 #define TYPHOON_WAIT_TIMEOUT            ((1000000 / 2) / TYPHOON_UDELAY)
319
320 #if LINUX_VERSION_CODE < KERNEL_VERSION(2, 5, 28)
321 #define typhoon_synchronize_irq(x) synchronize_irq()
322 #else
323 #define typhoon_synchronize_irq(x) synchronize_irq(x)
324 #endif
325
326 #if defined(NETIF_F_TSO)
327 #define skb_tso_size(x)         (skb_shinfo(x)->tso_size)
328 #define TSO_NUM_DESCRIPTORS     2
329 #define TSO_OFFLOAD_ON          TYPHOON_OFFLOAD_TCP_SEGMENT
330 #else
331 #define NETIF_F_TSO             0
332 #define skb_tso_size(x)         0
333 #define TSO_NUM_DESCRIPTORS     0
334 #define TSO_OFFLOAD_ON          0
335 #endif
336
337 static inline void
338 typhoon_inc_index(u32 *index, const int count, const int num_entries)
339 {
340         /* Increment a ring index -- we can use this for all rings execept
341          * the Rx rings, as they use different size descriptors
342          * otherwise, everything is the same size as a cmd_desc
343          */
344         *index += count * sizeof(struct cmd_desc);
345         *index %= num_entries * sizeof(struct cmd_desc);
346 }
347
348 static inline void
349 typhoon_inc_cmd_index(u32 *index, const int count)
350 {
        /* host->3XP command ring; entries are cmd_desc-sized */
351         typhoon_inc_index(index, count, COMMAND_ENTRIES);
352 }
353
354 static inline void
355 typhoon_inc_resp_index(u32 *index, const int count)
356 {
        /* 3XP->host response ring; entries are cmd_desc-sized */
357         typhoon_inc_index(index, count, RESPONSE_ENTRIES);
358 }
359
360 static inline void
361 typhoon_inc_rxfree_index(u32 *index, const int count)
362 {
        /* Rx free-buffer ring; entries are cmd_desc-sized */
363         typhoon_inc_index(index, count, RXFREE_ENTRIES);
364 }
365
366 static inline void
367 typhoon_inc_tx_index(u32 *index, const int count)
368 {
369         /* if we start using the Hi Tx ring, this needs updating */
370         typhoon_inc_index(index, count, TXLO_ENTRIES);
371 }
372
373 static inline void
374 typhoon_inc_rx_index(u32 *index, const int count)
375 {
376         /* sizeof(struct rx_desc) != sizeof(struct cmd_desc) */
377         *index += count * sizeof(struct rx_desc);
378         *index %= RX_ENTRIES * sizeof(struct rx_desc);
379 }
380
/* Soft-reset the 3XP and optionally wait for it to come back up.
 * @ioaddr: mapped register base
 * @wait_type: NoWait, WaitNoSleep (busy-poll), or WaitSleep (may sleep)
 * Returns 0 on success, or -ETIMEDOUT if the card never reported
 * TYPHOON_STATUS_WAITING_FOR_HOST within the chosen timeout.
 */
381 static int
382 typhoon_reset(unsigned long ioaddr, int wait_type)
383 {
384         int i, err = 0;
385         int timeout;
386
387         if(wait_type == WaitNoSleep)
388                 timeout = TYPHOON_RESET_TIMEOUT_NOSLEEP;
389         else
390                 timeout = TYPHOON_RESET_TIMEOUT_SLEEP;
391
        /* mask and ack all interrupts before yanking the card */
392         writel(TYPHOON_INTR_ALL, ioaddr + TYPHOON_REG_INTR_MASK);
393         writel(TYPHOON_INTR_ALL, ioaddr + TYPHOON_REG_INTR_STATUS);
394
395         writel(TYPHOON_RESET_ALL, ioaddr + TYPHOON_REG_SOFT_RESET);
396         typhoon_post_pci_writes(ioaddr);
397         udelay(1);
398         writel(TYPHOON_RESET_NONE, ioaddr + TYPHOON_REG_SOFT_RESET);
399
400         if(wait_type != NoWait) {
401                 for(i = 0; i < timeout; i++) {
402                         if(readl(ioaddr + TYPHOON_REG_STATUS) ==
403                            TYPHOON_STATUS_WAITING_FOR_HOST)
404                                 goto out;
405
406                         if(wait_type == WaitSleep) {
407                                 set_current_state(TASK_UNINTERRUPTIBLE);
408                                 schedule_timeout(1);
409                         } else
410                                 udelay(TYPHOON_UDELAY);
411                 }
412
413                 err = -ETIMEDOUT;
414         }
415
416 out:
417         writel(TYPHOON_INTR_ALL, ioaddr + TYPHOON_REG_INTR_MASK);
418         writel(TYPHOON_INTR_ALL, ioaddr + TYPHOON_REG_INTR_STATUS);
        /* extra settle time -- see the explanation below the return */
419         udelay(100);
420         return err;
421
422         /* The 3XP seems to need a little extra time to complete the load
423          * of the sleep image before we can reliably boot it. Failure to
424          * do this occasionally results in a hung adapter after boot in
425          * typhoon_init_one() while trying to read the MAC address or
426          * putting the card to sleep. 3Com's driver waits 5ms, but
427          * that seems to be overkill -- with a 50usec delay, it survives
428          * 35000 typhoon_init_one() calls, where it only make it 25-100
429          * without it.
430          *
431          * As it turns out, still occasionally getting a hung adapter,
432          * so I'm bumping it to 100us.
433          */
434 }
435
436 static int
437 typhoon_wait_status(unsigned long ioaddr, u32 wait_value)
438 {
439         int i, err = 0;
440
441         for(i = 0; i < TYPHOON_WAIT_TIMEOUT; i++) {
442                 if(readl(ioaddr + TYPHOON_REG_STATUS) == wait_value)
443                         goto out;
444                 udelay(TYPHOON_UDELAY);
445         }
446
447         err = -ETIMEDOUT;
448
449 out:
450         return err;
451 }
452
453 static inline void
454 typhoon_media_status(struct net_device *dev, struct resp_desc *resp)
455 {
456         if(resp->parm1 & TYPHOON_MEDIA_STAT_NO_LINK)
457                 netif_carrier_off(dev);
458         else
459                 netif_carrier_on(dev);
460 }
461
/* Answer the 3XP's "hello" heartbeat with a HELLO_RESP command. The card
 * only asks when we have been quiet for a long while; if command_lock is
 * already held, a command is being issued anyway and will serve as our
 * response, so trylock-and-skip is sufficient.
 */
462 static inline void
463 typhoon_hello(struct typhoon *tp)
464 {
465         struct basic_ring *ring = &tp->cmdRing;
466         struct cmd_desc *cmd;
467
468         /* We only get a hello request if we've not sent anything to the
469          * card in a long while. If the lock is held, then we're in the
470          * process of issuing a command, so we don't need to respond.
471          */
472         if(spin_trylock(&tp->command_lock)) {
473                 cmd = (struct cmd_desc *)(ring->ringBase + ring->lastWrite);
474                 typhoon_inc_cmd_index(&ring->lastWrite, 1);
475
476                 INIT_COMMAND_NO_RESPONSE(cmd, TYPHOON_CMD_HELLO_RESP);
                /* descriptor must be visible before the doorbell write */
477                 smp_wmb();
478                 writel(ring->lastWrite, tp->ioaddr + TYPHOON_REG_CMD_READY);
479                 spin_unlock(&tp->command_lock);
480         }
481 }
482
/* Drain the response ring. If resp_save is non-NULL, the first sequenced
 * response (resp->seqNo != 0) of up to resp_size descriptors is copied out
 * for the caller, handling wrap at the end of the ring; media-status and
 * hello responses are handled inline; anything else is logged and dropped.
 * Returns nonzero if the caller's expected response was captured.
 */
483 static int
484 typhoon_process_response(struct typhoon *tp, int resp_size,
485                                 struct resp_desc *resp_save)
486 {
487         struct typhoon_indexes *indexes = tp->indexes;
488         struct resp_desc *resp;
489         u8 *base = tp->respRing.ringBase;
490         int count, len, wrap_len;
491         u32 cleared;
492         u32 ready;
493
494         cleared = le32_to_cpu(indexes->respCleared);
495         ready = le32_to_cpu(indexes->respReady);
496         while(cleared != ready) {
497                 resp = (struct resp_desc *)(base + cleared);
                /* numDesc counts the descriptors following the first one */
498                 count = resp->numDesc + 1;
499                 if(resp_save && resp->seqNo) {
500                         if(count > resp_size) {
                                /* too big for the caller's buffer -- flag it */
501                                 resp_save->flags = TYPHOON_RESP_ERROR;
502                                 goto cleanup;
503                         }
504
505                         wrap_len = 0;
506                         len = count * sizeof(*resp);
507                         if(unlikely(cleared + len > RESPONSE_RING_SIZE)) {
508                                 wrap_len = cleared + len - RESPONSE_RING_SIZE;
509                                 len = RESPONSE_RING_SIZE - cleared;
510                         }
511
512                         memcpy(resp_save, resp, len);
513                         if(unlikely(wrap_len)) {
514                                 resp_save += len / sizeof(*resp);
515                                 memcpy(resp_save, base, wrap_len);
516                         }
517
                        /* NULL marks "captured" for the return value below */
518                         resp_save = NULL;
519                 } else if(resp->cmd == TYPHOON_CMD_READ_MEDIA_STATUS) {
520                         typhoon_media_status(tp->dev, resp);
521                 } else if(resp->cmd == TYPHOON_CMD_HELLO_RESP) {
522                         typhoon_hello(tp);
523                 } else {
524                         printk(KERN_ERR "%s: dumping unexpected response "
525                                "0x%04x:%d:0x%02x:0x%04x:%08x:%08x\n",
526                                tp->name, le16_to_cpu(resp->cmd),
527                                resp->numDesc, resp->flags,
528                                le16_to_cpu(resp->parm1),
529                                le32_to_cpu(resp->parm2),
530                                le32_to_cpu(resp->parm3));
531                 }
532
533 cleanup:
534                 typhoon_inc_resp_index(&cleared, count);
535         }
536
537         indexes->respCleared = cpu_to_le32(cleared);
538         wmb();
539         return (resp_save == NULL);
540 }
541
542 static inline int
543 typhoon_num_free(int lastWrite, int lastRead, int ringSize)
544 {
545         /* this works for all descriptors but rx_desc, as they are a
546          * different size than the cmd_desc -- everyone else is the same
547          */
548         lastWrite /= sizeof(struct cmd_desc);
549         lastRead /= sizeof(struct cmd_desc);
550         return (ringSize + lastRead - lastWrite - 1) % ringSize;
551 }
552
553 static inline int
554 typhoon_num_free_cmd(struct typhoon *tp)
555 {
556         int lastWrite = tp->cmdRing.lastWrite;
557         int cmdCleared = le32_to_cpu(tp->indexes->cmdCleared);
558
559         return typhoon_num_free(lastWrite, cmdCleared, COMMAND_ENTRIES);
560 }
561
562 static inline int
563 typhoon_num_free_resp(struct typhoon *tp)
564 {
565         int respReady = le32_to_cpu(tp->indexes->respReady);
566         int respCleared = le32_to_cpu(tp->indexes->respCleared);
567
568         return typhoon_num_free(respReady, respCleared, RESPONSE_ENTRIES);
569 }
570
/* Free slots in the low-priority Tx ring. */
571 static inline int
572 typhoon_num_free_tx(struct transmit_ring *ring)
573 {
574         /* if we start using the Hi Tx ring, this needs updating */
575         return typhoon_num_free(ring->lastWrite, ring->lastRead, TXLO_ENTRIES);
576 }
577
578 static int
579 typhoon_issue_command(struct typhoon *tp, int num_cmd, struct cmd_desc *cmd,
580                       int num_resp, struct resp_desc *resp)
581 {
582         struct typhoon_indexes *indexes = tp->indexes;
583         struct basic_ring *ring = &tp->cmdRing;
584         struct resp_desc local_resp;
585         int i, err = 0;
586         int got_resp;
587         int freeCmd, freeResp;
588         int len, wrap_len;
589
590         spin_lock(&tp->command_lock);
591
592         freeCmd = typhoon_num_free_cmd(tp);
593         freeResp = typhoon_num_free_resp(tp);
594
595         if(freeCmd < num_cmd || freeResp < num_resp) {
596                 printk("%s: no descs for cmd, had (needed) %d (%d) cmd, "
597                         "%d (%d) resp\n", tp->name, freeCmd, num_cmd,
598                         freeResp, num_resp);
599                 err = -ENOMEM;
600                 goto out;
601         }
602
603         if(cmd->flags & TYPHOON_CMD_RESPOND) {
604                 /* If we're expecting a response, but the caller hasn't given
605                  * us a place to put it, we'll provide one.
606                  */
607                 tp->awaiting_resp = 1;
608                 if(resp == NULL) {
609                         resp = &local_resp;
610                         num_resp = 1;
611                 }
612         }
613
614         wrap_len = 0;
615         len = num_cmd * sizeof(*cmd);
616         if(unlikely(ring->lastWrite + len > COMMAND_RING_SIZE)) {
617                 wrap_len = ring->lastWrite + len - COMMAND_RING_SIZE;
618                 len = COMMAND_RING_SIZE - ring->lastWrite;
619         }
620
621         memcpy(ring->ringBase + ring->lastWrite, cmd, len);
622         if(unlikely(wrap_len)) {
623                 struct cmd_desc *wrap_ptr = cmd;
624                 wrap_ptr += len / sizeof(*cmd);
625                 memcpy(ring->ringBase, wrap_ptr, wrap_len);
626         }
627
628         typhoon_inc_cmd_index(&ring->lastWrite, num_cmd);
629
630         /* "I feel a presence... another warrior is on the the mesa."
631          */
632         wmb();
633         writel(ring->lastWrite, tp->ioaddr + TYPHOON_REG_CMD_READY);
634         typhoon_post_pci_writes(tp->ioaddr);
635
636         if((cmd->flags & TYPHOON_CMD_RESPOND) == 0)
637                 goto out;
638
639         /* Ugh. We'll be here about 8ms, spinning our thumbs, unable to
640          * preempt or do anything other than take interrupts. So, don't
641          * wait for a response unless you have to.
642          *
643          * I've thought about trying to sleep here, but we're called
644          * from many contexts that don't allow that. Also, given the way
645          * 3Com has implemented irq coalescing, we would likely timeout --
646          * this has been observed in real life!
647          *
648          * The big killer is we have to wait to get stats from the card,
649          * though we could go to a periodic refresh of those if we don't
650          * mind them getting somewhat stale. The rest of the waiting
651          * commands occur during open/close/suspend/resume, so they aren't
652          * time critical. Creating SAs in the future will also have to
653          * wait here.
654          */
655         got_resp = 0;
656         for(i = 0; i < TYPHOON_WAIT_TIMEOUT && !got_resp; i++) {
657                 if(indexes->respCleared != indexes->respReady)
658                         got_resp = typhoon_process_response(tp, num_resp,
659                                                                 resp);
660                 udelay(TYPHOON_UDELAY);
661         }
662
663         if(!got_resp) {
664                 err = -ETIMEDOUT;
665                 goto out;
666         }
667
668         /* Collect the error response even if we don't care about the
669          * rest of the response
670          */
671         if(resp->flags & TYPHOON_RESP_ERROR)
672                 err = -EIO;
673
674 out:
675         if(tp->awaiting_resp) {
676                 tp->awaiting_resp = 0;
677                 smp_wmb();
678
679                 /* Ugh. If a response was added to the ring between
680                  * the call to typhoon_process_response() and the clearing
681                  * of tp->awaiting_resp, we could have missed the interrupt
682                  * and it could hang in the ring an indeterminate amount of
683                  * time. So, check for it, and interrupt ourselves if this
684                  * is the case.
685                  */
686                 if(indexes->respCleared != indexes->respReady)
687                         writel(1, tp->ioaddr + TYPHOON_REG_SELF_INTERRUPT);
688         }
689
690         spin_unlock(&tp->command_lock);
691         return err;
692 }
693
694 static void
695 typhoon_vlan_rx_register(struct net_device *dev, struct vlan_group *grp)
696 {
697         struct typhoon *tp = (struct typhoon *) dev->priv;
698         struct cmd_desc xp_cmd;
699         int err;
700
701         spin_lock_bh(&tp->state_lock);
702         if(!tp->vlgrp != !grp) {
703                 /* We've either been turned on for the first time, or we've
704                  * been turned off. Update the 3XP.
705                  */
706                 if(grp)
707                         tp->offload |= TYPHOON_OFFLOAD_VLAN;
708                 else
709                         tp->offload &= ~TYPHOON_OFFLOAD_VLAN;
710
711                 /* If the interface is up, the runtime is running -- and we
712                  * must be up for the vlan core to call us.
713                  *
714                  * Do the command outside of the spin lock, as it is slow.
715                  */
716                 INIT_COMMAND_WITH_RESPONSE(&xp_cmd,
717                                         TYPHOON_CMD_SET_OFFLOAD_TASKS);
718                 xp_cmd.parm2 = tp->offload;
719                 xp_cmd.parm3 = tp->offload;
720                 spin_unlock_bh(&tp->state_lock);
721                 err = typhoon_issue_command(tp, 1, &xp_cmd, 0, NULL);
722                 if(err < 0)
723                         printk("%s: vlan offload error %d\n", tp->name, -err);
724                 spin_lock_bh(&tp->state_lock);
725         }
726
727         /* now make the change visible */
728         tp->vlgrp = grp;
729         spin_unlock_bh(&tp->state_lock);
730 }
731
732 static void
733 typhoon_vlan_rx_kill_vid(struct net_device *dev, unsigned short vid)
734 {
735         struct typhoon *tp = (struct typhoon *) dev->priv;
736         spin_lock_bh(&tp->state_lock);
737         if(tp->vlgrp)
738                 tp->vlgrp->vlan_devices[vid] = NULL;
739         spin_unlock_bh(&tp->state_lock);
740 }
741
/* Write a TCP segmentation (TSO) option descriptor for this skb into the
 * Tx ring. respAddrLo is set to the bus address of this descriptor's own
 * bytesTx field (ring_dma is the ring's base bus address), and the whole
 * skb is marked as one first+last segment run.
 */
742 static inline void
743 typhoon_tso_fill(struct sk_buff *skb, struct transmit_ring *txRing,
744                         u32 ring_dma)
745 {
746         struct tcpopt_desc *tcpd;
747         u32 tcpd_offset = ring_dma;
748
749         tcpd = (struct tcpopt_desc *) (txRing->ringBase + txRing->lastWrite);
750         tcpd_offset += txRing->lastWrite;
751         tcpd_offset += offsetof(struct tcpopt_desc, bytesTx);
752         typhoon_inc_tx_index(&txRing->lastWrite, 1);
753
754         tcpd->flags = TYPHOON_OPT_DESC | TYPHOON_OPT_TCP_SEG;
755         tcpd->numDesc = 1;
756         tcpd->mss_flags = cpu_to_le16(skb_tso_size(skb));
757         tcpd->mss_flags |= TYPHOON_TSO_FIRST | TYPHOON_TSO_LAST;
758         tcpd->respAddrLo = cpu_to_le32(tcpd_offset);
759         tcpd->bytesTx = cpu_to_le32(skb->len);
760         tcpd->status = 0;
761 }
762
/* Queue an skb for transmit on the Lo Tx ring.
 *
 * Layout written to the ring: one Tx packet descriptor (which stashes
 * the skb pointer for reclaim in typhoon_clean_tx()), an optional TSO
 * option descriptor, then one fragment descriptor per DMA-mapped piece
 * of the skb. Finally the new write index is posted to kick the 3XP.
 *
 * Always returns 0; if the ring may not hold another worst-case packet,
 * the queue is stopped before returning.
 */
static int
typhoon_start_tx(struct sk_buff *skb, struct net_device *dev)
{
	struct typhoon *tp = (struct typhoon *) dev->priv;
	struct transmit_ring *txRing;
	struct tx_desc *txd, *first_txd;
	dma_addr_t skb_dma;
	int numDesc;

	/* we have two rings to choose from, but we only use txLo for now
	 * If we start using the Hi ring as well, we'll need to update
	 * typhoon_stop_runtime(), typhoon_interrupt(), typhoon_num_free_tx(),
	 * and TXHI_ENTRIES to match, as well as update the TSO code below
	 * to get the right DMA address
	 */
	txRing = &tp->txLoRing;

	/* We need one descriptor for each fragment of the sk_buff, plus the
	 * one for the ->data area of it.
	 *
	 * The docs say a maximum of 16 fragment descriptors per TCP option
	 * descriptor, then make a new packet descriptor and option descriptor
	 * for the next 16 fragments. The engineers say just an option
	 * descriptor is needed. I've tested up to 26 fragments with a single
	 * packet descriptor/option descriptor combo, so I use that for now.
	 *
	 * If problems develop with TSO, check this first.
	 */
	numDesc = skb_shinfo(skb)->nr_frags + 1;
	if(skb_tso_size(skb))
		numDesc++;

	/* When checking for free space in the ring, we need to also
	 * account for the initial Tx descriptor, and we always must leave
	 * at least one descriptor unused in the ring so that it doesn't
	 * wrap and look empty.
	 *
	 * The only time we should loop here is when we hit the race
	 * between marking the queue awake and updating the cleared index.
	 * Just loop and it will appear. This comes from the acenic driver.
	 */
	while(unlikely(typhoon_num_free_tx(txRing) < (numDesc + 2)))
		smp_rmb();

	first_txd = (struct tx_desc *) (txRing->ringBase + txRing->lastWrite);
	typhoon_inc_tx_index(&txRing->lastWrite, 1);

	first_txd->flags = TYPHOON_TX_DESC | TYPHOON_DESC_VALID;
	first_txd->numDesc = 0;
	first_txd->len = 0;
	/* Stash the skb pointer in the (otherwise unused) address fields of
	 * the packet descriptor so typhoon_clean_tx() can recover and free
	 * it when the card reports Tx completion.
	 */
	first_txd->addr = (u64)((unsigned long) skb) & 0xffffffff;
	first_txd->addrHi = (u64)((unsigned long) skb) >> 32;
	first_txd->processFlags = 0;

	if(skb->ip_summed == CHECKSUM_HW) {
		/* The 3XP will figure out if this is UDP/TCP */
		first_txd->processFlags |= TYPHOON_TX_PF_TCP_CHKSUM;
		first_txd->processFlags |= TYPHOON_TX_PF_UDP_CHKSUM;
		first_txd->processFlags |= TYPHOON_TX_PF_IP_CHKSUM;
	}

	if(vlan_tx_tag_present(skb)) {
		/* hand the tag to the firmware in wire (network) byte
		 * order; it does the actual insertion
		 */
		first_txd->processFlags |=
		    TYPHOON_TX_PF_INSERT_VLAN | TYPHOON_TX_PF_VLAN_PRIORITY;
		first_txd->processFlags |=
		    cpu_to_le32(htons(vlan_tx_tag_get(skb)) <<
				TYPHOON_TX_PF_VLAN_TAG_SHIFT);
	}

	if(skb_tso_size(skb)) {
		first_txd->processFlags |= TYPHOON_TX_PF_TCP_SEGMENT;
		first_txd->numDesc++;

		typhoon_tso_fill(skb, txRing, tp->txlo_dma_addr);
	}

	txd = (struct tx_desc *) (txRing->ringBase + txRing->lastWrite);
	typhoon_inc_tx_index(&txRing->lastWrite, 1);

	/* No need to worry about padding packet -- the firmware pads
	 * it with zeros to ETH_ZLEN for us.
	 */
	if(skb_shinfo(skb)->nr_frags == 0) {
		/* linear skb: one fragment descriptor covers everything */
		skb_dma = pci_map_single(tp->tx_pdev, skb->data, skb->len,
				       PCI_DMA_TODEVICE);
		txd->flags = TYPHOON_FRAG_DESC | TYPHOON_DESC_VALID;
		txd->len = cpu_to_le16(skb->len);
		txd->addr = cpu_to_le32(skb_dma);
		txd->addrHi = 0;
		first_txd->numDesc++;
	} else {
		int i, len;

		/* map the linear header area first ... */
		len = skb_headlen(skb);
		skb_dma = pci_map_single(tp->tx_pdev, skb->data, len,
					 PCI_DMA_TODEVICE);
		txd->flags = TYPHOON_FRAG_DESC | TYPHOON_DESC_VALID;
		txd->len = cpu_to_le16(len);
		txd->addr = cpu_to_le32(skb_dma);
		txd->addrHi = 0;
		first_txd->numDesc++;

		/* ... then one fragment descriptor per paged fragment */
		for(i = 0; i < skb_shinfo(skb)->nr_frags; i++) {
			skb_frag_t *frag = &skb_shinfo(skb)->frags[i];
			void *frag_addr;

			txd = (struct tx_desc *) (txRing->ringBase +
						txRing->lastWrite);
			typhoon_inc_tx_index(&txRing->lastWrite, 1);

			len = frag->size;
			frag_addr = (void *) page_address(frag->page) +
						frag->page_offset;
			skb_dma = pci_map_single(tp->tx_pdev, frag_addr, len,
					 PCI_DMA_TODEVICE);
			txd->flags = TYPHOON_FRAG_DESC | TYPHOON_DESC_VALID;
			txd->len = cpu_to_le16(len);
			txd->addr = cpu_to_le32(skb_dma);
			txd->addrHi = 0;
			first_txd->numDesc++;
		}
	}

	/* Kick the 3XP -- the wmb() makes sure all descriptor writes are
	 * visible before the doorbell write that tells the card to look.
	 */
	wmb();
	writel(txRing->lastWrite, tp->tx_ioaddr + txRing->writeRegister);

	dev->trans_start = jiffies;

	/* If we don't have room to put the worst case packet on the
	 * queue, then we must stop the queue. We need 2 extra
	 * descriptors -- one to prevent ring wrap, and one for the
	 * Tx header.
	 */
	numDesc = MAX_SKB_FRAGS + TSO_NUM_DESCRIPTORS + 1;

	if(typhoon_num_free_tx(txRing) < (numDesc + 2)) {
		netif_stop_queue(dev);

		/* A Tx complete IRQ could have gotten in between, making
		 * the ring free again. Only need to recheck here, since
		 * Tx is serialized.
		 */
		if(typhoon_num_free_tx(txRing) >= (numDesc + 2))
			netif_wake_queue(dev);
	}

	return 0;
}
913
914 static void
915 typhoon_set_rx_mode(struct net_device *dev)
916 {
917         struct typhoon *tp = (struct typhoon *) dev->priv;
918         struct cmd_desc xp_cmd;
919         u32 mc_filter[2];
920         u16 filter;
921
922         filter = TYPHOON_RX_FILTER_DIRECTED | TYPHOON_RX_FILTER_BROADCAST;
923         if(dev->flags & IFF_PROMISC) {
924                 printk(KERN_NOTICE "%s: Promiscuous mode enabled.\n",
925                        dev->name);
926                 filter |= TYPHOON_RX_FILTER_PROMISCOUS;
927         } else if((dev->mc_count > multicast_filter_limit) ||
928                   (dev->flags & IFF_ALLMULTI)) {
929                 /* Too many to match, or accept all multicasts. */
930                 filter |= TYPHOON_RX_FILTER_ALL_MCAST;
931         } else if(dev->mc_count) {
932                 struct dev_mc_list *mclist;
933                 int i;
934
935                 memset(mc_filter, 0, sizeof(mc_filter));
936                 for(i = 0, mclist = dev->mc_list; mclist && i < dev->mc_count;
937                     i++, mclist = mclist->next) {
938                         int bit = ether_crc(ETH_ALEN, mclist->dmi_addr) & 0x3f;
939                         mc_filter[bit >> 5] |= 1 << (bit & 0x1f);
940                 }
941
942                 INIT_COMMAND_NO_RESPONSE(&xp_cmd,
943                                          TYPHOON_CMD_SET_MULTICAST_HASH);
944                 xp_cmd.parm1 = TYPHOON_MCAST_HASH_SET;
945                 xp_cmd.parm2 = cpu_to_le32(mc_filter[0]);
946                 xp_cmd.parm3 = cpu_to_le32(mc_filter[1]);
947                 typhoon_issue_command(tp, 1, &xp_cmd, 0, NULL);
948
949                 filter |= TYPHOON_RX_FILTER_MCAST_HASH;
950         }
951
952         INIT_COMMAND_NO_RESPONSE(&xp_cmd, TYPHOON_CMD_SET_RX_FILTER);
953         xp_cmd.parm1 = filter;
954         typhoon_issue_command(tp, 1, &xp_cmd, 0, NULL);
955 }
956
957 static int
958 typhoon_do_get_stats(struct typhoon *tp)
959 {
960         struct net_device_stats *stats = &tp->stats;
961         struct net_device_stats *saved = &tp->stats_saved;
962         struct cmd_desc xp_cmd;
963         struct resp_desc xp_resp[7];
964         struct stats_resp *s = (struct stats_resp *) xp_resp;
965         int err;
966
967         INIT_COMMAND_WITH_RESPONSE(&xp_cmd, TYPHOON_CMD_READ_STATS);
968         err = typhoon_issue_command(tp, 1, &xp_cmd, 7, xp_resp);
969         if(err < 0)
970                 return err;
971
972         /* 3Com's Linux driver uses txMultipleCollisions as it's
973          * collisions value, but there is some other collision info as well...
974          */
975         stats->tx_packets = le32_to_cpu(s->txPackets);
976         stats->tx_bytes = le32_to_cpu(s->txBytes);
977         stats->tx_errors = le32_to_cpu(s->txCarrierLost);
978         stats->tx_carrier_errors = le32_to_cpu(s->txCarrierLost);
979         stats->collisions = le32_to_cpu(s->txMultipleCollisions);
980         stats->rx_packets = le32_to_cpu(s->rxPacketsGood);
981         stats->rx_bytes = le32_to_cpu(s->rxBytesGood);
982         stats->rx_fifo_errors = le32_to_cpu(s->rxFifoOverruns);
983         stats->rx_errors = le32_to_cpu(s->rxFifoOverruns) +
984                         le32_to_cpu(s->BadSSD) + le32_to_cpu(s->rxCrcErrors);
985         stats->rx_crc_errors = le32_to_cpu(s->rxCrcErrors);
986         stats->rx_length_errors = le32_to_cpu(s->rxOversized);
987         tp->speed = (s->linkStatus & TYPHOON_LINK_100MBPS) ?
988                         SPEED_100 : SPEED_10;
989         tp->duplex = (s->linkStatus & TYPHOON_LINK_FULL_DUPLEX) ?
990                         DUPLEX_FULL : DUPLEX_HALF;
991
992         /* add in the saved statistics
993          */
994         stats->tx_packets += saved->tx_packets;
995         stats->tx_bytes += saved->tx_bytes;
996         stats->tx_errors += saved->tx_errors;
997         stats->collisions += saved->collisions;
998         stats->rx_packets += saved->rx_packets;
999         stats->rx_bytes += saved->rx_bytes;
1000         stats->rx_fifo_errors += saved->rx_fifo_errors;
1001         stats->rx_errors += saved->rx_errors;
1002         stats->rx_crc_errors += saved->rx_crc_errors;
1003         stats->rx_length_errors += saved->rx_length_errors;
1004
1005         return 0;
1006 }
1007
1008 static struct net_device_stats *
1009 typhoon_get_stats(struct net_device *dev)
1010 {
1011         struct typhoon *tp = (struct typhoon *) dev->priv;
1012         struct net_device_stats *stats = &tp->stats;
1013         struct net_device_stats *saved = &tp->stats_saved;
1014
1015         smp_rmb();
1016         if(tp->card_state == Sleeping)
1017                 return saved;
1018
1019         if(typhoon_do_get_stats(tp) < 0) {
1020                 printk(KERN_ERR "%s: error getting stats\n", dev->name);
1021                 return saved;
1022         }
1023
1024         return stats;
1025 }
1026
1027 static int
1028 typhoon_set_mac_address(struct net_device *dev, void *addr)
1029 {
1030         struct sockaddr *saddr = (struct sockaddr *) addr;
1031
1032         if(netif_running(dev))
1033                 return -EBUSY;
1034
1035         memcpy(dev->dev_addr, saddr->sa_data, dev->addr_len);
1036         return 0;
1037 }
1038
1039 static inline void
1040 typhoon_ethtool_gdrvinfo(struct typhoon *tp, struct ethtool_drvinfo *info)
1041 {
1042         struct pci_dev *pci_dev = tp->pdev;
1043         struct cmd_desc xp_cmd;
1044         struct resp_desc xp_resp[3];
1045
1046         smp_rmb();
1047         if(tp->card_state == Sleeping) {
1048                 strcpy(info->fw_version, "Sleep image");
1049         } else {
1050                 INIT_COMMAND_WITH_RESPONSE(&xp_cmd, TYPHOON_CMD_READ_VERSIONS);
1051                 if(typhoon_issue_command(tp, 1, &xp_cmd, 3, xp_resp) < 0) {
1052                         strcpy(info->fw_version, "Unknown runtime");
1053                 } else {
1054                         strncpy(info->fw_version, (char *) &xp_resp[1], 32);
1055                         info->fw_version[31] = 0;
1056                 }
1057         }
1058
1059         strcpy(info->driver, DRV_MODULE_NAME);
1060         strcpy(info->version, DRV_MODULE_VERSION);
1061         strcpy(info->bus_info, pci_name(pci_dev));
1062 }
1063
1064 static inline void
1065 typhoon_ethtool_gset(struct typhoon *tp, struct ethtool_cmd *cmd)
1066 {
1067         cmd->supported = SUPPORTED_100baseT_Half | SUPPORTED_100baseT_Full |
1068                                 SUPPORTED_Autoneg;
1069
1070         switch (tp->xcvr_select) {
1071         case TYPHOON_XCVR_10HALF:
1072                 cmd->advertising = ADVERTISED_10baseT_Half;
1073                 break;
1074         case TYPHOON_XCVR_10FULL:
1075                 cmd->advertising = ADVERTISED_10baseT_Full;
1076                 break;
1077         case TYPHOON_XCVR_100HALF:
1078                 cmd->advertising = ADVERTISED_100baseT_Half;
1079                 break;
1080         case TYPHOON_XCVR_100FULL:
1081                 cmd->advertising = ADVERTISED_100baseT_Full;
1082                 break;
1083         case TYPHOON_XCVR_AUTONEG:
1084                 cmd->advertising = ADVERTISED_10baseT_Half |
1085                                             ADVERTISED_10baseT_Full |
1086                                             ADVERTISED_100baseT_Half |
1087                                             ADVERTISED_100baseT_Full |
1088                                             ADVERTISED_Autoneg;
1089                 break;
1090         }
1091
1092         if(tp->capabilities & TYPHOON_FIBER) {
1093                 cmd->supported |= SUPPORTED_FIBRE;
1094                 cmd->advertising |= ADVERTISED_FIBRE;
1095                 cmd->port = PORT_FIBRE;
1096         } else {
1097                 cmd->supported |= SUPPORTED_10baseT_Half |
1098                                         SUPPORTED_10baseT_Full |
1099                                         SUPPORTED_TP;
1100                 cmd->advertising |= ADVERTISED_TP;
1101                 cmd->port = PORT_TP;
1102         }
1103
1104         /* need to get stats to make these link speed/duplex valid */
1105         typhoon_do_get_stats(tp);
1106         cmd->speed = tp->speed;
1107         cmd->duplex = tp->duplex;
1108         cmd->phy_address = 0;
1109         cmd->transceiver = XCVR_INTERNAL;
1110         if(tp->xcvr_select == TYPHOON_XCVR_AUTONEG)
1111                 cmd->autoneg = AUTONEG_ENABLE;
1112         else
1113                 cmd->autoneg = AUTONEG_DISABLE;
1114         cmd->maxtxpkt = 1;
1115         cmd->maxrxpkt = 1;
1116 }
1117
1118 static inline int
1119 typhoon_ethtool_sset(struct typhoon *tp, struct ethtool_cmd *cmd)
1120 {
1121         struct cmd_desc xp_cmd;
1122         int xcvr;
1123         int err;
1124
1125         if(cmd->autoneg == AUTONEG_ENABLE) {
1126                 xcvr = TYPHOON_XCVR_AUTONEG;
1127         } else {
1128                 if(cmd->duplex == DUPLEX_HALF) {
1129                         if(cmd->speed == SPEED_10)
1130                                 xcvr = TYPHOON_XCVR_10HALF;
1131                         else if(cmd->speed == SPEED_100)
1132                                 xcvr = TYPHOON_XCVR_100HALF;
1133                         else
1134                                 return -EINVAL;
1135                 } else if(cmd->duplex == DUPLEX_FULL) {
1136                         if(cmd->speed == SPEED_10)
1137                                 xcvr = TYPHOON_XCVR_10FULL;
1138                         else if(cmd->speed == SPEED_100)
1139                                 xcvr = TYPHOON_XCVR_100FULL;
1140                         else
1141                                 return -EINVAL;
1142                 } else
1143                         return -EINVAL;
1144         }
1145
1146         INIT_COMMAND_NO_RESPONSE(&xp_cmd, TYPHOON_CMD_XCVR_SELECT);
1147         xp_cmd.parm1 = cpu_to_le16(xcvr);
1148         err = typhoon_issue_command(tp, 1, &xp_cmd, 0, NULL);
1149         if(err < 0)
1150                 return err;
1151
1152         tp->xcvr_select = xcvr;
1153         if(cmd->autoneg == AUTONEG_ENABLE) {
1154                 tp->speed = 0xff;       /* invalid */
1155                 tp->duplex = 0xff;      /* invalid */
1156         } else {
1157                 tp->speed = cmd->speed;
1158                 tp->duplex = cmd->duplex;
1159         }
1160
1161         return 0;
1162 }
1163
/* Dispatch SIOCETHTOOL sub-commands.
 *
 * @useraddr points at a userspace ethtool struct whose first u32 is the
 * sub-command; each case copies the appropriate struct to/from user
 * space. Returns 0 on success, -EFAULT on a failed user copy, or
 * -EOPNOTSUPP for unhandled sub-commands.
 */
static inline int
typhoon_ethtool_ioctl(struct net_device *dev, void __user *useraddr)
{
	struct typhoon *tp = (struct typhoon *) dev->priv;
	u32 ethcmd;

	if(copy_from_user(&ethcmd, useraddr, sizeof(ethcmd)))
		return -EFAULT;

	switch (ethcmd) {
	case ETHTOOL_GDRVINFO: {
			/* driver/firmware version info */
			struct ethtool_drvinfo info = { ETHTOOL_GDRVINFO };

			typhoon_ethtool_gdrvinfo(tp, &info);
			if(copy_to_user(useraddr, &info, sizeof(info)))
				return -EFAULT;
			return 0;
		}
	case ETHTOOL_GSET: {
			/* get link settings */
			struct ethtool_cmd cmd = { ETHTOOL_GSET };

			typhoon_ethtool_gset(tp, &cmd);
			if(copy_to_user(useraddr, &cmd, sizeof(cmd)))
				return -EFAULT;
			return 0;
		}
	case ETHTOOL_SSET: {
			/* set link settings */
			struct ethtool_cmd cmd;
			if(copy_from_user(&cmd, useraddr, sizeof(cmd)))
				return -EFAULT;

			return typhoon_ethtool_sset(tp, &cmd);
		}
	case ETHTOOL_GLINK:{
			/* report carrier state */
			struct ethtool_value edata = { ETHTOOL_GLINK };

			edata.data = netif_carrier_ok(dev) ? 1 : 0;
			if(copy_to_user(useraddr, &edata, sizeof(edata)))
				return -EFAULT;
			return 0;
		}
	case ETHTOOL_GWOL: {
			/* translate tp->wol_events into ethtool WOL flags */
			struct ethtool_wolinfo wol = { ETHTOOL_GWOL };

			if(tp->wol_events & TYPHOON_WAKE_LINK_EVENT)
				wol.wolopts |= WAKE_PHY;
			if(tp->wol_events & TYPHOON_WAKE_MAGIC_PKT)
				wol.wolopts |= WAKE_MAGIC;
			if(copy_to_user(useraddr, &wol, sizeof(wol)))
				return -EFAULT;
			return 0;
	}
	case ETHTOOL_SWOL: {
			/* translate ethtool WOL flags into tp->wol_events */
			struct ethtool_wolinfo wol;

			if(copy_from_user(&wol, useraddr, sizeof(wol)))
				return -EFAULT;
			tp->wol_events = 0;
			if(wol.wolopts & WAKE_PHY)
				tp->wol_events |= TYPHOON_WAKE_LINK_EVENT;
			if(wol.wolopts & WAKE_MAGIC)
				tp->wol_events |= TYPHOON_WAKE_MAGIC_PKT;
			return 0;
	}
	default:
		break;
	}

	return -EOPNOTSUPP;
}
1234
1235 static int
1236 typhoon_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd)
1237 {
1238         switch (cmd) {
1239         case SIOCETHTOOL:
1240                 return typhoon_ethtool_ioctl(dev, ifr->ifr_data);
1241         default:
1242                 break;
1243         }
1244
1245         return -EOPNOTSUPP;
1246 }
1247
1248 static int
1249 typhoon_wait_interrupt(unsigned long ioaddr)
1250 {
1251         int i, err = 0;
1252
1253         for(i = 0; i < TYPHOON_WAIT_TIMEOUT; i++) {
1254                 if(readl(ioaddr + TYPHOON_REG_INTR_STATUS) &
1255                    TYPHOON_INTR_BOOTCMD)
1256                         goto out;
1257                 udelay(TYPHOON_UDELAY);
1258         }
1259
1260         err = -ETIMEDOUT;
1261
1262 out:
1263         writel(TYPHOON_INTR_BOOTCMD, ioaddr + TYPHOON_REG_INTR_STATUS);
1264         return err;
1265 }
1266
/* byte offset of a member within the shared host/card memory block */
#define shared_offset(x)	offsetof(struct typhoon_shared, x)

/* Initialize the shared-memory interface block handed to the 3XP at
 * boot: bus addresses and sizes of every ring, plus the host-side
 * pointers/registers/locks that mirror that layout in tp.
 *
 * Must run before typhoon_boot_3XP(), which posts tp->shared_dma (the
 * boot record) to the card.
 */
static void
typhoon_init_interface(struct typhoon *tp)
{
	struct typhoon_interface *iface = &tp->shared->iface;
	dma_addr_t shared_dma;

	memset(tp->shared, 0, sizeof(struct typhoon_shared));

	/* The *Hi members of iface are all init'd to zero by the memset().
	 */
	shared_dma = tp->shared_dma + shared_offset(indexes);
	iface->ringIndex = cpu_to_le32(shared_dma);

	shared_dma = tp->shared_dma + shared_offset(txLo);
	iface->txLoAddr = cpu_to_le32(shared_dma);
	iface->txLoSize = cpu_to_le32(TXLO_ENTRIES * sizeof(struct tx_desc));

	shared_dma = tp->shared_dma + shared_offset(txHi);
	iface->txHiAddr = cpu_to_le32(shared_dma);
	iface->txHiSize = cpu_to_le32(TXHI_ENTRIES * sizeof(struct tx_desc));

	shared_dma = tp->shared_dma + shared_offset(rxBuff);
	iface->rxBuffAddr = cpu_to_le32(shared_dma);
	iface->rxBuffSize = cpu_to_le32(RXFREE_ENTRIES *
					sizeof(struct rx_free));

	shared_dma = tp->shared_dma + shared_offset(rxLo);
	iface->rxLoAddr = cpu_to_le32(shared_dma);
	iface->rxLoSize = cpu_to_le32(RX_ENTRIES * sizeof(struct rx_desc));

	shared_dma = tp->shared_dma + shared_offset(rxHi);
	iface->rxHiAddr = cpu_to_le32(shared_dma);
	iface->rxHiSize = cpu_to_le32(RX_ENTRIES * sizeof(struct rx_desc));

	shared_dma = tp->shared_dma + shared_offset(cmd);
	iface->cmdAddr = cpu_to_le32(shared_dma);
	iface->cmdSize = cpu_to_le32(COMMAND_RING_SIZE);

	shared_dma = tp->shared_dma + shared_offset(resp);
	iface->respAddr = cpu_to_le32(shared_dma);
	iface->respSize = cpu_to_le32(RESPONSE_RING_SIZE);

	shared_dma = tp->shared_dma + shared_offset(zeroWord);
	iface->zeroAddr = cpu_to_le32(shared_dma);

	/* host-side (virtual) views of the same rings */
	tp->indexes = &tp->shared->indexes;
	tp->txLoRing.ringBase = (u8 *) tp->shared->txLo;
	tp->txHiRing.ringBase = (u8 *) tp->shared->txHi;
	tp->rxLoRing.ringBase = (u8 *) tp->shared->rxLo;
	tp->rxHiRing.ringBase = (u8 *) tp->shared->rxHi;
	tp->rxBuffRing.ringBase = (u8 *) tp->shared->rxBuff;
	tp->cmdRing.ringBase = (u8 *) tp->shared->cmd;
	tp->respRing.ringBase = (u8 *) tp->shared->resp;

	tp->txLoRing.writeRegister = TYPHOON_REG_TX_LO_READY;
	tp->txHiRing.writeRegister = TYPHOON_REG_TX_HI_READY;

	/* publish card_state before readers (which use smp_rmb()) see it */
	tp->txlo_dma_addr = iface->txLoAddr;
	tp->card_state = Sleeping;
	smp_wmb();

	tp->offload = TYPHOON_OFFLOAD_IP_CHKSUM | TYPHOON_OFFLOAD_TCP_CHKSUM;
	tp->offload |= TYPHOON_OFFLOAD_UDP_CHKSUM | TSO_OFFLOAD_ON;

	spin_lock_init(&tp->command_lock);
	spin_lock_init(&tp->state_lock);
}
1336
1337 static void
1338 typhoon_init_rings(struct typhoon *tp)
1339 {
1340         memset(tp->indexes, 0, sizeof(struct typhoon_indexes));
1341
1342         tp->txLoRing.lastWrite = 0;
1343         tp->txHiRing.lastWrite = 0;
1344         tp->rxLoRing.lastWrite = 0;
1345         tp->rxHiRing.lastWrite = 0;
1346         tp->rxBuffRing.lastWrite = 0;
1347         tp->cmdRing.lastWrite = 0;
1348         tp->cmdRing.lastWrite = 0;
1349
1350         tp->txLoRing.lastRead = 0;
1351         tp->txHiRing.lastRead = 0;
1352 }
1353
/* Download the runtime firmware image (typhoon_firmware_image, linked
 * into the driver) to the 3XP.
 *
 * Protocol: validate the "TYPHOON" file header, post the load address
 * and HMAC digest words, then feed each section to the card in
 * PAGE_SIZE chunks through a DMA bounce buffer, handshaking on the
 * BOOTCMD interrupt between chunks.
 *
 * Returns 0 on success, -EINVAL for a bad image, -ENOMEM if the bounce
 * buffer can't be allocated, or -ETIMEDOUT on a handshake timeout.
 */
static int
typhoon_download_firmware(struct typhoon *tp)
{
	unsigned long ioaddr = tp->ioaddr;
	struct pci_dev *pdev = tp->pdev;
	struct typhoon_file_header *fHdr;
	struct typhoon_section_header *sHdr;
	u8 *image_data;
	void *dpage;
	dma_addr_t dpage_dma;
	unsigned int csum;
	u32 irqEnabled;
	u32 irqMasked;
	u32 numSections;
	u32 section_len;
	u32 len;
	u32 load_addr;
	u32 hmac;
	int i;
	int err;

	err = -EINVAL;
	fHdr = (struct typhoon_file_header *) typhoon_firmware_image;
	image_data = (u8 *) fHdr;

	if(memcmp(fHdr->tag, "TYPHOON", 8)) {
		printk(KERN_ERR "%s: Invalid firmware image!\n", tp->name);
		goto err_out;
	}

	/* Cannot just map the firmware image using pci_map_single() as
	 * the firmware is part of the kernel/module image, so we allocate
	 * some consistent memory to copy the sections into, as it is simpler,
	 * and short-lived. If we ever split out and require a userland
	 * firmware loader, then we can revisit this.
	 */
	err = -ENOMEM;
	dpage = pci_alloc_consistent(pdev, PAGE_SIZE, &dpage_dma);
	if(!dpage) {
		printk(KERN_ERR "%s: no DMA mem for firmware\n", tp->name);
		goto err_out;
	}

	/* enable (but mask) the BOOTCMD interrupt so we can poll for it;
	 * original settings are restored at err_out_irq
	 */
	irqEnabled = readl(ioaddr + TYPHOON_REG_INTR_ENABLE);
	writel(irqEnabled | TYPHOON_INTR_BOOTCMD,
	       ioaddr + TYPHOON_REG_INTR_ENABLE);
	irqMasked = readl(ioaddr + TYPHOON_REG_INTR_MASK);
	writel(irqMasked | TYPHOON_INTR_BOOTCMD,
	       ioaddr + TYPHOON_REG_INTR_MASK);

	err = -ETIMEDOUT;
	if(typhoon_wait_status(ioaddr, TYPHOON_STATUS_WAITING_FOR_HOST) < 0) {
		printk(KERN_ERR "%s: card ready timeout\n", tp->name);
		goto err_out_irq;
	}

	numSections = le32_to_cpu(fHdr->numSections);
	load_addr = le32_to_cpu(fHdr->startAddr);

	/* post the boot address and the five 32-bit HMAC digest words,
	 * then tell the card a runtime image is coming
	 */
	writel(TYPHOON_INTR_BOOTCMD, ioaddr + TYPHOON_REG_INTR_STATUS);
	writel(load_addr, ioaddr + TYPHOON_REG_DOWNLOAD_BOOT_ADDR);
	hmac = le32_to_cpu(fHdr->hmacDigest[0]);
	writel(hmac, ioaddr + TYPHOON_REG_DOWNLOAD_HMAC_0);
	hmac = le32_to_cpu(fHdr->hmacDigest[1]);
	writel(hmac, ioaddr + TYPHOON_REG_DOWNLOAD_HMAC_1);
	hmac = le32_to_cpu(fHdr->hmacDigest[2]);
	writel(hmac, ioaddr + TYPHOON_REG_DOWNLOAD_HMAC_2);
	hmac = le32_to_cpu(fHdr->hmacDigest[3]);
	writel(hmac, ioaddr + TYPHOON_REG_DOWNLOAD_HMAC_3);
	hmac = le32_to_cpu(fHdr->hmacDigest[4]);
	writel(hmac, ioaddr + TYPHOON_REG_DOWNLOAD_HMAC_4);
	typhoon_post_pci_writes(ioaddr);
	writel(TYPHOON_BOOTCMD_RUNTIME_IMAGE, ioaddr + TYPHOON_REG_COMMAND);

	image_data += sizeof(struct typhoon_file_header);

	/* The readl() in typhoon_wait_interrupt() will force the
	 * last write to the command register to post, so
	 * we don't need a typhoon_post_pci_writes() after it.
	 */
	for(i = 0; i < numSections; i++) {
		sHdr = (struct typhoon_section_header *) image_data;
		image_data += sizeof(struct typhoon_section_header);
		load_addr = le32_to_cpu(sHdr->startAddr);
		section_len = le32_to_cpu(sHdr->len);

		while(section_len) {
			len = min_t(u32, section_len, PAGE_SIZE);

			if(typhoon_wait_interrupt(ioaddr) < 0 ||
			   readl(ioaddr + TYPHOON_REG_STATUS) !=
			   TYPHOON_STATUS_WAITING_FOR_SEGMENT) {
				printk(KERN_ERR "%s: segment ready timeout\n",
				       tp->name);
				goto err_out_irq;
			}

			/* Do an pseudo IPv4 checksum on the data -- first
			 * need to convert each u16 to cpu order before
			 * summing. Fortunately, due to the properties of
			 * the checksum, we can do this once, at the end.
			 */
			csum = csum_partial_copy_nocheck(image_data, dpage,
							 len, 0);
			csum = csum_fold(csum);
			csum = le16_to_cpu(csum);

			/* describe the chunk (length, checksum, destination,
			 * source DMA address) and hand it to the card
			 */
			writel(len, ioaddr + TYPHOON_REG_BOOT_LENGTH);
			writel(csum, ioaddr + TYPHOON_REG_BOOT_CHECKSUM);
			writel(load_addr, ioaddr + TYPHOON_REG_BOOT_DEST_ADDR);
			writel(0, ioaddr + TYPHOON_REG_BOOT_DATA_HI);
			writel(dpage_dma, ioaddr + TYPHOON_REG_BOOT_DATA_LO);
			typhoon_post_pci_writes(ioaddr);
			writel(TYPHOON_BOOTCMD_SEG_AVAILABLE,
			       ioaddr + TYPHOON_REG_COMMAND);

			image_data += len;
			load_addr += len;
			section_len -= len;
		}
	}

	/* wait for the final chunk to be consumed before announcing the
	 * end of the download
	 */
	if(typhoon_wait_interrupt(ioaddr) < 0 ||
	   readl(ioaddr + TYPHOON_REG_STATUS) !=
	   TYPHOON_STATUS_WAITING_FOR_SEGMENT) {
		printk(KERN_ERR "%s: final segment ready timeout\n", tp->name);
		goto err_out_irq;
	}

	writel(TYPHOON_BOOTCMD_DNLD_COMPLETE, ioaddr + TYPHOON_REG_COMMAND);

	if(typhoon_wait_status(ioaddr, TYPHOON_STATUS_WAITING_FOR_BOOT) < 0) {
		printk(KERN_ERR "%s: boot ready timeout, status 0x%0x\n",
		       tp->name, readl(ioaddr + TYPHOON_REG_STATUS));
		goto err_out_irq;
	}

	err = 0;

err_out_irq:
	/* restore the interrupt enable/mask registers we modified above */
	writel(irqMasked, ioaddr + TYPHOON_REG_INTR_MASK);
	writel(irqEnabled, ioaddr + TYPHOON_REG_INTR_ENABLE);

	pci_free_consistent(pdev, PAGE_SIZE, dpage, dpage_dma);

err_out:
	return err;
}
1502
/* Hand the shared-memory boot record to the 3XP and boot the runtime.
 *
 * @initial_status: the status value the card must be sitting at before
 * we start (e.g. waiting-for-boot after a firmware download).
 *
 * Returns 0 on success, -ETIMEDOUT if the card fails to reach the
 * expected state at either handshake.
 */
static int
typhoon_boot_3XP(struct typhoon *tp, u32 initial_status)
{
	unsigned long ioaddr = tp->ioaddr;

	if(typhoon_wait_status(ioaddr, initial_status) < 0) {
		printk(KERN_ERR "%s: boot ready timeout\n", tp->name);
		goto out_timeout;
	}

	/* post the bus address of the boot record (the shared block set
	 * up in typhoon_init_interface()), then issue the register-boot-
	 * record command
	 */
	writel(0, ioaddr + TYPHOON_REG_BOOT_RECORD_ADDR_HI);
	writel(tp->shared_dma, ioaddr + TYPHOON_REG_BOOT_RECORD_ADDR_LO);
	typhoon_post_pci_writes(ioaddr);
	writel(TYPHOON_BOOTCMD_REG_BOOT_RECORD, ioaddr + TYPHOON_REG_COMMAND);

	if(typhoon_wait_status(ioaddr, TYPHOON_STATUS_RUNNING) < 0) {
		printk(KERN_ERR "%s: boot finish timeout (status 0x%x)\n",
		       tp->name, readl(ioaddr + TYPHOON_REG_STATUS));
		goto out_timeout;
	}

	/* Clear the Transmit and Command ready registers
	 */
	writel(0, ioaddr + TYPHOON_REG_TX_HI_READY);
	writel(0, ioaddr + TYPHOON_REG_CMD_READY);
	writel(0, ioaddr + TYPHOON_REG_TX_LO_READY);
	typhoon_post_pci_writes(ioaddr);
	writel(TYPHOON_BOOTCMD_BOOT, ioaddr + TYPHOON_REG_COMMAND);

	return 0;

out_timeout:
	return -ETIMEDOUT;
}
1537
static u32
typhoon_clean_tx(struct typhoon *tp, struct transmit_ring *txRing,
			volatile u32 * index)
{
	u32 lastRead = txRing->lastRead;
	struct tx_desc *tx;
	dma_addr_t skb_dma;
	int dma_len;
	int type;

	/* Reclaim Tx descriptors the card has finished with, walking from
	 * our lastRead position up to the card-updated *index. Returns the
	 * new lastRead; the caller stores it back into the ring.
	 */
	while(lastRead != le32_to_cpu(*index)) {
		tx = (struct tx_desc *) (txRing->ringBase + lastRead);
		type = tx->flags & TYPHOON_TYPE_MASK;

		if(type == TYPHOON_TX_DESC) {
			/* This tx_desc describes a packet.
			 * NOTE(review): assumes the Tx path stashed the skb
			 * pointer in addr/addrHi when queueing -- the queueing
			 * code is not visible in this chunk; confirm there.
			 */
			unsigned long ptr = tx->addr | ((u64)tx->addrHi << 32);
			struct sk_buff *skb = (struct sk_buff *) ptr;
			dev_kfree_skb_irq(skb);
		} else if(type == TYPHOON_FRAG_DESC) {
			/* This tx_desc describes a memory mapping. Free it.
			 */
			skb_dma = (dma_addr_t) le32_to_cpu(tx->addr);
			dma_len = le16_to_cpu(tx->len);
			pci_unmap_single(tp->pdev, skb_dma, dma_len,
				       PCI_DMA_TODEVICE);
		}

		/* Mark the descriptor free before advancing. */
		tx->flags = 0;
		typhoon_inc_tx_index(&lastRead, 1);
	}

	return lastRead;
}
1573
1574 static void
1575 typhoon_tx_complete(struct typhoon *tp, struct transmit_ring *txRing,
1576                         volatile u32 * index)
1577 {
1578         u32 lastRead;
1579         int numDesc = MAX_SKB_FRAGS + 1;
1580
1581         /* This will need changing if we start to use the Hi Tx ring. */
1582         lastRead = typhoon_clean_tx(tp, txRing, index);
1583         if(netif_queue_stopped(tp->dev) && typhoon_num_free(txRing->lastWrite,
1584                                 lastRead, TXLO_ENTRIES) > (numDesc + 2))
1585                 netif_wake_queue(tp->dev);
1586
1587         txRing->lastRead = lastRead;
1588         smp_wmb();
1589 }
1590
static void
typhoon_recycle_rx_skb(struct typhoon *tp, u32 idx)
{
	struct typhoon_indexes *indexes = tp->indexes;
	struct rxbuff_ent *rxb = &tp->rxbuffers[idx];
	struct basic_ring *ring = &tp->rxBuffRing;
	struct rx_free *r;

	/* Give the still-mapped rx buffer at idx back to the card by posting
	 * it on the rx free ring. If the ring has no room, drop the skb.
	 */
	if((ring->lastWrite + sizeof(*r)) % (RXFREE_ENTRIES * sizeof(*r)) ==
				indexes->rxBuffCleared) {
		/* no room in ring, just drop the skb
		 */
		dev_kfree_skb_any(rxb->skb);
		rxb->skb = NULL;
		return;
	}

	r = (struct rx_free *) (ring->ringBase + ring->lastWrite);
	typhoon_inc_rxfree_index(&ring->lastWrite, 1);
	r->virtAddr = idx;
	r->physAddr = cpu_to_le32(rxb->dma_addr);

	/* Tell the card about it -- the wmb() orders the descriptor fill
	 * before the index update the card watches.
	 */
	wmb();
	indexes->rxBuffReady = cpu_to_le32(ring->lastWrite);
}
1617
static int
typhoon_alloc_rx_skb(struct typhoon *tp, u32 idx)
{
	struct typhoon_indexes *indexes = tp->indexes;
	struct rxbuff_ent *rxb = &tp->rxbuffers[idx];
	struct basic_ring *ring = &tp->rxBuffRing;
	struct rx_free *r;
	struct sk_buff *skb;
	dma_addr_t dma_addr;

	/* Allocate and DMA-map a fresh rx buffer for slot idx, then post it
	 * on the rx free ring. Returns 0 on success, -ENOMEM if either the
	 * ring is full or the skb allocation fails.
	 */
	rxb->skb = NULL;

	if((ring->lastWrite + sizeof(*r)) % (RXFREE_ENTRIES * sizeof(*r)) ==
				indexes->rxBuffCleared)
		return -ENOMEM;

	skb = dev_alloc_skb(PKT_BUF_SZ);
	if(!skb)
		return -ENOMEM;

#if 0
	/* Please, 3com, fix the firmware to allow DMA to a unaligned
	 * address! Pretty please?
	 */
	skb_reserve(skb, 2);
#endif

	skb->dev = tp->dev;
	dma_addr = pci_map_single(tp->pdev, skb->tail,
				  PKT_BUF_SZ, PCI_DMA_FROMDEVICE);

	/* Since no card does 64 bit DAC, the high bits will never
	 * change from zero.
	 */
	r = (struct rx_free *) (ring->ringBase + ring->lastWrite);
	typhoon_inc_rxfree_index(&ring->lastWrite, 1);
	r->virtAddr = idx;
	r->physAddr = cpu_to_le32(dma_addr);
	rxb->skb = skb;
	rxb->dma_addr = dma_addr;

	/* Tell the card about it -- wmb() makes the descriptor visible
	 * before the ready index the card polls.
	 */
	wmb();
	indexes->rxBuffReady = cpu_to_le32(ring->lastWrite);
	return 0;
}
1664
static int
typhoon_rx(struct typhoon *tp, struct basic_ring *rxRing, volatile u32 * ready,
	   volatile u32 * cleared, int budget)
{
	struct rx_desc *rx;
	struct sk_buff *skb, *new_skb;
	struct rxbuff_ent *rxb;
	dma_addr_t dma_addr;
	u32 local_ready;
	u32 rxaddr;
	int pkt_len;
	u32 idx;
	u32 csum_bits;
	int received;

	/* Process up to 'budget' received frames from rxRing, between the
	 * *cleared offset (where we left off) and the card's *ready offset.
	 * Returns the number of frames passed up the stack.
	 */
	received = 0;
	local_ready = le32_to_cpu(*ready);
	rxaddr = le32_to_cpu(*cleared);
	while(rxaddr != local_ready && budget > 0) {
		rx = (struct rx_desc *) (rxRing->ringBase + rxaddr);
		/* virtAddr we posted in the free ring comes back in addr */
		idx = rx->addr;
		rxb = &tp->rxbuffers[idx];
		skb = rxb->skb;
		dma_addr = rxb->dma_addr;

		/* advance (and wrap) our position in the rx ring */
		rxaddr += sizeof(struct rx_desc);
		rxaddr %= RX_ENTRIES * sizeof(struct rx_desc);

		if(rx->flags & TYPHOON_RX_ERROR) {
			typhoon_recycle_rx_skb(tp, idx);
			continue;
		}

		pkt_len = le16_to_cpu(rx->frameLen);

		if(pkt_len < rx_copybreak &&
		   (new_skb = dev_alloc_skb(pkt_len + 2)) != NULL) {
			/* Small packet: copy into a fresh, IP-aligned skb and
			 * recycle the original (still-mapped) buffer.
			 */
			new_skb->dev = tp->dev;
			skb_reserve(new_skb, 2);
			pci_dma_sync_single_for_cpu(tp->pdev, dma_addr,
						    PKT_BUF_SZ,
						    PCI_DMA_FROMDEVICE);
			eth_copy_and_sum(new_skb, skb->tail, pkt_len, 0);
			pci_dma_sync_single_for_device(tp->pdev, dma_addr,
						       PKT_BUF_SZ,
						       PCI_DMA_FROMDEVICE);
			skb_put(new_skb, pkt_len);
			typhoon_recycle_rx_skb(tp, idx);
		} else {
			/* Large packet: hand the mapped buffer itself up the
			 * stack and try to refill the slot with a new one.
			 */
			new_skb = skb;
			skb_put(new_skb, pkt_len);
			pci_unmap_single(tp->pdev, dma_addr, PKT_BUF_SZ,
				       PCI_DMA_FROMDEVICE);
			typhoon_alloc_rx_skb(tp, idx);
		}
		new_skb->protocol = eth_type_trans(new_skb, tp->dev);
		/* Trust the hardware checksum only for IP+TCP or IP+UDP
		 * where both the IP and L4 checks passed.
		 */
		csum_bits = rx->rxStatus & (TYPHOON_RX_IP_CHK_GOOD |
			TYPHOON_RX_UDP_CHK_GOOD | TYPHOON_RX_TCP_CHK_GOOD);
		if(csum_bits ==
		   (TYPHOON_RX_IP_CHK_GOOD | TYPHOON_RX_TCP_CHK_GOOD)
		   || csum_bits ==
		   (TYPHOON_RX_IP_CHK_GOOD | TYPHOON_RX_UDP_CHK_GOOD)) {
			new_skb->ip_summed = CHECKSUM_UNNECESSARY;
		} else
			new_skb->ip_summed = CHECKSUM_NONE;

		/* state_lock guards tp->vlgrp against concurrent changes */
		spin_lock(&tp->state_lock);
		if(tp->vlgrp != NULL && rx->rxStatus & TYPHOON_RX_VLAN)
			vlan_hwaccel_receive_skb(new_skb, tp->vlgrp,
						 ntohl(rx->vlanTag) & 0xffff);
		else
			netif_receive_skb(new_skb);
		spin_unlock(&tp->state_lock);

		tp->dev->last_rx = jiffies;
		received++;
		budget--;
	}
	/* publish how far we got, for the card and the next poll */
	*cleared = cpu_to_le32(rxaddr);

	return received;
}
1747
1748 static void
1749 typhoon_fill_free_ring(struct typhoon *tp)
1750 {
1751         u32 i;
1752
1753         for(i = 0; i < RXENT_ENTRIES; i++) {
1754                 struct rxbuff_ent *rxb = &tp->rxbuffers[i];
1755                 if(rxb->skb)
1756                         continue;
1757                 if(typhoon_alloc_rx_skb(tp, i) < 0)
1758                         break;
1759         }
1760 }
1761
static int
typhoon_poll(struct net_device *dev, int *total_budget)
{
	struct typhoon *tp = (struct typhoon *) dev->priv;
	struct typhoon_indexes *indexes = tp->indexes;
	int orig_budget = *total_budget;
	int budget, work_done, done;

	/* NAPI poll: drain command responses, completed transmits, and both
	 * rx rings. Returns 0 (and re-enables the interrupt) when all work
	 * fit within the budget, 1 to be polled again.
	 */
	rmb();
	if(!tp->awaiting_resp && indexes->respReady != indexes->respCleared)
			typhoon_process_response(tp, 0, NULL);

	if(le32_to_cpu(indexes->txLoCleared) != tp->txLoRing.lastRead)
		typhoon_tx_complete(tp, &tp->txLoRing, &indexes->txLoCleared);

	if(orig_budget > dev->quota)
		orig_budget = dev->quota;

	budget = orig_budget;
	work_done = 0;
	done = 1;

	/* Hi priority rx ring first, then the Lo ring with what's left */
	if(indexes->rxHiCleared != indexes->rxHiReady) {
		work_done = typhoon_rx(tp, &tp->rxHiRing, &indexes->rxHiReady,
					&indexes->rxHiCleared, budget);
		budget -= work_done;
	}

	if(indexes->rxLoCleared != indexes->rxLoReady) {
		work_done += typhoon_rx(tp, &tp->rxLoRing, &indexes->rxLoReady,
					&indexes->rxLoCleared, budget);
	}

	if(work_done) {
		*total_budget -= work_done;
		dev->quota -= work_done;

		/* used the whole budget -- there may be more to do */
		if(work_done >= orig_budget)
			done = 0;
	}

	if(le32_to_cpu(indexes->rxBuffCleared) == tp->rxBuffRing.lastWrite) {
		/* rxBuff ring is empty, try to fill it. */
		typhoon_fill_free_ring(tp);
	}

	if(done) {
		netif_rx_complete(dev);
		/* unmask the interrupt we masked in typhoon_interrupt() */
		writel(TYPHOON_INTR_NONE, tp->ioaddr + TYPHOON_REG_INTR_MASK);
		typhoon_post_pci_writes(tp->ioaddr);
	}

	return (done ? 0 : 1);
}
1816
static irqreturn_t
typhoon_interrupt(int irq, void *dev_instance, struct pt_regs *rgs)
{
	struct net_device *dev = (struct net_device *) dev_instance;
	unsigned long ioaddr = dev->base_addr;
	u32 intr_status;

	/* Shared-line interrupt handler: ack the card, mask further
	 * interrupts, and kick the NAPI poll to do the real work.
	 */
	intr_status = readl(ioaddr + TYPHOON_REG_INTR_STATUS);
	if(!(intr_status & TYPHOON_INTR_HOST_INT))
		return IRQ_NONE;

	/* ack all asserted sources by writing the status back */
	writel(intr_status, ioaddr + TYPHOON_REG_INTR_STATUS);

	if(netif_rx_schedule_prep(dev)) {
		/* mask until typhoon_poll() finishes and unmasks */
		writel(TYPHOON_INTR_ALL, ioaddr + TYPHOON_REG_INTR_MASK);
		typhoon_post_pci_writes(ioaddr);
		__netif_rx_schedule(dev);
	} else {
		printk(KERN_ERR "%s: Error, poll already scheduled\n",
		       dev->name);
	}
	return IRQ_HANDLED;
}
1840
1841 static void
1842 typhoon_free_rx_rings(struct typhoon *tp)
1843 {
1844         u32 i;
1845
1846         for(i = 0; i < RXENT_ENTRIES; i++) {
1847                 struct rxbuff_ent *rxb = &tp->rxbuffers[i];
1848                 if(rxb->skb) {
1849                         pci_unmap_single(tp->pdev, rxb->dma_addr, PKT_BUF_SZ,
1850                                        PCI_DMA_FROMDEVICE);
1851                         dev_kfree_skb(rxb->skb);
1852                         rxb->skb = NULL;
1853                 }
1854         }
1855 }
1856
static int
typhoon_sleep(struct typhoon *tp, int state, u16 events)
{
	struct pci_dev *pdev = tp->pdev;
	unsigned long ioaddr = tp->ioaddr;
	struct cmd_desc xp_cmd;
	int err;

	/* Arm the requested wake events, put the 3XP into its sleep state,
	 * then drop the PCI device into power state 'state'.
	 */
	INIT_COMMAND_WITH_RESPONSE(&xp_cmd, TYPHOON_CMD_ENABLE_WAKE_EVENTS);
	xp_cmd.parm1 = events;
	err = typhoon_issue_command(tp, 1, &xp_cmd, 0, NULL);
	if(err < 0) {
		printk(KERN_ERR "%s: typhoon_sleep(): wake events cmd err %d\n",
				tp->name, err);
		return err;
	}

	INIT_COMMAND_NO_RESPONSE(&xp_cmd, TYPHOON_CMD_GOTO_SLEEP);
	err = typhoon_issue_command(tp, 1, &xp_cmd, 0, NULL);
	if(err < 0) {
		printk(KERN_ERR "%s: typhoon_sleep(): sleep cmd err %d\n",
				tp->name, err);
		return err;
	}

	if(typhoon_wait_status(ioaddr, TYPHOON_STATUS_SLEEPING) < 0)
		return -ETIMEDOUT;

	/* Since we cannot monitor the status of the link while sleeping,
	 * tell the world it went away.
	 */
	netif_carrier_off(tp->dev);

	pci_enable_wake(tp->pdev, state, 1);
	pci_disable_device(pdev);
	return pci_set_power_state(pdev, state);
}
1894
static int
typhoon_wakeup(struct typhoon *tp, int wait_type)
{
	struct pci_dev *pdev = tp->pdev;
	unsigned long ioaddr = tp->ioaddr;

	/* Bring the device back to D0 and wake the 3XP out of its sleep
	 * image. wait_type is passed through to typhoon_reset() if a full
	 * reset turns out to be needed.
	 */
	pci_set_power_state(pdev, 0);
	pci_restore_state(pdev, tp->pci_state);

	/* Post 2.x.x versions of the Sleep Image require a reset before
	 * we can download the Runtime Image. But let's not make users of
	 * the old firmware pay for the reset.
	 */
	writel(TYPHOON_BOOTCMD_WAKEUP, ioaddr + TYPHOON_REG_COMMAND);
	if(typhoon_wait_status(ioaddr, TYPHOON_STATUS_WAITING_FOR_HOST) < 0 ||
			(tp->capabilities & TYPHOON_WAKEUP_NEEDS_RESET))
		return typhoon_reset(ioaddr, wait_type);

	return 0;
}
1915
static int
typhoon_start_runtime(struct typhoon *tp)
{
	struct net_device *dev = tp->dev;
	unsigned long ioaddr = tp->ioaddr;
	struct cmd_desc xp_cmd;
	int err;

	/* Download and boot the runtime image, then configure it with a
	 * series of commands. On any failure, reset the card and reinit
	 * the rings so we are left in a known state.
	 */
	typhoon_init_rings(tp);
	typhoon_fill_free_ring(tp);

	err = typhoon_download_firmware(tp);
	if(err < 0) {
		printk("%s: cannot load runtime on 3XP\n", tp->name);
		goto error_out;
	}

	if(typhoon_boot_3XP(tp, TYPHOON_STATUS_WAITING_FOR_BOOT) < 0) {
		printk("%s: cannot boot 3XP\n", tp->name);
		err = -EIO;
		goto error_out;
	}

	INIT_COMMAND_NO_RESPONSE(&xp_cmd, TYPHOON_CMD_SET_MAX_PKT_SIZE);
	xp_cmd.parm1 = cpu_to_le16(PKT_BUF_SZ);
	err = typhoon_issue_command(tp, 1, &xp_cmd, 0, NULL);
	if(err < 0)
		goto error_out;

	/* MAC address goes to the card as 16 high bits + 32 low bits */
	INIT_COMMAND_NO_RESPONSE(&xp_cmd, TYPHOON_CMD_SET_MAC_ADDRESS);
	xp_cmd.parm1 = cpu_to_le16(ntohs(*(u16 *)&dev->dev_addr[0]));
	xp_cmd.parm2 = cpu_to_le32(ntohl(*(u32 *)&dev->dev_addr[2]));
	err = typhoon_issue_command(tp, 1, &xp_cmd, 0, NULL);
	if(err < 0)
		goto error_out;

	/* Disable IRQ coalescing -- we can reenable it when 3Com gives
	 * us some more information on how to control it.
	 */
	INIT_COMMAND_WITH_RESPONSE(&xp_cmd, TYPHOON_CMD_IRQ_COALESCE_CTRL);
	xp_cmd.parm1 = 0;
	err = typhoon_issue_command(tp, 1, &xp_cmd, 0, NULL);
	if(err < 0)
		goto error_out;

	INIT_COMMAND_NO_RESPONSE(&xp_cmd, TYPHOON_CMD_XCVR_SELECT);
	xp_cmd.parm1 = tp->xcvr_select;
	err = typhoon_issue_command(tp, 1, &xp_cmd, 0, NULL);
	if(err < 0)
		goto error_out;

	INIT_COMMAND_NO_RESPONSE(&xp_cmd, TYPHOON_CMD_VLAN_TYPE_WRITE);
	xp_cmd.parm1 = __constant_cpu_to_le16(ETH_P_8021Q);
	err = typhoon_issue_command(tp, 1, &xp_cmd, 0, NULL);
	if(err < 0)
		goto error_out;

	/* state_lock guards tp->offload while we snapshot it */
	INIT_COMMAND_NO_RESPONSE(&xp_cmd, TYPHOON_CMD_SET_OFFLOAD_TASKS);
	spin_lock_bh(&tp->state_lock);
	xp_cmd.parm2 = tp->offload;
	xp_cmd.parm3 = tp->offload;
	err = typhoon_issue_command(tp, 1, &xp_cmd, 0, NULL);
	spin_unlock_bh(&tp->state_lock);
	if(err < 0)
		goto error_out;

	typhoon_set_rx_mode(dev);

	INIT_COMMAND_NO_RESPONSE(&xp_cmd, TYPHOON_CMD_TX_ENABLE);
	err = typhoon_issue_command(tp, 1, &xp_cmd, 0, NULL);
	if(err < 0)
		goto error_out;

	INIT_COMMAND_WITH_RESPONSE(&xp_cmd, TYPHOON_CMD_RX_ENABLE);
	err = typhoon_issue_command(tp, 1, &xp_cmd, 0, NULL);
	if(err < 0)
		goto error_out;

	/* publish the state change before enabling interrupts */
	tp->card_state = Running;
	smp_wmb();

	writel(TYPHOON_INTR_ENABLE_ALL, ioaddr + TYPHOON_REG_INTR_ENABLE);
	writel(TYPHOON_INTR_NONE, ioaddr + TYPHOON_REG_INTR_MASK);
	typhoon_post_pci_writes(ioaddr);

	return 0;

error_out:
	typhoon_reset(ioaddr, WaitNoSleep);
	typhoon_free_rx_rings(tp);
	typhoon_init_rings(tp);
	return err;
}
2009
static int
typhoon_stop_runtime(struct typhoon *tp, int wait_type)
{
	struct typhoon_indexes *indexes = tp->indexes;
	struct transmit_ring *txLo = &tp->txLoRing;
	unsigned long ioaddr = tp->ioaddr;
	struct cmd_desc xp_cmd;
	int i;

	/* Quiesce the running firmware: stop rx, drain tx, save stats,
	 * halt the 3XP, and reset it. Returns -ETIMEDOUT only if the
	 * final reset fails.
	 */

	/* Disable interrupts early, since we can't schedule a poll
	 * when called with !netif_running(). This will be posted
	 * when we force the posting of the command.
	 */
	writel(TYPHOON_INTR_NONE, ioaddr + TYPHOON_REG_INTR_ENABLE);

	INIT_COMMAND_NO_RESPONSE(&xp_cmd, TYPHOON_CMD_RX_DISABLE);
	typhoon_issue_command(tp, 1, &xp_cmd, 0, NULL);

	/* Wait 1/2 sec for any outstanding transmits to occur
	 * We'll cleanup after the reset if this times out.
	 */
	for(i = 0; i < TYPHOON_WAIT_TIMEOUT; i++) {
		if(indexes->txLoCleared == cpu_to_le32(txLo->lastWrite))
			break;
		udelay(TYPHOON_UDELAY);
	}

	if(i == TYPHOON_WAIT_TIMEOUT)
		printk(KERN_ERR
		       "%s: halt timed out waiting for Tx to complete\n",
		       tp->name);

	INIT_COMMAND_NO_RESPONSE(&xp_cmd, TYPHOON_CMD_TX_DISABLE);
	typhoon_issue_command(tp, 1, &xp_cmd, 0, NULL);

	/* save the statistics so when we bring the interface up again,
	 * the values reported to userspace are correct.
	 */
	tp->card_state = Sleeping;
	smp_wmb();
	typhoon_do_get_stats(tp);
	memcpy(&tp->stats_saved, &tp->stats, sizeof(struct net_device_stats));

	INIT_COMMAND_NO_RESPONSE(&xp_cmd, TYPHOON_CMD_HALT);
	typhoon_issue_command(tp, 1, &xp_cmd, 0, NULL);

	if(typhoon_wait_status(ioaddr, TYPHOON_STATUS_HALTED) < 0)
		printk(KERN_ERR "%s: timed out waiting for 3XP to halt\n",
		       tp->name);

	if(typhoon_reset(ioaddr, wait_type) < 0) {
		printk(KERN_ERR "%s: unable to reset 3XP\n", tp->name);
		return -ETIMEDOUT;
	}

	/* cleanup any outstanding Tx packets */
	if(indexes->txLoCleared != cpu_to_le32(txLo->lastWrite)) {
		indexes->txLoCleared = cpu_to_le32(txLo->lastWrite);
		typhoon_clean_tx(tp, &tp->txLoRing, &indexes->txLoCleared);
	}

	return 0;
}
2073
2074 static void
2075 typhoon_tx_timeout(struct net_device *dev)
2076 {
2077         struct typhoon *tp = (struct typhoon *) dev->priv;
2078
2079         if(typhoon_reset(dev->base_addr, WaitNoSleep) < 0) {
2080                 printk(KERN_WARNING "%s: could not reset in tx timeout\n",
2081                                         dev->name);
2082                 goto truely_dead;
2083         }
2084
2085         /* If we ever start using the Hi ring, it will need cleaning too */
2086         typhoon_clean_tx(tp, &tp->txLoRing, &tp->indexes->txLoCleared);
2087         typhoon_free_rx_rings(tp);
2088
2089         if(typhoon_start_runtime(tp) < 0) {
2090                 printk(KERN_ERR "%s: could not start runtime in tx timeout\n",
2091                                         dev->name);
2092                 goto truely_dead;
2093         }
2094
2095         netif_wake_queue(dev);
2096         return;
2097
2098 truely_dead:
2099         /* Reset the hardware, and turn off carrier to avoid more timeouts */
2100         typhoon_reset(dev->base_addr, NoWait);
2101         netif_carrier_off(dev);
2102 }
2103
static int
typhoon_open(struct net_device *dev)
{
	struct typhoon *tp = (struct typhoon *) dev->priv;
	int err;

	/* Bring the interface up: wake the card, grab the (shared) IRQ,
	 * and start the runtime image. On failure we try to put the card
	 * back to sleep so it is not left half-awake.
	 */
	err = typhoon_wakeup(tp, WaitSleep);
	if(err < 0) {
		printk(KERN_ERR "%s: unable to wakeup device\n", dev->name);
		goto out_sleep;
	}

	err = request_irq(dev->irq, &typhoon_interrupt, SA_SHIRQ,
				dev->name, dev);
	if(err < 0)
		goto out_sleep;

	err = typhoon_start_runtime(tp);
	if(err < 0)
		goto out_irq;

	netif_start_queue(dev);
	return 0;

out_irq:
	free_irq(dev->irq, dev);

out_sleep:
	/* reboot the sleep image before sleeping; if even that fails,
	 * just hard-reset the card.
	 */
	if(typhoon_boot_3XP(tp, TYPHOON_STATUS_WAITING_FOR_HOST) < 0) {
		printk(KERN_ERR "%s: unable to reboot into sleep img\n",
				dev->name);
		typhoon_reset(dev->base_addr, NoWait);
		goto out;
	}

	if(typhoon_sleep(tp, 3, 0) < 0)
		printk(KERN_ERR "%s: unable to go back to sleep\n", dev->name);

out:
	return err;
}
2145
static int
typhoon_close(struct net_device *dev)
{
	struct typhoon *tp = (struct typhoon *) dev->priv;

	/* Bring the interface down: stop the runtime, release the IRQ,
	 * free the rings, and put the card back into its sleep image.
	 * Failures are logged but we always report success to the stack.
	 */
	netif_stop_queue(dev);

	if(typhoon_stop_runtime(tp, WaitSleep) < 0)
		printk(KERN_ERR "%s: unable to stop runtime\n", dev->name);

	/* Make sure there is no irq handler running on a different CPU. */
	typhoon_synchronize_irq(dev->irq);
	free_irq(dev->irq, dev);

	typhoon_free_rx_rings(tp);
	typhoon_init_rings(tp);

	if(typhoon_boot_3XP(tp, TYPHOON_STATUS_WAITING_FOR_HOST) < 0)
		printk(KERN_ERR "%s: unable to boot sleep image\n", dev->name);

	if(typhoon_sleep(tp, 3, 0) < 0)
		printk(KERN_ERR "%s: unable to put card to sleep\n", dev->name);

	return 0;
}
2171
2172 #ifdef CONFIG_PM
static int
typhoon_resume(struct pci_dev *pdev)
{
	struct net_device *dev = pci_get_drvdata(pdev);
	struct typhoon *tp = (struct typhoon *) dev->priv;

	/* PM resume: wake the card and restart the runtime. Returns -EBUSY
	 * (after a hard reset) if the card cannot be revived.
	 */

	/* If we're down, resume when we are upped.
	 */
	if(!netif_running(dev))
		return 0;

	if(typhoon_wakeup(tp, WaitNoSleep) < 0) {
		printk(KERN_ERR "%s: critical: could not wake up in resume\n",
				dev->name);
		goto reset;
	}

	if(typhoon_start_runtime(tp) < 0) {
		printk(KERN_ERR "%s: critical: could not start runtime in "
				"resume\n", dev->name);
		goto reset;
	}

	netif_device_attach(dev);
	netif_start_queue(dev);
	return 0;

reset:
	typhoon_reset(dev->base_addr, NoWait);
	return -EBUSY;
}
2204
static int
typhoon_suspend(struct pci_dev *pdev, u32 state)
{
	struct net_device *dev = pci_get_drvdata(pdev);
	struct typhoon *tp = (struct typhoon *) dev->priv;
	struct cmd_desc xp_cmd;

	/* PM suspend: stop the runtime, boot the sleep image, reprogram the
	 * MAC address and a minimal rx filter for wake-on-LAN matching, and
	 * put the card to sleep with the configured wake events. On any
	 * failure we resume the device and return -EBUSY.
	 */

	/* If we're down, we're already suspended.
	 */
	if(!netif_running(dev))
		return 0;

	/* Magic-packet wake and VLANs don't mix (see the VLAN firmware
	 * issues in the file header); refuse the combination.
	 */
	spin_lock_bh(&tp->state_lock);
	if(tp->vlgrp && tp->wol_events & TYPHOON_WAKE_MAGIC_PKT) {
		spin_unlock_bh(&tp->state_lock);
		printk(KERN_ERR "%s: cannot do WAKE_MAGIC with VLANS\n",
				dev->name);
		return -EBUSY;
	}
	spin_unlock_bh(&tp->state_lock);

	netif_device_detach(dev);

	if(typhoon_stop_runtime(tp, WaitNoSleep) < 0) {
		printk(KERN_ERR "%s: unable to stop runtime\n", dev->name);
		goto need_resume;
	}

	typhoon_free_rx_rings(tp);
	typhoon_init_rings(tp);

	if(typhoon_boot_3XP(tp, TYPHOON_STATUS_WAITING_FOR_HOST) < 0) {
		printk(KERN_ERR "%s: unable to boot sleep image\n", dev->name);
		goto need_resume;
	}

	INIT_COMMAND_NO_RESPONSE(&xp_cmd, TYPHOON_CMD_SET_MAC_ADDRESS);
	xp_cmd.parm1 = cpu_to_le16(ntohs(*(u16 *)&dev->dev_addr[0]));
	xp_cmd.parm2 = cpu_to_le32(ntohl(*(u32 *)&dev->dev_addr[2]));
	if(typhoon_issue_command(tp, 1, &xp_cmd, 0, NULL) < 0) {
		printk(KERN_ERR "%s: unable to set mac address in suspend\n",
				dev->name);
		goto need_resume;
	}

	INIT_COMMAND_NO_RESPONSE(&xp_cmd, TYPHOON_CMD_SET_RX_FILTER);
	xp_cmd.parm1 = TYPHOON_RX_FILTER_DIRECTED | TYPHOON_RX_FILTER_BROADCAST;
	if(typhoon_issue_command(tp, 1, &xp_cmd, 0, NULL) < 0) {
		printk(KERN_ERR "%s: unable to set rx filter in suspend\n",
				dev->name);
		goto need_resume;
	}

	if(typhoon_sleep(tp, state, tp->wol_events) < 0) {
		printk(KERN_ERR "%s: unable to put card to sleep\n", dev->name);
		goto need_resume;
	}

	return 0;

need_resume:
	typhoon_resume(pdev);
	return -EBUSY;
}
2269
2270 static int
2271 typhoon_enable_wake(struct pci_dev *pdev, u32 state, int enable)
2272 {
2273         return pci_enable_wake(pdev, state, enable);
2274 }
2275 #endif
2276
2277 static int __devinit
2278 typhoon_init_one(struct pci_dev *pdev, const struct pci_device_id *ent)
2279 {
2280         static int did_version = 0;
2281         struct net_device *dev;
2282         struct typhoon *tp;
2283         int card_id = (int) ent->driver_data;
2284         unsigned long ioaddr;
2285         void *shared;
2286         dma_addr_t shared_dma;
2287         struct cmd_desc xp_cmd;
2288         struct resp_desc xp_resp[3];
2289         int i;
2290         int err = 0;
2291
2292         if(!did_version++)
2293                 printk(KERN_INFO "%s", version);
2294
2295         dev = alloc_etherdev(sizeof(*tp));
2296         if(dev == NULL) {
2297                 printk(ERR_PFX "%s: unable to alloc new net device\n",
2298                        pci_name(pdev));
2299                 err = -ENOMEM;
2300                 goto error_out;
2301         }
2302         SET_MODULE_OWNER(dev);
2303         SET_NETDEV_DEV(dev, &pdev->dev);
2304
2305         err = pci_enable_device(pdev);
2306         if(err < 0) {
2307                 printk(ERR_PFX "%s: unable to enable device\n",
2308                        pci_name(pdev));
2309                 goto error_out_dev;
2310         }
2311
2312         /* If we transitioned from D3->D0 in pci_enable_device(),
2313          * we lost our configuration and need to restore it to the
2314          * conditions at boot.
2315          */
2316         pci_restore_state(pdev, NULL);
2317
2318         err = pci_set_dma_mask(pdev, 0xffffffffULL);
2319         if(err < 0) {
2320                 printk(ERR_PFX "%s: No usable DMA configuration\n",
2321                        pci_name(pdev));
2322                 goto error_out_dev;
2323         }
2324
2325         /* sanity checks, resource #1 is our mmio area
2326          */
2327         if(!(pci_resource_flags(pdev, 1) & IORESOURCE_MEM)) {
2328                 printk(ERR_PFX
2329                        "%s: region #1 not a PCI MMIO resource, aborting\n",
2330                        pci_name(pdev));
2331                 err = -ENODEV;
2332                 goto error_out_dev;
2333         }
2334         if(pci_resource_len(pdev, 1) < 128) {
2335                 printk(ERR_PFX "%s: Invalid PCI MMIO region size, aborting\n",
2336                        pci_name(pdev));
2337                 err = -ENODEV;
2338                 goto error_out_dev;
2339         }
2340
2341         err = pci_request_regions(pdev, "typhoon");
2342         if(err < 0) {
2343                 printk(ERR_PFX "%s: could not request regions\n",
2344                        pci_name(pdev));
2345                 goto error_out_dev;
2346         }
2347
2348         pci_set_master(pdev);
2349         pci_set_mwi(pdev);
2350
2351         /* map our MMIO region
2352          */
2353         ioaddr = pci_resource_start(pdev, 1);
2354         ioaddr = (unsigned long) ioremap(ioaddr, 128);
2355         if(!ioaddr) {
2356                 printk(ERR_PFX "%s: cannot remap MMIO, aborting\n",
2357                        pci_name(pdev));
2358                 err = -EIO;
2359                 goto error_out_regions;
2360         }
2361         dev->base_addr = ioaddr;
2362
2363         /* allocate pci dma space for rx and tx descriptor rings
2364          */
2365         shared = pci_alloc_consistent(pdev, sizeof(struct typhoon_shared),
2366                                       &shared_dma);
2367         if(!shared) {
2368                 printk(ERR_PFX "%s: could not allocate DMA memory\n",
2369                        pci_name(pdev));
2370                 err = -ENOMEM;
2371                 goto error_out_remap;
2372         }
2373
2374         dev->irq = pdev->irq;
2375         tp = dev->priv;
2376         tp->shared = (struct typhoon_shared *) shared;
2377         tp->shared_dma = shared_dma;
2378         tp->pdev = pdev;
2379         tp->tx_pdev = pdev;
2380         tp->ioaddr = dev->base_addr;
2381         tp->tx_ioaddr = dev->base_addr;
2382         tp->dev = dev;
2383
2384         /* need to be able to restore PCI state after a suspend */
2385         pci_save_state(pdev, tp->pci_state);
2386
2387         /* Init sequence:
2388          * 1) Reset the adapter to clear any bad juju
2389          * 2) Reload the sleep image
2390          * 3) Boot the sleep image
2391          * 4) Get the hardware address.
2392          * 5) Put the card to sleep.
2393          */
2394         if(typhoon_reset(ioaddr, WaitSleep) < 0) {
2395                 printk(ERR_PFX "%s: could not reset 3XP\n", pci_name(pdev));
2396                 err = -EIO;
2397                 goto error_out_dma;
2398         }
2399
2400         /* dev->name is not valid until we register, but we need to
2401          * use some common routines to initialize the card. So that those
2402          * routines print the right name, we keep our own pointer to the name
2403          */
2404         tp->name = pci_name(pdev);
2405
2406         typhoon_init_interface(tp);
2407         typhoon_init_rings(tp);
2408
2409         if(typhoon_boot_3XP(tp, TYPHOON_STATUS_WAITING_FOR_HOST) < 0) {
2410                 printk(ERR_PFX "%s: cannot boot 3XP sleep image\n",
2411                        pci_name(pdev));
2412                 err = -EIO;
2413                 goto error_out_reset;
2414         }
2415
2416         INIT_COMMAND_WITH_RESPONSE(&xp_cmd, TYPHOON_CMD_READ_MAC_ADDRESS);
2417         if(typhoon_issue_command(tp, 1, &xp_cmd, 1, xp_resp) < 0) {
2418                 printk(ERR_PFX "%s: cannot read MAC address\n",
2419                        pci_name(pdev));
2420                 err = -EIO;
2421                 goto error_out_reset;
2422         }
2423
2424         *(u16 *)&dev->dev_addr[0] = htons(le16_to_cpu(xp_resp[0].parm1));
2425         *(u32 *)&dev->dev_addr[2] = htonl(le32_to_cpu(xp_resp[0].parm2));
2426
2427         if(!is_valid_ether_addr(dev->dev_addr)) {
2428                 printk(ERR_PFX "%s: Could not obtain valid ethernet address, "
2429                        "aborting\n", pci_name(pdev));
2430                 goto error_out_reset;
2431         }
2432
2433         /* Read the Sleep Image version last, so the response is valid
2434          * later when we print out the version reported.
2435          */
2436         INIT_COMMAND_WITH_RESPONSE(&xp_cmd, TYPHOON_CMD_READ_VERSIONS);
2437         if(typhoon_issue_command(tp, 1, &xp_cmd, 3, xp_resp) < 0) {
2438                 printk(ERR_PFX "%s: Could not get Sleep Image version\n",
2439                         pdev->slot_name);
2440                 goto error_out_reset;
2441         }
2442
2443         tp->capabilities = typhoon_card_info[card_id].capabilities;
2444         tp->xcvr_select = TYPHOON_XCVR_AUTONEG;
2445
2446         /* Typhoon 1.0 Sleep Images return one response descriptor to the
2447          * READ_VERSIONS command. Those versions are OK after waking up
2448          * from sleep without needing a reset. Typhoon 1.1+ Sleep Images
2449          * seem to need a little extra help to get started. Since we don't
2450          * know how to nudge it along, just kick it.
2451          */
2452         if(xp_resp[0].numDesc != 0)
2453                 tp->capabilities |= TYPHOON_WAKEUP_NEEDS_RESET;
2454
2455         if(typhoon_sleep(tp, 3, 0) < 0) {
2456                 printk(ERR_PFX "%s: cannot put adapter to sleep\n",
2457                        pci_name(pdev));
2458                 err = -EIO;
2459                 goto error_out_reset;
2460         }
2461
2462         /* The chip-specific entries in the device structure. */
2463         dev->open               = typhoon_open;
2464         dev->hard_start_xmit    = typhoon_start_tx;
2465         dev->stop               = typhoon_close;
2466         dev->set_multicast_list = typhoon_set_rx_mode;
2467         dev->tx_timeout         = typhoon_tx_timeout;
2468         dev->poll               = typhoon_poll;
2469         dev->weight             = 16;
2470         dev->watchdog_timeo     = TX_TIMEOUT;
2471         dev->get_stats          = typhoon_get_stats;
2472         dev->set_mac_address    = typhoon_set_mac_address;
2473         dev->do_ioctl           = typhoon_ioctl;
2474         dev->vlan_rx_register   = typhoon_vlan_rx_register;
2475         dev->vlan_rx_kill_vid   = typhoon_vlan_rx_kill_vid;
2476
2477         /* We can handle scatter gather, up to 16 entries, and
2478          * we can do IP checksumming (only version 4, doh...)
2479          */
2480         dev->features |= NETIF_F_SG | NETIF_F_IP_CSUM;
2481         dev->features |= NETIF_F_HW_VLAN_TX | NETIF_F_HW_VLAN_RX;
2482         dev->features |= NETIF_F_TSO;
2483
2484         if(register_netdev(dev) < 0)
2485                 goto error_out_reset;
2486
2487         /* fixup our local name */
2488         tp->name = dev->name;
2489
2490         pci_set_drvdata(pdev, dev);
2491
2492         printk(KERN_INFO "%s: %s at 0x%lx, ",
2493                dev->name, typhoon_card_info[card_id].name, ioaddr);
2494         for(i = 0; i < 5; i++)
2495                 printk("%2.2x:", dev->dev_addr[i]);
2496         printk("%2.2x\n", dev->dev_addr[i]);
2497
2498         /* xp_resp still contains the response to the READ_VERSIONS command.
2499          * For debugging, let the user know what version he has.
2500          */
2501         if(xp_resp[0].numDesc == 0) {
2502                 /* This is the Typhoon 1.0 type Sleep Image, last 16 bits
2503                  * of version is Month/Day of build.
2504                  */
2505                 u16 monthday = le32_to_cpu(xp_resp[0].parm2) & 0xffff;
2506                 printk(KERN_INFO "%s: Typhoon 1.0 Sleep Image built "
2507                         "%02u/%02u/2000\n", dev->name, monthday >> 8,
2508                         monthday & 0xff);
2509         } else if(xp_resp[0].numDesc == 2) {
2510                 /* This is the Typhoon 1.1+ type Sleep Image
2511                  */
2512                 u32 sleep_ver = le32_to_cpu(xp_resp[0].parm2);
2513                 u8 *ver_string = (u8 *) &xp_resp[1];
2514                 ver_string[25] = 0;
2515                 printk(KERN_INFO "%s: Typhoon 1.1+ Sleep Image version "
2516                         "%u.%u.%u.%u %s\n", dev->name, HIPQUAD(sleep_ver),
2517                         ver_string);
2518         } else {
2519                 printk(KERN_WARNING "%s: Unknown Sleep Image version "
2520                         "(%u:%04x)\n", dev->name, xp_resp[0].numDesc,
2521                         le32_to_cpu(xp_resp[0].parm2));
2522         }
2523                 
2524         return 0;
2525
2526 error_out_reset:
2527         typhoon_reset(ioaddr, NoWait);
2528
2529 error_out_dma:
2530         pci_free_consistent(pdev, sizeof(struct typhoon_shared),
2531                             shared, shared_dma);
2532 error_out_remap:
2533         iounmap((void *) ioaddr);
2534 error_out_regions:
2535         pci_release_regions(pdev);
2536 error_out_dev:
2537         free_netdev(dev);
2538 error_out:
2539         return err;
2540 }
2541
2542 static void __devexit
2543 typhoon_remove_one(struct pci_dev *pdev)
2544 {
2545         struct net_device *dev = pci_get_drvdata(pdev);
2546         struct typhoon *tp = (struct typhoon *) (dev->priv);
2547
2548         unregister_netdev(dev);
2549         pci_set_power_state(pdev, 0);
2550         pci_restore_state(pdev, tp->pci_state);
2551         typhoon_reset(dev->base_addr, NoWait);
2552         iounmap((char *) (dev->base_addr));
2553         pci_free_consistent(pdev, sizeof(struct typhoon_shared),
2554                             tp->shared, tp->shared_dma);
2555         pci_release_regions(pdev);
2556         pci_disable_device(pdev);
2557         pci_set_drvdata(pdev, NULL);
2558         free_netdev(dev);
2559 }
2560
/* PCI glue: binds the 3CR990 device ID table to this driver's
 * probe/remove (and, under CONFIG_PM, power management) entry points.
 */
static struct pci_driver typhoon_driver = {
	.name		= DRV_MODULE_NAME,
	.id_table	= typhoon_pci_tbl,
	.probe		= typhoon_init_one,
	.remove		= __devexit_p(typhoon_remove_one),
#ifdef CONFIG_PM
	.suspend	= typhoon_suspend,
	.resume		= typhoon_resume,
	.enable_wake	= typhoon_enable_wake,
#endif
};
2572
/* Module load: register with the PCI core, which calls
 * typhoon_init_one() for each device matching typhoon_pci_tbl.
 */
static int __init
typhoon_init(void)
{
	return pci_module_init(&typhoon_driver);
}
2578
/* Module unload: unregister the driver; the PCI core invokes
 * typhoon_remove_one() for every device still bound to it.
 */
static void __exit
typhoon_cleanup(void)
{
	pci_unregister_driver(&typhoon_driver);
}
2584
/* Standard module entry/exit hooks. */
module_init(typhoon_init);
module_exit(typhoon_cleanup);