VServer 1.9.2 (patch-2.6.8.1-vs1.9.2.diff)
[linux-2.6.git] / drivers / message / fusion / mptlan.c
1 /*
2  *  linux/drivers/message/fusion/mptlan.c
3  *      IP Over Fibre Channel device driver.
4  *      For use with PCI chip/adapter(s):
5  *          LSIFC9xx/LSI409xx Fibre Channel
6  *      running LSI Logic Fusion MPT (Message Passing Technology) firmware.
7  *
8  *  Credits:
9  *      This driver would not exist if not for Alan Cox's development
10  *      of the linux i2o driver.
11  *
12  *      Special thanks goes to the I2O LAN driver people at the
13  *      University of Helsinki, who, unbeknownst to them, provided
14  *      the inspiration and initial structure for this driver.
15  *
16  *      A huge debt of gratitude is owed to David S. Miller (DaveM)
17  *      for fixing much of the stupid and broken stuff in the early
18  *      driver while porting to sparc64 platform.  THANK YOU!
19  *
20  *      A really huge debt of gratitude is owed to Eddie C. Dost
21  *      for gobs of hard work fixing and optimizing LAN code.
22  *      THANK YOU!
23  *
24  *      (see also mptbase.c)
25  *
26  *  Copyright (c) 2000-2004 LSI Logic Corporation
27  *  Originally By: Noah Romer
28  *  (mailto:mpt_linux_developer@lsil.com)
29  *
30  *  $Id: mptlan.c,v 1.53 2002/10/17 20:15:58 pdelaney Exp $
31  */
32 /*=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=*/
33 /*
34     This program is free software; you can redistribute it and/or modify
35     it under the terms of the GNU General Public License as published by
36     the Free Software Foundation; version 2 of the License.
37
38     This program is distributed in the hope that it will be useful,
39     but WITHOUT ANY WARRANTY; without even the implied warranty of
40     MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
41     GNU General Public License for more details.
42
43     NO WARRANTY
44     THE PROGRAM IS PROVIDED ON AN "AS IS" BASIS, WITHOUT WARRANTIES OR
45     CONDITIONS OF ANY KIND, EITHER EXPRESS OR IMPLIED INCLUDING, WITHOUT
46     LIMITATION, ANY WARRANTIES OR CONDITIONS OF TITLE, NON-INFRINGEMENT,
47     MERCHANTABILITY OR FITNESS FOR A PARTICULAR PURPOSE. Each Recipient is
48     solely responsible for determining the appropriateness of using and
49     distributing the Program and assumes all risks associated with its
50     exercise of rights under this Agreement, including but not limited to
51     the risks and costs of program errors, damage to or loss of data,
52     programs or equipment, and unavailability or interruption of operations.
53
54     DISCLAIMER OF LIABILITY
55     NEITHER RECIPIENT NOR ANY CONTRIBUTORS SHALL HAVE ANY LIABILITY FOR ANY
56     DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
57     DAMAGES (INCLUDING WITHOUT LIMITATION LOST PROFITS), HOWEVER CAUSED AND
58     ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR
59     TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE
60     USE OR DISTRIBUTION OF THE PROGRAM OR THE EXERCISE OF ANY RIGHTS GRANTED
61     HEREUNDER, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGES
62
63     You should have received a copy of the GNU General Public License
64     along with this program; if not, write to the Free Software
65     Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA  02111-1307  USA
66 */
67
68 /*=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=*/
69 /*
70  * Define statements used for debugging
71  */
72 //#define MPT_LAN_IO_DEBUG
73
74 /*=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=*/
75
76 #include "mptlan.h"
77 #include <linux/init.h>
78 #include <linux/module.h>
79 #include <linux/fs.h>
80
81 #define MYNAM           "mptlan"
82
83 MODULE_LICENSE("GPL");
84
85 /*=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=*/
86 /*
87  * MPT LAN message sizes without variable part.
88  */
/* Fixed-size part of a ReceivePost request: the full struct minus its
 * trailing (variable-length) SGE union. */
#define MPT_LAN_RECEIVE_POST_REQUEST_SIZE \
        (sizeof(LANReceivePostRequest_t) - sizeof(SGE_MPI_UNION))

/* Fixed-size part of a 32-bit SGE transaction element: the struct minus
 * one u32 of the variable trailing data. */
#define MPT_LAN_TRANSACTION32_SIZE \
        (sizeof(SGETransaction32_t) - sizeof(u32))
94
95 /*
96  *  Fusion MPT LAN private structures
97  */
98
/* Node in a singly-linked list of "broken" NAA values, used only when
 * QLOGIC_NAA_WORKAROUND is defined: maps an IEEE (FC_ALEN-byte) address
 * to the NAA value that must be used when transmitting to that board. */
struct NAA_Hosed {
	u16 NAA;                /* NAA value to substitute on Tx */
	u8 ieee[FC_ALEN];       /* peer hardware address this entry matches */
	struct NAA_Hosed *next; /* next list entry, NULL-terminated */
};
104
/* Bookkeeping for one Tx or Rx buffer handed to the IOC: the skb itself,
 * its PCI DMA mapping, and the length that was mapped (needed for the
 * matching pci_unmap_single()). */
struct BufferControl {
	struct sk_buff	*skb;   /* NULL when the slot is free */
	dma_addr_t	dma;    /* bus address from pci_map_single() */
	unsigned int	len;    /* mapped length, for unmap */
};
110
/* Per-netdevice private state, reached via netdev_priv(dev). */
struct mpt_lan_priv {
	MPT_ADAPTER *mpt_dev;   /* owning Fusion MPT adapter */
	u8 pnum; /* Port number in the IOC. This is not a Unix network port! */

	atomic_t buckets_out;		/* number of unused buckets on IOC */
	int bucketthresh;		/* Send more when this many left */

	int *mpt_txfidx; /* Free Tx Context list */
	int mpt_txfidx_tail;            /* index of top of free-Tx stack, -1 = empty */
	spinlock_t txfidx_lock;         /* guards mpt_txfidx/mpt_txfidx_tail */

	int *mpt_rxfidx; /* Free Rx Context list */
	int mpt_rxfidx_tail;            /* index of top of free-Rx stack, -1 = empty */
	spinlock_t rxfidx_lock;         /* guards mpt_rxfidx/mpt_rxfidx_tail */

	struct BufferControl *RcvCtl;	/* Receive BufferControl structs */
	struct BufferControl *SendCtl;	/* Send BufferControl structs */

	int max_buckets_out;		/* Max buckets to send to IOC */
	int tx_max_out;			/* IOC's Tx queue len */

	u32 total_posted;               /* lifetime count of buckets posted */
	u32 total_received;             /* lifetime count of buckets returned */
	struct net_device_stats stats;	/* Per device statistics */

	struct work_struct post_buckets_task;   /* deferred bucket reposting */
	/* NOTE(review): presumably a flag preventing double-scheduling of
	 * post_buckets_task -- confirm in mpt_lan_wake_post_buckets_task(). */
	unsigned long post_buckets_active;
};
139
/* Pseudo link-level header carrying dest/source types and FC_ALEN-byte
 * addresses; used by mpt_lan_type_trans() (declared below, body not in
 * this chunk). */
struct mpt_lan_ohdr {
	u16	dtype;          /* destination address type */
	u8	daddr[FC_ALEN]; /* destination hardware address */
	u16	stype;          /* source address type */
	u8	saddr[FC_ALEN]; /* source hardware address */
};
146
147 /*=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=*/
148
149 /*
150  *  Forward protos...
151  */
152 static int  lan_reply (MPT_ADAPTER *ioc, MPT_FRAME_HDR *mf,
153                        MPT_FRAME_HDR *reply);
154 static int  mpt_lan_open(struct net_device *dev);
155 static int  mpt_lan_reset(struct net_device *dev);
156 static int  mpt_lan_close(struct net_device *dev);
157 static void mpt_lan_post_receive_buckets(void *dev_id);
158 static void mpt_lan_wake_post_buckets_task(struct net_device *dev,
159                                            int priority);
160 static int  mpt_lan_receive_post_turbo(struct net_device *dev, u32 tmsg);
161 static int  mpt_lan_receive_post_reply(struct net_device *dev,
162                                        LANReceivePostReply_t *pRecvRep);
163 static int  mpt_lan_send_turbo(struct net_device *dev, u32 tmsg);
164 static int  mpt_lan_send_reply(struct net_device *dev,
165                                LANSendReply_t *pSendRep);
166 static int  mpt_lan_ioc_reset(MPT_ADAPTER *ioc, int reset_phase);
167 static int  mpt_lan_event_process(MPT_ADAPTER *ioc, EventNotificationReply_t *pEvReply);
168 static unsigned short mpt_lan_type_trans(struct sk_buff *skb,
169                                          struct net_device *dev);
170
171 /*=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=*/
172 /*
173  *  Fusion MPT LAN private data
174  */
static int LanCtx = -1;         /* MPT callback context handle; -1 = unregistered */

static u32 max_buckets_out = 127;       /* default receive buckets posted per port */
static u32 tx_max_out_p = 127 - 16;     /* default Tx queue depth handed to the IOC */

/* Lookup table: ioc->id -> net_device, used by lan_reply()/mpt_lan_ioc_reset(). */
static struct net_device *mpt_landev[MPT_MAX_ADAPTERS+1];

#ifdef QLOGIC_NAA_WORKAROUND
static struct NAA_Hosed *mpt_bad_naa = NULL;    /* list of munged-NAA peers */
/* NOTE(review): not static and no visible initializer (RW_LOCK_UNLOCKED /
 * rwlock_init) in this chunk -- confirm it is initialized before first use. */
rwlock_t bad_naa_lock;
#endif
186
187 /*=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=*/
188 /*
189  * Fusion MPT LAN external data
190  */
191 extern int mpt_lan_index;
192
193 /*=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=*/
194 /**
195  *      lan_reply - Handle all data sent from the hardware.
196  *      @ioc: Pointer to MPT_ADAPTER structure
197  *      @mf: Pointer to original MPT request frame (NULL if TurboReply)
198  *      @reply: Pointer to MPT reply frame
199  *
200  *      Returns 1 indicating original alloc'd request frame ptr
201  *      should be freed, or 0 if it shouldn't.
202  */
static int
lan_reply (MPT_ADAPTER *ioc, MPT_FRAME_HDR *mf, MPT_FRAME_HDR *reply)
{
	struct net_device *dev = mpt_landev[ioc->id];
	int FreeReqFrame = 0;

	dioprintk((KERN_INFO MYNAM ": %s/%s: Got reply.\n",
		  IOC_AND_NETDEV_NAMES_s_s(dev)));

//	dioprintk((KERN_INFO MYNAM "@lan_reply: mf = %p, reply = %p\n",
//			mf, reply));

	/* A NULL request frame means this is a "turbo" reply: 'reply' is
	 * not a frame pointer at all but a 32-bit token, recovered below. */
	if (mf == NULL) {
		u32 tmsg = CAST_PTR_TO_U32(reply);

		dioprintk((KERN_INFO MYNAM ": %s/%s: @lan_reply, tmsg %08x\n",
				IOC_AND_NETDEV_NAMES_s_s(dev),
				tmsg));

		switch (GET_LAN_FORM(tmsg)) {

		// NOTE!  (Optimization) First case here is now caught in
		//  mptbase.c::mpt_interrupt() routine and callcack here
		//  is now skipped for this case!  20001218 -sralston
#if 0
		case LAN_REPLY_FORM_MESSAGE_CONTEXT:
//			dioprintk((KERN_INFO MYNAM "/lan_reply: "
//				  "MessageContext turbo reply received\n"));
			FreeReqFrame = 1;
			break;
#endif

		case LAN_REPLY_FORM_SEND_SINGLE:
//			dioprintk((MYNAM "/lan_reply: "
//				  "calling mpt_lan_send_reply (turbo)\n"));

			// Potential BUG here?  -sralston
			//	FreeReqFrame = mpt_lan_send_turbo(dev, tmsg);
			//  If/when mpt_lan_send_turbo would return 1 here,
			//  calling routine (mptbase.c|mpt_interrupt)
			//  would Oops because mf has already been set
			//  to NULL.  So after return from this func,
			//  mpt_interrupt() will attempt to put (NULL) mf ptr
			//  item back onto its adapter FreeQ - Oops!:-(
			//  It's Ok, since mpt_lan_send_turbo() *currently*
			//  always returns 0, but..., just in case:

			(void) mpt_lan_send_turbo(dev, tmsg);
			FreeReqFrame = 0;

			break;

		case LAN_REPLY_FORM_RECEIVE_SINGLE:
//			dioprintk((KERN_INFO MYNAM "@lan_reply: "
//				  "rcv-Turbo = %08x\n", tmsg));
			mpt_lan_receive_post_turbo(dev, tmsg);
			break;

		default:
			printk (KERN_ERR MYNAM "/lan_reply: Got a turbo reply "
				"that I don't know what to do with\n");

			/* CHECKME!  Hmmm...  FreeReqFrame is 0 here; is that right? */

			break;
		}

		/* Turbo path done; mf is NULL, so FreeReqFrame must be 0
		 * here (see the "Potential BUG" comment above). */
		return FreeReqFrame;
	}

//	msg = (u32 *) reply;
//	dioprintk((KERN_INFO MYNAM "@lan_reply: msg = %08x %08x %08x %08x\n",
//		  le32_to_cpu(msg[0]), le32_to_cpu(msg[1]),
//		  le32_to_cpu(msg[2]), le32_to_cpu(msg[3])));
//	dioprintk((KERN_INFO MYNAM "@lan_reply: Function = %02xh\n",
//		  reply->u.hdr.Function));

	/* Full (non-turbo) reply frame: dispatch on the MPI function code. */
	switch (reply->u.hdr.Function) {

	case MPI_FUNCTION_LAN_SEND:
	{
		LANSendReply_t *pSendRep;

		pSendRep = (LANSendReply_t *) reply;
		FreeReqFrame = mpt_lan_send_reply(dev, pSendRep);
		break;
	}

	case MPI_FUNCTION_LAN_RECEIVE:
	{
		LANReceivePostReply_t *pRecvRep;

		pRecvRep = (LANReceivePostReply_t *) reply;
		if (pRecvRep->NumberOfContexts) {
			mpt_lan_receive_post_reply(dev, pRecvRep);
			/* Only the final (non-continuation) reply releases
			 * the original request frame. */
			if (!(pRecvRep->MsgFlags & MPI_MSGFLAGS_CONTINUATION_REPLY))
				FreeReqFrame = 1;
		} else
			dioprintk((KERN_INFO MYNAM "@lan_reply: zero context "
				  "ReceivePostReply received.\n"));
		break;
	}

	case MPI_FUNCTION_LAN_RESET:
		/* Just a default reply. Might want to check it to
		 * make sure that everything went ok.
		 */
		FreeReqFrame = 1;
		break;

	case MPI_FUNCTION_EVENT_NOTIFICATION:
	case MPI_FUNCTION_EVENT_ACK:
		/* UPDATE!  20010120 -sralston
		 *  _EVENT_NOTIFICATION should NOT come down this path any more.
		 *  Should be routed to mpt_lan_event_process(), but just in case...
		 */
		FreeReqFrame = 1;
		break;

	default:
		printk (KERN_ERR MYNAM "/lan_reply: Got a non-turbo "
			"reply that I don't know what to do with\n");

		/* CHECKME!  Hmmm...  FreeReqFrame is 0 here; is that right? */
		FreeReqFrame = 1;

		break;
	}

	return FreeReqFrame;
}
334
335 /*=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=*/
/*
 *	mpt_lan_ioc_reset - IOC reset notification callback.
 *	@ioc: adapter being reset
 *	@reset_phase: MPT_IOC_SETUP_RESET, MPT_IOC_PRE_RESET or post-reset
 *
 *	Pre-reset: stop the Tx queue, forget all outstanding buckets and
 *	rebuild the Rx free-context stack.  Post-reset: repost receive
 *	buckets and restart the queue.  Always returns 1 (handled).
 */
static int
mpt_lan_ioc_reset(MPT_ADAPTER *ioc, int reset_phase)
{
	struct net_device *dev = mpt_landev[ioc->id];
	struct mpt_lan_priv *priv = netdev_priv(dev);

	dlprintk((KERN_INFO MYNAM ": IOC %s_reset routed to LAN driver!\n",
			reset_phase==MPT_IOC_SETUP_RESET ? "setup" : (
			reset_phase==MPT_IOC_PRE_RESET ? "pre" : "post")));

	/* Interface never opened (or already closed): nothing to quiesce. */
	if (priv->mpt_rxfidx == NULL)
		return (1);

	if (reset_phase == MPT_IOC_SETUP_RESET) {
		;	/* nothing to do at setup time */
	} else if (reset_phase == MPT_IOC_PRE_RESET) {
		int i;
		unsigned long flags;

		netif_stop_queue(dev);

		dlprintk ((KERN_INFO "mptlan/ioc_reset: called netif_stop_queue for %s.\n", dev->name));

		/* The IOC will lose all posted buckets across the reset. */
		atomic_set(&priv->buckets_out, 0);

		/* Reset Rx Free Tail index and re-populate the queue. */
		spin_lock_irqsave(&priv->rxfidx_lock, flags);
		priv->mpt_rxfidx_tail = -1;
		for (i = 0; i < priv->max_buckets_out; i++)
			priv->mpt_rxfidx[++priv->mpt_rxfidx_tail] = i;
		spin_unlock_irqrestore(&priv->rxfidx_lock, flags);
	} else {
		/* Post-reset: hand fresh buckets to the IOC and resume Tx. */
		mpt_lan_post_receive_buckets(dev);
		netif_wake_queue(dev);
	}

	return 1;
}
374
375 /*=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=*/
/*
 *	mpt_lan_event_process - MPT event notification callback.
 *	@ioc: adapter that raised the event
 *	@pEvReply: event notification reply frame
 *
 *	Currently every event is deliberately ignored -- the switch below
 *	exists only to enumerate the known event codes; all cases fall
 *	through to break.  Returns 1 to mark the event as handled.
 */
static int
mpt_lan_event_process(MPT_ADAPTER *ioc, EventNotificationReply_t *pEvReply)
{
	dlprintk((KERN_INFO MYNAM ": MPT event routed to LAN driver!\n"));

	switch (le32_to_cpu(pEvReply->Event)) {
	case MPI_EVENT_NONE:				/* 00 */
	case MPI_EVENT_LOG_DATA:			/* 01 */
	case MPI_EVENT_STATE_CHANGE:			/* 02 */
	case MPI_EVENT_UNIT_ATTENTION:			/* 03 */
	case MPI_EVENT_IOC_BUS_RESET:			/* 04 */
	case MPI_EVENT_EXT_BUS_RESET:			/* 05 */
	case MPI_EVENT_RESCAN:				/* 06 */
		/* Ok, do we need to do anything here? As far as
		   I can tell, this is when a new device gets added
		   to the loop. */
	case MPI_EVENT_LINK_STATUS_CHANGE:		/* 07 */
	case MPI_EVENT_LOOP_STATE_CHANGE:		/* 08 */
	case MPI_EVENT_LOGOUT:				/* 09 */
	case MPI_EVENT_EVENT_CHANGE:			/* 0A */
	default:
		break;
	}

	/*
	 *  NOTE: pEvent->AckRequired handling now done in mptbase.c;
	 *  Do NOT do it here now!
	 */

	return 1;
}
407
408 /*=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=*/
409 static int
410 mpt_lan_open(struct net_device *dev)
411 {
412         struct mpt_lan_priv *priv = netdev_priv(dev);
413         int i;
414
415         if (mpt_lan_reset(dev) != 0) {
416                 MPT_ADAPTER *mpt_dev = priv->mpt_dev;
417
418                 printk (KERN_WARNING MYNAM "/lan_open: lan_reset failed.");
419
420                 if (mpt_dev->active)
421                         printk ("The ioc is active. Perhaps it needs to be"
422                                 " reset?\n");
423                 else
424                         printk ("The ioc in inactive, most likely in the "
425                                 "process of being reset. Please try again in "
426                                 "a moment.\n");
427         }
428
429         priv->mpt_txfidx = kmalloc(priv->tx_max_out * sizeof(int), GFP_KERNEL);
430         if (priv->mpt_txfidx == NULL)
431                 goto out;
432         priv->mpt_txfidx_tail = -1;
433
434         priv->SendCtl = kmalloc(priv->tx_max_out * sizeof(struct BufferControl),
435                                 GFP_KERNEL);
436         if (priv->SendCtl == NULL)
437                 goto out_mpt_txfidx;
438         for (i = 0; i < priv->tx_max_out; i++) {
439                 memset(&priv->SendCtl[i], 0, sizeof(struct BufferControl));
440                 priv->mpt_txfidx[++priv->mpt_txfidx_tail] = i;
441         }
442
443         dlprintk((KERN_INFO MYNAM "@lo: Finished initializing SendCtl\n"));
444
445         priv->mpt_rxfidx = kmalloc(priv->max_buckets_out * sizeof(int),
446                                    GFP_KERNEL);
447         if (priv->mpt_rxfidx == NULL)
448                 goto out_SendCtl;
449         priv->mpt_rxfidx_tail = -1;
450
451         priv->RcvCtl = kmalloc(priv->max_buckets_out *
452                                                 sizeof(struct BufferControl),
453                                GFP_KERNEL);
454         if (priv->RcvCtl == NULL)
455                 goto out_mpt_rxfidx;
456         for (i = 0; i < priv->max_buckets_out; i++) {
457                 memset(&priv->RcvCtl[i], 0, sizeof(struct BufferControl));
458                 priv->mpt_rxfidx[++priv->mpt_rxfidx_tail] = i;
459         }
460
461 /**/    dlprintk((KERN_INFO MYNAM "/lo: txfidx contains - "));
462 /**/    for (i = 0; i < priv->tx_max_out; i++)
463 /**/            dlprintk((" %xh", priv->mpt_txfidx[i]));
464 /**/    dlprintk(("\n"));
465
466         dlprintk((KERN_INFO MYNAM "/lo: Finished initializing RcvCtl\n"));
467
468         mpt_lan_post_receive_buckets(dev);
469         printk(KERN_INFO MYNAM ": %s/%s: interface up & active\n",
470                         IOC_AND_NETDEV_NAMES_s_s(dev));
471
472         if (mpt_event_register(LanCtx, mpt_lan_event_process) != 0) {
473                 printk (KERN_WARNING MYNAM "/lo: Unable to register for Event"
474                         " Notifications. This is a bad thing! We're not going "
475                         "to go ahead, but I'd be leery of system stability at "
476                         "this point.\n");
477         }
478
479         netif_start_queue(dev);
480         dlprintk((KERN_INFO MYNAM "/lo: Done.\n"));
481
482         return 0;
483 out_mpt_rxfidx:
484         kfree(priv->mpt_rxfidx);
485         priv->mpt_rxfidx = NULL;
486 out_SendCtl:
487         kfree(priv->SendCtl);
488         priv->SendCtl = NULL;
489 out_mpt_txfidx:
490         kfree(priv->mpt_txfidx);
491         priv->mpt_txfidx = NULL;
492 out:    return -ENOMEM;
493 }
494
495 /*=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=*/
496 /* Send a LanReset message to the FW. This should result in the FW returning
497    any buckets it still has. */
498 static int
499 mpt_lan_reset(struct net_device *dev)
500 {
501         MPT_FRAME_HDR *mf;
502         LANResetRequest_t *pResetReq;
503         struct mpt_lan_priv *priv = netdev_priv(dev);
504
505         mf = mpt_get_msg_frame(LanCtx, priv->mpt_dev);
506
507         if (mf == NULL) {
508 /*              dlprintk((KERN_ERR MYNAM "/reset: Evil funkiness abounds! "
509                 "Unable to allocate a request frame.\n"));
510 */
511                 return -1;
512         }
513
514         pResetReq = (LANResetRequest_t *) mf;
515
516         pResetReq->Function     = MPI_FUNCTION_LAN_RESET;
517         pResetReq->ChainOffset  = 0;
518         pResetReq->Reserved     = 0;
519         pResetReq->PortNumber   = priv->pnum;
520         pResetReq->MsgFlags     = 0;
521         pResetReq->Reserved2    = 0;
522
523         mpt_put_msg_frame(LanCtx, priv->mpt_dev, mf);
524
525         return 0;
526 }
527
528 /*=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=*/
static int
mpt_lan_close(struct net_device *dev)
{
	struct mpt_lan_priv *priv = netdev_priv(dev);
	MPT_ADAPTER *mpt_dev = priv->mpt_dev;
	unsigned int timeout;
	int i;

	dlprintk((KERN_INFO MYNAM ": mpt_lan_close called\n"));

	mpt_event_deregister(LanCtx);

	dlprintk((KERN_INFO MYNAM ":lan_close: Posted %d buckets "
		  "since driver was loaded, %d still out\n",
		  priv->total_posted,atomic_read(&priv->buckets_out)));

	netif_stop_queue(dev);

	/* Ask the FW to hand back all the buckets it still holds... */
	mpt_lan_reset(dev);

	/* ...then give it up to 2*HZ short sleeps to actually do so. */
	timeout = 2 * HZ;
	while (atomic_read(&priv->buckets_out) && --timeout) {
		set_current_state(TASK_INTERRUPTIBLE);
		schedule_timeout(1);
	}

	/* Unmap and free any receive buffers the IOC never returned. */
	for (i = 0; i < priv->max_buckets_out; i++) {
		if (priv->RcvCtl[i].skb != NULL) {
/**/			dlprintk((KERN_INFO MYNAM "/lan_close: bucket %05x "
/**/				  "is still out\n", i));
			pci_unmap_single(mpt_dev->pcidev, priv->RcvCtl[i].dma,
					 priv->RcvCtl[i].len,
					 PCI_DMA_FROMDEVICE);
			dev_kfree_skb(priv->RcvCtl[i].skb);
		}
	}

	kfree (priv->RcvCtl);
	kfree (priv->mpt_rxfidx);

	/* Same cleanup for any transmit buffers still in flight. */
	for (i = 0; i < priv->tx_max_out; i++) {
		if (priv->SendCtl[i].skb != NULL) {
			pci_unmap_single(mpt_dev->pcidev, priv->SendCtl[i].dma,
					 priv->SendCtl[i].len,
					 PCI_DMA_TODEVICE);
			dev_kfree_skb(priv->SendCtl[i].skb);
		}
	}

	kfree(priv->SendCtl);
	kfree(priv->mpt_txfidx);

	atomic_set(&priv->buckets_out, 0);

	printk(KERN_INFO MYNAM ": %s/%s: interface down & inactive\n",
			IOC_AND_NETDEV_NAMES_s_s(dev));

	return 0;
}
588
589 /*=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=*/
590 static struct net_device_stats *
591 mpt_lan_get_stats(struct net_device *dev)
592 {
593         struct mpt_lan_priv *priv = netdev_priv(dev);
594
595         return (struct net_device_stats *) &priv->stats;
596 }
597
598 /*=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=*/
599 static int
600 mpt_lan_change_mtu(struct net_device *dev, int new_mtu)
601 {
602         if ((new_mtu < MPT_LAN_MIN_MTU) || (new_mtu > MPT_LAN_MAX_MTU))
603                 return -EINVAL;
604         dev->mtu = new_mtu;
605         return 0;
606 }
607
608 /*=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=*/
609 /* Tx timeout handler. */
610 static void
611 mpt_lan_tx_timeout(struct net_device *dev)
612 {
613         struct mpt_lan_priv *priv = netdev_priv(dev);
614         MPT_ADAPTER *mpt_dev = priv->mpt_dev;
615
616         if (mpt_dev->active) {
617                 dlprintk (("mptlan/tx_timeout: calling netif_wake_queue for %s.\n", dev->name));
618                 netif_wake_queue(dev);
619         }
620 }
621
622 /*=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=*/
//static inline int
/*
 *	mpt_lan_send_turbo - handle a LAN_SEND turbo reply.
 *	@dev: net device the send completed on
 *	@tmsg: 32-bit turbo token carrying the Tx buffer context
 *
 *	Unmaps and frees the completed skb, recycles its Tx context onto
 *	the free stack, and wakes the queue.  Called from the reply path
 *	(interrupt context -- note dev_kfree_skb_irq).  Must always
 *	return 0: see the "Potential BUG" comment in lan_reply().
 */
static int
mpt_lan_send_turbo(struct net_device *dev, u32 tmsg)
{
	struct mpt_lan_priv *priv = netdev_priv(dev);
	MPT_ADAPTER *mpt_dev = priv->mpt_dev;
	struct sk_buff *sent;
	unsigned long flags;
	u32 ctx;

	ctx = GET_LAN_BUFFER_CONTEXT(tmsg);
	sent = priv->SendCtl[ctx].skb;

	priv->stats.tx_packets++;
	priv->stats.tx_bytes += sent->len;

	dioprintk((KERN_INFO MYNAM ": %s/%s: @%s, skb %p sent.\n",
			IOC_AND_NETDEV_NAMES_s_s(dev),
			__FUNCTION__, sent));

	/* Release the DMA mapping before freeing the skb. */
	priv->SendCtl[ctx].skb = NULL;
	pci_unmap_single(mpt_dev->pcidev, priv->SendCtl[ctx].dma,
			 priv->SendCtl[ctx].len, PCI_DMA_TODEVICE);
	dev_kfree_skb_irq(sent);

	/* Push the context back onto the free-Tx stack. */
	spin_lock_irqsave(&priv->txfidx_lock, flags);
	priv->mpt_txfidx[++priv->mpt_txfidx_tail] = ctx;
	spin_unlock_irqrestore(&priv->txfidx_lock, flags);

	netif_wake_queue(dev);
	return 0;
}
655
656 /*=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=*/
/*
 *	mpt_lan_send_reply - handle a full (non-turbo) LAN_SEND reply.
 *	@dev: net device the sends completed on
 *	@pSendRep: reply frame; may report several completed contexts
 *
 *	Updates Tx statistics according to IOCStatus, then unmaps/frees
 *	each completed skb and recycles its context.  Returns 1 if the
 *	original request frame should be freed (i.e. this is not a
 *	continuation reply), 0 otherwise.
 */
static int
mpt_lan_send_reply(struct net_device *dev, LANSendReply_t *pSendRep)
{
	struct mpt_lan_priv *priv = netdev_priv(dev);
	MPT_ADAPTER *mpt_dev = priv->mpt_dev;
	struct sk_buff *sent;
	unsigned long flags;
	int FreeReqFrame = 0;
	u32 *pContext;
	u32 ctx;
	u8 count;

	count = pSendRep->NumberOfContexts;

	dioprintk((KERN_INFO MYNAM ": send_reply: IOCStatus: %04x\n",
		 le16_to_cpu(pSendRep->IOCStatus)));

	/* Add check for Loginfo Flag in IOCStatus */

	switch (le16_to_cpu(pSendRep->IOCStatus) & MPI_IOCSTATUS_MASK) {
	case MPI_IOCSTATUS_SUCCESS:
		priv->stats.tx_packets += count;
		break;

	case MPI_IOCSTATUS_LAN_CANCELED:
	case MPI_IOCSTATUS_LAN_TRANSMIT_ABORTED:
		break;

	case MPI_IOCSTATUS_INVALID_SGL:
		priv->stats.tx_errors += count;
		printk (KERN_ERR MYNAM ": %s/%s: ERROR - Invalid SGL sent to IOC!\n",
				IOC_AND_NETDEV_NAMES_s_s(dev));
		/* NOTE(review): this bails out without recycling the Tx
		 * contexts/skbs listed in the reply -- they appear to be
		 * leaked on this path.  Confirm whether that is intended. */
		goto out;

	default:
		priv->stats.tx_errors += count;
		break;
	}

	/* Walk the array of completed buffer contexts following the
	 * fixed reply header. */
	pContext = &pSendRep->BufferContext;

	spin_lock_irqsave(&priv->txfidx_lock, flags);
	while (count > 0) {
		ctx = GET_LAN_BUFFER_CONTEXT(le32_to_cpu(*pContext));

		sent = priv->SendCtl[ctx].skb;
		priv->stats.tx_bytes += sent->len;

		dioprintk((KERN_INFO MYNAM ": %s/%s: @%s, skb %p sent.\n",
				IOC_AND_NETDEV_NAMES_s_s(dev),
				__FUNCTION__, sent));

		/* Unmap, free, and return the context to the free stack. */
		priv->SendCtl[ctx].skb = NULL;
		pci_unmap_single(mpt_dev->pcidev, priv->SendCtl[ctx].dma,
				 priv->SendCtl[ctx].len, PCI_DMA_TODEVICE);
		dev_kfree_skb_irq(sent);

		priv->mpt_txfidx[++priv->mpt_txfidx_tail] = ctx;

		pContext++;
		count--;
	}
	spin_unlock_irqrestore(&priv->txfidx_lock, flags);

out:
	/* Only a final (non-continuation) reply frees the request frame. */
	if (!(pSendRep->MsgFlags & MPI_MSGFLAGS_CONTINUATION_REPLY))
		FreeReqFrame = 1;

	netif_wake_queue(dev);
	return FreeReqFrame;
}
728
729 /*=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=*/
730 static int
731 mpt_lan_sdu_send (struct sk_buff *skb, struct net_device *dev)
732 {
733         struct mpt_lan_priv *priv = netdev_priv(dev);
734         MPT_ADAPTER *mpt_dev = priv->mpt_dev;
735         MPT_FRAME_HDR *mf;
736         LANSendRequest_t *pSendReq;
737         SGETransaction32_t *pTrans;
738         SGESimple64_t *pSimple;
739         dma_addr_t dma;
740         unsigned long flags;
741         int ctx;
742         u16 cur_naa = 0x1000;
743
744         dioprintk((KERN_INFO MYNAM ": %s called, skb_addr = %p\n",
745                         __FUNCTION__, skb));
746
747         spin_lock_irqsave(&priv->txfidx_lock, flags);
748         if (priv->mpt_txfidx_tail < 0) {
749                 netif_stop_queue(dev);
750                 spin_unlock_irqrestore(&priv->txfidx_lock, flags);
751
752                 printk (KERN_ERR "%s: no tx context available: %u\n",
753                         __FUNCTION__, priv->mpt_txfidx_tail);
754                 return 1;
755         }
756
757         mf = mpt_get_msg_frame(LanCtx, mpt_dev);
758         if (mf == NULL) {
759                 netif_stop_queue(dev);
760                 spin_unlock_irqrestore(&priv->txfidx_lock, flags);
761
762                 printk (KERN_ERR "%s: Unable to alloc request frame\n",
763                         __FUNCTION__);
764                 return 1;
765         }
766
767         ctx = priv->mpt_txfidx[priv->mpt_txfidx_tail--];
768         spin_unlock_irqrestore(&priv->txfidx_lock, flags);
769
770 //      dioprintk((KERN_INFO MYNAM ": %s/%s: Creating new msg frame (send).\n",
771 //                      IOC_AND_NETDEV_NAMES_s_s(dev)));
772
773         pSendReq = (LANSendRequest_t *) mf;
774
775         /* Set the mac.raw pointer, since this apparently isn't getting
776          * done before we get the skb. Pull the data pointer past the mac data.
777          */
778         skb->mac.raw = skb->data;
779         skb_pull(skb, 12);
780
781         dma = pci_map_single(mpt_dev->pcidev, skb->data, skb->len,
782                              PCI_DMA_TODEVICE);
783
784         priv->SendCtl[ctx].skb = skb;
785         priv->SendCtl[ctx].dma = dma;
786         priv->SendCtl[ctx].len = skb->len;
787
788         /* Message Header */
789         pSendReq->Reserved    = 0;
790         pSendReq->Function    = MPI_FUNCTION_LAN_SEND;
791         pSendReq->ChainOffset = 0;
792         pSendReq->Reserved2   = 0;
793         pSendReq->MsgFlags    = 0;
794         pSendReq->PortNumber  = priv->pnum;
795
796         /* Transaction Context Element */
797         pTrans = (SGETransaction32_t *) pSendReq->SG_List;
798
799         /* No Flags, 8 bytes of Details, 32bit Context (bloody turbo replies) */
800         pTrans->ContextSize   = sizeof(u32);
801         pTrans->DetailsLength = 2 * sizeof(u32);
802         pTrans->Flags         = 0;
803         pTrans->TransactionContext[0] = cpu_to_le32(ctx);
804
805 //      dioprintk((KERN_INFO MYNAM ": %s/%s: BC = %08x, skb = %p, buff = %p\n",
806 //                      IOC_AND_NETDEV_NAMES_s_s(dev),
807 //                      ctx, skb, skb->data));
808
809 #ifdef QLOGIC_NAA_WORKAROUND
810 {
811         struct NAA_Hosed *nh;
812
813         /* Munge the NAA for Tx packets to QLogic boards, which don't follow
814            RFC 2625. The longer I look at this, the more my opinion of Qlogic
815            drops. */
816         read_lock_irq(&bad_naa_lock);
817         for (nh = mpt_bad_naa; nh != NULL; nh=nh->next) {
818                 if ((nh->ieee[0] == skb->mac.raw[0]) &&
819                     (nh->ieee[1] == skb->mac.raw[1]) &&
820                     (nh->ieee[2] == skb->mac.raw[2]) &&
821                     (nh->ieee[3] == skb->mac.raw[3]) &&
822                     (nh->ieee[4] == skb->mac.raw[4]) &&
823                     (nh->ieee[5] == skb->mac.raw[5])) {
824                         cur_naa = nh->NAA;
825                         dlprintk ((KERN_INFO "mptlan/sdu_send: using NAA value "
826                                   "= %04x.\n", cur_naa));
827                         break;
828                 }
829         }
830         read_unlock_irq(&bad_naa_lock);
831 }
832 #endif
833
834         pTrans->TransactionDetails[0] = cpu_to_le32((cur_naa         << 16) |
835                                                     (skb->mac.raw[0] <<  8) |
836                                                     (skb->mac.raw[1] <<  0));
837         pTrans->TransactionDetails[1] = cpu_to_le32((skb->mac.raw[2] << 24) |
838                                                     (skb->mac.raw[3] << 16) |
839                                                     (skb->mac.raw[4] <<  8) |
840                                                     (skb->mac.raw[5] <<  0));
841
842         pSimple = (SGESimple64_t *) &pTrans->TransactionDetails[2];
843
844         /* If we ever decide to send more than one Simple SGE per LANSend, then
845            we will need to make sure that LAST_ELEMENT only gets set on the
846            last one. Otherwise, bad voodoo and evil funkiness will commence. */
847         pSimple->FlagsLength = cpu_to_le32(
848                         ((MPI_SGE_FLAGS_LAST_ELEMENT |
849                           MPI_SGE_FLAGS_END_OF_BUFFER |
850                           MPI_SGE_FLAGS_SIMPLE_ELEMENT |
851                           MPI_SGE_FLAGS_SYSTEM_ADDRESS |
852                           MPI_SGE_FLAGS_HOST_TO_IOC |
853                           MPI_SGE_FLAGS_64_BIT_ADDRESSING |
854                           MPI_SGE_FLAGS_END_OF_LIST) << MPI_SGE_FLAGS_SHIFT) |
855                         skb->len);
856         pSimple->Address.Low = cpu_to_le32((u32) dma);
857         if (sizeof(dma_addr_t) > sizeof(u32))
858                 pSimple->Address.High = cpu_to_le32((u32) ((u64) dma >> 32));
859         else
860                 pSimple->Address.High = 0;
861
862         mpt_put_msg_frame (LanCtx, mpt_dev, mf);
863         dev->trans_start = jiffies;
864
865         dioprintk((KERN_INFO MYNAM ": %s/%s: Sending packet. FlagsLength = %08x.\n",
866                         IOC_AND_NETDEV_NAMES_s_s(dev),
867                         le32_to_cpu(pSimple->FlagsLength)));
868
869         return 0;
870 }
871
872 /*=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=*/
873 static inline void
874 mpt_lan_wake_post_buckets_task(struct net_device *dev, int priority)
875 /*
876  * @priority: 0 = put it on the timer queue, 1 = put it on the immediate queue
877  */
878 {
879         struct mpt_lan_priv *priv = dev->priv;
880         
881         if (test_and_set_bit(0, &priv->post_buckets_active) == 0) {
882                 if (priority) {
883                         schedule_work(&priv->post_buckets_task);
884                 } else {
885                         schedule_delayed_work(&priv->post_buckets_task, 1);
886                         dioprintk((KERN_INFO MYNAM ": post_buckets queued on "
887                                    "timer.\n"));
888                 }
889                 dioprintk((KERN_INFO MYNAM ": %s/%s: Queued post_buckets task.\n",
890                            IOC_AND_NETDEV_NAMES_s_s(dev) ));
891         }
892 }
893
894 /*=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=*/
895 static inline int
896 mpt_lan_receive_skb(struct net_device *dev, struct sk_buff *skb)
897 {
898         struct mpt_lan_priv *priv = dev->priv;
899
900         skb->protocol = mpt_lan_type_trans(skb, dev);
901
902         dioprintk((KERN_INFO MYNAM ": %s/%s: Incoming packet (%d bytes) "
903                  "delivered to upper level.\n",
904                         IOC_AND_NETDEV_NAMES_s_s(dev), skb->len));
905
906         priv->stats.rx_bytes += skb->len;
907         priv->stats.rx_packets++;
908
909         skb->dev = dev;
910         netif_rx(skb);
911
912         dioprintk((MYNAM "/receive_skb: %d buckets remaining\n",
913                  atomic_read(&priv->buckets_out)));
914
915         if (atomic_read(&priv->buckets_out) < priv->bucketthresh)
916                 mpt_lan_wake_post_buckets_task(dev, 1);
917
918         dioprintk((KERN_INFO MYNAM "/receive_post_reply: %d buckets "
919                   "remaining, %d received back since sod\n",
920                   atomic_read(&priv->buckets_out), priv->total_received));
921
922         return 0;
923 }
924
925 /*=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=*/
926 //static inline int
927 static int
928 mpt_lan_receive_post_turbo(struct net_device *dev, u32 tmsg)
929 {
930         struct mpt_lan_priv *priv = dev->priv;
931         MPT_ADAPTER *mpt_dev = priv->mpt_dev;
932         struct sk_buff *skb, *old_skb;
933         unsigned long flags;
934         u32 ctx, len;
935
936         ctx = GET_LAN_BUCKET_CONTEXT(tmsg);
937         skb = priv->RcvCtl[ctx].skb;
938
939         len = GET_LAN_PACKET_LENGTH(tmsg);
940
941         if (len < MPT_LAN_RX_COPYBREAK) {
942                 old_skb = skb;
943
944                 skb = (struct sk_buff *)dev_alloc_skb(len);
945                 if (!skb) {
946                         printk (KERN_ERR MYNAM ": %s/%s: ERROR - Can't allocate skb! (%s@%d)\n",
947                                         IOC_AND_NETDEV_NAMES_s_s(dev),
948                                         __FILE__, __LINE__);
949                         return -ENOMEM;
950                 }
951
952                 pci_dma_sync_single_for_cpu(mpt_dev->pcidev, priv->RcvCtl[ctx].dma,
953                                             priv->RcvCtl[ctx].len, PCI_DMA_FROMDEVICE);
954
955                 memcpy(skb_put(skb, len), old_skb->data, len);
956
957                 pci_dma_sync_single_for_device(mpt_dev->pcidev, priv->RcvCtl[ctx].dma,
958                                                priv->RcvCtl[ctx].len, PCI_DMA_FROMDEVICE);
959                 goto out;
960         }
961
962         skb_put(skb, len);
963
964         priv->RcvCtl[ctx].skb = NULL;
965
966         pci_unmap_single(mpt_dev->pcidev, priv->RcvCtl[ctx].dma,
967                          priv->RcvCtl[ctx].len, PCI_DMA_FROMDEVICE);
968
969 out:
970         spin_lock_irqsave(&priv->rxfidx_lock, flags);
971         priv->mpt_rxfidx[++priv->mpt_rxfidx_tail] = ctx;
972         spin_unlock_irqrestore(&priv->rxfidx_lock, flags);
973
974         atomic_dec(&priv->buckets_out);
975         priv->total_received++;
976
977         return mpt_lan_receive_skb(dev, skb);
978 }
979
980 /*=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=*/
/**
 *	mpt_lan_receive_post_free - free buckets returned without packet data
 *	@dev: net device
 *	@pRecvRep: receive-post reply naming the bucket contexts to release
 *
 *	Called (via mpt_lan_receive_post_reply) when the IOC hands buckets
 *	back with MPI_IOCSTATUS_LAN_CANCELED.  Each named bucket's skb is
 *	unmapped and freed, and its context index is pushed back onto the
 *	rx free-index stack.  Always returns 0.
 */
static int
mpt_lan_receive_post_free(struct net_device *dev,
			  LANReceivePostReply_t *pRecvRep)
{
	struct mpt_lan_priv *priv = dev->priv;
	MPT_ADAPTER *mpt_dev = priv->mpt_dev;
	unsigned long flags;
	struct sk_buff *skb;
	u32 ctx;
	int count;
	int i;

	count = pRecvRep->NumberOfContexts;

/**/	dlprintk((KERN_INFO MYNAM "/receive_post_reply: "
		  "IOC returned %d buckets, freeing them...\n", count));

	spin_lock_irqsave(&priv->rxfidx_lock, flags);
	for (i = 0; i < count; i++) {
		ctx = le32_to_cpu(pRecvRep->BucketContext[i]);

		skb = priv->RcvCtl[ctx].skb;

//		dlprintk((KERN_INFO MYNAM ": %s: dev_name = %s\n",
//				IOC_AND_NETDEV_NAMES_s_s(dev)));
//		dlprintk((KERN_INFO MYNAM "@rpr[2], priv = %p, buckets_out addr = %p",
//				priv, &(priv->buckets_out)));
//		dlprintk((KERN_INFO MYNAM "@rpr[2] TC + 3\n"));

		/* NOTE(review): the unmap and kfree are performed while
		 * holding rxfidx_lock; dev_kfree_skb_any() is safe in any
		 * context, so this is legal if a little heavyweight. */
		priv->RcvCtl[ctx].skb = NULL;
		pci_unmap_single(mpt_dev->pcidev, priv->RcvCtl[ctx].dma,
				 priv->RcvCtl[ctx].len, PCI_DMA_FROMDEVICE);
		dev_kfree_skb_any(skb);

		/* Context goes back on the free stack for reposting. */
		priv->mpt_rxfidx[++priv->mpt_rxfidx_tail] = ctx;
	}
	spin_unlock_irqrestore(&priv->rxfidx_lock, flags);

	atomic_sub(count, &priv->buckets_out);

//	for (i = 0; i < priv->max_buckets_out; i++)
//		if (priv->RcvCtl[i].skb != NULL)
//			dlprintk((KERN_INFO MYNAM "@rpr: bucket %03x "
//				  "is still out\n", i));

/*	dlprintk((KERN_INFO MYNAM "/receive_post_reply: freed %d buckets\n",
		  count));
*/
/**/	dlprintk((KERN_INFO MYNAM "@receive_post_reply: %d buckets "
/**/		  "remaining, %d received back since sod.\n",
/**/		  atomic_read(&priv->buckets_out), priv->total_received));
	return 0;
}
1034
1035 /*=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=*/
/**
 *	mpt_lan_receive_post_reply - handle a full (non-turbo) receive reply
 *	@dev: net device
 *	@pRecvRep: reply frame from the IOC describing the received packet
 *
 *	Three delivery paths:
 *	 - packet spans multiple buckets: allocate one skb and concatenate
 *	   the pieces into it (buckets stay mapped and are reposted);
 *	 - single bucket shorter than MPT_LAN_RX_COPYBREAK: copy into a
 *	   small skb so the bucket can be reposted as-is;
 *	 - single large bucket: unmap it and hand its skb up directly.
 *	Canceled replies are diverted to mpt_lan_receive_post_free().
 *
 *	Returns 0 on success, -ENOMEM on skb-allocation failure, -1 on a
 *	malformed reply (zero PacketLength).
 */
static int
mpt_lan_receive_post_reply(struct net_device *dev,
			   LANReceivePostReply_t *pRecvRep)
{
	struct mpt_lan_priv *priv = dev->priv;
	MPT_ADAPTER *mpt_dev = priv->mpt_dev;
	struct sk_buff *skb, *old_skb;
	unsigned long flags;
	u32 len, ctx, offset;
	u32 remaining = le32_to_cpu(pRecvRep->BucketsRemaining);
	int count;
	int i, l;

	dioprintk((KERN_INFO MYNAM ": mpt_lan_receive_post_reply called\n"));
	dioprintk((KERN_INFO MYNAM ": receive_post_reply: IOCStatus: %04x\n",
		 le16_to_cpu(pRecvRep->IOCStatus)));

	/* Canceled buckets carry no packet; just free them. */
	if ((le16_to_cpu(pRecvRep->IOCStatus) & MPI_IOCSTATUS_MASK) ==
						MPI_IOCSTATUS_LAN_CANCELED)
		return mpt_lan_receive_post_free(dev, pRecvRep);

	len = le32_to_cpu(pRecvRep->PacketLength);
	if (len == 0) {
		printk (KERN_ERR MYNAM ": %s/%s: ERROR - Got a non-TURBO "
			"ReceivePostReply w/ PacketLength zero!\n",
				IOC_AND_NETDEV_NAMES_s_s(dev));
		printk (KERN_ERR MYNAM ": MsgFlags = %02x, IOCStatus = %04x\n",
				pRecvRep->MsgFlags, le16_to_cpu(pRecvRep->IOCStatus));
		return -1;
	}

	ctx    = le32_to_cpu(pRecvRep->BucketContext[0]);
	count  = pRecvRep->NumberOfContexts;
	skb    = priv->RcvCtl[ctx].skb;

	offset = le32_to_cpu(pRecvRep->PacketOffset);
//	if (offset != 0) {
//		printk (KERN_INFO MYNAM ": %s/%s: Got a ReceivePostReply "
//			"w/ PacketOffset %u\n",
//				IOC_AND_NETDEV_NAMES_s_s(dev),
//				offset);
//	}

	dioprintk((KERN_INFO MYNAM ": %s/%s: @rpr, offset = %d, len = %d\n",
			IOC_AND_NETDEV_NAMES_s_s(dev),
			offset, len));

	if (count > 1) {
		/* Packet spread over several buckets: concatenate the
		 * fragments into one new skb, leaving each bucket mapped
		 * so it can be reposted unchanged. */
		int szrem = len;

//		dioprintk((KERN_INFO MYNAM ": %s/%s: Multiple buckets returned "
//			"for single packet, concatenating...\n",
//				IOC_AND_NETDEV_NAMES_s_s(dev)));

		skb = (struct sk_buff *)dev_alloc_skb(len);
		if (!skb) {
			/* NOTE(review): on this failure path the bucket
			 * contexts are not returned to mpt_rxfidx and
			 * buckets_out is not adjusted - those buckets are
			 * effectively lost.  Pre-existing behavior, kept
			 * as-is here. */
			printk (KERN_ERR MYNAM ": %s/%s: ERROR - Can't allocate skb! (%s@%d)\n",
					IOC_AND_NETDEV_NAMES_s_s(dev),
					__FILE__, __LINE__);
			return -ENOMEM;
		}

		spin_lock_irqsave(&priv->rxfidx_lock, flags);
		for (i = 0; i < count; i++) {

			ctx = le32_to_cpu(pRecvRep->BucketContext[i]);
			old_skb = priv->RcvCtl[ctx].skb;

			/* Last fragment may be shorter than the bucket. */
			l = priv->RcvCtl[ctx].len;
			if (szrem < l)
				l = szrem;

//			dioprintk((KERN_INFO MYNAM ": %s/%s: Buckets = %d, len = %u\n",
//					IOC_AND_NETDEV_NAMES_s_s(dev),
//					i, l));

			pci_dma_sync_single_for_cpu(mpt_dev->pcidev,
						    priv->RcvCtl[ctx].dma,
						    priv->RcvCtl[ctx].len,
						    PCI_DMA_FROMDEVICE);
			memcpy(skb_put(skb, l), old_skb->data, l);

			pci_dma_sync_single_for_device(mpt_dev->pcidev,
						       priv->RcvCtl[ctx].dma,
						       priv->RcvCtl[ctx].len,
						       PCI_DMA_FROMDEVICE);

			priv->mpt_rxfidx[++priv->mpt_rxfidx_tail] = ctx;
			szrem -= l;
		}
		spin_unlock_irqrestore(&priv->rxfidx_lock, flags);

	} else if (len < MPT_LAN_RX_COPYBREAK) {
		/* Small single-bucket packet: copy out so the mapped
		 * bucket can be reposted without remapping. */
		old_skb = skb;

		skb = (struct sk_buff *)dev_alloc_skb(len);
		if (!skb) {
			/* NOTE(review): same context-leak caveat as the
			 * multi-bucket path above. */
			printk (KERN_ERR MYNAM ": %s/%s: ERROR - Can't allocate skb! (%s@%d)\n",
					IOC_AND_NETDEV_NAMES_s_s(dev),
					__FILE__, __LINE__);
			return -ENOMEM;
		}

		pci_dma_sync_single_for_cpu(mpt_dev->pcidev,
					    priv->RcvCtl[ctx].dma,
					    priv->RcvCtl[ctx].len,
					    PCI_DMA_FROMDEVICE);

		memcpy(skb_put(skb, len), old_skb->data, len);

		pci_dma_sync_single_for_device(mpt_dev->pcidev,
					       priv->RcvCtl[ctx].dma,
					       priv->RcvCtl[ctx].len,
					       PCI_DMA_FROMDEVICE);

		spin_lock_irqsave(&priv->rxfidx_lock, flags);
		priv->mpt_rxfidx[++priv->mpt_rxfidx_tail] = ctx;
		spin_unlock_irqrestore(&priv->rxfidx_lock, flags);

	} else {
		/* Large single-bucket packet: detach the bucket's skb and
		 * pass it up; the bucket slot will get a fresh skb when
		 * buckets are reposted. */
		spin_lock_irqsave(&priv->rxfidx_lock, flags);

		priv->RcvCtl[ctx].skb = NULL;

		pci_unmap_single(mpt_dev->pcidev, priv->RcvCtl[ctx].dma,
				 priv->RcvCtl[ctx].len, PCI_DMA_FROMDEVICE);
		priv->RcvCtl[ctx].dma = 0;

		priv->mpt_rxfidx[++priv->mpt_rxfidx_tail] = ctx;
		spin_unlock_irqrestore(&priv->rxfidx_lock, flags);

		skb_put(skb,len);
	}

	atomic_sub(count, &priv->buckets_out);
	priv->total_received += count;

	/* Sanity check: the free-index stack must never overflow. */
	if (priv->mpt_rxfidx_tail >= MPT_LAN_MAX_BUCKETS_OUT) {
		printk (KERN_ERR MYNAM ": %s/%s: Yoohoo! mpt_rxfidx_tail = %d, "
			"MPT_LAN_MAX_BUCKETS_OUT = %d\n",
				IOC_AND_NETDEV_NAMES_s_s(dev),
				priv->mpt_rxfidx_tail,
				MPT_LAN_MAX_BUCKETS_OUT);

		panic("Damn it Jim! I'm a doctor, not a programmer! "
				"Oh, wait a sec, I am a programmer. "
				"And, who's Jim?!?!\n"
				"Arrgghh! We've done it again!\n");
	}

	if (remaining == 0)
		printk (KERN_WARNING MYNAM ": %s/%s: WARNING - IOC out of buckets! "
			"(priv->buckets_out = %d)\n",
			IOC_AND_NETDEV_NAMES_s_s(dev),
			atomic_read(&priv->buckets_out));
	else if (remaining < 10)
		printk (KERN_INFO MYNAM ": %s/%s: IOC says %d buckets left. "
			"(priv->buckets_out = %d)\n",
			IOC_AND_NETDEV_NAMES_s_s(dev),
			remaining, atomic_read(&priv->buckets_out));
	
	/* A large mismatch between our count and the firmware's suggests
	 * the fw dropped buckets (e.g. on CRC errors); reset the LAN port
	 * to resynchronize and repost. */
	if ((remaining < priv->bucketthresh) &&
	    ((atomic_read(&priv->buckets_out) - remaining) >
	     MPT_LAN_BUCKETS_REMAIN_MISMATCH_THRESH)) {
		
		printk (KERN_WARNING MYNAM " Mismatch between driver's "
			"buckets_out count and fw's BucketsRemaining "
			"count has crossed the threshold, issuing a "
			"LanReset to clear the fw's hashtable. You may "
			"want to check your /var/log/messages for \"CRC "
			"error\" event notifications.\n");
		
		mpt_lan_reset(dev);
		mpt_lan_wake_post_buckets_task(dev, 0);
	}
	
	return mpt_lan_receive_skb(dev, skb);
}
1215
1216 /*=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=*/
1217 /* Simple SGE's only at the moment */
1218
/**
 *	mpt_lan_post_receive_buckets - replenish the IOC's receive buckets
 *	@dev_id: net device pointer (work-queue callback argument)
 *
 *	Work-queue handler (queued by mpt_lan_wake_post_buckets_task).
 *	Posts up to (max_buckets_out - buckets_out) receive buckets to the
 *	firmware, batching as many transaction+SGE pairs per request frame
 *	as the frame size allows.  Bucket skbs are allocated (or reused if
 *	an old one of the right length is still attached to the context)
 *	and DMA-mapped for PCI_DMA_FROMDEVICE.
 *
 *	Clears the post_buckets_active bit on exit so the task can be
 *	queued again.
 */
static void
mpt_lan_post_receive_buckets(void *dev_id)
{
	struct net_device *dev = dev_id;
	struct mpt_lan_priv *priv = dev->priv;
	MPT_ADAPTER *mpt_dev = priv->mpt_dev;
	MPT_FRAME_HDR *mf;
	LANReceivePostRequest_t *pRecvReq;
	SGETransaction32_t *pTrans;
	SGESimple64_t *pSimple;
	struct sk_buff *skb;
	dma_addr_t dma;
	u32 curr, buckets, count, max;
	/* Bucket length: MTU + link header + 4 (presumably FC CRC/trailer
	 * room - TODO confirm against the FC framing used by this driver). */
	u32 len = (dev->mtu + dev->hard_header_len + 4);
	unsigned long flags;
	int i;

	curr = atomic_read(&priv->buckets_out);
	buckets = (priv->max_buckets_out - curr);

	dioprintk((KERN_INFO MYNAM ": %s/%s: @%s, Start_buckets = %u, buckets_out = %u\n",
			IOC_AND_NETDEV_NAMES_s_s(dev),
			__FUNCTION__, buckets, curr));

	/* How many transaction+SGE pairs fit in one request frame. */
	max = (mpt_dev->req_sz - MPT_LAN_RECEIVE_POST_REQUEST_SIZE) /
			(MPT_LAN_TRANSACTION32_SIZE + sizeof(SGESimple64_t));

	while (buckets) {
		mf = mpt_get_msg_frame(LanCtx, mpt_dev);
		if (mf == NULL) {
			printk (KERN_ERR "%s: Unable to alloc request frame\n",
				__FUNCTION__);
			dioprintk((KERN_ERR "%s: %u buckets remaining\n",
				 __FUNCTION__, buckets));
			goto out;
		}
		pRecvReq = (LANReceivePostRequest_t *) mf;

		count = buckets;
		if (count > max)
			count = max;

		pRecvReq->Function    = MPI_FUNCTION_LAN_RECEIVE;
		pRecvReq->ChainOffset = 0;
		pRecvReq->MsgFlags    = 0;
		pRecvReq->PortNumber  = priv->pnum;

		pTrans = (SGETransaction32_t *) pRecvReq->SG_List;
		pSimple = NULL;

		for (i = 0; i < count; i++) {
			int ctx;

			spin_lock_irqsave(&priv->rxfidx_lock, flags);
			if (priv->mpt_rxfidx_tail < 0) {
				/* Free-index stack exhausted - every bucket
				 * context is already posted. */
				printk (KERN_ERR "%s: Can't alloc context\n",
					__FUNCTION__);
				spin_unlock_irqrestore(&priv->rxfidx_lock,
						       flags);
				break;
			}

			ctx = priv->mpt_rxfidx[priv->mpt_rxfidx_tail--];

			/* A leftover skb of the wrong length (e.g. after an
			 * MTU change) cannot be reused; unmap and free it. */
			skb = priv->RcvCtl[ctx].skb;
			if (skb && (priv->RcvCtl[ctx].len != len)) {
				pci_unmap_single(mpt_dev->pcidev,
						 priv->RcvCtl[ctx].dma,
						 priv->RcvCtl[ctx].len,
						 PCI_DMA_FROMDEVICE);
				dev_kfree_skb(priv->RcvCtl[ctx].skb);
				skb = priv->RcvCtl[ctx].skb = NULL;
			}

			if (skb == NULL) {
				skb = dev_alloc_skb(len);
				if (skb == NULL) {
					printk (KERN_WARNING
						MYNAM "/%s: Can't alloc skb\n",
						__FUNCTION__);
					/* Put the context back; bail out of
					 * this request with what we have. */
					priv->mpt_rxfidx[++priv->mpt_rxfidx_tail] = ctx;
					spin_unlock_irqrestore(&priv->rxfidx_lock, flags);
					break;
				}

				dma = pci_map_single(mpt_dev->pcidev, skb->data,
						     len, PCI_DMA_FROMDEVICE);

				priv->RcvCtl[ctx].skb = skb;
				priv->RcvCtl[ctx].dma = dma;
				priv->RcvCtl[ctx].len = len;
			}

			spin_unlock_irqrestore(&priv->rxfidx_lock, flags);

			/* One 32-bit-context transaction per bucket. */
			pTrans->ContextSize   = sizeof(u32);
			pTrans->DetailsLength = 0;
			pTrans->Flags         = 0;
			pTrans->TransactionContext[0] = cpu_to_le32(ctx);

			pSimple = (SGESimple64_t *) pTrans->TransactionDetails;

			pSimple->FlagsLength = cpu_to_le32(
				((MPI_SGE_FLAGS_END_OF_BUFFER |
				  MPI_SGE_FLAGS_SIMPLE_ELEMENT |
				  MPI_SGE_FLAGS_64_BIT_ADDRESSING) << MPI_SGE_FLAGS_SHIFT) | len);
			pSimple->Address.Low = cpu_to_le32((u32) priv->RcvCtl[ctx].dma);
			if (sizeof(dma_addr_t) > sizeof(u32))
				pSimple->Address.High = cpu_to_le32((u32) ((u64) priv->RcvCtl[ctx].dma >> 32));
			else
				pSimple->Address.High = 0;

			pTrans = (SGETransaction32_t *) (pSimple + 1);
		}

		if (pSimple == NULL) {
			/* Not a single bucket made it into the frame. */
/**/			printk (KERN_WARNING MYNAM "/%s: No buckets posted\n",
/**/				__FUNCTION__);
			mpt_free_msg_frame(LanCtx, mpt_dev, mf);
			goto out;
		}

		/* Mark the last SGE as end-of-list before posting. */
		pSimple->FlagsLength |= cpu_to_le32(MPI_SGE_FLAGS_END_OF_LIST << MPI_SGE_FLAGS_SHIFT);

		pRecvReq->BucketCount = cpu_to_le32(i);

/*	printk(KERN_INFO MYNAM ": posting buckets\n   ");
 *	for (i = 0; i < j + 2; i ++)
 *	    printk (" %08x", le32_to_cpu(msg[i]));
 *	printk ("\n");
 */

		mpt_put_msg_frame(LanCtx, mpt_dev, mf);

		priv->total_posted += i;
		buckets -= i;
		atomic_add(i, &priv->buckets_out);
	}

out:
	dioprintk((KERN_INFO MYNAM "/%s: End_buckets = %u, priv->buckets_out = %u\n",
		  __FUNCTION__, buckets, atomic_read(&priv->buckets_out)));
	dioprintk((KERN_INFO MYNAM "/%s: Posted %u buckets and received %u back\n",
	__FUNCTION__, priv->total_posted, priv->total_received));

	/* Allow the task to be scheduled again. */
	clear_bit(0, &priv->post_buckets_active);
}
1366
1367 /*=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=*/
1368 static struct net_device *
1369 mpt_register_lan_device (MPT_ADAPTER *mpt_dev, int pnum)
1370 {
1371         struct net_device *dev = alloc_fcdev(sizeof(struct mpt_lan_priv));
1372         struct mpt_lan_priv *priv = NULL;
1373         u8 HWaddr[FC_ALEN], *a;
1374
1375         if (!dev)
1376                 return NULL;
1377
1378         dev->mtu = MPT_LAN_MTU;
1379
1380         priv = netdev_priv(dev);
1381
1382         priv->mpt_dev = mpt_dev;
1383         priv->pnum = pnum;
1384
1385         memset(&priv->post_buckets_task, 0, sizeof(struct work_struct));
1386         INIT_WORK(&priv->post_buckets_task, mpt_lan_post_receive_buckets, dev);
1387         priv->post_buckets_active = 0;
1388
1389         dlprintk((KERN_INFO MYNAM "@%d: bucketlen = %d\n",
1390                         __LINE__, dev->mtu + dev->hard_header_len + 4));
1391
1392         atomic_set(&priv->buckets_out, 0);
1393         priv->total_posted = 0;
1394         priv->total_received = 0;
1395         priv->max_buckets_out = max_buckets_out;
1396         if (mpt_dev->pfacts[0].MaxLanBuckets < max_buckets_out)
1397                 priv->max_buckets_out = mpt_dev->pfacts[0].MaxLanBuckets;
1398
1399         dlprintk((KERN_INFO MYNAM "@%d: MaxLanBuckets=%d, max_buckets_out/priv=%d/%d\n",
1400                         __LINE__,
1401                         mpt_dev->pfacts[0].MaxLanBuckets,
1402                         max_buckets_out,
1403                         priv->max_buckets_out));
1404
1405         priv->bucketthresh = priv->max_buckets_out * 2 / 3;
1406         priv->txfidx_lock = SPIN_LOCK_UNLOCKED;
1407         priv->rxfidx_lock = SPIN_LOCK_UNLOCKED;
1408
1409         memset(&priv->stats, 0, sizeof(priv->stats));
1410
1411         /*  Grab pre-fetched LANPage1 stuff. :-) */
1412         a = (u8 *) &mpt_dev->lan_cnfg_page1.HardwareAddressLow;
1413
1414         HWaddr[0] = a[5];
1415         HWaddr[1] = a[4];
1416         HWaddr[2] = a[3];
1417         HWaddr[3] = a[2];
1418         HWaddr[4] = a[1];
1419         HWaddr[5] = a[0];
1420
1421         dev->addr_len = FC_ALEN;
1422         memcpy(dev->dev_addr, HWaddr, FC_ALEN);
1423         memset(dev->broadcast, 0xff, FC_ALEN);
1424
1425         /* The Tx queue is 127 deep on the 909.
1426          * Give ourselves some breathing room.
1427          */
1428         priv->tx_max_out = (tx_max_out_p <= MPT_TX_MAX_OUT_LIM) ?
1429                             tx_max_out_p : MPT_TX_MAX_OUT_LIM;
1430
1431         dev->open = mpt_lan_open;
1432         dev->stop = mpt_lan_close;
1433         dev->get_stats = mpt_lan_get_stats;
1434         dev->set_multicast_list = NULL;
1435         dev->change_mtu = mpt_lan_change_mtu;
1436         dev->hard_start_xmit = mpt_lan_sdu_send;
1437
1438 /* Not in 2.3.42. Need 2.3.45+ */
1439         dev->tx_timeout = mpt_lan_tx_timeout;
1440         dev->watchdog_timeo = MPT_LAN_TX_TIMEOUT;
1441
1442         dlprintk((KERN_INFO MYNAM ": Finished registering dev "
1443                 "and setting initial values\n"));
1444
1445         SET_MODULE_OWNER(dev);
1446
1447         if (register_netdev(dev) != 0) {
1448                 free_netdev(dev);
1449                 dev = NULL;
1450         }
1451         return dev;
1452 }
1453
1454 /*=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=*/
1455 static int __init mpt_lan_init (void)
1456 {
1457         struct net_device *dev;
1458         MPT_ADAPTER *p;
1459         int i, j;
1460
1461         show_mptmod_ver(LANAME, LANVER);
1462
1463 #ifdef QLOGIC_NAA_WORKAROUND
1464         /* Init the global r/w lock for the bad_naa list. We want to do this
1465            before any boards are initialized and may be used. */
1466         rwlock_init(&bad_naa_lock);
1467 #endif
1468
1469         if ((LanCtx = mpt_register(lan_reply, MPTLAN_DRIVER)) <= 0) {
1470                 printk (KERN_ERR MYNAM ": Failed to register with MPT base driver\n");
1471                 return -EBUSY;
1472         }
1473
1474         /* Set the callback index to be used by driver core for turbo replies */
1475         mpt_lan_index = LanCtx;
1476
1477         dlprintk((KERN_INFO MYNAM ": assigned context of %d\n", LanCtx));
1478
1479         if (mpt_reset_register(LanCtx, mpt_lan_ioc_reset) == 0) {
1480                 dlprintk((KERN_INFO MYNAM ": Registered for IOC reset notifications\n"));
1481         } else {
1482                 printk(KERN_ERR MYNAM ": Eieee! unable to register a reset "
1483                        "handler with mptbase! The world is at an end! "
1484                        "Everything is fading to black! Goodbye.\n");
1485                 return -EBUSY;
1486         }
1487
1488         for (j = 0; j < MPT_MAX_ADAPTERS; j++) {
1489                 mpt_landev[j] = NULL;
1490         }
1491
1492         list_for_each_entry(p, &ioc_list, list) {
1493                 for (i = 0; i < p->facts.NumberOfPorts; i++) {
1494                         printk (KERN_INFO MYNAM ": %s: PortNum=%x, ProtocolFlags=%02Xh (%c%c%c%c)\n",
1495                                         p->name,
1496                                         p->pfacts[i].PortNumber,
1497                                         p->pfacts[i].ProtocolFlags,
1498                                         MPT_PROTOCOL_FLAGS_c_c_c_c(p->pfacts[i].ProtocolFlags));
1499
1500                         if (!(p->pfacts[i].ProtocolFlags & MPI_PORTFACTS_PROTOCOL_LAN)) {
1501                                 printk (KERN_INFO MYNAM ": %s: Hmmm... LAN protocol seems to be disabled on this adapter port!\n",
1502                                                 p->name);
1503                                 continue;
1504                         }
1505
1506                         dev = mpt_register_lan_device (p, i);
1507                         if (!dev) {
1508                                 printk (KERN_ERR MYNAM ": %s: Unable to register port%d as a LAN device\n",
1509                                                 p->name,
1510                                                 p->pfacts[i].PortNumber);
1511                         }
1512                         printk (KERN_INFO MYNAM ": %s: Fusion MPT LAN device registered as '%s'\n",
1513                                         p->name, dev->name);
1514                         printk (KERN_INFO MYNAM ": %s/%s: LanAddr = %02X:%02X:%02X:%02X:%02X:%02X\n",
1515                                         IOC_AND_NETDEV_NAMES_s_s(dev),
1516                                         dev->dev_addr[0], dev->dev_addr[1],
1517                                         dev->dev_addr[2], dev->dev_addr[3],
1518                                         dev->dev_addr[4], dev->dev_addr[5]);
1519 //                                      printk (KERN_INFO MYNAM ": %s/%s: Max_TX_outstanding = %d\n",
1520 //                                                      IOC_AND_NETDEV_NAMES_s_s(dev),
1521 //                                                      NETDEV_TO_LANPRIV_PTR(dev)->tx_max_out);
1522                         j = p->id;
1523                         mpt_landev[j] = dev;
1524                         dlprintk((KERN_INFO MYNAM "/init: dev_addr=%p, mpt_landev[%d]=%p\n",
1525                                         dev, j,  mpt_landev[j]));
1526
1527                 }
1528         }
1529
1530         return 0;
1531 }
1532
1533 /*=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=*/
1534 static void __exit mpt_lan_exit(void)
1535 {
1536         int i;
1537
1538         mpt_reset_deregister(LanCtx);
1539
1540         for (i = 0; mpt_landev[i] != NULL; i++) {
1541                 struct net_device *dev = mpt_landev[i];
1542
1543                 printk (KERN_INFO ": %s/%s: Fusion MPT LAN device unregistered\n",
1544                                IOC_AND_NETDEV_NAMES_s_s(dev));
1545                 unregister_netdev(dev);
1546                 free_netdev(dev);
1547                 mpt_landev[i] = NULL;
1548         }
1549
1550         if (LanCtx >= 0) {
1551                 mpt_deregister(LanCtx);
1552                 LanCtx = -1;
1553                 mpt_lan_index = 0;
1554         }
1555
1556         /* deregister any send/receive handler structs. I2Oism? */
1557 }
1558
1559 /*=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=*/
1560
/* Standard kernel module entry/exit hooks; bodies are defined above. */
module_init(mpt_lan_init);
module_exit(mpt_lan_exit);
1563
1564 /*=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=*/
1565 static unsigned short
1566 mpt_lan_type_trans(struct sk_buff *skb, struct net_device *dev)
1567 {
1568         struct mpt_lan_ohdr *fch = (struct mpt_lan_ohdr *)skb->data;
1569         struct fcllc *fcllc;
1570
1571         skb->mac.raw = skb->data;
1572         skb_pull(skb, sizeof(struct mpt_lan_ohdr));
1573
1574         if (fch->dtype == htons(0xffff)) {
1575                 u32 *p = (u32 *) fch;
1576
1577                 swab32s(p + 0);
1578                 swab32s(p + 1);
1579                 swab32s(p + 2);
1580                 swab32s(p + 3);
1581
1582                 printk (KERN_WARNING MYNAM ": %s: WARNING - Broadcast swap F/W bug detected!\n",
1583                                 NETDEV_PTR_TO_IOC_NAME_s(dev));
1584                 printk (KERN_WARNING MYNAM ": Please update sender @ MAC_addr = %02x:%02x:%02x:%02x:%02x:%02x\n",
1585                                 fch->saddr[0], fch->saddr[1], fch->saddr[2],
1586                                 fch->saddr[3], fch->saddr[4], fch->saddr[5]);
1587         }
1588
1589         if (*fch->daddr & 1) {
1590                 if (!memcmp(fch->daddr, dev->broadcast, FC_ALEN)) {
1591                         skb->pkt_type = PACKET_BROADCAST;
1592                 } else {
1593                         skb->pkt_type = PACKET_MULTICAST;
1594                 }
1595         } else {
1596                 if (memcmp(fch->daddr, dev->dev_addr, FC_ALEN)) {
1597                         skb->pkt_type = PACKET_OTHERHOST;
1598                 } else {
1599                         skb->pkt_type = PACKET_HOST;
1600                 }
1601         }
1602
1603         fcllc = (struct fcllc *)skb->data;
1604
1605 #ifdef QLOGIC_NAA_WORKAROUND
1606 {
1607         u16 source_naa = fch->stype, found = 0;
1608
1609         /* Workaround for QLogic not following RFC 2625 in regards to the NAA
1610            value. */
1611
1612         if ((source_naa & 0xF000) == 0)
1613                 source_naa = swab16(source_naa);
1614
1615         if (fcllc->ethertype == htons(ETH_P_ARP))
1616             dlprintk ((KERN_INFO "mptlan/type_trans: got arp req/rep w/ naa of "
1617                       "%04x.\n", source_naa));
1618
1619         if ((fcllc->ethertype == htons(ETH_P_ARP)) &&
1620            ((source_naa >> 12) !=  MPT_LAN_NAA_RFC2625)){
1621                 struct NAA_Hosed *nh, *prevnh;
1622                 int i;
1623
1624                 dlprintk ((KERN_INFO "mptlan/type_trans: ARP Req/Rep from "
1625                           "system with non-RFC 2625 NAA value (%04x).\n",
1626                           source_naa));
1627
1628                 write_lock_irq(&bad_naa_lock);
1629                 for (prevnh = nh = mpt_bad_naa; nh != NULL;
1630                      prevnh=nh, nh=nh->next) {
1631                         if ((nh->ieee[0] == fch->saddr[0]) &&
1632                             (nh->ieee[1] == fch->saddr[1]) &&
1633                             (nh->ieee[2] == fch->saddr[2]) &&
1634                             (nh->ieee[3] == fch->saddr[3]) &&
1635                             (nh->ieee[4] == fch->saddr[4]) &&
1636                             (nh->ieee[5] == fch->saddr[5])) {
1637                                 found = 1;
1638                                 dlprintk ((KERN_INFO "mptlan/type_trans: ARP Re"
1639                                          "q/Rep w/ bad NAA from system already"
1640                                          " in DB.\n"));
1641                                 break;
1642                         }
1643                 }
1644
1645                 if ((!found) && (nh == NULL)) {
1646
1647                         nh = kmalloc(sizeof(struct NAA_Hosed), GFP_KERNEL);
1648                         dlprintk ((KERN_INFO "mptlan/type_trans: ARP Req/Rep w/"
1649                                  " bad NAA from system not yet in DB.\n"));
1650
1651                         if (nh != NULL) {
1652                                 nh->next = NULL;
1653                                 if (!mpt_bad_naa)
1654                                         mpt_bad_naa = nh;
1655                                 if (prevnh)
1656                                         prevnh->next = nh;
1657
1658                                 nh->NAA = source_naa; /* Set the S_NAA value. */
1659                                 for (i = 0; i < FC_ALEN; i++)
1660                                         nh->ieee[i] = fch->saddr[i];
1661                                 dlprintk ((KERN_INFO "Got ARP from %02x:%02x:%02x:%02x:"
1662                                           "%02x:%02x with non-compliant S_NAA value.\n",
1663                                           fch->saddr[0], fch->saddr[1], fch->saddr[2],
1664                                           fch->saddr[3], fch->saddr[4],fch->saddr[5]));
1665                         } else {
1666                                 printk (KERN_ERR "mptlan/type_trans: Unable to"
1667                                         " kmalloc a NAA_Hosed struct.\n");
1668                         }
1669                 } else if (!found) {
1670                         printk (KERN_ERR "mptlan/type_trans: found not"
1671                                 " set, but nh isn't null. Evil "
1672                                 "funkiness abounds.\n");
1673                 }
1674                 write_unlock_irq(&bad_naa_lock);
1675         }
1676 }
1677 #endif
1678
1679         /* Strip the SNAP header from ARP packets since we don't
1680          * pass them through to the 802.2/SNAP layers.
1681          */
1682         if (fcllc->dsap == EXTENDED_SAP &&
1683                 (fcllc->ethertype == htons(ETH_P_IP) ||
1684                  fcllc->ethertype == htons(ETH_P_ARP))) {
1685                 skb_pull(skb, sizeof(struct fcllc));
1686                 return fcllc->ethertype;
1687         }
1688
1689         return htons(ETH_P_802_2);
1690 }
1691
1692 /*=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=*/