/* Source: linux-2.6.6, drivers/message/fusion/mptlan.c
 * (ftp://ftp.kernel.org/pub/linux/kernel/v2.6/linux-2.6.6.tar.bz2)
 */
1 /*
2  *  linux/drivers/message/fusion/mptlan.c
3  *      IP Over Fibre Channel device driver.
4  *      For use with PCI chip/adapter(s):
5  *          LSIFC9xx/LSI409xx Fibre Channel
6  *      running LSI Logic Fusion MPT (Message Passing Technology) firmware.
7  *
8  *  Credits:
9  *      This driver would not exist if not for Alan Cox's development
10  *      of the linux i2o driver.
11  *
12  *      Special thanks goes to the I2O LAN driver people at the
13  *      University of Helsinki, who, unbeknownst to them, provided
14  *      the inspiration and initial structure for this driver.
15  *
16  *      A huge debt of gratitude is owed to David S. Miller (DaveM)
17  *      for fixing much of the stupid and broken stuff in the early
18  *      driver while porting to sparc64 platform.  THANK YOU!
19  *
20  *      A really huge debt of gratitude is owed to Eddie C. Dost
21  *      for gobs of hard work fixing and optimizing LAN code.
22  *      THANK YOU!
23  *
24  *      (see also mptbase.c)
25  *
26  *  Copyright (c) 2000-2004 LSI Logic Corporation
27  *  Originally By: Noah Romer
28  *  (mailto:mpt_linux_developer@lsil.com)
29  *
30  *  $Id: mptlan.c,v 1.53 2002/10/17 20:15:58 pdelaney Exp $
31  */
32 /*=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=*/
33 /*
34     This program is free software; you can redistribute it and/or modify
35     it under the terms of the GNU General Public License as published by
36     the Free Software Foundation; version 2 of the License.
37
38     This program is distributed in the hope that it will be useful,
39     but WITHOUT ANY WARRANTY; without even the implied warranty of
40     MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
41     GNU General Public License for more details.
42
43     NO WARRANTY
44     THE PROGRAM IS PROVIDED ON AN "AS IS" BASIS, WITHOUT WARRANTIES OR
45     CONDITIONS OF ANY KIND, EITHER EXPRESS OR IMPLIED INCLUDING, WITHOUT
46     LIMITATION, ANY WARRANTIES OR CONDITIONS OF TITLE, NON-INFRINGEMENT,
47     MERCHANTABILITY OR FITNESS FOR A PARTICULAR PURPOSE. Each Recipient is
48     solely responsible for determining the appropriateness of using and
49     distributing the Program and assumes all risks associated with its
50     exercise of rights under this Agreement, including but not limited to
51     the risks and costs of program errors, damage to or loss of data,
52     programs or equipment, and unavailability or interruption of operations.
53
54     DISCLAIMER OF LIABILITY
55     NEITHER RECIPIENT NOR ANY CONTRIBUTORS SHALL HAVE ANY LIABILITY FOR ANY
56     DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
57     DAMAGES (INCLUDING WITHOUT LIMITATION LOST PROFITS), HOWEVER CAUSED AND
58     ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR
59     TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE
60     USE OR DISTRIBUTION OF THE PROGRAM OR THE EXERCISE OF ANY RIGHTS GRANTED
61     HEREUNDER, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGES
62
63     You should have received a copy of the GNU General Public License
64     along with this program; if not, write to the Free Software
65     Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA  02111-1307  USA
66 */
67
68 /*=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=*/
69 /*
70  * Define statements used for debugging
71  */
72 //#define MPT_LAN_IO_DEBUG
73
74 /*=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=*/
75
76 #include "mptlan.h"
77 #include <linux/init.h>
78 #include <linux/module.h>
79 #include <linux/fs.h>
80
81 #define MYNAM           "mptlan"
82
83 MODULE_LICENSE("GPL");
84
85 /*=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=*/
86 /*
87  * MPT LAN message sizes without variable part.
88  */
89 #define MPT_LAN_RECEIVE_POST_REQUEST_SIZE \
90         (sizeof(LANReceivePostRequest_t) - sizeof(SGE_MPI_UNION))
91
92 #define MPT_LAN_TRANSACTION32_SIZE \
93         (sizeof(SGETransaction32_t) - sizeof(u32))
94
95 /*
96  *  Fusion MPT LAN private structures
97  */
98
/* Node in a singly linked list recording a (NAA, MAC address) pair for a
 * peer whose FC board reports a non-RFC 2625 NAA value; used only when
 * QLOGIC_NAA_WORKAROUND is defined (see mpt_lan_sdu_send). */
struct NAA_Hosed {
	u16 NAA;		/* NAA value actually seen from this peer */
	u8 ieee[FC_ALEN];	/* peer's IEEE (MAC) address */
	struct NAA_Hosed *next;	/* next entry, NULL-terminated list */
};
104
/* Per-buffer bookkeeping for one Tx or Rx slot: the skb handed to/posted
 * at the IOC, its PCI-mapped DMA address, and the mapped length (needed to
 * unmap with the same size later). */
struct BufferControl {
	struct sk_buff	*skb;
	dma_addr_t	dma;
	unsigned int	len;
};
110
/* Driver-private state hung off each LAN net_device (via netdev_priv).
 * The tx/rx "fidx" arrays are free-context stacks: *_tail indexes the top
 * (-1 when empty), each guarded by its own spinlock since replies arrive
 * in interrupt context. */
struct mpt_lan_priv {
	MPT_ADAPTER *mpt_dev;
	u8 pnum; /* Port number in the IOC. This is not a Unix network port! */

	atomic_t buckets_out;		/* number of unused buckets on IOC */
	int bucketthresh;		/* Send more when this many left */

	int *mpt_txfidx; /* Free Tx Context list */
	int mpt_txfidx_tail;
	spinlock_t txfidx_lock;

	int *mpt_rxfidx; /* Free Rx Context list */
	int mpt_rxfidx_tail;
	spinlock_t rxfidx_lock;

	struct BufferControl *RcvCtl;	/* Receive BufferControl structs */
	struct BufferControl *SendCtl;	/* Send BufferControl structs */

	int max_buckets_out;		/* Max buckets to send to IOC */
	int tx_max_out;			/* IOC's Tx queue len */

	u32 total_posted;		/* lifetime count of buckets posted */
	u32 total_received;		/* lifetime count of buckets returned */
	struct net_device_stats stats;	/* Per device statistics */

	struct mpt_work_struct post_buckets_task; /* deferred bucket repost */
	unsigned long post_buckets_active;	  /* task-scheduled flag */
};
139
/* On-the-wire LAN header layout used by this driver: destination type +
 * address followed by source type + address (FC_ALEN-byte addresses). */
struct mpt_lan_ohdr {
	u16	dtype;
	u8	daddr[FC_ALEN];
	u16	stype;
	u8	saddr[FC_ALEN];
};
146
147 /*=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=*/
148
149 /*
150  *  Forward protos...
151  */
152 static int  lan_reply (MPT_ADAPTER *ioc, MPT_FRAME_HDR *mf,
153                        MPT_FRAME_HDR *reply);
154 static int  mpt_lan_open(struct net_device *dev);
155 static int  mpt_lan_reset(struct net_device *dev);
156 static int  mpt_lan_close(struct net_device *dev);
157 static void mpt_lan_post_receive_buckets(void *dev_id);
158 static void mpt_lan_wake_post_buckets_task(struct net_device *dev,
159                                            int priority);
160 static int  mpt_lan_receive_post_turbo(struct net_device *dev, u32 tmsg);
161 static int  mpt_lan_receive_post_reply(struct net_device *dev,
162                                        LANReceivePostReply_t *pRecvRep);
163 static int  mpt_lan_send_turbo(struct net_device *dev, u32 tmsg);
164 static int  mpt_lan_send_reply(struct net_device *dev,
165                                LANSendReply_t *pSendRep);
166 static int  mpt_lan_ioc_reset(MPT_ADAPTER *ioc, int reset_phase);
167 static int  mpt_lan_event_process(MPT_ADAPTER *ioc, EventNotificationReply_t *pEvReply);
168 static unsigned short mpt_lan_type_trans(struct sk_buff *skb,
169                                          struct net_device *dev);
170
171 /*=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=*/
172 /*
173  *  Fusion MPT LAN private data
174  */
175 static int LanCtx = -1;
176
177 static u32 max_buckets_out = 127;
178 static u32 tx_max_out_p = 127 - 16;
179
180 static struct net_device *mpt_landev[MPT_MAX_ADAPTERS+1];
181
182 #ifdef QLOGIC_NAA_WORKAROUND
183 static struct NAA_Hosed *mpt_bad_naa = NULL;
184 rwlock_t bad_naa_lock;
185 #endif
186
187 /*=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=*/
188 /*
189  * Fusion MPT LAN external data
190  */
191 extern int mpt_lan_index;
192
193 /*=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=*/
194 /**
195  *      lan_reply - Handle all data sent from the hardware.
196  *      @ioc: Pointer to MPT_ADAPTER structure
197  *      @mf: Pointer to original MPT request frame (NULL if TurboReply)
198  *      @reply: Pointer to MPT reply frame
199  *
200  *      Returns 1 indicating original alloc'd request frame ptr
201  *      should be freed, or 0 if it shouldn't.
202  */
static int
lan_reply (MPT_ADAPTER *ioc, MPT_FRAME_HDR *mf, MPT_FRAME_HDR *reply)
{
	struct net_device *dev = mpt_landev[ioc->id];
	int FreeReqFrame = 0;	/* 1 => caller should free the request frame */

	dioprintk((KERN_INFO MYNAM ": %s/%s: Got reply.\n",
		  IOC_AND_NETDEV_NAMES_s_s(dev)));

//	dioprintk((KERN_INFO MYNAM "@lan_reply: mf = %p, reply = %p\n",
//			mf, reply));

	/* mf == NULL flags a "turbo" reply: 'reply' is not a frame pointer
	 * but a 32-bit context value smuggled through the pointer. */
	if (mf == NULL) {
		u32 tmsg = CAST_PTR_TO_U32(reply);

		dioprintk((KERN_INFO MYNAM ": %s/%s: @lan_reply, tmsg %08x\n",
				IOC_AND_NETDEV_NAMES_s_s(dev),
				tmsg));

		switch (GET_LAN_FORM(tmsg)) {

		// NOTE!  (Optimization) First case here is now caught in
		//  mptbase.c::mpt_interrupt() routine and callcack here
		//  is now skipped for this case!  20001218 -sralston
#if 0
		case LAN_REPLY_FORM_MESSAGE_CONTEXT:
//			dioprintk((KERN_INFO MYNAM "/lan_reply: "
//				  "MessageContext turbo reply received\n"));
			FreeReqFrame = 1;
			break;
#endif

		case LAN_REPLY_FORM_SEND_SINGLE:
//			dioprintk((MYNAM "/lan_reply: "
//				  "calling mpt_lan_send_reply (turbo)\n"));

			// Potential BUG here?  -sralston
			//	FreeReqFrame = mpt_lan_send_turbo(dev, tmsg);
			//  If/when mpt_lan_send_turbo would return 1 here,
			//  calling routine (mptbase.c|mpt_interrupt)
			//  would Oops because mf has already been set
			//  to NULL.  So after return from this func,
			//  mpt_interrupt() will attempt to put (NULL) mf ptr
			//  item back onto its adapter FreeQ - Oops!:-(
			//  It's Ok, since mpt_lan_send_turbo() *currently*
			//  always returns 0, but..., just in case:

			/* Return value deliberately discarded; see the
			 * rationale in the comment block above. */
			(void) mpt_lan_send_turbo(dev, tmsg);
			FreeReqFrame = 0;

			break;

		case LAN_REPLY_FORM_RECEIVE_SINGLE:
//			dioprintk((KERN_INFO MYNAM "@lan_reply: "
//				  "rcv-Turbo = %08x\n", tmsg));
			mpt_lan_receive_post_turbo(dev, tmsg);
			break;

		default:
			printk (KERN_ERR MYNAM "/lan_reply: Got a turbo reply "
				"that I don't know what to do with\n");

			/* CHECKME!  Hmmm...  FreeReqFrame is 0 here; is that right? */

			break;
		}

		/* Turbo path done; never fall through to the full-frame
		 * dispatch below. */
		return FreeReqFrame;
	}

//	msg = (u32 *) reply;
//	dioprintk((KERN_INFO MYNAM "@lan_reply: msg = %08x %08x %08x %08x\n",
//		  le32_to_cpu(msg[0]), le32_to_cpu(msg[1]),
//		  le32_to_cpu(msg[2]), le32_to_cpu(msg[3])));
//	dioprintk((KERN_INFO MYNAM "@lan_reply: Function = %02xh\n",
//		  reply->u.hdr.Function));

	/* Full (non-turbo) reply frame: dispatch on the MPI function code. */
	switch (reply->u.hdr.Function) {

	case MPI_FUNCTION_LAN_SEND:
	{
		LANSendReply_t *pSendRep;

		pSendRep = (LANSendReply_t *) reply;
		FreeReqFrame = mpt_lan_send_reply(dev, pSendRep);
		break;
	}

	case MPI_FUNCTION_LAN_RECEIVE:
	{
		LANReceivePostReply_t *pRecvRep;

		pRecvRep = (LANReceivePostReply_t *) reply;
		if (pRecvRep->NumberOfContexts) {
			mpt_lan_receive_post_reply(dev, pRecvRep);
			/* Only the final reply of a multi-reply sequence
			 * (no CONTINUATION flag) releases the request. */
			if (!(pRecvRep->MsgFlags & MPI_MSGFLAGS_CONTINUATION_REPLY))
				FreeReqFrame = 1;
		} else
			dioprintk((KERN_INFO MYNAM "@lan_reply: zero context "
				  "ReceivePostReply received.\n"));
		break;
	}

	case MPI_FUNCTION_LAN_RESET:
		/* Just a default reply. Might want to check it to
		 * make sure that everything went ok.
		 */
		FreeReqFrame = 1;
		break;

	case MPI_FUNCTION_EVENT_NOTIFICATION:
	case MPI_FUNCTION_EVENT_ACK:
		/* UPDATE!  20010120 -sralston
		 *  _EVENT_NOTIFICATION should NOT come down this path any more.
		 *  Should be routed to mpt_lan_event_process(), but just in case...
		 */
		FreeReqFrame = 1;
		break;

	default:
		printk (KERN_ERR MYNAM "/lan_reply: Got a non-turbo "
			"reply that I don't know what to do with\n");

		/* CHECKME!  Hmmm...  FreeReqFrame is 0 here; is that right? */
		FreeReqFrame = 1;

		break;
	}

	return FreeReqFrame;
}
334
335 /*=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=*/
/*
 *	mpt_lan_ioc_reset - IOC reset callback for the LAN driver
 *	@ioc: adapter being reset
 *	@reset_phase: MPT_IOC_SETUP_RESET, MPT_IOC_PRE_RESET or post-reset
 *
 *	Pre-reset: stop the Tx queue and rebuild the Rx free-context stack,
 *	since any buckets posted to the IOC are lost across the reset.
 *	Post-reset: repost receive buckets and restart the queue.
 *	Always returns 1.
 */
static int
mpt_lan_ioc_reset(MPT_ADAPTER *ioc, int reset_phase)
{
	struct net_device *dev = mpt_landev[ioc->id];
	struct mpt_lan_priv *priv = netdev_priv(dev);

	dlprintk((KERN_INFO MYNAM ": IOC %s_reset routed to LAN driver!\n",
			reset_phase==MPT_IOC_SETUP_RESET ? "setup" : (
			reset_phase==MPT_IOC_PRE_RESET ? "pre" : "post")));

	/* Interface never opened (or already closed): nothing to do. */
	if (priv->mpt_rxfidx == NULL)
		return (1);

	if (reset_phase == MPT_IOC_SETUP_RESET) {
		;	/* nothing to do at setup time */
	} else if (reset_phase == MPT_IOC_PRE_RESET) {
		int i;
		unsigned long flags;

		netif_stop_queue(dev);

		dlprintk ((KERN_INFO "mptlan/ioc_reset: called netif_stop_queue for %s.\n", dev->name));

		/* All outstanding buckets are lost when the IOC resets. */
		atomic_set(&priv->buckets_out, 0);

		/* Reset Rx Free Tail index and re-populate the queue. */
		spin_lock_irqsave(&priv->rxfidx_lock, flags);
		priv->mpt_rxfidx_tail = -1;
		for (i = 0; i < priv->max_buckets_out; i++)
			priv->mpt_rxfidx[++priv->mpt_rxfidx_tail] = i;
		spin_unlock_irqrestore(&priv->rxfidx_lock, flags);
	} else {
		/* Post-reset: hand fresh buckets to the IOC and resume Tx. */
		mpt_lan_post_receive_buckets(dev);
		netif_wake_queue(dev);
	}

	return 1;
}
374
375 /*=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=*/
376 static int
377 mpt_lan_event_process(MPT_ADAPTER *ioc, EventNotificationReply_t *pEvReply)
378 {
379         dlprintk((KERN_INFO MYNAM ": MPT event routed to LAN driver!\n"));
380
381         switch (le32_to_cpu(pEvReply->Event)) {
382         case MPI_EVENT_NONE:                            /* 00 */
383         case MPI_EVENT_LOG_DATA:                        /* 01 */
384         case MPI_EVENT_STATE_CHANGE:                    /* 02 */
385         case MPI_EVENT_UNIT_ATTENTION:                  /* 03 */
386         case MPI_EVENT_IOC_BUS_RESET:                   /* 04 */
387         case MPI_EVENT_EXT_BUS_RESET:                   /* 05 */
388         case MPI_EVENT_RESCAN:                          /* 06 */
389                 /* Ok, do we need to do anything here? As far as
390                    I can tell, this is when a new device gets added
391                    to the loop. */
392         case MPI_EVENT_LINK_STATUS_CHANGE:              /* 07 */
393         case MPI_EVENT_LOOP_STATE_CHANGE:               /* 08 */
394         case MPI_EVENT_LOGOUT:                          /* 09 */
395         case MPI_EVENT_EVENT_CHANGE:                    /* 0A */
396         default:
397                 break;
398         }
399
400         /*
401          *  NOTE: pEvent->AckRequired handling now done in mptbase.c;
402          *  Do NOT do it here now!
403          */
404
405         return 1;
406 }
407
408 /*=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=*/
409 static int
410 mpt_lan_open(struct net_device *dev)
411 {
412         struct mpt_lan_priv *priv = netdev_priv(dev);
413         int i;
414
415         if (mpt_lan_reset(dev) != 0) {
416                 MPT_ADAPTER *mpt_dev = priv->mpt_dev;
417
418                 printk (KERN_WARNING MYNAM "/lan_open: lan_reset failed.");
419
420                 if (mpt_dev->active)
421                         printk ("The ioc is active. Perhaps it needs to be"
422                                 " reset?\n");
423                 else
424                         printk ("The ioc in inactive, most likely in the "
425                                 "process of being reset. Please try again in "
426                                 "a moment.\n");
427         }
428
429         priv->mpt_txfidx = kmalloc(priv->tx_max_out * sizeof(int), GFP_KERNEL);
430         if (priv->mpt_txfidx == NULL)
431                 goto out;
432         priv->mpt_txfidx_tail = -1;
433
434         priv->SendCtl = kmalloc(priv->tx_max_out * sizeof(struct BufferControl),
435                                 GFP_KERNEL);
436         if (priv->SendCtl == NULL)
437                 goto out_mpt_txfidx;
438         for (i = 0; i < priv->tx_max_out; i++) {
439                 memset(&priv->SendCtl[i], 0, sizeof(struct BufferControl));
440                 priv->mpt_txfidx[++priv->mpt_txfidx_tail] = i;
441         }
442
443         dlprintk((KERN_INFO MYNAM "@lo: Finished initializing SendCtl\n"));
444
445         priv->mpt_rxfidx = kmalloc(priv->max_buckets_out * sizeof(int),
446                                    GFP_KERNEL);
447         if (priv->mpt_rxfidx == NULL)
448                 goto out_SendCtl;
449         priv->mpt_rxfidx_tail = -1;
450
451         priv->RcvCtl = kmalloc(priv->max_buckets_out *
452                                                 sizeof(struct BufferControl),
453                                GFP_KERNEL);
454         if (priv->RcvCtl == NULL)
455                 goto out_mpt_rxfidx;
456         for (i = 0; i < priv->max_buckets_out; i++) {
457                 memset(&priv->RcvCtl[i], 0, sizeof(struct BufferControl));
458                 priv->mpt_rxfidx[++priv->mpt_rxfidx_tail] = i;
459         }
460
461 /**/    dlprintk((KERN_INFO MYNAM "/lo: txfidx contains - "));
462 /**/    for (i = 0; i < priv->tx_max_out; i++)
463 /**/            dlprintk((" %xh", priv->mpt_txfidx[i]));
464 /**/    dlprintk(("\n"));
465
466         dlprintk((KERN_INFO MYNAM "/lo: Finished initializing RcvCtl\n"));
467
468         mpt_lan_post_receive_buckets(dev);
469         printk(KERN_INFO MYNAM ": %s/%s: interface up & active\n",
470                         IOC_AND_NETDEV_NAMES_s_s(dev));
471
472         if (mpt_event_register(LanCtx, mpt_lan_event_process) != 0) {
473                 printk (KERN_WARNING MYNAM "/lo: Unable to register for Event"
474                         " Notifications. This is a bad thing! We're not going "
475                         "to go ahead, but I'd be leery of system stability at "
476                         "this point.\n");
477         }
478
479         netif_start_queue(dev);
480         dlprintk((KERN_INFO MYNAM "/lo: Done.\n"));
481
482         return 0;
483 out_mpt_rxfidx:
484         kfree(priv->mpt_rxfidx);
485         priv->mpt_rxfidx = NULL;
486 out_SendCtl:
487         kfree(priv->SendCtl);
488         priv->SendCtl = NULL;
489 out_mpt_txfidx:
490         kfree(priv->mpt_txfidx);
491         priv->mpt_txfidx = NULL;
492 out:    return -ENOMEM;
493 }
494
495 /*=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=*/
496 /* Send a LanReset message to the FW. This should result in the FW returning
497    any buckets it still has. */
498 static int
499 mpt_lan_reset(struct net_device *dev)
500 {
501         MPT_FRAME_HDR *mf;
502         LANResetRequest_t *pResetReq;
503         struct mpt_lan_priv *priv = netdev_priv(dev);
504
505         mf = mpt_get_msg_frame(LanCtx, priv->mpt_dev->id);
506
507         if (mf == NULL) {
508 /*              dlprintk((KERN_ERR MYNAM "/reset: Evil funkiness abounds! "
509                 "Unable to allocate a request frame.\n"));
510 */
511                 return -1;
512         }
513
514         pResetReq = (LANResetRequest_t *) mf;
515
516         pResetReq->Function     = MPI_FUNCTION_LAN_RESET;
517         pResetReq->ChainOffset  = 0;
518         pResetReq->Reserved     = 0;
519         pResetReq->PortNumber   = priv->pnum;
520         pResetReq->MsgFlags     = 0;
521         pResetReq->Reserved2    = 0;
522
523         mpt_put_msg_frame(LanCtx, priv->mpt_dev->id, mf);
524
525         return 0;
526 }
527
528 /*=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=*/
/*
 *	mpt_lan_close - netdev stop() handler
 *	@dev: net device being brought down
 *
 *	Deregisters from MPT events, asks the FW to return its buckets via
 *	a LAN reset, waits up to ~2 seconds for outstanding buckets to come
 *	back, then unmaps and frees any skbs still held on both the Rx and
 *	Tx sides along with the context/control arrays.  Returns 0.
 */
static int
mpt_lan_close(struct net_device *dev)
{
	struct mpt_lan_priv *priv = netdev_priv(dev);
	MPT_ADAPTER *mpt_dev = priv->mpt_dev;
	unsigned int timeout;
	int i;

	dlprintk((KERN_INFO MYNAM ": mpt_lan_close called\n"));

	mpt_event_deregister(LanCtx);

	dlprintk((KERN_INFO MYNAM ":lan_close: Posted %d buckets "
		  "since driver was loaded, %d still out\n",
		  priv->total_posted,atomic_read(&priv->buckets_out)));

	netif_stop_queue(dev);

	/* Triggers the FW to hand back any buckets it still holds. */
	mpt_lan_reset(dev);

	/* Poll for up to 2*HZ one-jiffy sleeps for buckets to drain. */
	timeout = 2 * HZ;
	while (atomic_read(&priv->buckets_out) && --timeout) {
		set_current_state(TASK_INTERRUPTIBLE);
		schedule_timeout(1);
	}

	/* Reclaim any Rx buckets that never came back: unmap the DMA
	 * buffer before freeing the skb. */
	for (i = 0; i < priv->max_buckets_out; i++) {
		if (priv->RcvCtl[i].skb != NULL) {
/**/			dlprintk((KERN_INFO MYNAM "/lan_close: bucket %05x "
/**/				  "is still out\n", i));
			pci_unmap_single(mpt_dev->pcidev, priv->RcvCtl[i].dma,
					 priv->RcvCtl[i].len,
					 PCI_DMA_FROMDEVICE);
			dev_kfree_skb(priv->RcvCtl[i].skb);
		}
	}

	kfree (priv->RcvCtl);
	kfree (priv->mpt_rxfidx);

	/* Same for any Tx skbs whose send never completed. */
	for (i = 0; i < priv->tx_max_out; i++) {
		if (priv->SendCtl[i].skb != NULL) {
			pci_unmap_single(mpt_dev->pcidev, priv->SendCtl[i].dma,
					 priv->SendCtl[i].len,
					 PCI_DMA_TODEVICE);
			dev_kfree_skb(priv->SendCtl[i].skb);
		}
	}

	kfree(priv->SendCtl);
	kfree(priv->mpt_txfidx);

	atomic_set(&priv->buckets_out, 0);

	printk(KERN_INFO MYNAM ": %s/%s: interface down & inactive\n",
			IOC_AND_NETDEV_NAMES_s_s(dev));

	return 0;
}
588
589 /*=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=*/
590 static struct net_device_stats *
591 mpt_lan_get_stats(struct net_device *dev)
592 {
593         struct mpt_lan_priv *priv = netdev_priv(dev);
594
595         return (struct net_device_stats *) &priv->stats;
596 }
597
598 /*=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=*/
599 static int
600 mpt_lan_change_mtu(struct net_device *dev, int new_mtu)
601 {
602         if ((new_mtu < MPT_LAN_MIN_MTU) || (new_mtu > MPT_LAN_MAX_MTU))
603                 return -EINVAL;
604         dev->mtu = new_mtu;
605         return 0;
606 }
607
608 /*=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=*/
609 /* Tx timeout handler. */
610 static void
611 mpt_lan_tx_timeout(struct net_device *dev)
612 {
613         struct mpt_lan_priv *priv = netdev_priv(dev);
614         MPT_ADAPTER *mpt_dev = priv->mpt_dev;
615
616         if (mpt_dev->active) {
617                 dlprintk (("mptlan/tx_timeout: calling netif_wake_queue for %s.\n", dev->name));
618                 netif_wake_queue(dev);
619         }
620 }
621
622 /*=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=*/
623 //static inline int
624 static int
625 mpt_lan_send_turbo(struct net_device *dev, u32 tmsg)
626 {
627         struct mpt_lan_priv *priv = netdev_priv(dev);
628         MPT_ADAPTER *mpt_dev = priv->mpt_dev;
629         struct sk_buff *sent;
630         unsigned long flags;
631         u32 ctx;
632
633         ctx = GET_LAN_BUFFER_CONTEXT(tmsg);
634         sent = priv->SendCtl[ctx].skb;
635
636         priv->stats.tx_packets++;
637         priv->stats.tx_bytes += sent->len;
638
639         dioprintk((KERN_INFO MYNAM ": %s/%s: @%s, skb %p sent.\n",
640                         IOC_AND_NETDEV_NAMES_s_s(dev),
641                         __FUNCTION__, sent));
642
643         priv->SendCtl[ctx].skb = NULL;
644         pci_unmap_single(mpt_dev->pcidev, priv->SendCtl[ctx].dma,
645                          priv->SendCtl[ctx].len, PCI_DMA_TODEVICE);
646         dev_kfree_skb_irq(sent);
647
648         spin_lock_irqsave(&priv->txfidx_lock, flags);
649         priv->mpt_txfidx[++priv->mpt_txfidx_tail] = ctx;
650         spin_unlock_irqrestore(&priv->txfidx_lock, flags);
651
652         netif_wake_queue(dev);
653         return 0;
654 }
655
656 /*=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=*/
/*
 *	mpt_lan_send_reply - complete one or more Tx sends from a full reply
 *	@dev: net device the sends were issued on
 *	@pSendRep: LANSend reply frame holding NumberOfContexts contexts
 *
 *	Updates Tx statistics according to IOCStatus, then walks the array
 *	of buffer contexts in the reply, unmapping and freeing each sent
 *	skb and returning its context to the Tx free stack.  Returns 1 if
 *	the caller should free the original request frame (i.e. this reply
 *	is not flagged as a continuation), else 0.
 */
static int
mpt_lan_send_reply(struct net_device *dev, LANSendReply_t *pSendRep)
{
	struct mpt_lan_priv *priv = netdev_priv(dev);
	MPT_ADAPTER *mpt_dev = priv->mpt_dev;
	struct sk_buff *sent;
	unsigned long flags;
	int FreeReqFrame = 0;
	u32 *pContext;
	u32 ctx;
	u8 count;

	count = pSendRep->NumberOfContexts;

	dioprintk((KERN_INFO MYNAM ": send_reply: IOCStatus: %04x\n",
		 le16_to_cpu(pSendRep->IOCStatus)));

	/* Add check for Loginfo Flag in IOCStatus */

	switch (le16_to_cpu(pSendRep->IOCStatus) & MPI_IOCSTATUS_MASK) {
	case MPI_IOCSTATUS_SUCCESS:
		priv->stats.tx_packets += count;
		break;

	case MPI_IOCSTATUS_LAN_CANCELED:
	case MPI_IOCSTATUS_LAN_TRANSMIT_ABORTED:
		break;

	case MPI_IOCSTATUS_INVALID_SGL:
		priv->stats.tx_errors += count;
		printk (KERN_ERR MYNAM ": %s/%s: ERROR - Invalid SGL sent to IOC!\n",
				IOC_AND_NETDEV_NAMES_s_s(dev));
		/* NOTE(review): jumping to out skips the context-release
		 * loop below, so these Tx contexts/skbs stay outstanding —
		 * looks like a leak on this path; confirm intent. */
		goto out;

	default:
		priv->stats.tx_errors += count;
		break;
	}

	pContext = &pSendRep->BufferContext;

	/* Release every completed context under the Tx free-stack lock. */
	spin_lock_irqsave(&priv->txfidx_lock, flags);
	while (count > 0) {
		ctx = GET_LAN_BUFFER_CONTEXT(le32_to_cpu(*pContext));

		sent = priv->SendCtl[ctx].skb;
		priv->stats.tx_bytes += sent->len;

		dioprintk((KERN_INFO MYNAM ": %s/%s: @%s, skb %p sent.\n",
				IOC_AND_NETDEV_NAMES_s_s(dev),
				__FUNCTION__, sent));

		priv->SendCtl[ctx].skb = NULL;
		pci_unmap_single(mpt_dev->pcidev, priv->SendCtl[ctx].dma,
				 priv->SendCtl[ctx].len, PCI_DMA_TODEVICE);
		dev_kfree_skb_irq(sent);

		priv->mpt_txfidx[++priv->mpt_txfidx_tail] = ctx;

		pContext++;
		count--;
	}
	spin_unlock_irqrestore(&priv->txfidx_lock, flags);

out:
	/* Only the last reply of a continuation sequence frees the frame. */
	if (!(pSendRep->MsgFlags & MPI_MSGFLAGS_CONTINUATION_REPLY))
		FreeReqFrame = 1;

	netif_wake_queue(dev);
	return FreeReqFrame;
}
728
729 /*=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=*/
730 static int
731 mpt_lan_sdu_send (struct sk_buff *skb, struct net_device *dev)
732 {
733         struct mpt_lan_priv *priv = netdev_priv(dev);
734         MPT_ADAPTER *mpt_dev = priv->mpt_dev;
735         MPT_FRAME_HDR *mf;
736         LANSendRequest_t *pSendReq;
737         SGETransaction32_t *pTrans;
738         SGESimple64_t *pSimple;
739         dma_addr_t dma;
740         unsigned long flags;
741         int ctx;
742         u16 cur_naa = 0x1000;
743
744         dioprintk((KERN_INFO MYNAM ": %s called, skb_addr = %p\n",
745                         __FUNCTION__, skb));
746
747         spin_lock_irqsave(&priv->txfidx_lock, flags);
748         if (priv->mpt_txfidx_tail < 0) {
749                 netif_stop_queue(dev);
750                 spin_unlock_irqrestore(&priv->txfidx_lock, flags);
751
752                 printk (KERN_ERR "%s: no tx context available: %u\n",
753                         __FUNCTION__, priv->mpt_txfidx_tail);
754                 return 1;
755         }
756
757         mf = mpt_get_msg_frame(LanCtx, mpt_dev->id);
758         if (mf == NULL) {
759                 netif_stop_queue(dev);
760                 spin_unlock_irqrestore(&priv->txfidx_lock, flags);
761
762                 printk (KERN_ERR "%s: Unable to alloc request frame\n",
763                         __FUNCTION__);
764                 return 1;
765         }
766
767         ctx = priv->mpt_txfidx[priv->mpt_txfidx_tail--];
768         spin_unlock_irqrestore(&priv->txfidx_lock, flags);
769
770 //      dioprintk((KERN_INFO MYNAM ": %s/%s: Creating new msg frame (send).\n",
771 //                      IOC_AND_NETDEV_NAMES_s_s(dev)));
772
773         pSendReq = (LANSendRequest_t *) mf;
774
775         /* Set the mac.raw pointer, since this apparently isn't getting
776          * done before we get the skb. Pull the data pointer past the mac data.
777          */
778         skb->mac.raw = skb->data;
779         skb_pull(skb, 12);
780
781         dma = pci_map_single(mpt_dev->pcidev, skb->data, skb->len,
782                              PCI_DMA_TODEVICE);
783
784         priv->SendCtl[ctx].skb = skb;
785         priv->SendCtl[ctx].dma = dma;
786         priv->SendCtl[ctx].len = skb->len;
787
788         /* Message Header */
789         pSendReq->Reserved    = 0;
790         pSendReq->Function    = MPI_FUNCTION_LAN_SEND;
791         pSendReq->ChainOffset = 0;
792         pSendReq->Reserved2   = 0;
793         pSendReq->MsgFlags    = 0;
794         pSendReq->PortNumber  = priv->pnum;
795
796         /* Transaction Context Element */
797         pTrans = (SGETransaction32_t *) pSendReq->SG_List;
798
799         /* No Flags, 8 bytes of Details, 32bit Context (bloody turbo replies) */
800         pTrans->ContextSize   = sizeof(u32);
801         pTrans->DetailsLength = 2 * sizeof(u32);
802         pTrans->Flags         = 0;
803         pTrans->TransactionContext[0] = cpu_to_le32(ctx);
804
805 //      dioprintk((KERN_INFO MYNAM ": %s/%s: BC = %08x, skb = %p, buff = %p\n",
806 //                      IOC_AND_NETDEV_NAMES_s_s(dev),
807 //                      ctx, skb, skb->data));
808
809 #ifdef QLOGIC_NAA_WORKAROUND
810 {
811         struct NAA_Hosed *nh;
812
813         /* Munge the NAA for Tx packets to QLogic boards, which don't follow
814            RFC 2625. The longer I look at this, the more my opinion of Qlogic
815            drops. */
816         read_lock_irq(&bad_naa_lock);
817         for (nh = mpt_bad_naa; nh != NULL; nh=nh->next) {
818                 if ((nh->ieee[0] == skb->mac.raw[0]) &&
819                     (nh->ieee[1] == skb->mac.raw[1]) &&
820                     (nh->ieee[2] == skb->mac.raw[2]) &&
821                     (nh->ieee[3] == skb->mac.raw[3]) &&
822                     (nh->ieee[4] == skb->mac.raw[4]) &&
823                     (nh->ieee[5] == skb->mac.raw[5])) {
824                         cur_naa = nh->NAA;
825                         dlprintk ((KERN_INFO "mptlan/sdu_send: using NAA value "
826                                   "= %04x.\n", cur_naa));
827                         break;
828                 }
829         }
830         read_unlock_irq(&bad_naa_lock);
831 }
832 #endif
833
834         pTrans->TransactionDetails[0] = cpu_to_le32((cur_naa         << 16) |
835                                                     (skb->mac.raw[0] <<  8) |
836                                                     (skb->mac.raw[1] <<  0));
837         pTrans->TransactionDetails[1] = cpu_to_le32((skb->mac.raw[2] << 24) |
838                                                     (skb->mac.raw[3] << 16) |
839                                                     (skb->mac.raw[4] <<  8) |
840                                                     (skb->mac.raw[5] <<  0));
841
842         pSimple = (SGESimple64_t *) &pTrans->TransactionDetails[2];
843
844         /* If we ever decide to send more than one Simple SGE per LANSend, then
845            we will need to make sure that LAST_ELEMENT only gets set on the
846            last one. Otherwise, bad voodoo and evil funkiness will commence. */
847         pSimple->FlagsLength = cpu_to_le32(
848                         ((MPI_SGE_FLAGS_LAST_ELEMENT |
849                           MPI_SGE_FLAGS_END_OF_BUFFER |
850                           MPI_SGE_FLAGS_SIMPLE_ELEMENT |
851                           MPI_SGE_FLAGS_SYSTEM_ADDRESS |
852                           MPI_SGE_FLAGS_HOST_TO_IOC |
853                           MPI_SGE_FLAGS_64_BIT_ADDRESSING |
854                           MPI_SGE_FLAGS_END_OF_LIST) << MPI_SGE_FLAGS_SHIFT) |
855                         skb->len);
856         pSimple->Address.Low = cpu_to_le32((u32) dma);
857         if (sizeof(dma_addr_t) > sizeof(u32))
858                 pSimple->Address.High = cpu_to_le32((u32) ((u64) dma >> 32));
859         else
860                 pSimple->Address.High = 0;
861
862         mpt_put_msg_frame (LanCtx, mpt_dev->id, mf);
863         dev->trans_start = jiffies;
864
865         dioprintk((KERN_INFO MYNAM ": %s/%s: Sending packet. FlagsLength = %08x.\n",
866                         IOC_AND_NETDEV_NAMES_s_s(dev),
867                         le32_to_cpu(pSimple->FlagsLength)));
868
869         return 0;
870 }
871
872 /*=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=*/
/*
 * mpt_lan_wake_post_buckets_task - schedule mpt_lan_post_receive_buckets().
 * @dev: net device whose receive-bucket pool needs replenishing
 * @priority: 0 = put it on the timer queue, 1 = put it on the immediate queue
 *
 * priv->post_buckets_active acts as a one-shot latch: if the task is
 * already queued (bit set), this call is a no-op.  The task itself clears
 * the bit when it finishes running.
 */
static inline void
mpt_lan_wake_post_buckets_task(struct net_device *dev, int priority)
{
	struct mpt_lan_priv *priv = dev->priv;
	
	if (test_and_set_bit(0, &priv->post_buckets_active) == 0) {
		if (priority) {
#if LINUX_VERSION_CODE >= KERNEL_VERSION(2,5,41)
			schedule_work(&priv->post_buckets_task);
#else
			/* pre-2.5.41 kernels: run from the immediate
			 * bottom-half queue */
			queue_task(&priv->post_buckets_task, &tq_immediate);
			mark_bh(IMMEDIATE_BH);
#endif
		} else {
#if LINUX_VERSION_CODE >= KERNEL_VERSION(2,5,41)
			/* low priority: delay by one jiffy */
			schedule_delayed_work(&priv->post_buckets_task, 1);
#else
			queue_task(&priv->post_buckets_task, &tq_timer);
#endif
			dioprintk((KERN_INFO MYNAM ": post_buckets queued on "
				   "timer.\n"));
		}
		dioprintk((KERN_INFO MYNAM ": %s/%s: Queued post_buckets task.\n",
			   IOC_AND_NETDEV_NAMES_s_s(dev) ));
	}
}
902
903 /*=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=*/
904 static inline int
905 mpt_lan_receive_skb(struct net_device *dev, struct sk_buff *skb)
906 {
907         struct mpt_lan_priv *priv = dev->priv;
908
909         skb->protocol = mpt_lan_type_trans(skb, dev);
910
911         dioprintk((KERN_INFO MYNAM ": %s/%s: Incoming packet (%d bytes) "
912                  "delivered to upper level.\n",
913                         IOC_AND_NETDEV_NAMES_s_s(dev), skb->len));
914
915         priv->stats.rx_bytes += skb->len;
916         priv->stats.rx_packets++;
917
918         skb->dev = dev;
919         netif_rx(skb);
920
921         dioprintk((MYNAM "/receive_skb: %d buckets remaining\n",
922                  atomic_read(&priv->buckets_out)));
923
924         if (atomic_read(&priv->buckets_out) < priv->bucketthresh)
925                 mpt_lan_wake_post_buckets_task(dev, 1);
926
927         dioprintk((KERN_INFO MYNAM "/receive_post_reply: %d buckets "
928                   "remaining, %d received back since sod\n",
929                   atomic_read(&priv->buckets_out), priv->total_received));
930
931         return 0;
932 }
933
934 /*=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=*/
935 //static inline int
936 static int
937 mpt_lan_receive_post_turbo(struct net_device *dev, u32 tmsg)
938 {
939         struct mpt_lan_priv *priv = dev->priv;
940         MPT_ADAPTER *mpt_dev = priv->mpt_dev;
941         struct sk_buff *skb, *old_skb;
942         unsigned long flags;
943         u32 ctx, len;
944
945         ctx = GET_LAN_BUCKET_CONTEXT(tmsg);
946         skb = priv->RcvCtl[ctx].skb;
947
948         len = GET_LAN_PACKET_LENGTH(tmsg);
949
950         if (len < MPT_LAN_RX_COPYBREAK) {
951                 old_skb = skb;
952
953                 skb = (struct sk_buff *)dev_alloc_skb(len);
954                 if (!skb) {
955                         printk (KERN_ERR MYNAM ": %s/%s: ERROR - Can't allocate skb! (%s@%d)\n",
956                                         IOC_AND_NETDEV_NAMES_s_s(dev),
957                                         __FILE__, __LINE__);
958                         return -ENOMEM;
959                 }
960
961                 pci_dma_sync_single_for_cpu(mpt_dev->pcidev, priv->RcvCtl[ctx].dma,
962                                             priv->RcvCtl[ctx].len, PCI_DMA_FROMDEVICE);
963
964                 memcpy(skb_put(skb, len), old_skb->data, len);
965
966                 pci_dma_sync_single_for_device(mpt_dev->pcidev, priv->RcvCtl[ctx].dma,
967                                                priv->RcvCtl[ctx].len, PCI_DMA_FROMDEVICE);
968                 goto out;
969         }
970
971         skb_put(skb, len);
972
973         priv->RcvCtl[ctx].skb = NULL;
974
975         pci_unmap_single(mpt_dev->pcidev, priv->RcvCtl[ctx].dma,
976                          priv->RcvCtl[ctx].len, PCI_DMA_FROMDEVICE);
977
978 out:
979         spin_lock_irqsave(&priv->rxfidx_lock, flags);
980         priv->mpt_rxfidx[++priv->mpt_rxfidx_tail] = ctx;
981         spin_unlock_irqrestore(&priv->rxfidx_lock, flags);
982
983         atomic_dec(&priv->buckets_out);
984         priv->total_received++;
985
986         return mpt_lan_receive_skb(dev, skb);
987 }
988
989 /*=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=*/
/*
 * mpt_lan_receive_post_free - free buckets returned by the IOC with no data.
 * @dev: net device that owns the buckets
 * @pRecvRep: ReceivePostReply listing the canceled bucket contexts
 *
 * Called for canceled receive buckets (e.g. after a LanReset): each listed
 * bucket's skb is unmapped and freed, and its context index is pushed back
 * on the rx free-index stack under rxfidx_lock.  Always returns 0.
 */
static int
mpt_lan_receive_post_free(struct net_device *dev,
			  LANReceivePostReply_t *pRecvRep)
{
	struct mpt_lan_priv *priv = dev->priv;
	MPT_ADAPTER *mpt_dev = priv->mpt_dev;
	unsigned long flags;
	struct sk_buff *skb;
	u32 ctx;
	int count;
	int i;

	/* NumberOfContexts appears to be a single-byte field (no endian
	 * conversion done here) — confirm against the MPI header. */
	count = pRecvRep->NumberOfContexts;

/**/	dlprintk((KERN_INFO MYNAM "/receive_post_reply: "
		  "IOC returned %d buckets, freeing them...\n", count));

	spin_lock_irqsave(&priv->rxfidx_lock, flags);
	for (i = 0; i < count; i++) {
		ctx = le32_to_cpu(pRecvRep->BucketContext[i]);

		skb = priv->RcvCtl[ctx].skb;

		/* Detach, unmap and free the bucket, then recycle its
		 * context index. */
		priv->RcvCtl[ctx].skb = NULL;
		pci_unmap_single(mpt_dev->pcidev, priv->RcvCtl[ctx].dma,
				 priv->RcvCtl[ctx].len, PCI_DMA_FROMDEVICE);
		dev_kfree_skb_any(skb);

		priv->mpt_rxfidx[++priv->mpt_rxfidx_tail] = ctx;
	}
	spin_unlock_irqrestore(&priv->rxfidx_lock, flags);

	atomic_sub(count, &priv->buckets_out);

/**/	dlprintk((KERN_INFO MYNAM "@receive_post_reply: %d buckets "
/**/		  "remaining, %d received back since sod.\n",
/**/		  atomic_read(&priv->buckets_out), priv->total_received));
	return 0;
}
1043
1044 /*=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=*/
1045 static int
1046 mpt_lan_receive_post_reply(struct net_device *dev,
1047                            LANReceivePostReply_t *pRecvRep)
1048 {
1049         struct mpt_lan_priv *priv = dev->priv;
1050         MPT_ADAPTER *mpt_dev = priv->mpt_dev;
1051         struct sk_buff *skb, *old_skb;
1052         unsigned long flags;
1053         u32 len, ctx, offset;
1054         u32 remaining = le32_to_cpu(pRecvRep->BucketsRemaining);
1055         int count;
1056         int i, l;
1057
1058         dioprintk((KERN_INFO MYNAM ": mpt_lan_receive_post_reply called\n"));
1059         dioprintk((KERN_INFO MYNAM ": receive_post_reply: IOCStatus: %04x\n",
1060                  le16_to_cpu(pRecvRep->IOCStatus)));
1061
1062         if ((le16_to_cpu(pRecvRep->IOCStatus) & MPI_IOCSTATUS_MASK) ==
1063                                                 MPI_IOCSTATUS_LAN_CANCELED)
1064                 return mpt_lan_receive_post_free(dev, pRecvRep);
1065
1066         len = le32_to_cpu(pRecvRep->PacketLength);
1067         if (len == 0) {
1068                 printk (KERN_ERR MYNAM ": %s/%s: ERROR - Got a non-TURBO "
1069                         "ReceivePostReply w/ PacketLength zero!\n",
1070                                 IOC_AND_NETDEV_NAMES_s_s(dev));
1071                 printk (KERN_ERR MYNAM ": MsgFlags = %02x, IOCStatus = %04x\n",
1072                                 pRecvRep->MsgFlags, le16_to_cpu(pRecvRep->IOCStatus));
1073                 return -1;
1074         }
1075
1076         ctx    = le32_to_cpu(pRecvRep->BucketContext[0]);
1077         count  = pRecvRep->NumberOfContexts;
1078         skb    = priv->RcvCtl[ctx].skb;
1079
1080         offset = le32_to_cpu(pRecvRep->PacketOffset);
1081 //      if (offset != 0) {
1082 //              printk (KERN_INFO MYNAM ": %s/%s: Got a ReceivePostReply "
1083 //                      "w/ PacketOffset %u\n",
1084 //                              IOC_AND_NETDEV_NAMES_s_s(dev),
1085 //                              offset);
1086 //      }
1087
1088         dioprintk((KERN_INFO MYNAM ": %s/%s: @rpr, offset = %d, len = %d\n",
1089                         IOC_AND_NETDEV_NAMES_s_s(dev),
1090                         offset, len));
1091
1092         if (count > 1) {
1093                 int szrem = len;
1094
1095 //              dioprintk((KERN_INFO MYNAM ": %s/%s: Multiple buckets returned "
1096 //                      "for single packet, concatenating...\n",
1097 //                              IOC_AND_NETDEV_NAMES_s_s(dev)));
1098
1099                 skb = (struct sk_buff *)dev_alloc_skb(len);
1100                 if (!skb) {
1101                         printk (KERN_ERR MYNAM ": %s/%s: ERROR - Can't allocate skb! (%s@%d)\n",
1102                                         IOC_AND_NETDEV_NAMES_s_s(dev),
1103                                         __FILE__, __LINE__);
1104                         return -ENOMEM;
1105                 }
1106
1107                 spin_lock_irqsave(&priv->rxfidx_lock, flags);
1108                 for (i = 0; i < count; i++) {
1109
1110                         ctx = le32_to_cpu(pRecvRep->BucketContext[i]);
1111                         old_skb = priv->RcvCtl[ctx].skb;
1112
1113                         l = priv->RcvCtl[ctx].len;
1114                         if (szrem < l)
1115                                 l = szrem;
1116
1117 //                      dioprintk((KERN_INFO MYNAM ": %s/%s: Buckets = %d, len = %u\n",
1118 //                                      IOC_AND_NETDEV_NAMES_s_s(dev),
1119 //                                      i, l));
1120
1121                         pci_dma_sync_single_for_cpu(mpt_dev->pcidev,
1122                                                     priv->RcvCtl[ctx].dma,
1123                                                     priv->RcvCtl[ctx].len,
1124                                                     PCI_DMA_FROMDEVICE);
1125                         memcpy(skb_put(skb, l), old_skb->data, l);
1126
1127                         pci_dma_sync_single_for_device(mpt_dev->pcidev,
1128                                                        priv->RcvCtl[ctx].dma,
1129                                                        priv->RcvCtl[ctx].len,
1130                                                        PCI_DMA_FROMDEVICE);
1131
1132                         priv->mpt_rxfidx[++priv->mpt_rxfidx_tail] = ctx;
1133                         szrem -= l;
1134                 }
1135                 spin_unlock_irqrestore(&priv->rxfidx_lock, flags);
1136
1137         } else if (len < MPT_LAN_RX_COPYBREAK) {
1138
1139                 old_skb = skb;
1140
1141                 skb = (struct sk_buff *)dev_alloc_skb(len);
1142                 if (!skb) {
1143                         printk (KERN_ERR MYNAM ": %s/%s: ERROR - Can't allocate skb! (%s@%d)\n",
1144                                         IOC_AND_NETDEV_NAMES_s_s(dev),
1145                                         __FILE__, __LINE__);
1146                         return -ENOMEM;
1147                 }
1148
1149                 pci_dma_sync_single_for_cpu(mpt_dev->pcidev,
1150                                             priv->RcvCtl[ctx].dma,
1151                                             priv->RcvCtl[ctx].len,
1152                                             PCI_DMA_FROMDEVICE);
1153
1154                 memcpy(skb_put(skb, len), old_skb->data, len);
1155
1156                 pci_dma_sync_single_for_device(mpt_dev->pcidev,
1157                                                priv->RcvCtl[ctx].dma,
1158                                                priv->RcvCtl[ctx].len,
1159                                                PCI_DMA_FROMDEVICE);
1160
1161                 spin_lock_irqsave(&priv->rxfidx_lock, flags);
1162                 priv->mpt_rxfidx[++priv->mpt_rxfidx_tail] = ctx;
1163                 spin_unlock_irqrestore(&priv->rxfidx_lock, flags);
1164
1165         } else {
1166                 spin_lock_irqsave(&priv->rxfidx_lock, flags);
1167
1168                 priv->RcvCtl[ctx].skb = NULL;
1169
1170                 pci_unmap_single(mpt_dev->pcidev, priv->RcvCtl[ctx].dma,
1171                                  priv->RcvCtl[ctx].len, PCI_DMA_FROMDEVICE);
1172                 priv->RcvCtl[ctx].dma = 0;
1173
1174                 priv->mpt_rxfidx[++priv->mpt_rxfidx_tail] = ctx;
1175                 spin_unlock_irqrestore(&priv->rxfidx_lock, flags);
1176
1177                 skb_put(skb,len);
1178         }
1179
1180         atomic_sub(count, &priv->buckets_out);
1181         priv->total_received += count;
1182
1183         if (priv->mpt_rxfidx_tail >= MPT_LAN_MAX_BUCKETS_OUT) {
1184                 printk (KERN_ERR MYNAM ": %s/%s: Yoohoo! mpt_rxfidx_tail = %d, "
1185                         "MPT_LAN_MAX_BUCKETS_OUT = %d\n",
1186                                 IOC_AND_NETDEV_NAMES_s_s(dev),
1187                                 priv->mpt_rxfidx_tail,
1188                                 MPT_LAN_MAX_BUCKETS_OUT);
1189
1190                 panic("Damn it Jim! I'm a doctor, not a programmer! "
1191                                 "Oh, wait a sec, I am a programmer. "
1192                                 "And, who's Jim?!?!\n"
1193                                 "Arrgghh! We've done it again!\n");
1194         }
1195
1196         if (remaining == 0)
1197                 printk (KERN_WARNING MYNAM ": %s/%s: WARNING - IOC out of buckets! "
1198                         "(priv->buckets_out = %d)\n",
1199                         IOC_AND_NETDEV_NAMES_s_s(dev),
1200                         atomic_read(&priv->buckets_out));
1201         else if (remaining < 10)
1202                 printk (KERN_INFO MYNAM ": %s/%s: IOC says %d buckets left. "
1203                         "(priv->buckets_out = %d)\n",
1204                         IOC_AND_NETDEV_NAMES_s_s(dev),
1205                         remaining, atomic_read(&priv->buckets_out));
1206         
1207         if ((remaining < priv->bucketthresh) &&
1208             ((atomic_read(&priv->buckets_out) - remaining) >
1209              MPT_LAN_BUCKETS_REMAIN_MISMATCH_THRESH)) {
1210                 
1211                 printk (KERN_WARNING MYNAM " Mismatch between driver's "
1212                         "buckets_out count and fw's BucketsRemaining "
1213                         "count has crossed the threshold, issuing a "
1214                         "LanReset to clear the fw's hashtable. You may "
1215                         "want to check your /var/log/messages for \"CRC "
1216                         "error\" event notifications.\n");
1217                 
1218                 mpt_lan_reset(dev);
1219                 mpt_lan_wake_post_buckets_task(dev, 0);
1220         }
1221         
1222         return mpt_lan_receive_skb(dev, skb);
1223 }
1224
1225 /*=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=*/
1226 /* Simple SGE's only at the moment */
1227
/*
 * mpt_lan_post_receive_buckets - replenish the IOC's pool of receive buckets.
 * @dev_id: net_device pointer (work-task context argument)
 *
 * Builds and sends as many LANReceivePost request frames as needed to bring
 * the number of outstanding buckets up to priv->max_buckets_out.  Each
 * bucket is a DMA-mapped skb of the full frame length, described in the
 * request by one 32-bit transaction context plus one 64-bit simple SGE.
 * Runs from the post_buckets_task work queue; clears post_buckets_active on
 * exit so mpt_lan_wake_post_buckets_task() can queue it again.
 */
static void
mpt_lan_post_receive_buckets(void *dev_id)
{
	struct net_device *dev = dev_id;
	struct mpt_lan_priv *priv = dev->priv;
	MPT_ADAPTER *mpt_dev = priv->mpt_dev;
	MPT_FRAME_HDR *mf;
	LANReceivePostRequest_t *pRecvReq;
	SGETransaction32_t *pTrans;
	SGESimple64_t *pSimple;
	struct sk_buff *skb;
	dma_addr_t dma;
	u32 curr, buckets, count, max;
	u32 len = (dev->mtu + dev->hard_header_len + 4);
	unsigned long flags;
	int i;

	curr = atomic_read(&priv->buckets_out);
	buckets = (priv->max_buckets_out - curr);

	dioprintk((KERN_INFO MYNAM ": %s/%s: @%s, Start_buckets = %u, buckets_out = %u\n",
			IOC_AND_NETDEV_NAMES_s_s(dev),
			__FUNCTION__, buckets, curr));

	/* Max buckets (context + SGE pairs) that fit in one request frame. */
	max = (mpt_dev->req_sz - MPT_LAN_RECEIVE_POST_REQUEST_SIZE) /
			(MPT_LAN_TRANSACTION32_SIZE + sizeof(SGESimple64_t));

	while (buckets) {
		mf = mpt_get_msg_frame(LanCtx, mpt_dev->id);
		if (mf == NULL) {
			printk (KERN_ERR "%s: Unable to alloc request frame\n",
				__FUNCTION__);
			dioprintk((KERN_ERR "%s: %u buckets remaining\n",
				 __FUNCTION__, buckets));
			goto out;
		}
		pRecvReq = (LANReceivePostRequest_t *) mf;

		count = buckets;
		if (count > max)
			count = max;

		pRecvReq->Function    = MPI_FUNCTION_LAN_RECEIVE;
		pRecvReq->ChainOffset = 0;
		pRecvReq->MsgFlags    = 0;
		pRecvReq->PortNumber  = priv->pnum;

		pTrans = (SGETransaction32_t *) pRecvReq->SG_List;
		pSimple = NULL;

		for (i = 0; i < count; i++) {
			int ctx;

			spin_lock_irqsave(&priv->rxfidx_lock, flags);
			if (priv->mpt_rxfidx_tail < 0) {
				/* No free context indices left. */
				printk (KERN_ERR "%s: Can't alloc context\n",
					__FUNCTION__);
				spin_unlock_irqrestore(&priv->rxfidx_lock,
						       flags);
				break;
			}

			ctx = priv->mpt_rxfidx[priv->mpt_rxfidx_tail--];

			/* A cached skb of the wrong size (e.g. after an MTU
			 * change) can't be reused: unmap and free it so a
			 * correctly sized one is allocated below. */
			skb = priv->RcvCtl[ctx].skb;
			if (skb && (priv->RcvCtl[ctx].len != len)) {
				pci_unmap_single(mpt_dev->pcidev,
						 priv->RcvCtl[ctx].dma,
						 priv->RcvCtl[ctx].len,
						 PCI_DMA_FROMDEVICE);
				dev_kfree_skb(priv->RcvCtl[ctx].skb);
				skb = priv->RcvCtl[ctx].skb = NULL;
			}

			if (skb == NULL) {
				skb = dev_alloc_skb(len);
				if (skb == NULL) {
					printk (KERN_WARNING
						MYNAM "/%s: Can't alloc skb\n",
						__FUNCTION__);
					/* Put the context back and stop. */
					priv->mpt_rxfidx[++priv->mpt_rxfidx_tail] = ctx;
					spin_unlock_irqrestore(&priv->rxfidx_lock, flags);
					break;
				}

				dma = pci_map_single(mpt_dev->pcidev, skb->data,
						     len, PCI_DMA_FROMDEVICE);

				priv->RcvCtl[ctx].skb = skb;
				priv->RcvCtl[ctx].dma = dma;
				priv->RcvCtl[ctx].len = len;
			}

			spin_unlock_irqrestore(&priv->rxfidx_lock, flags);

			/* One 32-bit context + one 64-bit SGE per bucket. */
			pTrans->ContextSize   = sizeof(u32);
			pTrans->DetailsLength = 0;
			pTrans->Flags         = 0;
			pTrans->TransactionContext[0] = cpu_to_le32(ctx);

			pSimple = (SGESimple64_t *) pTrans->TransactionDetails;

			pSimple->FlagsLength = cpu_to_le32(
				((MPI_SGE_FLAGS_END_OF_BUFFER |
				  MPI_SGE_FLAGS_SIMPLE_ELEMENT |
				  MPI_SGE_FLAGS_64_BIT_ADDRESSING) << MPI_SGE_FLAGS_SHIFT) | len);
			pSimple->Address.Low = cpu_to_le32((u32) priv->RcvCtl[ctx].dma);
			if (sizeof(dma_addr_t) > sizeof(u32))
				pSimple->Address.High = cpu_to_le32((u32) ((u64) priv->RcvCtl[ctx].dma >> 32));
			else
				pSimple->Address.High = 0;

			pTrans = (SGETransaction32_t *) (pSimple + 1);
		}

		if (pSimple == NULL) {
/**/			printk (KERN_WARNING MYNAM "/%s: No buckets posted\n",
/**/				__FUNCTION__);
			mpt_free_msg_frame(LanCtx, mpt_dev->id, mf);
			goto out;
		}

		/* Only the final SGE in the frame carries END_OF_LIST. */
		pSimple->FlagsLength |= cpu_to_le32(MPI_SGE_FLAGS_END_OF_LIST << MPI_SGE_FLAGS_SHIFT);

		pRecvReq->BucketCount = cpu_to_le32(i);

		mpt_put_msg_frame(LanCtx, mpt_dev->id, mf);

		priv->total_posted += i;
		buckets -= i;
		atomic_add(i, &priv->buckets_out);
	}

out:
	dioprintk((KERN_INFO MYNAM "/%s: End_buckets = %u, priv->buckets_out = %u\n",
		  __FUNCTION__, buckets, atomic_read(&priv->buckets_out)));
	dioprintk((KERN_INFO MYNAM "/%s: Posted %u buckets and received %u back\n",
	__FUNCTION__, priv->total_posted, priv->total_received));

	/* Allow the wake task to queue us again. */
	clear_bit(0, &priv->post_buckets_active);
}
1375
1376 /*=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=*/
/*
 * mpt_register_lan_device - allocate and register one FC LAN net_device.
 * @mpt_dev: MPT adapter the port belongs to
 * @pnum: port number on that adapter
 *
 * Allocates an FC net_device, initializes the driver-private state (bucket
 * accounting, locks, post_buckets work task), derives the hardware address
 * from the prefetched LANPage1 data, wires up the net_device method
 * pointers, and registers the device.
 *
 * Returns the registered net_device, or NULL on allocation/registration
 * failure (the device is freed on registration failure).
 */
static struct net_device *
mpt_register_lan_device (MPT_ADAPTER *mpt_dev, int pnum)
{
	struct net_device *dev = alloc_fcdev(sizeof(struct mpt_lan_priv));
	struct mpt_lan_priv *priv = NULL;
	u8 HWaddr[FC_ALEN], *a;

	if (!dev)
		return NULL;

	dev->mtu = MPT_LAN_MTU;

	priv = netdev_priv(dev);

	priv->mpt_dev = mpt_dev;
	priv->pnum = pnum;

	memset(&priv->post_buckets_task, 0, sizeof(struct mpt_work_struct));
	MPT_INIT_WORK(&priv->post_buckets_task, mpt_lan_post_receive_buckets, dev);
	priv->post_buckets_active = 0;

	dlprintk((KERN_INFO MYNAM "@%d: bucketlen = %d\n",
			__LINE__, dev->mtu + dev->hard_header_len + 4));

	atomic_set(&priv->buckets_out, 0);
	priv->total_posted = 0;
	priv->total_received = 0;
	/* Cap the module parameter at what the firmware supports. */
	priv->max_buckets_out = max_buckets_out;
	if (mpt_dev->pfacts[0].MaxLanBuckets < max_buckets_out)
		priv->max_buckets_out = mpt_dev->pfacts[0].MaxLanBuckets;

	dlprintk((KERN_INFO MYNAM "@%d: MaxLanBuckets=%d, max_buckets_out/priv=%d/%d\n",
			__LINE__,
			mpt_dev->pfacts[0].MaxLanBuckets,
			max_buckets_out,
			priv->max_buckets_out));

	/* Repost buckets once fewer than 2/3 of the maximum are out. */
	priv->bucketthresh = priv->max_buckets_out * 2 / 3;
	priv->txfidx_lock = SPIN_LOCK_UNLOCKED;
	priv->rxfidx_lock = SPIN_LOCK_UNLOCKED;

	memset(&priv->stats, 0, sizeof(priv->stats));

	/*  Grab pre-fetched LANPage1 stuff. :-) */
	a = (u8 *) &mpt_dev->lan_cnfg_page1.HardwareAddressLow;

	/* Byte-reverse into canonical order (the config page appears to
	 * store the address little-endian — confirm against LANPage1). */
	HWaddr[0] = a[5];
	HWaddr[1] = a[4];
	HWaddr[2] = a[3];
	HWaddr[3] = a[2];
	HWaddr[4] = a[1];
	HWaddr[5] = a[0];

	dev->addr_len = FC_ALEN;
	memcpy(dev->dev_addr, HWaddr, FC_ALEN);
	memset(dev->broadcast, 0xff, FC_ALEN);

	/* The Tx queue is 127 deep on the 909.
	 * Give ourselves some breathing room.
	 */
	priv->tx_max_out = (tx_max_out_p <= MPT_TX_MAX_OUT_LIM) ?
			    tx_max_out_p : MPT_TX_MAX_OUT_LIM;

	dev->open = mpt_lan_open;
	dev->stop = mpt_lan_close;
	dev->get_stats = mpt_lan_get_stats;
	dev->set_multicast_list = NULL;
	dev->change_mtu = mpt_lan_change_mtu;
	dev->hard_start_xmit = mpt_lan_sdu_send;

/* Not in 2.3.42. Need 2.3.45+ */
	dev->tx_timeout = mpt_lan_tx_timeout;
	dev->watchdog_timeo = MPT_LAN_TX_TIMEOUT;

	dlprintk((KERN_INFO MYNAM ": Finished registering dev "
		"and setting initial values\n"));

	SET_MODULE_OWNER(dev);

	if (register_netdev(dev) != 0) {
		free_netdev(dev);
		dev = NULL;
	}
	return dev;
}
1462
1463 /*=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=*/
1464 static int __init mpt_lan_init (void)
1465 {
1466         struct net_device *dev;
1467         MPT_ADAPTER *p;
1468         int i, j;
1469
1470         show_mptmod_ver(LANAME, LANVER);
1471
1472 #ifdef QLOGIC_NAA_WORKAROUND
1473         /* Init the global r/w lock for the bad_naa list. We want to do this
1474            before any boards are initialized and may be used. */
1475         rwlock_init(&bad_naa_lock);
1476 #endif
1477
1478         if ((LanCtx = mpt_register(lan_reply, MPTLAN_DRIVER)) <= 0) {
1479                 printk (KERN_ERR MYNAM ": Failed to register with MPT base driver\n");
1480                 return -EBUSY;
1481         }
1482
1483         /* Set the callback index to be used by driver core for turbo replies */
1484         mpt_lan_index = LanCtx;
1485
1486         dlprintk((KERN_INFO MYNAM ": assigned context of %d\n", LanCtx));
1487
1488         if (mpt_reset_register(LanCtx, mpt_lan_ioc_reset) == 0) {
1489                 dlprintk((KERN_INFO MYNAM ": Registered for IOC reset notifications\n"));
1490         } else {
1491                 printk(KERN_ERR MYNAM ": Eieee! unable to register a reset "
1492                        "handler with mptbase! The world is at an end! "
1493                        "Everything is fading to black! Goodbye.\n");
1494                 return -EBUSY;
1495         }
1496
1497         for (j = 0; j < MPT_MAX_ADAPTERS; j++) {
1498                 mpt_landev[j] = NULL;
1499         }
1500
1501         for (p = mpt_adapter_find_first(); p; p = mpt_adapter_find_next(p)) {
1502                 for (i = 0; i < p->facts.NumberOfPorts; i++) {
1503                         printk (KERN_INFO MYNAM ": %s: PortNum=%x, ProtocolFlags=%02Xh (%c%c%c%c)\n",
1504                                         p->name,
1505                                         p->pfacts[i].PortNumber,
1506                                         p->pfacts[i].ProtocolFlags,
1507                                         MPT_PROTOCOL_FLAGS_c_c_c_c(p->pfacts[i].ProtocolFlags));
1508
1509                         if (!(p->pfacts[i].ProtocolFlags & MPI_PORTFACTS_PROTOCOL_LAN)) {
1510                                 printk (KERN_INFO MYNAM ": %s: Hmmm... LAN protocol seems to be disabled on this adapter port!\n",
1511                                                 p->name);
1512                                 continue;
1513                         }
1514
1515                         dev = mpt_register_lan_device (p, i);
1516                         if (!dev) {
1517                                 printk (KERN_ERR MYNAM ": %s: Unable to register port%d as a LAN device\n",
1518                                                 p->name,
1519                                                 p->pfacts[i].PortNumber);
1520                         }
1521                         printk (KERN_INFO MYNAM ": %s: Fusion MPT LAN device registered as '%s'\n",
1522                                         p->name, dev->name);
1523                         printk (KERN_INFO MYNAM ": %s/%s: LanAddr = %02X:%02X:%02X:%02X:%02X:%02X\n",
1524                                         IOC_AND_NETDEV_NAMES_s_s(dev),
1525                                         dev->dev_addr[0], dev->dev_addr[1],
1526                                         dev->dev_addr[2], dev->dev_addr[3],
1527                                         dev->dev_addr[4], dev->dev_addr[5]);
1528 //                                      printk (KERN_INFO MYNAM ": %s/%s: Max_TX_outstanding = %d\n",
1529 //                                                      IOC_AND_NETDEV_NAMES_s_s(dev),
1530 //                                                      NETDEV_TO_LANPRIV_PTR(dev)->tx_max_out);
1531                         j = p->id;
1532                         mpt_landev[j] = dev;
1533                         dlprintk((KERN_INFO MYNAM "/init: dev_addr=%p, mpt_landev[%d]=%p\n",
1534                                         dev, j,  mpt_landev[j]));
1535
1536                 }
1537         }
1538
1539         return 0;
1540 }
1541
1542 /*=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=*/
1543 static void __exit mpt_lan_exit(void)
1544 {
1545         int i;
1546
1547         mpt_reset_deregister(LanCtx);
1548
1549         for (i = 0; mpt_landev[i] != NULL; i++) {
1550                 struct net_device *dev = mpt_landev[i];
1551
1552                 printk (KERN_INFO ": %s/%s: Fusion MPT LAN device unregistered\n",
1553                                IOC_AND_NETDEV_NAMES_s_s(dev));
1554                 unregister_netdev(dev);
1555                 free_netdev(dev);
1556                 mpt_landev[i] = NULL;
1557         }
1558
1559         if (LanCtx >= 0) {
1560                 mpt_deregister(LanCtx);
1561                 LanCtx = -1;
1562                 mpt_lan_index = 0;
1563         }
1564
1565         /* deregister any send/receive handler structs. I2Oism? */
1566 }
1567
1568 /*=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=*/
/* Module parameters — only on pre-2.5.59 kernels, where MODULE_PARM was
 * still the declaration mechanism.  tx_max_out_p presumably bounds
 * outstanding transmit requests (see the commented-out tx_max_out
 * printout in mpt_lan_init) — TODO confirm against mpt_register_lan_device. */
#if LINUX_VERSION_CODE < KERNEL_VERSION(2,5,59)
MODULE_PARM(tx_max_out_p, "i");
MODULE_PARM(max_buckets_out, "i"); // Debug stuff. FIXME!
#endif

/* Standard module entry/exit hooks. */
module_init(mpt_lan_init);
module_exit(mpt_lan_exit);
1576
1577 /*=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=*/
1578 static unsigned short
1579 mpt_lan_type_trans(struct sk_buff *skb, struct net_device *dev)
1580 {
1581         struct mpt_lan_ohdr *fch = (struct mpt_lan_ohdr *)skb->data;
1582         struct fcllc *fcllc;
1583
1584         skb->mac.raw = skb->data;
1585         skb_pull(skb, sizeof(struct mpt_lan_ohdr));
1586
1587         if (fch->dtype == htons(0xffff)) {
1588                 u32 *p = (u32 *) fch;
1589
1590                 swab32s(p + 0);
1591                 swab32s(p + 1);
1592                 swab32s(p + 2);
1593                 swab32s(p + 3);
1594
1595                 printk (KERN_WARNING MYNAM ": %s: WARNING - Broadcast swap F/W bug detected!\n",
1596                                 NETDEV_PTR_TO_IOC_NAME_s(dev));
1597                 printk (KERN_WARNING MYNAM ": Please update sender @ MAC_addr = %02x:%02x:%02x:%02x:%02x:%02x\n",
1598                                 fch->saddr[0], fch->saddr[1], fch->saddr[2],
1599                                 fch->saddr[3], fch->saddr[4], fch->saddr[5]);
1600         }
1601
1602         if (*fch->daddr & 1) {
1603                 if (!memcmp(fch->daddr, dev->broadcast, FC_ALEN)) {
1604                         skb->pkt_type = PACKET_BROADCAST;
1605                 } else {
1606                         skb->pkt_type = PACKET_MULTICAST;
1607                 }
1608         } else {
1609                 if (memcmp(fch->daddr, dev->dev_addr, FC_ALEN)) {
1610                         skb->pkt_type = PACKET_OTHERHOST;
1611                 } else {
1612                         skb->pkt_type = PACKET_HOST;
1613                 }
1614         }
1615
1616         fcllc = (struct fcllc *)skb->data;
1617
1618 #ifdef QLOGIC_NAA_WORKAROUND
1619 {
1620         u16 source_naa = fch->stype, found = 0;
1621
1622         /* Workaround for QLogic not following RFC 2625 in regards to the NAA
1623            value. */
1624
1625         if ((source_naa & 0xF000) == 0)
1626                 source_naa = swab16(source_naa);
1627
1628         if (fcllc->ethertype == htons(ETH_P_ARP))
1629             dlprintk ((KERN_INFO "mptlan/type_trans: got arp req/rep w/ naa of "
1630                       "%04x.\n", source_naa));
1631
1632         if ((fcllc->ethertype == htons(ETH_P_ARP)) &&
1633            ((source_naa >> 12) !=  MPT_LAN_NAA_RFC2625)){
1634                 struct NAA_Hosed *nh, *prevnh;
1635                 int i;
1636
1637                 dlprintk ((KERN_INFO "mptlan/type_trans: ARP Req/Rep from "
1638                           "system with non-RFC 2625 NAA value (%04x).\n",
1639                           source_naa));
1640
1641                 write_lock_irq(&bad_naa_lock);
1642                 for (prevnh = nh = mpt_bad_naa; nh != NULL;
1643                      prevnh=nh, nh=nh->next) {
1644                         if ((nh->ieee[0] == fch->saddr[0]) &&
1645                             (nh->ieee[1] == fch->saddr[1]) &&
1646                             (nh->ieee[2] == fch->saddr[2]) &&
1647                             (nh->ieee[3] == fch->saddr[3]) &&
1648                             (nh->ieee[4] == fch->saddr[4]) &&
1649                             (nh->ieee[5] == fch->saddr[5])) {
1650                                 found = 1;
1651                                 dlprintk ((KERN_INFO "mptlan/type_trans: ARP Re"
1652                                          "q/Rep w/ bad NAA from system already"
1653                                          " in DB.\n"));
1654                                 break;
1655                         }
1656                 }
1657
1658                 if ((!found) && (nh == NULL)) {
1659
1660                         nh = kmalloc(sizeof(struct NAA_Hosed), GFP_KERNEL);
1661                         dlprintk ((KERN_INFO "mptlan/type_trans: ARP Req/Rep w/"
1662                                  " bad NAA from system not yet in DB.\n"));
1663
1664                         if (nh != NULL) {
1665                                 nh->next = NULL;
1666                                 if (!mpt_bad_naa)
1667                                         mpt_bad_naa = nh;
1668                                 if (prevnh)
1669                                         prevnh->next = nh;
1670
1671                                 nh->NAA = source_naa; /* Set the S_NAA value. */
1672                                 for (i = 0; i < FC_ALEN; i++)
1673                                         nh->ieee[i] = fch->saddr[i];
1674                                 dlprintk ((KERN_INFO "Got ARP from %02x:%02x:%02x:%02x:"
1675                                           "%02x:%02x with non-compliant S_NAA value.\n",
1676                                           fch->saddr[0], fch->saddr[1], fch->saddr[2],
1677                                           fch->saddr[3], fch->saddr[4],fch->saddr[5]));
1678                         } else {
1679                                 printk (KERN_ERR "mptlan/type_trans: Unable to"
1680                                         " kmalloc a NAA_Hosed struct.\n");
1681                         }
1682                 } else if (!found) {
1683                         printk (KERN_ERR "mptlan/type_trans: found not"
1684                                 " set, but nh isn't null. Evil "
1685                                 "funkiness abounds.\n");
1686                 }
1687                 write_unlock_irq(&bad_naa_lock);
1688         }
1689 }
1690 #endif
1691
1692         /* Strip the SNAP header from ARP packets since we don't
1693          * pass them through to the 802.2/SNAP layers.
1694          */
1695         if (fcllc->dsap == EXTENDED_SAP &&
1696                 (fcllc->ethertype == htons(ETH_P_IP) ||
1697                  fcllc->ethertype == htons(ETH_P_ARP))) {
1698                 skb_pull(skb, sizeof(struct fcllc));
1699                 return fcllc->ethertype;
1700         }
1701
1702         return htons(ETH_P_802_2);
1703 }
1704
1705 /*=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=*/