2 * linux/drivers/message/fusion/mptlan.c
3 * IP Over Fibre Channel device driver.
4 * For use with PCI chip/adapter(s):
5 * LSIFC9xx/LSI409xx Fibre Channel
6 * running LSI Logic Fusion MPT (Message Passing Technology) firmware.
9 * This driver would not exist if not for Alan Cox's development
10 * of the linux i2o driver.
12 * Special thanks goes to the I2O LAN driver people at the
13 * University of Helsinki, who, unbeknownst to them, provided
14 * the inspiration and initial structure for this driver.
16 * A huge debt of gratitude is owed to David S. Miller (DaveM)
17 * for fixing much of the stupid and broken stuff in the early
18 * driver while porting to sparc64 platform. THANK YOU!
20 * A really huge debt of gratitude is owed to Eddie C. Dost
21 * for gobs of hard work fixing and optimizing LAN code.
24 * (see also mptbase.c)
26 * Copyright (c) 2000-2004 LSI Logic Corporation
27 * Originally By: Noah Romer
28 * (mailto:mpt_linux_developer@lsil.com)
30 * $Id: mptlan.c,v 1.53 2002/10/17 20:15:58 pdelaney Exp $
32 /*=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=*/
34 This program is free software; you can redistribute it and/or modify
35 it under the terms of the GNU General Public License as published by
36 the Free Software Foundation; version 2 of the License.
38 This program is distributed in the hope that it will be useful,
39 but WITHOUT ANY WARRANTY; without even the implied warranty of
40 MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
41 GNU General Public License for more details.
44 THE PROGRAM IS PROVIDED ON AN "AS IS" BASIS, WITHOUT WARRANTIES OR
45 CONDITIONS OF ANY KIND, EITHER EXPRESS OR IMPLIED INCLUDING, WITHOUT
46 LIMITATION, ANY WARRANTIES OR CONDITIONS OF TITLE, NON-INFRINGEMENT,
47 MERCHANTABILITY OR FITNESS FOR A PARTICULAR PURPOSE. Each Recipient is
48 solely responsible for determining the appropriateness of using and
49 distributing the Program and assumes all risks associated with its
50 exercise of rights under this Agreement, including but not limited to
51 the risks and costs of program errors, damage to or loss of data,
52 programs or equipment, and unavailability or interruption of operations.
54 DISCLAIMER OF LIABILITY
55 NEITHER RECIPIENT NOR ANY CONTRIBUTORS SHALL HAVE ANY LIABILITY FOR ANY
56 DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
57 DAMAGES (INCLUDING WITHOUT LIMITATION LOST PROFITS), HOWEVER CAUSED AND
58 ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR
59 TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE
60 USE OR DISTRIBUTION OF THE PROGRAM OR THE EXERCISE OF ANY RIGHTS GRANTED
61 HEREUNDER, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGES
63 You should have received a copy of the GNU General Public License
64 along with this program; if not, write to the Free Software
65 Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
68 /*=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=*/
70 * Define statements used for debugging
72 //#define MPT_LAN_IO_DEBUG
74 /*=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=*/
77 #include <linux/init.h>
78 #include <linux/module.h>
81 #define MYNAM "mptlan"
83 MODULE_LICENSE("GPL");
85 /*=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=*/
87 * MPT LAN message sizes without variable part.
89 #define MPT_LAN_RECEIVE_POST_REQUEST_SIZE \
90 (sizeof(LANReceivePostRequest_t) - sizeof(SGE_MPI_UNION))
92 #define MPT_LAN_TRANSACTION32_SIZE \
93 (sizeof(SGETransaction32_t) - sizeof(u32))
96 * Fusion MPT LAN private structures
102 struct NAA_Hosed *next;
105 struct BufferControl {
111 struct mpt_lan_priv {
112 MPT_ADAPTER *mpt_dev;
113 u8 pnum; /* Port number in the IOC. This is not a Unix network port! */
115 atomic_t buckets_out; /* number of unused buckets on IOC */
116 int bucketthresh; /* Send more when this many left */
118 int *mpt_txfidx; /* Free Tx Context list */
120 spinlock_t txfidx_lock;
122 int *mpt_rxfidx; /* Free Rx Context list */
124 spinlock_t rxfidx_lock;
126 struct BufferControl *RcvCtl; /* Receive BufferControl structs */
127 struct BufferControl *SendCtl; /* Send BufferControl structs */
129 int max_buckets_out; /* Max buckets to send to IOC */
130 int tx_max_out; /* IOC's Tx queue len */
134 struct net_device_stats stats; /* Per device statistics */
136 struct work_struct post_buckets_task;
137 unsigned long post_buckets_active;
140 struct mpt_lan_ohdr {
147 /*=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=*/
152 static int lan_reply (MPT_ADAPTER *ioc, MPT_FRAME_HDR *mf,
153 MPT_FRAME_HDR *reply);
154 static int mpt_lan_open(struct net_device *dev);
155 static int mpt_lan_reset(struct net_device *dev);
156 static int mpt_lan_close(struct net_device *dev);
157 static void mpt_lan_post_receive_buckets(void *dev_id);
158 static void mpt_lan_wake_post_buckets_task(struct net_device *dev,
160 static int mpt_lan_receive_post_turbo(struct net_device *dev, u32 tmsg);
161 static int mpt_lan_receive_post_reply(struct net_device *dev,
162 LANReceivePostReply_t *pRecvRep);
163 static int mpt_lan_send_turbo(struct net_device *dev, u32 tmsg);
164 static int mpt_lan_send_reply(struct net_device *dev,
165 LANSendReply_t *pSendRep);
166 static int mpt_lan_ioc_reset(MPT_ADAPTER *ioc, int reset_phase);
167 static int mpt_lan_event_process(MPT_ADAPTER *ioc, EventNotificationReply_t *pEvReply);
168 static unsigned short mpt_lan_type_trans(struct sk_buff *skb,
169 struct net_device *dev);
171 /*=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=*/
173 * Fusion MPT LAN private data
175 static int LanCtx = -1;
177 static u32 max_buckets_out = 127;
178 static u32 tx_max_out_p = 127 - 16;
180 static struct net_device *mpt_landev[MPT_MAX_ADAPTERS+1];
182 #ifdef QLOGIC_NAA_WORKAROUND
183 static struct NAA_Hosed *mpt_bad_naa = NULL;
184 rwlock_t bad_naa_lock;
187 /*=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=*/
189 * Fusion MPT LAN external data
191 extern int mpt_lan_index;
193 /*=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=*/
195 * lan_reply - Handle all data sent from the hardware.
196 * @ioc: Pointer to MPT_ADAPTER structure
197 * @mf: Pointer to original MPT request frame (NULL if TurboReply)
198 * @reply: Pointer to MPT reply frame
200 * Returns 1 indicating original alloc'd request frame ptr
201 * should be freed, or 0 if it shouldn't.
204 lan_reply (MPT_ADAPTER *ioc, MPT_FRAME_HDR *mf, MPT_FRAME_HDR *reply)
206 struct net_device *dev = mpt_landev[ioc->id];
207 int FreeReqFrame = 0;
209 dioprintk((KERN_INFO MYNAM ": %s/%s: Got reply.\n",
210 IOC_AND_NETDEV_NAMES_s_s(dev)));
212 // dioprintk((KERN_INFO MYNAM "@lan_reply: mf = %p, reply = %p\n",
216 u32 tmsg = CAST_PTR_TO_U32(reply);
218 dioprintk((KERN_INFO MYNAM ": %s/%s: @lan_reply, tmsg %08x\n",
219 IOC_AND_NETDEV_NAMES_s_s(dev),
222 switch (GET_LAN_FORM(tmsg)) {
224 // NOTE! (Optimization) First case here is now caught in
225 // mptbase.c::mpt_interrupt() routine and callcack here
226 // is now skipped for this case! 20001218 -sralston
228 case LAN_REPLY_FORM_MESSAGE_CONTEXT:
229 // dioprintk((KERN_INFO MYNAM "/lan_reply: "
230 // "MessageContext turbo reply received\n"));
235 case LAN_REPLY_FORM_SEND_SINGLE:
236 // dioprintk((MYNAM "/lan_reply: "
237 // "calling mpt_lan_send_reply (turbo)\n"));
239 // Potential BUG here? -sralston
240 // FreeReqFrame = mpt_lan_send_turbo(dev, tmsg);
241 // If/when mpt_lan_send_turbo would return 1 here,
242 // calling routine (mptbase.c|mpt_interrupt)
243 // would Oops because mf has already been set
244 // to NULL. So after return from this func,
245 // mpt_interrupt() will attempt to put (NULL) mf ptr
246 // item back onto its adapter FreeQ - Oops!:-(
247 // It's Ok, since mpt_lan_send_turbo() *currently*
248 // always returns 0, but..., just in case:
250 (void) mpt_lan_send_turbo(dev, tmsg);
255 case LAN_REPLY_FORM_RECEIVE_SINGLE:
256 // dioprintk((KERN_INFO MYNAM "@lan_reply: "
257 // "rcv-Turbo = %08x\n", tmsg));
258 mpt_lan_receive_post_turbo(dev, tmsg);
262 printk (KERN_ERR MYNAM "/lan_reply: Got a turbo reply "
263 "that I don't know what to do with\n");
265 /* CHECKME! Hmmm... FreeReqFrame is 0 here; is that right? */
273 // msg = (u32 *) reply;
274 // dioprintk((KERN_INFO MYNAM "@lan_reply: msg = %08x %08x %08x %08x\n",
275 // le32_to_cpu(msg[0]), le32_to_cpu(msg[1]),
276 // le32_to_cpu(msg[2]), le32_to_cpu(msg[3])));
277 // dioprintk((KERN_INFO MYNAM "@lan_reply: Function = %02xh\n",
278 // reply->u.hdr.Function));
280 switch (reply->u.hdr.Function) {
282 case MPI_FUNCTION_LAN_SEND:
284 LANSendReply_t *pSendRep;
286 pSendRep = (LANSendReply_t *) reply;
287 FreeReqFrame = mpt_lan_send_reply(dev, pSendRep);
291 case MPI_FUNCTION_LAN_RECEIVE:
293 LANReceivePostReply_t *pRecvRep;
295 pRecvRep = (LANReceivePostReply_t *) reply;
296 if (pRecvRep->NumberOfContexts) {
297 mpt_lan_receive_post_reply(dev, pRecvRep);
298 if (!(pRecvRep->MsgFlags & MPI_MSGFLAGS_CONTINUATION_REPLY))
301 dioprintk((KERN_INFO MYNAM "@lan_reply: zero context "
302 "ReceivePostReply received.\n"));
306 case MPI_FUNCTION_LAN_RESET:
307 /* Just a default reply. Might want to check it to
308 * make sure that everything went ok.
313 case MPI_FUNCTION_EVENT_NOTIFICATION:
314 case MPI_FUNCTION_EVENT_ACK:
315 /* UPDATE! 20010120 -sralston
316 * _EVENT_NOTIFICATION should NOT come down this path any more.
317 * Should be routed to mpt_lan_event_process(), but just in case...
323 printk (KERN_ERR MYNAM "/lan_reply: Got a non-turbo "
324 "reply that I don't know what to do with\n");
326 /* CHECKME! Hmmm... FreeReqFrame is 0 here; is that right? */
335 /*=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=*/
337 mpt_lan_ioc_reset(MPT_ADAPTER *ioc, int reset_phase)
339 struct net_device *dev = mpt_landev[ioc->id];
340 struct mpt_lan_priv *priv = netdev_priv(dev);
342 dlprintk((KERN_INFO MYNAM ": IOC %s_reset routed to LAN driver!\n",
343 reset_phase==MPT_IOC_SETUP_RESET ? "setup" : (
344 reset_phase==MPT_IOC_PRE_RESET ? "pre" : "post")));
346 if (priv->mpt_rxfidx == NULL)
349 if (reset_phase == MPT_IOC_SETUP_RESET) {
351 } else if (reset_phase == MPT_IOC_PRE_RESET) {
355 netif_stop_queue(dev);
357 dlprintk ((KERN_INFO "mptlan/ioc_reset: called netif_stop_queue for %s.\n", dev->name));
359 atomic_set(&priv->buckets_out, 0);
361 /* Reset Rx Free Tail index and re-populate the queue. */
362 spin_lock_irqsave(&priv->rxfidx_lock, flags);
363 priv->mpt_rxfidx_tail = -1;
364 for (i = 0; i < priv->max_buckets_out; i++)
365 priv->mpt_rxfidx[++priv->mpt_rxfidx_tail] = i;
366 spin_unlock_irqrestore(&priv->rxfidx_lock, flags);
368 mpt_lan_post_receive_buckets(dev);
369 netif_wake_queue(dev);
375 /*=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=*/
377 mpt_lan_event_process(MPT_ADAPTER *ioc, EventNotificationReply_t *pEvReply)
379 dlprintk((KERN_INFO MYNAM ": MPT event routed to LAN driver!\n"));
381 switch (le32_to_cpu(pEvReply->Event)) {
382 case MPI_EVENT_NONE: /* 00 */
383 case MPI_EVENT_LOG_DATA: /* 01 */
384 case MPI_EVENT_STATE_CHANGE: /* 02 */
385 case MPI_EVENT_UNIT_ATTENTION: /* 03 */
386 case MPI_EVENT_IOC_BUS_RESET: /* 04 */
387 case MPI_EVENT_EXT_BUS_RESET: /* 05 */
388 case MPI_EVENT_RESCAN: /* 06 */
389 /* Ok, do we need to do anything here? As far as
390 I can tell, this is when a new device gets added
392 case MPI_EVENT_LINK_STATUS_CHANGE: /* 07 */
393 case MPI_EVENT_LOOP_STATE_CHANGE: /* 08 */
394 case MPI_EVENT_LOGOUT: /* 09 */
395 case MPI_EVENT_EVENT_CHANGE: /* 0A */
401 * NOTE: pEvent->AckRequired handling now done in mptbase.c;
402 * Do NOT do it here now!
408 /*=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=*/
410 mpt_lan_open(struct net_device *dev)
412 struct mpt_lan_priv *priv = netdev_priv(dev);
415 if (mpt_lan_reset(dev) != 0) {
416 MPT_ADAPTER *mpt_dev = priv->mpt_dev;
418 printk (KERN_WARNING MYNAM "/lan_open: lan_reset failed.");
421 printk ("The ioc is active. Perhaps it needs to be"
424 printk ("The ioc in inactive, most likely in the "
425 "process of being reset. Please try again in "
429 priv->mpt_txfidx = kmalloc(priv->tx_max_out * sizeof(int), GFP_KERNEL);
430 if (priv->mpt_txfidx == NULL)
432 priv->mpt_txfidx_tail = -1;
434 priv->SendCtl = kmalloc(priv->tx_max_out * sizeof(struct BufferControl),
436 if (priv->SendCtl == NULL)
438 for (i = 0; i < priv->tx_max_out; i++) {
439 memset(&priv->SendCtl[i], 0, sizeof(struct BufferControl));
440 priv->mpt_txfidx[++priv->mpt_txfidx_tail] = i;
443 dlprintk((KERN_INFO MYNAM "@lo: Finished initializing SendCtl\n"));
445 priv->mpt_rxfidx = kmalloc(priv->max_buckets_out * sizeof(int),
447 if (priv->mpt_rxfidx == NULL)
449 priv->mpt_rxfidx_tail = -1;
451 priv->RcvCtl = kmalloc(priv->max_buckets_out *
452 sizeof(struct BufferControl),
454 if (priv->RcvCtl == NULL)
456 for (i = 0; i < priv->max_buckets_out; i++) {
457 memset(&priv->RcvCtl[i], 0, sizeof(struct BufferControl));
458 priv->mpt_rxfidx[++priv->mpt_rxfidx_tail] = i;
461 /**/ dlprintk((KERN_INFO MYNAM "/lo: txfidx contains - "));
462 /**/ for (i = 0; i < priv->tx_max_out; i++)
463 /**/ dlprintk((" %xh", priv->mpt_txfidx[i]));
464 /**/ dlprintk(("\n"));
466 dlprintk((KERN_INFO MYNAM "/lo: Finished initializing RcvCtl\n"));
468 mpt_lan_post_receive_buckets(dev);
469 printk(KERN_INFO MYNAM ": %s/%s: interface up & active\n",
470 IOC_AND_NETDEV_NAMES_s_s(dev));
472 if (mpt_event_register(LanCtx, mpt_lan_event_process) != 0) {
473 printk (KERN_WARNING MYNAM "/lo: Unable to register for Event"
474 " Notifications. This is a bad thing! We're not going "
475 "to go ahead, but I'd be leery of system stability at "
479 netif_start_queue(dev);
480 dlprintk((KERN_INFO MYNAM "/lo: Done.\n"));
484 kfree(priv->mpt_rxfidx);
485 priv->mpt_rxfidx = NULL;
487 kfree(priv->SendCtl);
488 priv->SendCtl = NULL;
490 kfree(priv->mpt_txfidx);
491 priv->mpt_txfidx = NULL;
495 /*=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=*/
496 /* Send a LanReset message to the FW. This should result in the FW returning
497 any buckets it still has. */
499 mpt_lan_reset(struct net_device *dev)
502 LANResetRequest_t *pResetReq;
503 struct mpt_lan_priv *priv = netdev_priv(dev);
505 mf = mpt_get_msg_frame(LanCtx, priv->mpt_dev);
508 /* dlprintk((KERN_ERR MYNAM "/reset: Evil funkiness abounds! "
509 "Unable to allocate a request frame.\n"));
514 pResetReq = (LANResetRequest_t *) mf;
516 pResetReq->Function = MPI_FUNCTION_LAN_RESET;
517 pResetReq->ChainOffset = 0;
518 pResetReq->Reserved = 0;
519 pResetReq->PortNumber = priv->pnum;
520 pResetReq->MsgFlags = 0;
521 pResetReq->Reserved2 = 0;
523 mpt_put_msg_frame(LanCtx, priv->mpt_dev, mf);
528 /*=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=*/
530 mpt_lan_close(struct net_device *dev)
532 struct mpt_lan_priv *priv = netdev_priv(dev);
533 MPT_ADAPTER *mpt_dev = priv->mpt_dev;
534 unsigned int timeout;
537 dlprintk((KERN_INFO MYNAM ": mpt_lan_close called\n"));
539 mpt_event_deregister(LanCtx);
541 dlprintk((KERN_INFO MYNAM ":lan_close: Posted %d buckets "
542 "since driver was loaded, %d still out\n",
543 priv->total_posted,atomic_read(&priv->buckets_out)));
545 netif_stop_queue(dev);
550 while (atomic_read(&priv->buckets_out) && --timeout) {
551 set_current_state(TASK_INTERRUPTIBLE);
555 for (i = 0; i < priv->max_buckets_out; i++) {
556 if (priv->RcvCtl[i].skb != NULL) {
557 /**/ dlprintk((KERN_INFO MYNAM "/lan_close: bucket %05x "
558 /**/ "is still out\n", i));
559 pci_unmap_single(mpt_dev->pcidev, priv->RcvCtl[i].dma,
562 dev_kfree_skb(priv->RcvCtl[i].skb);
566 kfree (priv->RcvCtl);
567 kfree (priv->mpt_rxfidx);
569 for (i = 0; i < priv->tx_max_out; i++) {
570 if (priv->SendCtl[i].skb != NULL) {
571 pci_unmap_single(mpt_dev->pcidev, priv->SendCtl[i].dma,
572 priv->SendCtl[i].len,
574 dev_kfree_skb(priv->SendCtl[i].skb);
578 kfree(priv->SendCtl);
579 kfree(priv->mpt_txfidx);
581 atomic_set(&priv->buckets_out, 0);
583 printk(KERN_INFO MYNAM ": %s/%s: interface down & inactive\n",
584 IOC_AND_NETDEV_NAMES_s_s(dev));
589 /*=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=*/
590 static struct net_device_stats *
591 mpt_lan_get_stats(struct net_device *dev)
593 struct mpt_lan_priv *priv = netdev_priv(dev);
595 return (struct net_device_stats *) &priv->stats;
598 /*=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=*/
600 mpt_lan_change_mtu(struct net_device *dev, int new_mtu)
602 if ((new_mtu < MPT_LAN_MIN_MTU) || (new_mtu > MPT_LAN_MAX_MTU))
608 /*=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=*/
609 /* Tx timeout handler. */
611 mpt_lan_tx_timeout(struct net_device *dev)
613 struct mpt_lan_priv *priv = netdev_priv(dev);
614 MPT_ADAPTER *mpt_dev = priv->mpt_dev;
616 if (mpt_dev->active) {
617 dlprintk (("mptlan/tx_timeout: calling netif_wake_queue for %s.\n", dev->name));
618 netif_wake_queue(dev);
622 /*=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=*/
625 mpt_lan_send_turbo(struct net_device *dev, u32 tmsg)
627 struct mpt_lan_priv *priv = netdev_priv(dev);
628 MPT_ADAPTER *mpt_dev = priv->mpt_dev;
629 struct sk_buff *sent;
633 ctx = GET_LAN_BUFFER_CONTEXT(tmsg);
634 sent = priv->SendCtl[ctx].skb;
636 priv->stats.tx_packets++;
637 priv->stats.tx_bytes += sent->len;
639 dioprintk((KERN_INFO MYNAM ": %s/%s: @%s, skb %p sent.\n",
640 IOC_AND_NETDEV_NAMES_s_s(dev),
641 __FUNCTION__, sent));
643 priv->SendCtl[ctx].skb = NULL;
644 pci_unmap_single(mpt_dev->pcidev, priv->SendCtl[ctx].dma,
645 priv->SendCtl[ctx].len, PCI_DMA_TODEVICE);
646 dev_kfree_skb_irq(sent);
648 spin_lock_irqsave(&priv->txfidx_lock, flags);
649 priv->mpt_txfidx[++priv->mpt_txfidx_tail] = ctx;
650 spin_unlock_irqrestore(&priv->txfidx_lock, flags);
652 netif_wake_queue(dev);
656 /*=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=*/
658 mpt_lan_send_reply(struct net_device *dev, LANSendReply_t *pSendRep)
660 struct mpt_lan_priv *priv = netdev_priv(dev);
661 MPT_ADAPTER *mpt_dev = priv->mpt_dev;
662 struct sk_buff *sent;
664 int FreeReqFrame = 0;
669 count = pSendRep->NumberOfContexts;
671 dioprintk((KERN_INFO MYNAM ": send_reply: IOCStatus: %04x\n",
672 le16_to_cpu(pSendRep->IOCStatus)));
674 /* Add check for Loginfo Flag in IOCStatus */
676 switch (le16_to_cpu(pSendRep->IOCStatus) & MPI_IOCSTATUS_MASK) {
677 case MPI_IOCSTATUS_SUCCESS:
678 priv->stats.tx_packets += count;
681 case MPI_IOCSTATUS_LAN_CANCELED:
682 case MPI_IOCSTATUS_LAN_TRANSMIT_ABORTED:
685 case MPI_IOCSTATUS_INVALID_SGL:
686 priv->stats.tx_errors += count;
687 printk (KERN_ERR MYNAM ": %s/%s: ERROR - Invalid SGL sent to IOC!\n",
688 IOC_AND_NETDEV_NAMES_s_s(dev));
692 priv->stats.tx_errors += count;
696 pContext = &pSendRep->BufferContext;
698 spin_lock_irqsave(&priv->txfidx_lock, flags);
700 ctx = GET_LAN_BUFFER_CONTEXT(le32_to_cpu(*pContext));
702 sent = priv->SendCtl[ctx].skb;
703 priv->stats.tx_bytes += sent->len;
705 dioprintk((KERN_INFO MYNAM ": %s/%s: @%s, skb %p sent.\n",
706 IOC_AND_NETDEV_NAMES_s_s(dev),
707 __FUNCTION__, sent));
709 priv->SendCtl[ctx].skb = NULL;
710 pci_unmap_single(mpt_dev->pcidev, priv->SendCtl[ctx].dma,
711 priv->SendCtl[ctx].len, PCI_DMA_TODEVICE);
712 dev_kfree_skb_irq(sent);
714 priv->mpt_txfidx[++priv->mpt_txfidx_tail] = ctx;
719 spin_unlock_irqrestore(&priv->txfidx_lock, flags);
722 if (!(pSendRep->MsgFlags & MPI_MSGFLAGS_CONTINUATION_REPLY))
725 netif_wake_queue(dev);
729 /*=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=*/
731 mpt_lan_sdu_send (struct sk_buff *skb, struct net_device *dev)
733 struct mpt_lan_priv *priv = netdev_priv(dev);
734 MPT_ADAPTER *mpt_dev = priv->mpt_dev;
736 LANSendRequest_t *pSendReq;
737 SGETransaction32_t *pTrans;
738 SGESimple64_t *pSimple;
742 u16 cur_naa = 0x1000;
744 dioprintk((KERN_INFO MYNAM ": %s called, skb_addr = %p\n",
747 spin_lock_irqsave(&priv->txfidx_lock, flags);
748 if (priv->mpt_txfidx_tail < 0) {
749 netif_stop_queue(dev);
750 spin_unlock_irqrestore(&priv->txfidx_lock, flags);
752 printk (KERN_ERR "%s: no tx context available: %u\n",
753 __FUNCTION__, priv->mpt_txfidx_tail);
757 mf = mpt_get_msg_frame(LanCtx, mpt_dev);
759 netif_stop_queue(dev);
760 spin_unlock_irqrestore(&priv->txfidx_lock, flags);
762 printk (KERN_ERR "%s: Unable to alloc request frame\n",
767 ctx = priv->mpt_txfidx[priv->mpt_txfidx_tail--];
768 spin_unlock_irqrestore(&priv->txfidx_lock, flags);
770 // dioprintk((KERN_INFO MYNAM ": %s/%s: Creating new msg frame (send).\n",
771 // IOC_AND_NETDEV_NAMES_s_s(dev)));
773 pSendReq = (LANSendRequest_t *) mf;
775 /* Set the mac.raw pointer, since this apparently isn't getting
776 * done before we get the skb. Pull the data pointer past the mac data.
778 skb->mac.raw = skb->data;
781 dma = pci_map_single(mpt_dev->pcidev, skb->data, skb->len,
784 priv->SendCtl[ctx].skb = skb;
785 priv->SendCtl[ctx].dma = dma;
786 priv->SendCtl[ctx].len = skb->len;
789 pSendReq->Reserved = 0;
790 pSendReq->Function = MPI_FUNCTION_LAN_SEND;
791 pSendReq->ChainOffset = 0;
792 pSendReq->Reserved2 = 0;
793 pSendReq->MsgFlags = 0;
794 pSendReq->PortNumber = priv->pnum;
796 /* Transaction Context Element */
797 pTrans = (SGETransaction32_t *) pSendReq->SG_List;
799 /* No Flags, 8 bytes of Details, 32bit Context (bloody turbo replies) */
800 pTrans->ContextSize = sizeof(u32);
801 pTrans->DetailsLength = 2 * sizeof(u32);
803 pTrans->TransactionContext[0] = cpu_to_le32(ctx);
805 // dioprintk((KERN_INFO MYNAM ": %s/%s: BC = %08x, skb = %p, buff = %p\n",
806 // IOC_AND_NETDEV_NAMES_s_s(dev),
807 // ctx, skb, skb->data));
809 #ifdef QLOGIC_NAA_WORKAROUND
811 struct NAA_Hosed *nh;
813 /* Munge the NAA for Tx packets to QLogic boards, which don't follow
814 RFC 2625. The longer I look at this, the more my opinion of Qlogic
816 read_lock_irq(&bad_naa_lock);
817 for (nh = mpt_bad_naa; nh != NULL; nh=nh->next) {
818 if ((nh->ieee[0] == skb->mac.raw[0]) &&
819 (nh->ieee[1] == skb->mac.raw[1]) &&
820 (nh->ieee[2] == skb->mac.raw[2]) &&
821 (nh->ieee[3] == skb->mac.raw[3]) &&
822 (nh->ieee[4] == skb->mac.raw[4]) &&
823 (nh->ieee[5] == skb->mac.raw[5])) {
825 dlprintk ((KERN_INFO "mptlan/sdu_send: using NAA value "
826 "= %04x.\n", cur_naa));
830 read_unlock_irq(&bad_naa_lock);
834 pTrans->TransactionDetails[0] = cpu_to_le32((cur_naa << 16) |
835 (skb->mac.raw[0] << 8) |
836 (skb->mac.raw[1] << 0));
837 pTrans->TransactionDetails[1] = cpu_to_le32((skb->mac.raw[2] << 24) |
838 (skb->mac.raw[3] << 16) |
839 (skb->mac.raw[4] << 8) |
840 (skb->mac.raw[5] << 0));
842 pSimple = (SGESimple64_t *) &pTrans->TransactionDetails[2];
844 /* If we ever decide to send more than one Simple SGE per LANSend, then
845 we will need to make sure that LAST_ELEMENT only gets set on the
846 last one. Otherwise, bad voodoo and evil funkiness will commence. */
847 pSimple->FlagsLength = cpu_to_le32(
848 ((MPI_SGE_FLAGS_LAST_ELEMENT |
849 MPI_SGE_FLAGS_END_OF_BUFFER |
850 MPI_SGE_FLAGS_SIMPLE_ELEMENT |
851 MPI_SGE_FLAGS_SYSTEM_ADDRESS |
852 MPI_SGE_FLAGS_HOST_TO_IOC |
853 MPI_SGE_FLAGS_64_BIT_ADDRESSING |
854 MPI_SGE_FLAGS_END_OF_LIST) << MPI_SGE_FLAGS_SHIFT) |
856 pSimple->Address.Low = cpu_to_le32((u32) dma);
857 if (sizeof(dma_addr_t) > sizeof(u32))
858 pSimple->Address.High = cpu_to_le32((u32) ((u64) dma >> 32));
860 pSimple->Address.High = 0;
862 mpt_put_msg_frame (LanCtx, mpt_dev, mf);
863 dev->trans_start = jiffies;
865 dioprintk((KERN_INFO MYNAM ": %s/%s: Sending packet. FlagsLength = %08x.\n",
866 IOC_AND_NETDEV_NAMES_s_s(dev),
867 le32_to_cpu(pSimple->FlagsLength)));
872 /*=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=*/
874 mpt_lan_wake_post_buckets_task(struct net_device *dev, int priority)
876 * @priority: 0 = put it on the timer queue, 1 = put it on the immediate queue
879 struct mpt_lan_priv *priv = dev->priv;
881 if (test_and_set_bit(0, &priv->post_buckets_active) == 0) {
883 schedule_work(&priv->post_buckets_task);
885 schedule_delayed_work(&priv->post_buckets_task, 1);
886 dioprintk((KERN_INFO MYNAM ": post_buckets queued on "
889 dioprintk((KERN_INFO MYNAM ": %s/%s: Queued post_buckets task.\n",
890 IOC_AND_NETDEV_NAMES_s_s(dev) ));
894 /*=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=*/
896 mpt_lan_receive_skb(struct net_device *dev, struct sk_buff *skb)
898 struct mpt_lan_priv *priv = dev->priv;
900 skb->protocol = mpt_lan_type_trans(skb, dev);
902 dioprintk((KERN_INFO MYNAM ": %s/%s: Incoming packet (%d bytes) "
903 "delivered to upper level.\n",
904 IOC_AND_NETDEV_NAMES_s_s(dev), skb->len));
906 priv->stats.rx_bytes += skb->len;
907 priv->stats.rx_packets++;
912 dioprintk((MYNAM "/receive_skb: %d buckets remaining\n",
913 atomic_read(&priv->buckets_out)));
915 if (atomic_read(&priv->buckets_out) < priv->bucketthresh)
916 mpt_lan_wake_post_buckets_task(dev, 1);
918 dioprintk((KERN_INFO MYNAM "/receive_post_reply: %d buckets "
919 "remaining, %d received back since sod\n",
920 atomic_read(&priv->buckets_out), priv->total_received));
925 /*=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=*/
928 mpt_lan_receive_post_turbo(struct net_device *dev, u32 tmsg)
930 struct mpt_lan_priv *priv = dev->priv;
931 MPT_ADAPTER *mpt_dev = priv->mpt_dev;
932 struct sk_buff *skb, *old_skb;
936 ctx = GET_LAN_BUCKET_CONTEXT(tmsg);
937 skb = priv->RcvCtl[ctx].skb;
939 len = GET_LAN_PACKET_LENGTH(tmsg);
941 if (len < MPT_LAN_RX_COPYBREAK) {
944 skb = (struct sk_buff *)dev_alloc_skb(len);
946 printk (KERN_ERR MYNAM ": %s/%s: ERROR - Can't allocate skb! (%s@%d)\n",
947 IOC_AND_NETDEV_NAMES_s_s(dev),
952 pci_dma_sync_single_for_cpu(mpt_dev->pcidev, priv->RcvCtl[ctx].dma,
953 priv->RcvCtl[ctx].len, PCI_DMA_FROMDEVICE);
955 memcpy(skb_put(skb, len), old_skb->data, len);
957 pci_dma_sync_single_for_device(mpt_dev->pcidev, priv->RcvCtl[ctx].dma,
958 priv->RcvCtl[ctx].len, PCI_DMA_FROMDEVICE);
964 priv->RcvCtl[ctx].skb = NULL;
966 pci_unmap_single(mpt_dev->pcidev, priv->RcvCtl[ctx].dma,
967 priv->RcvCtl[ctx].len, PCI_DMA_FROMDEVICE);
970 spin_lock_irqsave(&priv->rxfidx_lock, flags);
971 priv->mpt_rxfidx[++priv->mpt_rxfidx_tail] = ctx;
972 spin_unlock_irqrestore(&priv->rxfidx_lock, flags);
974 atomic_dec(&priv->buckets_out);
975 priv->total_received++;
977 return mpt_lan_receive_skb(dev, skb);
980 /*=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=*/
982 mpt_lan_receive_post_free(struct net_device *dev,
983 LANReceivePostReply_t *pRecvRep)
985 struct mpt_lan_priv *priv = dev->priv;
986 MPT_ADAPTER *mpt_dev = priv->mpt_dev;
993 count = pRecvRep->NumberOfContexts;
995 /**/ dlprintk((KERN_INFO MYNAM "/receive_post_reply: "
996 "IOC returned %d buckets, freeing them...\n", count));
998 spin_lock_irqsave(&priv->rxfidx_lock, flags);
999 for (i = 0; i < count; i++) {
1000 ctx = le32_to_cpu(pRecvRep->BucketContext[i]);
1002 skb = priv->RcvCtl[ctx].skb;
1004 // dlprintk((KERN_INFO MYNAM ": %s: dev_name = %s\n",
1005 // IOC_AND_NETDEV_NAMES_s_s(dev)));
1006 // dlprintk((KERN_INFO MYNAM "@rpr[2], priv = %p, buckets_out addr = %p",
1007 // priv, &(priv->buckets_out)));
1008 // dlprintk((KERN_INFO MYNAM "@rpr[2] TC + 3\n"));
1010 priv->RcvCtl[ctx].skb = NULL;
1011 pci_unmap_single(mpt_dev->pcidev, priv->RcvCtl[ctx].dma,
1012 priv->RcvCtl[ctx].len, PCI_DMA_FROMDEVICE);
1013 dev_kfree_skb_any(skb);
1015 priv->mpt_rxfidx[++priv->mpt_rxfidx_tail] = ctx;
1017 spin_unlock_irqrestore(&priv->rxfidx_lock, flags);
1019 atomic_sub(count, &priv->buckets_out);
1021 // for (i = 0; i < priv->max_buckets_out; i++)
1022 // if (priv->RcvCtl[i].skb != NULL)
1023 // dlprintk((KERN_INFO MYNAM "@rpr: bucket %03x "
1024 // "is still out\n", i));
1026 /* dlprintk((KERN_INFO MYNAM "/receive_post_reply: freed %d buckets\n",
1029 /**/ dlprintk((KERN_INFO MYNAM "@receive_post_reply: %d buckets "
1030 /**/ "remaining, %d received back since sod.\n",
1031 /**/ atomic_read(&priv->buckets_out), priv->total_received));
1035 /*=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=*/
1037 mpt_lan_receive_post_reply(struct net_device *dev,
1038 LANReceivePostReply_t *pRecvRep)
1040 struct mpt_lan_priv *priv = dev->priv;
1041 MPT_ADAPTER *mpt_dev = priv->mpt_dev;
1042 struct sk_buff *skb, *old_skb;
1043 unsigned long flags;
1044 u32 len, ctx, offset;
1045 u32 remaining = le32_to_cpu(pRecvRep->BucketsRemaining);
1049 dioprintk((KERN_INFO MYNAM ": mpt_lan_receive_post_reply called\n"));
1050 dioprintk((KERN_INFO MYNAM ": receive_post_reply: IOCStatus: %04x\n",
1051 le16_to_cpu(pRecvRep->IOCStatus)));
1053 if ((le16_to_cpu(pRecvRep->IOCStatus) & MPI_IOCSTATUS_MASK) ==
1054 MPI_IOCSTATUS_LAN_CANCELED)
1055 return mpt_lan_receive_post_free(dev, pRecvRep);
1057 len = le32_to_cpu(pRecvRep->PacketLength);
1059 printk (KERN_ERR MYNAM ": %s/%s: ERROR - Got a non-TURBO "
1060 "ReceivePostReply w/ PacketLength zero!\n",
1061 IOC_AND_NETDEV_NAMES_s_s(dev));
1062 printk (KERN_ERR MYNAM ": MsgFlags = %02x, IOCStatus = %04x\n",
1063 pRecvRep->MsgFlags, le16_to_cpu(pRecvRep->IOCStatus));
1067 ctx = le32_to_cpu(pRecvRep->BucketContext[0]);
1068 count = pRecvRep->NumberOfContexts;
1069 skb = priv->RcvCtl[ctx].skb;
1071 offset = le32_to_cpu(pRecvRep->PacketOffset);
1072 // if (offset != 0) {
1073 // printk (KERN_INFO MYNAM ": %s/%s: Got a ReceivePostReply "
1074 // "w/ PacketOffset %u\n",
1075 // IOC_AND_NETDEV_NAMES_s_s(dev),
1079 dioprintk((KERN_INFO MYNAM ": %s/%s: @rpr, offset = %d, len = %d\n",
1080 IOC_AND_NETDEV_NAMES_s_s(dev),
1086 // dioprintk((KERN_INFO MYNAM ": %s/%s: Multiple buckets returned "
1087 // "for single packet, concatenating...\n",
1088 // IOC_AND_NETDEV_NAMES_s_s(dev)));
1090 skb = (struct sk_buff *)dev_alloc_skb(len);
1092 printk (KERN_ERR MYNAM ": %s/%s: ERROR - Can't allocate skb! (%s@%d)\n",
1093 IOC_AND_NETDEV_NAMES_s_s(dev),
1094 __FILE__, __LINE__);
1098 spin_lock_irqsave(&priv->rxfidx_lock, flags);
1099 for (i = 0; i < count; i++) {
1101 ctx = le32_to_cpu(pRecvRep->BucketContext[i]);
1102 old_skb = priv->RcvCtl[ctx].skb;
1104 l = priv->RcvCtl[ctx].len;
1108 // dioprintk((KERN_INFO MYNAM ": %s/%s: Buckets = %d, len = %u\n",
1109 // IOC_AND_NETDEV_NAMES_s_s(dev),
1112 pci_dma_sync_single_for_cpu(mpt_dev->pcidev,
1113 priv->RcvCtl[ctx].dma,
1114 priv->RcvCtl[ctx].len,
1115 PCI_DMA_FROMDEVICE);
1116 memcpy(skb_put(skb, l), old_skb->data, l);
1118 pci_dma_sync_single_for_device(mpt_dev->pcidev,
1119 priv->RcvCtl[ctx].dma,
1120 priv->RcvCtl[ctx].len,
1121 PCI_DMA_FROMDEVICE);
1123 priv->mpt_rxfidx[++priv->mpt_rxfidx_tail] = ctx;
1126 spin_unlock_irqrestore(&priv->rxfidx_lock, flags);
1128 } else if (len < MPT_LAN_RX_COPYBREAK) {
1132 skb = (struct sk_buff *)dev_alloc_skb(len);
1134 printk (KERN_ERR MYNAM ": %s/%s: ERROR - Can't allocate skb! (%s@%d)\n",
1135 IOC_AND_NETDEV_NAMES_s_s(dev),
1136 __FILE__, __LINE__);
1140 pci_dma_sync_single_for_cpu(mpt_dev->pcidev,
1141 priv->RcvCtl[ctx].dma,
1142 priv->RcvCtl[ctx].len,
1143 PCI_DMA_FROMDEVICE);
1145 memcpy(skb_put(skb, len), old_skb->data, len);
1147 pci_dma_sync_single_for_device(mpt_dev->pcidev,
1148 priv->RcvCtl[ctx].dma,
1149 priv->RcvCtl[ctx].len,
1150 PCI_DMA_FROMDEVICE);
1152 spin_lock_irqsave(&priv->rxfidx_lock, flags);
1153 priv->mpt_rxfidx[++priv->mpt_rxfidx_tail] = ctx;
1154 spin_unlock_irqrestore(&priv->rxfidx_lock, flags);
1157 spin_lock_irqsave(&priv->rxfidx_lock, flags);
1159 priv->RcvCtl[ctx].skb = NULL;
1161 pci_unmap_single(mpt_dev->pcidev, priv->RcvCtl[ctx].dma,
1162 priv->RcvCtl[ctx].len, PCI_DMA_FROMDEVICE);
1163 priv->RcvCtl[ctx].dma = 0;
1165 priv->mpt_rxfidx[++priv->mpt_rxfidx_tail] = ctx;
1166 spin_unlock_irqrestore(&priv->rxfidx_lock, flags);
1171 atomic_sub(count, &priv->buckets_out);
1172 priv->total_received += count;
1174 if (priv->mpt_rxfidx_tail >= MPT_LAN_MAX_BUCKETS_OUT) {
1175 printk (KERN_ERR MYNAM ": %s/%s: Yoohoo! mpt_rxfidx_tail = %d, "
1176 "MPT_LAN_MAX_BUCKETS_OUT = %d\n",
1177 IOC_AND_NETDEV_NAMES_s_s(dev),
1178 priv->mpt_rxfidx_tail,
1179 MPT_LAN_MAX_BUCKETS_OUT);
1181 panic("Damn it Jim! I'm a doctor, not a programmer! "
1182 "Oh, wait a sec, I am a programmer. "
1183 "And, who's Jim?!?!\n"
1184 "Arrgghh! We've done it again!\n");
1188 printk (KERN_WARNING MYNAM ": %s/%s: WARNING - IOC out of buckets! "
1189 "(priv->buckets_out = %d)\n",
1190 IOC_AND_NETDEV_NAMES_s_s(dev),
1191 atomic_read(&priv->buckets_out));
1192 else if (remaining < 10)
1193 printk (KERN_INFO MYNAM ": %s/%s: IOC says %d buckets left. "
1194 "(priv->buckets_out = %d)\n",
1195 IOC_AND_NETDEV_NAMES_s_s(dev),
1196 remaining, atomic_read(&priv->buckets_out));
1198 if ((remaining < priv->bucketthresh) &&
1199 ((atomic_read(&priv->buckets_out) - remaining) >
1200 MPT_LAN_BUCKETS_REMAIN_MISMATCH_THRESH)) {
1202 printk (KERN_WARNING MYNAM " Mismatch between driver's "
1203 "buckets_out count and fw's BucketsRemaining "
1204 "count has crossed the threshold, issuing a "
1205 "LanReset to clear the fw's hashtable. You may "
1206 "want to check your /var/log/messages for \"CRC "
1207 "error\" event notifications.\n");
1210 mpt_lan_wake_post_buckets_task(dev, 0);
1213 return mpt_lan_receive_skb(dev, skb);
1216 /*=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=*/
1217 /* Simple SGE's only at the moment */
/*
 * mpt_lan_post_receive_buckets - replenish the IOC's pool of receive
 * buffers ("buckets") by building a LAN_RECEIVE post request that
 * carries one 32-bit transaction context plus one simple 64-bit SGE
 * per bucket, then handing the frame to the firmware.
 *
 * @dev_id: actually a struct net_device * for this LAN port; the
 *          void * signature matches the work-queue callback prototype
 *          used at INIT_WORK time (see mpt_register_lan_device).
 *
 * NOTE(review): this excerpt is elided in places -- the opening brace,
 * the declarations of mf/i/ctx/dma/count, the outer posting loop and
 * several error-path/closing-brace lines are not visible here.  The
 * comments below describe only what the visible lines establish.
 */
1220 mpt_lan_post_receive_buckets(void *dev_id)
1222 struct net_device *dev = dev_id;
1223 struct mpt_lan_priv *priv = dev->priv;
1224 MPT_ADAPTER *mpt_dev = priv->mpt_dev;
1226 LANReceivePostRequest_t *pRecvReq;
1227 SGETransaction32_t *pTrans;
1228 SGESimple64_t *pSimple;
1229 struct sk_buff *skb;
1231 u32 curr, buckets, count, max;
/* One bucket must hold a full frame: MTU + hardware header + 4 bytes. */
1232 u32 len = (dev->mtu + dev->hard_header_len + 4);
1233 unsigned long flags;
/* How many buckets we still owe the IOC. */
1236 curr = atomic_read(&priv->buckets_out);
1237 buckets = (priv->max_buckets_out - curr);
1239 dioprintk((KERN_INFO MYNAM ": %s/%s: @%s, Start_buckets = %u, buckets_out = %u\n",
1240 IOC_AND_NETDEV_NAMES_s_s(dev),
1241 __FUNCTION__, buckets, curr));
/* Max transaction+SGE pairs that fit in one request frame. */
1243 max = (mpt_dev->req_sz - MPT_LAN_RECEIVE_POST_REQUEST_SIZE) /
1244 (MPT_LAN_TRANSACTION32_SIZE + sizeof(SGESimple64_t));
/* NOTE(review): the declaration of mf and the surrounding NULL-check
 * braces are elided; on frame-alloc failure we log and presumably bail
 * with 'buckets' still outstanding -- confirm against full source. */
1247 mf = mpt_get_msg_frame(LanCtx, mpt_dev);
1249 printk (KERN_ERR "%s: Unable to alloc request frame\n",
1251 dioprintk((KERN_ERR "%s: %u buckets remaining\n",
1252 __FUNCTION__, buckets));
1255 pRecvReq = (LANReceivePostRequest_t *) mf;
/* Fill in the fixed part of the LAN_RECEIVE request header. */
1261 pRecvReq->Function = MPI_FUNCTION_LAN_RECEIVE;
1262 pRecvReq->ChainOffset = 0;
1263 pRecvReq->MsgFlags = 0;
1264 pRecvReq->PortNumber = priv->pnum;
1266 pTrans = (SGETransaction32_t *) pRecvReq->SG_List;
/* Build one context + SGE pair per bucket being posted. */
1269 for (i = 0; i < count; i++) {
1272 spin_lock_irqsave(&priv->rxfidx_lock, flags);
/* Free-context stack is empty: nothing left to post. */
1273 if (priv->mpt_rxfidx_tail < 0) {
1274 printk (KERN_ERR "%s: Can't alloc context\n",
1276 spin_unlock_irqrestore(&priv->rxfidx_lock,
/* Pop a free receive context index off the stack. */
1281 ctx = priv->mpt_rxfidx[priv->mpt_rxfidx_tail--];
1283 skb = priv->RcvCtl[ctx].skb;
/* A cached skb of the wrong size (e.g. after an MTU change) cannot be
 * reused: unmap and free it so a fresh one is allocated below. */
1284 if (skb && (priv->RcvCtl[ctx].len != len)) {
1285 pci_unmap_single(mpt_dev->pcidev,
1286 priv->RcvCtl[ctx].dma,
1287 priv->RcvCtl[ctx].len,
1288 PCI_DMA_FROMDEVICE);
1289 dev_kfree_skb(priv->RcvCtl[ctx].skb);
1290 skb = priv->RcvCtl[ctx].skb = NULL;
/* No reusable skb for this context: allocate and DMA-map a new one.
 * On failure the context index is pushed back and we stop posting. */
1294 skb = dev_alloc_skb(len);
1296 printk (KERN_WARNING
1297 MYNAM "/%s: Can't alloc skb\n",
1299 priv->mpt_rxfidx[++priv->mpt_rxfidx_tail] = ctx;
1300 spin_unlock_irqrestore(&priv->rxfidx_lock, flags);
1304 dma = pci_map_single(mpt_dev->pcidev, skb->data,
1305 len, PCI_DMA_FROMDEVICE);
1307 priv->RcvCtl[ctx].skb = skb;
1308 priv->RcvCtl[ctx].dma = dma;
1309 priv->RcvCtl[ctx].len = len;
1312 spin_unlock_irqrestore(&priv->rxfidx_lock, flags);
/* Describe this bucket to the IOC: a 32-bit context followed by one
 * simple 64-bit-addressing SGE covering the whole buffer. */
1314 pTrans->ContextSize = sizeof(u32);
1315 pTrans->DetailsLength = 0;
1317 pTrans->TransactionContext[0] = cpu_to_le32(ctx);
1319 pSimple = (SGESimple64_t *) pTrans->TransactionDetails;
1321 pSimple->FlagsLength = cpu_to_le32(
1322 ((MPI_SGE_FLAGS_END_OF_BUFFER |
1323 MPI_SGE_FLAGS_SIMPLE_ELEMENT |
1324 MPI_SGE_FLAGS_64_BIT_ADDRESSING) << MPI_SGE_FLAGS_SHIFT) | len);
1325 pSimple->Address.Low = cpu_to_le32((u32) priv->RcvCtl[ctx].dma);
/* Only emit a non-zero high dword on platforms with >32-bit dma_addr_t. */
1326 if (sizeof(dma_addr_t) > sizeof(u32))
1327 pSimple->Address.High = cpu_to_le32((u32) ((u64) priv->RcvCtl[ctx].dma >> 32));
1329 pSimple->Address.High = 0;
/* Next transaction starts immediately after this SGE. */
1331 pTrans = (SGETransaction32_t *) (pSimple + 1);
/* pSimple still NULL means the loop posted nothing: give the message
 * frame back to the pool instead of sending an empty request. */
1334 if (pSimple == NULL) {
1335 /**/ printk (KERN_WARNING MYNAM "/%s: No buckets posted\n",
1337 mpt_free_msg_frame(LanCtx, mpt_dev, mf);
/* Mark the last SGE as end-of-list before handing off the frame. */
1341 pSimple->FlagsLength |= cpu_to_le32(MPI_SGE_FLAGS_END_OF_LIST << MPI_SGE_FLAGS_SHIFT);
1343 pRecvReq->BucketCount = cpu_to_le32(i);
1345 /* printk(KERN_INFO MYNAM ": posting buckets\n ");
1346 * for (i = 0; i < j + 2; i ++)
1347 * printk (" %08x", le32_to_cpu(msg[i]));
1351 mpt_put_msg_frame(LanCtx, mpt_dev, mf);
/* Account for the i buckets now owned by the firmware. */
1353 priv->total_posted += i;
1355 atomic_add(i, &priv->buckets_out);
1359 dioprintk((KERN_INFO MYNAM "/%s: End_buckets = %u, priv->buckets_out = %u\n",
1360 __FUNCTION__, buckets, atomic_read(&priv->buckets_out)));
1361 dioprintk((KERN_INFO MYNAM "/%s: Posted %u buckets and received %u back\n",
1362 __FUNCTION__, priv->total_posted, priv->total_received));
/* Allow mpt_lan_wake_post_buckets_task() to schedule this work again. */
1364 clear_bit(0, &priv->post_buckets_active);
1367 /*=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=*/
/*
 * mpt_register_lan_device - allocate and register one Fibre Channel
 * net_device for port @pnum of adapter @mpt_dev.
 *
 * Allocates the netdev with alloc_fcdev() (private area is struct
 * mpt_lan_priv), initializes the private state (bucket accounting,
 * work task, locks), derives the MAC address from the pre-fetched
 * LANPage1 hardware address, wires up the netdev method pointers and
 * finally calls register_netdev().
 *
 * Returns the registered net_device.  NOTE(review): the failure paths
 * (alloc_fcdev() returning NULL, register_netdev() != 0) and the final
 * return statement are elided from this excerpt -- confirm cleanup
 * behavior against the full source.
 */
1368 static struct net_device *
1369 mpt_register_lan_device (MPT_ADAPTER *mpt_dev, int pnum)
1371 struct net_device *dev = alloc_fcdev(sizeof(struct mpt_lan_priv));
1372 struct mpt_lan_priv *priv = NULL;
1373 u8 HWaddr[FC_ALEN], *a;
1378 dev->mtu = MPT_LAN_MTU;
1380 priv = netdev_priv(dev);
1382 priv->mpt_dev = mpt_dev;
/* Work task that re-posts receive buckets to the IOC; 'dev' is passed
 * through as the void * argument of mpt_lan_post_receive_buckets(). */
1385 memset(&priv->post_buckets_task, 0, sizeof(struct work_struct));
1386 INIT_WORK(&priv->post_buckets_task, mpt_lan_post_receive_buckets, dev);
1387 priv->post_buckets_active = 0;
1389 dlprintk((KERN_INFO MYNAM "@%d: bucketlen = %d\n",
1390 __LINE__, dev->mtu + dev->hard_header_len + 4));
/* Bucket accounting: cap the configured max_buckets_out by what the
 * firmware reports it can actually handle. */
1392 atomic_set(&priv->buckets_out, 0);
1393 priv->total_posted = 0;
1394 priv->total_received = 0;
1395 priv->max_buckets_out = max_buckets_out;
1396 if (mpt_dev->pfacts[0].MaxLanBuckets < max_buckets_out)
1397 priv->max_buckets_out = mpt_dev->pfacts[0].MaxLanBuckets;
1399 dlprintk((KERN_INFO MYNAM "@%d: MaxLanBuckets=%d, max_buckets_out/priv=%d/%d\n",
1401 mpt_dev->pfacts[0].MaxLanBuckets,
1403 priv->max_buckets_out));
/* Re-post buckets once the pool drops below two thirds of the max. */
1405 priv->bucketthresh = priv->max_buckets_out * 2 / 3;
/* NOTE(review): SPIN_LOCK_UNLOCKED is the legacy static initializer;
 * modern kernels use spin_lock_init() here. */
1406 priv->txfidx_lock = SPIN_LOCK_UNLOCKED;
1407 priv->rxfidx_lock = SPIN_LOCK_UNLOCKED;
1409 memset(&priv->stats, 0, sizeof(priv->stats));
1411 /* Grab pre-fetched LANPage1 stuff. :-) */
1412 a = (u8 *) &mpt_dev->lan_cnfg_page1.HardwareAddressLow;
/* NOTE(review): the byte-by-byte assembly of HWaddr from 'a' is elided
 * from this excerpt. */
1421 dev->addr_len = FC_ALEN;
1422 memcpy(dev->dev_addr, HWaddr, FC_ALEN);
1423 memset(dev->broadcast, 0xff, FC_ALEN);
1425 /* The Tx queue is 127 deep on the 909.
1426 * Give ourselves some breathing room.
1428 priv->tx_max_out = (tx_max_out_p <= MPT_TX_MAX_OUT_LIM) ?
1429 tx_max_out_p : MPT_TX_MAX_OUT_LIM;
/* Old-style (pre-net_device_ops) method pointers. */
1431 dev->open = mpt_lan_open;
1432 dev->stop = mpt_lan_close;
1433 dev->get_stats = mpt_lan_get_stats;
1434 dev->set_multicast_list = NULL;
1435 dev->change_mtu = mpt_lan_change_mtu;
1436 dev->hard_start_xmit = mpt_lan_sdu_send;
1438 /* Not in 2.3.42. Need 2.3.45+ */
1439 dev->tx_timeout = mpt_lan_tx_timeout;
1440 dev->watchdog_timeo = MPT_LAN_TX_TIMEOUT;
1442 dlprintk((KERN_INFO MYNAM ": Finished registering dev "
1443 "and setting initial values\n"));
1445 SET_MODULE_OWNER(dev);
1447 if (register_netdev(dev) != 0) {
1454 /*=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=*/
/*
 * mpt_lan_init - module entry point.
 *
 * Registers this driver with the MPT base driver (obtaining LanCtx, the
 * callback context used for LAN replies), installs an IOC-reset
 * handler, then walks every known adapter/port and creates one LAN
 * net_device for each port that advertises the LAN protocol.
 *
 * NOTE(review): the declarations of i/j/p, the return statements and
 * several closing braces are elided from this excerpt.
 */
1455 static int __init mpt_lan_init (void)
1457 struct net_device *dev;
1461 show_mptmod_ver(LANAME, LANVER);
1463 #ifdef QLOGIC_NAA_WORKAROUND
1464 /* Init the global r/w lock for the bad_naa list. We want to do this
1465 before any boards are initialized and may be used. */
1466 rwlock_init(&bad_naa_lock);
/* A context <= 0 means mptbase refused the registration. */
1469 if ((LanCtx = mpt_register(lan_reply, MPTLAN_DRIVER)) <= 0) {
1470 printk (KERN_ERR MYNAM ": Failed to register with MPT base driver\n");
1474 /* Set the callback index to be used by driver core for turbo replies */
1475 mpt_lan_index = LanCtx;
1477 dlprintk((KERN_INFO MYNAM ": assigned context of %d\n", LanCtx));
1479 if (mpt_reset_register(LanCtx, mpt_lan_ioc_reset) == 0) {
1480 dlprintk((KERN_INFO MYNAM ": Registered for IOC reset notifications\n"));
1482 printk(KERN_ERR MYNAM ": Eieee! unable to register a reset "
1483 "handler with mptbase! The world is at an end! "
1484 "Everything is fading to black! Goodbye.\n");
/* Start with an empty device table; mpt_lan_exit() iterates it until
 * the first NULL entry. */
1488 for (j = 0; j < MPT_MAX_ADAPTERS; j++) {
1489 mpt_landev[j] = NULL;
/* One pass over every adapter, one inner pass over its ports. */
1492 list_for_each_entry(p, &ioc_list, list) {
1493 for (i = 0; i < p->facts.NumberOfPorts; i++) {
1494 printk (KERN_INFO MYNAM ": %s: PortNum=%x, ProtocolFlags=%02Xh (%c%c%c%c)\n",
1496 p->pfacts[i].PortNumber,
1497 p->pfacts[i].ProtocolFlags,
1498 MPT_PROTOCOL_FLAGS_c_c_c_c(p->pfacts[i].ProtocolFlags));
/* Skip ports whose firmware does not expose the LAN protocol. */
1500 if (!(p->pfacts[i].ProtocolFlags & MPI_PORTFACTS_PROTOCOL_LAN)) {
1501 printk (KERN_INFO MYNAM ": %s: Hmmm... LAN protocol seems to be disabled on this adapter port!\n",
1506 dev = mpt_register_lan_device (p, i);
1508 printk (KERN_ERR MYNAM ": %s: Unable to register port%d as a LAN device\n",
1510 p->pfacts[i].PortNumber);
1512 printk (KERN_INFO MYNAM ": %s: Fusion MPT LAN device registered as '%s'\n",
1513 p->name, dev->name);
1514 printk (KERN_INFO MYNAM ": %s/%s: LanAddr = %02X:%02X:%02X:%02X:%02X:%02X\n",
1515 IOC_AND_NETDEV_NAMES_s_s(dev),
1516 dev->dev_addr[0], dev->dev_addr[1],
1517 dev->dev_addr[2], dev->dev_addr[3],
1518 dev->dev_addr[4], dev->dev_addr[5]);
1519 // printk (KERN_INFO MYNAM ": %s/%s: Max_TX_outstanding = %d\n",
1520 // IOC_AND_NETDEV_NAMES_s_s(dev),
1521 // NETDEV_TO_LANPRIV_PTR(dev)->tx_max_out);
/* Record the new device in the global table (j indexes it). */
1523 mpt_landev[j] = dev;
1524 dlprintk((KERN_INFO MYNAM "/init: dev_addr=%p, mpt_landev[%d]=%p\n",
1525 dev, j, mpt_landev[j]));
1533 /*=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=*/
/*
 * mpt_lan_exit - module exit point.
 *
 * Tears down in reverse order of mpt_lan_init(): drops the IOC-reset
 * handler, unregisters every net_device recorded in mpt_landev[]
 * (stopping at the first NULL slot), then deregisters from the MPT
 * base driver.  NOTE(review): the declaration of i and the closing
 * brace are elided from this excerpt; whether the devices are freed
 * (free_netdev) after unregister_netdev() is not visible here.
 */
1534 static void __exit mpt_lan_exit(void)
1538 mpt_reset_deregister(LanCtx);
1540 for (i = 0; mpt_landev[i] != NULL; i++) {
1541 struct net_device *dev = mpt_landev[i];
1543 printk (KERN_INFO ": %s/%s: Fusion MPT LAN device unregistered\n",
1544 IOC_AND_NETDEV_NAMES_s_s(dev));
1545 unregister_netdev(dev);
/* Clear the slot so a re-load starts from a clean table. */
1547 mpt_landev[i] = NULL;
1551 mpt_deregister(LanCtx);
1556 /* deregister any send/receive handler structs. I2Oism? */
1559 /*=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=*/
/* Standard module entry/exit hooks. */
1561 module_init(mpt_lan_init);
1562 module_exit(mpt_lan_exit);
1564 /*=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=*/
/*
 * mpt_lan_type_trans - classify a received IP-over-FC frame and return
 * its protocol number (analogous to eth_type_trans()).
 *
 * Pulls the mpt_lan_ohdr pseudo-header, sets skb->pkt_type from the
 * destination address (broadcast / multicast / otherhost / host),
 * optionally records senders with non-RFC 2625 NAA values (QLogic
 * workaround), and strips the LLC/SNAP header for IP and ARP frames.
 *
 * Returns the frame's ethertype (network byte order), or
 * htons(ETH_P_802_2) for anything that is not SNAP-encapsulated IP/ARP.
 *
 * NOTE(review): this excerpt is elided in places -- the opening brace,
 * the byte-swap body of the 0xffff broadcast-bug repair, the kmalloc
 * NULL-check structure around the NAA_Hosed insertion and several
 * closing braces are not visible here.
 */
1565 static unsigned short
1566 mpt_lan_type_trans(struct sk_buff *skb, struct net_device *dev)
1568 struct mpt_lan_ohdr *fch = (struct mpt_lan_ohdr *)skb->data;
1569 struct fcllc *fcllc;
/* Record the MAC header position, then advance past our pseudo-header. */
1571 skb->mac.raw = skb->data;
1572 skb_pull(skb, sizeof(struct mpt_lan_ohdr));
/* dtype == 0xffff indicates a sender whose firmware byte-swapped the
 * header (the "broadcast swap" bug); the repair code is elided here. */
1574 if (fch->dtype == htons(0xffff)) {
1575 u32 *p = (u32 *) fch;
1582 printk (KERN_WARNING MYNAM ": %s: WARNING - Broadcast swap F/W bug detected!\n",
1583 NETDEV_PTR_TO_IOC_NAME_s(dev));
1584 printk (KERN_WARNING MYNAM ": Please update sender @ MAC_addr = %02x:%02x:%02x:%02x:%02x:%02x\n",
1585 fch->saddr[0], fch->saddr[1], fch->saddr[2],
1586 fch->saddr[3], fch->saddr[4], fch->saddr[5]);
/* Group bit set: broadcast if daddr matches dev->broadcast, else
 * multicast; otherwise host vs. otherhost by exact address match. */
1589 if (*fch->daddr & 1) {
1590 if (!memcmp(fch->daddr, dev->broadcast, FC_ALEN)) {
1591 skb->pkt_type = PACKET_BROADCAST;
1593 skb->pkt_type = PACKET_MULTICAST;
1596 if (memcmp(fch->daddr, dev->dev_addr, FC_ALEN)) {
1597 skb->pkt_type = PACKET_OTHERHOST;
1599 skb->pkt_type = PACKET_HOST;
/* After the pull, skb->data now points at the LLC/SNAP header. */
1603 fcllc = (struct fcllc *)skb->data;
1605 #ifdef QLOGIC_NAA_WORKAROUND
1607 u16 source_naa = fch->stype, found = 0;
1609 /* Workaround for QLogic not following RFC 2625 in regards to the NAA
/* A zero high nibble suggests the NAA arrived byte-swapped; undo it. */
1612 if ((source_naa & 0xF000) == 0)
1613 source_naa = swab16(source_naa);
1615 if (fcllc->ethertype == htons(ETH_P_ARP))
1616 dlprintk ((KERN_INFO "mptlan/type_trans: got arp req/rep w/ naa of "
1617 "%04x.\n", source_naa));
/* ARP from a sender with a non-RFC 2625 NAA: remember the sender in
 * the global mpt_bad_naa list (guarded by bad_naa_lock). */
1619 if ((fcllc->ethertype == htons(ETH_P_ARP)) &&
1620 ((source_naa >> 12) != MPT_LAN_NAA_RFC2625)){
1621 struct NAA_Hosed *nh, *prevnh;
1624 dlprintk ((KERN_INFO "mptlan/type_trans: ARP Req/Rep from "
1625 "system with non-RFC 2625 NAA value (%04x).\n",
1628 write_lock_irq(&bad_naa_lock);
/* Linear scan for an existing entry with this source MAC. */
1629 for (prevnh = nh = mpt_bad_naa; nh != NULL;
1630 prevnh=nh, nh=nh->next) {
1631 if ((nh->ieee[0] == fch->saddr[0]) &&
1632 (nh->ieee[1] == fch->saddr[1]) &&
1633 (nh->ieee[2] == fch->saddr[2]) &&
1634 (nh->ieee[3] == fch->saddr[3]) &&
1635 (nh->ieee[4] == fch->saddr[4]) &&
1636 (nh->ieee[5] == fch->saddr[5])) {
1638 dlprintk ((KERN_INFO "mptlan/type_trans: ARP Re"
1639 "q/Rep w/ bad NAA from system already"
/* Not found and scan exhausted: append a new entry.  NOTE(review):
 * the kmalloc NULL-check / list-link lines are elided here. */
1645 if ((!found) && (nh == NULL)) {
1647 nh = kmalloc(sizeof(struct NAA_Hosed), GFP_KERNEL);
1648 dlprintk ((KERN_INFO "mptlan/type_trans: ARP Req/Rep w/"
1649 " bad NAA from system not yet in DB.\n"));
1658 nh->NAA = source_naa; /* Set the S_NAA value. */
1659 for (i = 0; i < FC_ALEN; i++)
1660 nh->ieee[i] = fch->saddr[i];
1661 dlprintk ((KERN_INFO "Got ARP from %02x:%02x:%02x:%02x:"
1662 "%02x:%02x with non-compliant S_NAA value.\n",
1663 fch->saddr[0], fch->saddr[1], fch->saddr[2],
1664 fch->saddr[3], fch->saddr[4],fch->saddr[5]));
1666 printk (KERN_ERR "mptlan/type_trans: Unable to"
1667 " kmalloc a NAA_Hosed struct.\n");
1669 } else if (!found) {
1670 printk (KERN_ERR "mptlan/type_trans: found not"
1671 " set, but nh isn't null. Evil "
1672 "funkiness abounds.\n");
1674 write_unlock_irq(&bad_naa_lock);
1679 /* Strip the SNAP header from ARP packets since we don't
1680 * pass them through to the 802.2/SNAP layers.
1682 if (fcllc->dsap == EXTENDED_SAP &&
1683 (fcllc->ethertype == htons(ETH_P_IP) ||
1684 fcllc->ethertype == htons(ETH_P_ARP))) {
1685 skb_pull(skb, sizeof(struct fcllc));
1686 return fcllc->ethertype;
/* Not SNAP-encapsulated IP/ARP: hand it up as raw 802.2. */
1689 return htons(ETH_P_802_2);
1692 /*=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=*/