2 * linux/drivers/message/fusion/mptlan.c
3 * IP Over Fibre Channel device driver.
4 * For use with PCI chip/adapter(s):
5 * LSIFC9xx/LSI409xx Fibre Channel
6 * running LSI Logic Fusion MPT (Message Passing Technology) firmware.
9 * This driver would not exist if not for Alan Cox's development
10 * of the linux i2o driver.
12 * Special thanks goes to the I2O LAN driver people at the
13 * University of Helsinki, who, unbeknownst to them, provided
14 * the inspiration and initial structure for this driver.
16 * A huge debt of gratitude is owed to David S. Miller (DaveM)
17 * for fixing much of the stupid and broken stuff in the early
18 * driver while porting to sparc64 platform. THANK YOU!
20 * A really huge debt of gratitude is owed to Eddie C. Dost
21 * for gobs of hard work fixing and optimizing LAN code.
24 * (see also mptbase.c)
26 * Copyright (c) 2000-2004 LSI Logic Corporation
27 * Originally By: Noah Romer
28 * (mailto:mpt_linux_developer@lsil.com)
30 * $Id: mptlan.c,v 1.53 2002/10/17 20:15:58 pdelaney Exp $
32 /*=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=*/
34 This program is free software; you can redistribute it and/or modify
35 it under the terms of the GNU General Public License as published by
36 the Free Software Foundation; version 2 of the License.
38 This program is distributed in the hope that it will be useful,
39 but WITHOUT ANY WARRANTY; without even the implied warranty of
40 MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
41 GNU General Public License for more details.
44 THE PROGRAM IS PROVIDED ON AN "AS IS" BASIS, WITHOUT WARRANTIES OR
45 CONDITIONS OF ANY KIND, EITHER EXPRESS OR IMPLIED INCLUDING, WITHOUT
46 LIMITATION, ANY WARRANTIES OR CONDITIONS OF TITLE, NON-INFRINGEMENT,
47 MERCHANTABILITY OR FITNESS FOR A PARTICULAR PURPOSE. Each Recipient is
48 solely responsible for determining the appropriateness of using and
49 distributing the Program and assumes all risks associated with its
50 exercise of rights under this Agreement, including but not limited to
51 the risks and costs of program errors, damage to or loss of data,
52 programs or equipment, and unavailability or interruption of operations.
54 DISCLAIMER OF LIABILITY
55 NEITHER RECIPIENT NOR ANY CONTRIBUTORS SHALL HAVE ANY LIABILITY FOR ANY
56 DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
57 DAMAGES (INCLUDING WITHOUT LIMITATION LOST PROFITS), HOWEVER CAUSED AND
58 ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR
59 TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE
60 USE OR DISTRIBUTION OF THE PROGRAM OR THE EXERCISE OF ANY RIGHTS GRANTED
61 HEREUNDER, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGES
63 You should have received a copy of the GNU General Public License
64 along with this program; if not, write to the Free Software
65 Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
68 /*=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=*/
70 * Define statements used for debugging
72 //#define MPT_LAN_IO_DEBUG
74 /*=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=*/
77 #include <linux/init.h>
78 #include <linux/module.h>
81 #define MYNAM "mptlan"
83 MODULE_LICENSE("GPL");
85 /*=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=*/
87 * MPT LAN message sizes without variable part.
89 #define MPT_LAN_RECEIVE_POST_REQUEST_SIZE \
90 (sizeof(LANReceivePostRequest_t) - sizeof(SGE_MPI_UNION))
92 #define MPT_LAN_TRANSACTION32_SIZE \
93 (sizeof(SGETransaction32_t) - sizeof(u32))
96 * Fusion MPT LAN private structures
102 struct NAA_Hosed *next;
105 struct BufferControl {
/*
 * Per-netdevice private state for one MPT LAN port.
 *
 * NOTE(review): this excerpt is missing several lines of the original
 * declaration (original line numbers are non-contiguous), including the
 * closing brace and members the code below clearly uses, e.g.
 * mpt_txfidx_tail, mpt_rxfidx_tail, total_posted, total_received.
 */
111 struct mpt_lan_priv {
112 MPT_ADAPTER *mpt_dev;
113 u8 pnum; /* Port number in the IOC. This is not a Unix network port! */
115 atomic_t buckets_out; /* number of unused buckets on IOC */
116 int bucketthresh; /* Send more when this many left */
118 int *mpt_txfidx; /* Free Tx Context list */
120 spinlock_t txfidx_lock; /* guards mpt_txfidx and its tail index */
122 int *mpt_rxfidx; /* Free Rx Context list */
124 spinlock_t rxfidx_lock; /* guards mpt_rxfidx and its tail index */
126 struct BufferControl *RcvCtl; /* Receive BufferControl structs */
127 struct BufferControl *SendCtl; /* Send BufferControl structs */
129 int max_buckets_out; /* Max buckets to send to IOC */
130 int tx_max_out; /* IOC's Tx queue len */
134 struct net_device_stats stats; /* Per device statistics */
136 struct mpt_work_struct post_buckets_task; /* deferred bucket reposting */
137 unsigned long post_buckets_active; /* bit 0 set while task is queued */
140 struct mpt_lan_ohdr {
147 /*=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=*/
152 static int lan_reply (MPT_ADAPTER *ioc, MPT_FRAME_HDR *mf,
153 MPT_FRAME_HDR *reply);
154 static int mpt_lan_open(struct net_device *dev);
155 static int mpt_lan_reset(struct net_device *dev);
156 static int mpt_lan_close(struct net_device *dev);
157 static void mpt_lan_post_receive_buckets(void *dev_id);
158 static void mpt_lan_wake_post_buckets_task(struct net_device *dev,
160 static int mpt_lan_receive_post_turbo(struct net_device *dev, u32 tmsg);
161 static int mpt_lan_receive_post_reply(struct net_device *dev,
162 LANReceivePostReply_t *pRecvRep);
163 static int mpt_lan_send_turbo(struct net_device *dev, u32 tmsg);
164 static int mpt_lan_send_reply(struct net_device *dev,
165 LANSendReply_t *pSendRep);
166 static int mpt_lan_ioc_reset(MPT_ADAPTER *ioc, int reset_phase);
167 static int mpt_lan_event_process(MPT_ADAPTER *ioc, EventNotificationReply_t *pEvReply);
168 static unsigned short mpt_lan_type_trans(struct sk_buff *skb,
169 struct net_device *dev);
171 /*=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=*/
173 * Fusion MPT LAN private data
175 static int LanCtx = -1;
177 static u32 max_buckets_out = 127;
178 static u32 tx_max_out_p = 127 - 16;
180 static struct net_device *mpt_landev[MPT_MAX_ADAPTERS+1];
182 #ifdef QLOGIC_NAA_WORKAROUND
183 static struct NAA_Hosed *mpt_bad_naa = NULL;
184 rwlock_t bad_naa_lock;
187 /*=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=*/
189 * Fusion MPT LAN external data
191 extern int mpt_lan_index;
193 /*=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=*/
195 * lan_reply - Handle all data sent from the hardware.
196 * @ioc: Pointer to MPT_ADAPTER structure
197 * @mf: Pointer to original MPT request frame (NULL if TurboReply)
198 * @reply: Pointer to MPT reply frame
200 * Returns 1 indicating original alloc'd request frame ptr
201 * should be freed, or 0 if it shouldn't.
/*
 * lan_reply - handle turbo and full replies routed to the LAN driver.
 * @ioc: adapter the reply arrived on
 * @mf: original request frame (NULL if this is a turbo reply)
 * @reply: reply frame, or a 32-bit turbo token disguised as a pointer
 *
 * Returns FreeReqFrame: 1 if the caller should free the original
 * request frame, 0 otherwise.
 *
 * NOTE(review): several original lines (braces, returns, the turbo/
 * non-turbo branch condition) are missing from this excerpt.
 */
204 lan_reply (MPT_ADAPTER *ioc, MPT_FRAME_HDR *mf, MPT_FRAME_HDR *reply)
206 struct net_device *dev = mpt_landev[ioc->id];
207 int FreeReqFrame = 0;
209 dioprintk((KERN_INFO MYNAM ": %s/%s: Got reply.\n",
210 IOC_AND_NETDEV_NAMES_s_s(dev)));
212 // dioprintk((KERN_INFO MYNAM "@lan_reply: mf = %p, reply = %p\n",
/* Turbo path: the "reply" pointer actually carries a 32-bit token. */
216 u32 tmsg = CAST_PTR_TO_U32(reply);
218 dioprintk((KERN_INFO MYNAM ": %s/%s: @lan_reply, tmsg %08x\n",
219 IOC_AND_NETDEV_NAMES_s_s(dev),
222 switch (GET_LAN_FORM(tmsg)) {
224 // NOTE! (Optimization) First case here is now caught in
225 // mptbase.c::mpt_interrupt() routine and callback here
226 // is now skipped for this case! 20001218 -sralston
228 case LAN_REPLY_FORM_MESSAGE_CONTEXT:
229 // dioprintk((KERN_INFO MYNAM "/lan_reply: "
230 // "MessageContext turbo reply received\n"));
235 case LAN_REPLY_FORM_SEND_SINGLE:
236 // dioprintk((MYNAM "/lan_reply: "
237 // "calling mpt_lan_send_reply (turbo)\n"));
239 // Potential BUG here? -sralston
240 // FreeReqFrame = mpt_lan_send_turbo(dev, tmsg);
241 // If/when mpt_lan_send_turbo would return 1 here,
242 // calling routine (mptbase.c|mpt_interrupt)
243 // would Oops because mf has already been set
244 // to NULL. So after return from this func,
245 // mpt_interrupt() will attempt to put (NULL) mf ptr
246 // item back onto its adapter FreeQ - Oops!:-(
247 // It's Ok, since mpt_lan_send_turbo() *currently*
248 // always returns 0, but..., just in case:
250 (void) mpt_lan_send_turbo(dev, tmsg);
255 case LAN_REPLY_FORM_RECEIVE_SINGLE:
256 // dioprintk((KERN_INFO MYNAM "@lan_reply: "
257 // "rcv-Turbo = %08x\n", tmsg));
258 mpt_lan_receive_post_turbo(dev, tmsg);
262 printk (KERN_ERR MYNAM "/lan_reply: Got a turbo reply "
263 "that I don't know what to do with\n");
265 /* CHECKME! Hmmm... FreeReqFrame is 0 here; is that right? */
/* Non-turbo path: dispatch on the MPI function of the full reply. */
273 // msg = (u32 *) reply;
274 // dioprintk((KERN_INFO MYNAM "@lan_reply: msg = %08x %08x %08x %08x\n",
275 // le32_to_cpu(msg[0]), le32_to_cpu(msg[1]),
276 // le32_to_cpu(msg[2]), le32_to_cpu(msg[3])));
277 // dioprintk((KERN_INFO MYNAM "@lan_reply: Function = %02xh\n",
278 // reply->u.hdr.Function));
280 switch (reply->u.hdr.Function) {
282 case MPI_FUNCTION_LAN_SEND:
284 LANSendReply_t *pSendRep;
286 pSendRep = (LANSendReply_t *) reply;
287 FreeReqFrame = mpt_lan_send_reply(dev, pSendRep);
291 case MPI_FUNCTION_LAN_RECEIVE:
293 LANReceivePostReply_t *pRecvRep;
295 pRecvRep = (LANReceivePostReply_t *) reply;
296 if (pRecvRep->NumberOfContexts) {
297 mpt_lan_receive_post_reply(dev, pRecvRep);
298 if (!(pRecvRep->MsgFlags & MPI_MSGFLAGS_CONTINUATION_REPLY))
301 dioprintk((KERN_INFO MYNAM "@lan_reply: zero context "
302 "ReceivePostReply received.\n"));
306 case MPI_FUNCTION_LAN_RESET:
307 /* Just a default reply. Might want to check it to
308 * make sure that everything went ok.
313 case MPI_FUNCTION_EVENT_NOTIFICATION:
314 case MPI_FUNCTION_EVENT_ACK:
315 /* UPDATE! 20010120 -sralston
316 * _EVENT_NOTIFICATION should NOT come down this path any more.
317 * Should be routed to mpt_lan_event_process(), but just in case...
323 printk (KERN_ERR MYNAM "/lan_reply: Got a non-turbo "
324 "reply that I don't know what to do with\n");
326 /* CHECKME! Hmmm... FreeReqFrame is 0 here; is that right? */
335 /*=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=*/
/*
 * mpt_lan_ioc_reset - IOC reset-notification callback.
 * @ioc: adapter being reset
 * @reset_phase: MPT_IOC_SETUP_RESET, MPT_IOC_PRE_RESET or (presumably)
 *               post-reset; the branch bodies are not fully visible here.
 *
 * Stops the Tx queue, forgets all outstanding receive buckets, rebuilds
 * the Rx free-context stack and reposts buckets to the IOC.
 */
337 mpt_lan_ioc_reset(MPT_ADAPTER *ioc, int reset_phase)
339 struct net_device *dev = mpt_landev[ioc->id];
340 struct mpt_lan_priv *priv = netdev_priv(dev);
342 dlprintk((KERN_INFO MYNAM ": IOC %s_reset routed to LAN driver!\n",
343 reset_phase==MPT_IOC_SETUP_RESET ? "setup" : (
344 reset_phase==MPT_IOC_PRE_RESET ? "pre" : "post")));
/* Interface never opened (no Rx context array): nothing to rebuild. */
346 if (priv->mpt_rxfidx == NULL)
349 if (reset_phase == MPT_IOC_SETUP_RESET) {
351 } else if (reset_phase == MPT_IOC_PRE_RESET) {
355 netif_stop_queue(dev);
357 dlprintk ((KERN_INFO "mptlan/ioc_reset: called netif_stop_queue for %s.\n", dev->name));
/* The IOC has dropped its buckets; none are outstanding any more. */
359 atomic_set(&priv->buckets_out, 0);
361 /* Reset Rx Free Tail index and re-populate the queue. */
362 spin_lock_irqsave(&priv->rxfidx_lock, flags);
363 priv->mpt_rxfidx_tail = -1;
364 for (i = 0; i < priv->max_buckets_out; i++)
365 priv->mpt_rxfidx[++priv->mpt_rxfidx_tail] = i;
366 spin_unlock_irqrestore(&priv->rxfidx_lock, flags);
368 mpt_lan_post_receive_buckets(dev);
369 netif_wake_queue(dev);
375 /*=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=*/
/*
 * mpt_lan_event_process - MPT event-notification callback.
 * All listed FC events are currently accepted without driver action;
 * event acknowledgement is handled centrally in mptbase.c (see NOTE).
 */
377 mpt_lan_event_process(MPT_ADAPTER *ioc, EventNotificationReply_t *pEvReply)
379 dlprintk((KERN_INFO MYNAM ": MPT event routed to LAN driver!\n"));
381 switch (le32_to_cpu(pEvReply->Event)) {
382 case MPI_EVENT_NONE: /* 00 */
383 case MPI_EVENT_LOG_DATA: /* 01 */
384 case MPI_EVENT_STATE_CHANGE: /* 02 */
385 case MPI_EVENT_UNIT_ATTENTION: /* 03 */
386 case MPI_EVENT_IOC_BUS_RESET: /* 04 */
387 case MPI_EVENT_EXT_BUS_RESET: /* 05 */
388 case MPI_EVENT_RESCAN: /* 06 */
389 /* Ok, do we need to do anything here? As far as
390 I can tell, this is when a new device gets added
392 case MPI_EVENT_LINK_STATUS_CHANGE: /* 07 */
393 case MPI_EVENT_LOOP_STATE_CHANGE: /* 08 */
394 case MPI_EVENT_LOGOUT: /* 09 */
395 case MPI_EVENT_EVENT_CHANGE: /* 0A */
401 * NOTE: pEvent->AckRequired handling now done in mptbase.c;
402 * Do NOT do it here now!
408 /*=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=*/
/*
 * mpt_lan_open - net_device open (ifconfig up) handler.
 *
 * Resets the FW LAN port, allocates the Tx/Rx free-context stacks and
 * BufferControl arrays, pre-posts receive buckets, registers for MPT
 * event notifications and starts the Tx queue. The kfree lines at the
 * bottom are the error-unwind path (the goto labels themselves are not
 * visible in this excerpt).
 */
410 mpt_lan_open(struct net_device *dev)
412 struct mpt_lan_priv *priv = netdev_priv(dev);
415 if (mpt_lan_reset(dev) != 0) {
416 MPT_ADAPTER *mpt_dev = priv->mpt_dev;
418 printk (KERN_WARNING MYNAM "/lan_open: lan_reset failed.");
421 printk ("The ioc is active. Perhaps it needs to be"
/* NOTE(review): message text below has a typo ("in" -> "is"); left
 * untouched here because it is runtime string content. */
424 printk ("The ioc in inactive, most likely in the "
425 "process of being reset. Please try again in "
429 priv->mpt_txfidx = kmalloc(priv->tx_max_out * sizeof(int), GFP_KERNEL);
430 if (priv->mpt_txfidx == NULL)
432 priv->mpt_txfidx_tail = -1;
434 priv->SendCtl = kmalloc(priv->tx_max_out * sizeof(struct BufferControl),
436 if (priv->SendCtl == NULL)
/* Fill the Tx free-context stack with every context index. */
438 for (i = 0; i < priv->tx_max_out; i++) {
439 memset(&priv->SendCtl[i], 0, sizeof(struct BufferControl));
440 priv->mpt_txfidx[++priv->mpt_txfidx_tail] = i;
443 dlprintk((KERN_INFO MYNAM "@lo: Finished initializing SendCtl\n"));
445 priv->mpt_rxfidx = kmalloc(priv->max_buckets_out * sizeof(int),
447 if (priv->mpt_rxfidx == NULL)
449 priv->mpt_rxfidx_tail = -1;
451 priv->RcvCtl = kmalloc(priv->max_buckets_out *
452 sizeof(struct BufferControl),
454 if (priv->RcvCtl == NULL)
/* Fill the Rx free-context stack with every bucket index. */
456 for (i = 0; i < priv->max_buckets_out; i++) {
457 memset(&priv->RcvCtl[i], 0, sizeof(struct BufferControl));
458 priv->mpt_rxfidx[++priv->mpt_rxfidx_tail] = i;
461 /**/ dlprintk((KERN_INFO MYNAM "/lo: txfidx contains - "));
462 /**/ for (i = 0; i < priv->tx_max_out; i++)
463 /**/ dlprintk((" %xh", priv->mpt_txfidx[i]));
464 /**/ dlprintk(("\n"));
466 dlprintk((KERN_INFO MYNAM "/lo: Finished initializing RcvCtl\n"));
468 mpt_lan_post_receive_buckets(dev);
469 printk(KERN_INFO MYNAM ": %s/%s: interface up & active\n",
470 IOC_AND_NETDEV_NAMES_s_s(dev));
472 if (mpt_event_register(LanCtx, mpt_lan_event_process) != 0) {
473 printk (KERN_WARNING MYNAM "/lo: Unable to register for Event"
474 " Notifications. This is a bad thing! We're not going "
475 "to go ahead, but I'd be leery of system stability at "
479 netif_start_queue(dev);
480 dlprintk((KERN_INFO MYNAM "/lo: Done.\n"));
/* Error-unwind path: free in reverse order of allocation. */
484 kfree(priv->mpt_rxfidx);
485 priv->mpt_rxfidx = NULL;
487 kfree(priv->SendCtl);
488 priv->SendCtl = NULL;
490 kfree(priv->mpt_txfidx);
491 priv->mpt_txfidx = NULL;
495 /*=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=*/
496 /* Send a LanReset message to the FW. This should result in the FW returning
497 any buckets it still has. */
/*
 * mpt_lan_reset - post a LanReset request to the firmware.
 * Allocates a message frame, fills in a LANResetRequest for this port
 * (priv->pnum) and posts it; the FW should then return any buckets it
 * still holds (see comment above).
 */
499 mpt_lan_reset(struct net_device *dev)
502 LANResetRequest_t *pResetReq;
503 struct mpt_lan_priv *priv = netdev_priv(dev);
505 mf = mpt_get_msg_frame(LanCtx, priv->mpt_dev->id);
/* Frame allocation failure path (body not visible in this excerpt). */
508 /* dlprintk((KERN_ERR MYNAM "/reset: Evil funkiness abounds! "
509 "Unable to allocate a request frame.\n"));
514 pResetReq = (LANResetRequest_t *) mf;
516 pResetReq->Function = MPI_FUNCTION_LAN_RESET;
517 pResetReq->ChainOffset = 0;
518 pResetReq->Reserved = 0;
519 pResetReq->PortNumber = priv->pnum;
520 pResetReq->MsgFlags = 0;
521 pResetReq->Reserved2 = 0;
523 mpt_put_msg_frame(LanCtx, priv->mpt_dev->id, mf);
528 /*=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=*/
/*
 * mpt_lan_close - net_device stop (ifconfig down) handler.
 *
 * Deregisters event notifications, stops the queue, waits (bounded by
 * "timeout") for outstanding buckets to drain, then unmaps and frees
 * every remaining Rx bucket and in-flight Tx skb along with the
 * BufferControl arrays and free-context stacks.
 */
530 mpt_lan_close(struct net_device *dev)
532 struct mpt_lan_priv *priv = netdev_priv(dev);
533 MPT_ADAPTER *mpt_dev = priv->mpt_dev;
534 unsigned int timeout;
537 dlprintk((KERN_INFO MYNAM ": mpt_lan_close called\n"));
539 mpt_event_deregister(LanCtx);
541 dlprintk((KERN_INFO MYNAM ":lan_close: Posted %d buckets "
542 "since driver was loaded, %d still out\n",
543 priv->total_posted,atomic_read(&priv->buckets_out)));
545 netif_stop_queue(dev);
/* Bounded sleep-wait for the IOC to hand back outstanding buckets. */
550 while (atomic_read(&priv->buckets_out) && --timeout) {
551 set_current_state(TASK_INTERRUPTIBLE);
/* Reclaim any buckets the IOC never returned. */
555 for (i = 0; i < priv->max_buckets_out; i++) {
556 if (priv->RcvCtl[i].skb != NULL) {
557 /**/ dlprintk((KERN_INFO MYNAM "/lan_close: bucket %05x "
558 /**/ "is still out\n", i));
559 pci_unmap_single(mpt_dev->pcidev, priv->RcvCtl[i].dma,
562 dev_kfree_skb(priv->RcvCtl[i].skb);
566 kfree (priv->RcvCtl);
567 kfree (priv->mpt_rxfidx);
/* Release any transmits still mapped for DMA. */
569 for (i = 0; i < priv->tx_max_out; i++) {
570 if (priv->SendCtl[i].skb != NULL) {
571 pci_unmap_single(mpt_dev->pcidev, priv->SendCtl[i].dma,
572 priv->SendCtl[i].len,
574 dev_kfree_skb(priv->SendCtl[i].skb);
578 kfree(priv->SendCtl);
579 kfree(priv->mpt_txfidx);
581 atomic_set(&priv->buckets_out, 0);
583 printk(KERN_INFO MYNAM ": %s/%s: interface down & inactive\n",
584 IOC_AND_NETDEV_NAMES_s_s(dev));
589 /*=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=*/
/* Return the per-device statistics block kept in the private data. */
590 static struct net_device_stats *
591 mpt_lan_get_stats(struct net_device *dev)
593 struct mpt_lan_priv *priv = netdev_priv(dev);
595 return (struct net_device_stats *) &priv->stats;
598 /*=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=*/
/*
 * mpt_lan_change_mtu - validate a requested MTU change.
 * Rejects values outside [MPT_LAN_MIN_MTU, MPT_LAN_MAX_MTU]; the
 * accept path is not visible in this excerpt.
 */
600 mpt_lan_change_mtu(struct net_device *dev, int new_mtu)
602 if ((new_mtu < MPT_LAN_MIN_MTU) || (new_mtu > MPT_LAN_MAX_MTU))
608 /*=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=*/
609 /* Tx timeout handler. */
/*
 * mpt_lan_tx_timeout - Tx watchdog handler.
 * If the IOC is still active, simply rekick the transmit queue.
 */
611 mpt_lan_tx_timeout(struct net_device *dev)
613 struct mpt_lan_priv *priv = netdev_priv(dev);
614 MPT_ADAPTER *mpt_dev = priv->mpt_dev;
616 if (mpt_dev->active) {
617 dlprintk (("mptlan/tx_timeout: calling netif_wake_queue for %s.\n", dev->name));
618 netif_wake_queue(dev);
622 /*=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=*/
/*
 * mpt_lan_send_turbo - complete one transmit signalled by a turbo reply.
 * Extracts the Tx context from @tmsg, updates tx stats, unmaps the DMA
 * buffer, frees the skb, pushes the context back on the free stack and
 * wakes the queue. Per the comment in lan_reply(), this currently
 * always returns 0.
 */
625 mpt_lan_send_turbo(struct net_device *dev, u32 tmsg)
627 struct mpt_lan_priv *priv = netdev_priv(dev);
628 MPT_ADAPTER *mpt_dev = priv->mpt_dev;
629 struct sk_buff *sent;
633 ctx = GET_LAN_BUFFER_CONTEXT(tmsg);
634 sent = priv->SendCtl[ctx].skb;
636 priv->stats.tx_packets++;
637 priv->stats.tx_bytes += sent->len;
639 dioprintk((KERN_INFO MYNAM ": %s/%s: @%s, skb %p sent.\n",
640 IOC_AND_NETDEV_NAMES_s_s(dev),
641 __FUNCTION__, sent));
643 priv->SendCtl[ctx].skb = NULL;
644 pci_unmap_single(mpt_dev->pcidev, priv->SendCtl[ctx].dma,
645 priv->SendCtl[ctx].len, PCI_DMA_TODEVICE);
646 dev_kfree_skb_irq(sent);
/* Return the Tx context to the free stack under the lock. */
648 spin_lock_irqsave(&priv->txfidx_lock, flags);
649 priv->mpt_txfidx[++priv->mpt_txfidx_tail] = ctx;
650 spin_unlock_irqrestore(&priv->txfidx_lock, flags);
652 netif_wake_queue(dev);
656 /*=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=*/
/*
 * mpt_lan_send_reply - complete transmits from a full LANSend reply.
 * @pSendRep: reply carrying IOCStatus and an array of buffer contexts.
 *
 * Classifies the IOCStatus (success vs. the several error statuses),
 * then for each returned context unmaps/frees the skb and recycles the
 * Tx context. Returns FreeReqFrame (1 unless this is a continuation
 * reply, judging by the MsgFlags test below).
 */
658 mpt_lan_send_reply(struct net_device *dev, LANSendReply_t *pSendRep)
660 struct mpt_lan_priv *priv = netdev_priv(dev);
661 MPT_ADAPTER *mpt_dev = priv->mpt_dev;
662 struct sk_buff *sent;
664 int FreeReqFrame = 0;
669 count = pSendRep->NumberOfContexts;
671 dioprintk((KERN_INFO MYNAM ": send_reply: IOCStatus: %04x\n",
672 le16_to_cpu(pSendRep->IOCStatus)));
674 /* Add check for Loginfo Flag in IOCStatus */
676 switch (le16_to_cpu(pSendRep->IOCStatus) & MPI_IOCSTATUS_MASK) {
677 case MPI_IOCSTATUS_SUCCESS:
678 priv->stats.tx_packets += count;
681 case MPI_IOCSTATUS_LAN_CANCELED:
682 case MPI_IOCSTATUS_LAN_TRANSMIT_ABORTED:
685 case MPI_IOCSTATUS_INVALID_SGL:
686 priv->stats.tx_errors += count;
687 printk (KERN_ERR MYNAM ": %s/%s: ERROR - Invalid SGL sent to IOC!\n",
688 IOC_AND_NETDEV_NAMES_s_s(dev));
/* Default case: any other IOCStatus also counts as a tx error. */
692 priv->stats.tx_errors += count;
696 pContext = &pSendRep->BufferContext;
/* Reclaim every completed context under the Tx free-stack lock. */
698 spin_lock_irqsave(&priv->txfidx_lock, flags);
700 ctx = GET_LAN_BUFFER_CONTEXT(le32_to_cpu(*pContext));
702 sent = priv->SendCtl[ctx].skb;
703 priv->stats.tx_bytes += sent->len;
705 dioprintk((KERN_INFO MYNAM ": %s/%s: @%s, skb %p sent.\n",
706 IOC_AND_NETDEV_NAMES_s_s(dev),
707 __FUNCTION__, sent));
709 priv->SendCtl[ctx].skb = NULL;
710 pci_unmap_single(mpt_dev->pcidev, priv->SendCtl[ctx].dma,
711 priv->SendCtl[ctx].len, PCI_DMA_TODEVICE);
712 dev_kfree_skb_irq(sent);
714 priv->mpt_txfidx[++priv->mpt_txfidx_tail] = ctx;
719 spin_unlock_irqrestore(&priv->txfidx_lock, flags);
722 if (!(pSendRep->MsgFlags & MPI_MSGFLAGS_CONTINUATION_REPLY))
725 netif_wake_queue(dev);
729 /*=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=*/
/*
 * mpt_lan_sdu_send - hard_start_xmit: transmit one skb.
 *
 * Pops a free Tx context and allocates a message frame (stopping the
 * queue if either is unavailable), DMA-maps the skb, then builds a
 * LANSendRequest consisting of a transaction context element (32-bit
 * context, 8 bytes of NAA + destination MAC details) followed by a
 * single 64-bit simple SGE, and posts it to the IOC.
 */
731 mpt_lan_sdu_send (struct sk_buff *skb, struct net_device *dev)
733 struct mpt_lan_priv *priv = netdev_priv(dev);
734 MPT_ADAPTER *mpt_dev = priv->mpt_dev;
736 LANSendRequest_t *pSendReq;
737 SGETransaction32_t *pTrans;
738 SGESimple64_t *pSimple;
742 u16 cur_naa = 0x1000;
744 dioprintk((KERN_INFO MYNAM ": %s called, skb_addr = %p\n",
/* Out of Tx contexts: back-pressure the stack and bail out. */
747 spin_lock_irqsave(&priv->txfidx_lock, flags);
748 if (priv->mpt_txfidx_tail < 0) {
749 netif_stop_queue(dev);
750 spin_unlock_irqrestore(&priv->txfidx_lock, flags);
752 printk (KERN_ERR "%s: no tx context available: %u\n",
753 __FUNCTION__, priv->mpt_txfidx_tail);
757 mf = mpt_get_msg_frame(LanCtx, mpt_dev->id);
759 netif_stop_queue(dev);
760 spin_unlock_irqrestore(&priv->txfidx_lock, flags);
762 printk (KERN_ERR "%s: Unable to alloc request frame\n",
767 ctx = priv->mpt_txfidx[priv->mpt_txfidx_tail--];
768 spin_unlock_irqrestore(&priv->txfidx_lock, flags);
770 // dioprintk((KERN_INFO MYNAM ": %s/%s: Creating new msg frame (send).\n",
771 // IOC_AND_NETDEV_NAMES_s_s(dev)));
773 pSendReq = (LANSendRequest_t *) mf;
775 /* Set the mac.raw pointer, since this apparently isn't getting
776 * done before we get the skb. Pull the data pointer past the mac data.
778 skb->mac.raw = skb->data;
781 dma = pci_map_single(mpt_dev->pcidev, skb->data, skb->len,
/* Record the mapping so the completion path can unmap and free it. */
784 priv->SendCtl[ctx].skb = skb;
785 priv->SendCtl[ctx].dma = dma;
786 priv->SendCtl[ctx].len = skb->len;
789 pSendReq->Reserved = 0;
790 pSendReq->Function = MPI_FUNCTION_LAN_SEND;
791 pSendReq->ChainOffset = 0;
792 pSendReq->Reserved2 = 0;
793 pSendReq->MsgFlags = 0;
794 pSendReq->PortNumber = priv->pnum;
796 /* Transaction Context Element */
797 pTrans = (SGETransaction32_t *) pSendReq->SG_List;
799 /* No Flags, 8 bytes of Details, 32bit Context (bloody turbo replies) */
800 pTrans->ContextSize = sizeof(u32);
801 pTrans->DetailsLength = 2 * sizeof(u32);
803 pTrans->TransactionContext[0] = cpu_to_le32(ctx);
805 // dioprintk((KERN_INFO MYNAM ": %s/%s: BC = %08x, skb = %p, buff = %p\n",
806 // IOC_AND_NETDEV_NAMES_s_s(dev),
807 // ctx, skb, skb->data));
809 #ifdef QLOGIC_NAA_WORKAROUND
811 struct NAA_Hosed *nh;
813 /* Munge the NAA for Tx packets to QLogic boards, which don't follow
814 RFC 2625. The longer I look at this, the more my opinion of Qlogic
816 read_lock_irq(&bad_naa_lock);
817 for (nh = mpt_bad_naa; nh != NULL; nh=nh->next) {
818 if ((nh->ieee[0] == skb->mac.raw[0]) &&
819 (nh->ieee[1] == skb->mac.raw[1]) &&
820 (nh->ieee[2] == skb->mac.raw[2]) &&
821 (nh->ieee[3] == skb->mac.raw[3]) &&
822 (nh->ieee[4] == skb->mac.raw[4]) &&
823 (nh->ieee[5] == skb->mac.raw[5])) {
825 dlprintk ((KERN_INFO "mptlan/sdu_send: using NAA value "
826 "= %04x.\n", cur_naa));
830 read_unlock_irq(&bad_naa_lock);
/* Pack NAA + 6-byte destination MAC into the two detail words. */
834 pTrans->TransactionDetails[0] = cpu_to_le32((cur_naa << 16) |
835 (skb->mac.raw[0] << 8) |
836 (skb->mac.raw[1] << 0));
837 pTrans->TransactionDetails[1] = cpu_to_le32((skb->mac.raw[2] << 24) |
838 (skb->mac.raw[3] << 16) |
839 (skb->mac.raw[4] << 8) |
840 (skb->mac.raw[5] << 0));
842 pSimple = (SGESimple64_t *) &pTrans->TransactionDetails[2];
844 /* If we ever decide to send more than one Simple SGE per LANSend, then
845 we will need to make sure that LAST_ELEMENT only gets set on the
846 last one. Otherwise, bad voodoo and evil funkiness will commence. */
847 pSimple->FlagsLength = cpu_to_le32(
848 ((MPI_SGE_FLAGS_LAST_ELEMENT |
849 MPI_SGE_FLAGS_END_OF_BUFFER |
850 MPI_SGE_FLAGS_SIMPLE_ELEMENT |
851 MPI_SGE_FLAGS_SYSTEM_ADDRESS |
852 MPI_SGE_FLAGS_HOST_TO_IOC |
853 MPI_SGE_FLAGS_64_BIT_ADDRESSING |
854 MPI_SGE_FLAGS_END_OF_LIST) << MPI_SGE_FLAGS_SHIFT) |
/* Split the DMA address into low/high halves for the 64-bit SGE. */
856 pSimple->Address.Low = cpu_to_le32((u32) dma);
857 if (sizeof(dma_addr_t) > sizeof(u32))
858 pSimple->Address.High = cpu_to_le32((u32) ((u64) dma >> 32));
860 pSimple->Address.High = 0;
862 mpt_put_msg_frame (LanCtx, mpt_dev->id, mf);
863 dev->trans_start = jiffies;
865 dioprintk((KERN_INFO MYNAM ": %s/%s: Sending packet. FlagsLength = %08x.\n",
866 IOC_AND_NETDEV_NAMES_s_s(dev),
867 le32_to_cpu(pSimple->FlagsLength)));
872 /*=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=*/
/*
 * mpt_lan_wake_post_buckets_task - schedule bucket reposting.
 * @priority: 0 = put it on the timer (delayed) queue,
 *            1 = put it on the immediate queue.
 *
 * Bit 0 of priv->post_buckets_active is test-and-set first so the task
 * is never queued twice. Both 2.4 (task queues) and >= 2.5.41
 * (workqueue) mechanisms are supported via #if.
 */
874 mpt_lan_wake_post_buckets_task(struct net_device *dev, int priority)
876 * @priority: 0 = put it on the timer queue, 1 = put it on the immediate queue
879 struct mpt_lan_priv *priv = dev->priv;
881 if (test_and_set_bit(0, &priv->post_buckets_active) == 0) {
883 #if LINUX_VERSION_CODE >= KERNEL_VERSION(2,5,41)
884 schedule_work(&priv->post_buckets_task);
886 queue_task(&priv->post_buckets_task, &tq_immediate);
887 mark_bh(IMMEDIATE_BH);
890 #if LINUX_VERSION_CODE >= KERNEL_VERSION(2,5,41)
891 schedule_delayed_work(&priv->post_buckets_task, 1);
893 queue_task(&priv->post_buckets_task, &tq_timer);
895 dioprintk((KERN_INFO MYNAM ": post_buckets queued on "
898 dioprintk((KERN_INFO MYNAM ": %s/%s: Queued post_buckets task.\n",
899 IOC_AND_NETDEV_NAMES_s_s(dev) ));
903 /*=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=*/
/*
 * mpt_lan_receive_skb - deliver a completed receive skb to the stack.
 * Sets the protocol via mpt_lan_type_trans(), updates rx statistics,
 * and kicks the bucket-reposting task when the number of buckets still
 * out at the IOC drops below the threshold.
 */
905 mpt_lan_receive_skb(struct net_device *dev, struct sk_buff *skb)
907 struct mpt_lan_priv *priv = dev->priv;
909 skb->protocol = mpt_lan_type_trans(skb, dev);
911 dioprintk((KERN_INFO MYNAM ": %s/%s: Incoming packet (%d bytes) "
912 "delivered to upper level.\n",
913 IOC_AND_NETDEV_NAMES_s_s(dev), skb->len));
915 priv->stats.rx_bytes += skb->len;
916 priv->stats.rx_packets++;
921 dioprintk((MYNAM "/receive_skb: %d buckets remaining\n",
922 atomic_read(&priv->buckets_out)));
/* Top up the IOC's bucket supply if we are running low. */
924 if (atomic_read(&priv->buckets_out) < priv->bucketthresh)
925 mpt_lan_wake_post_buckets_task(dev, 1);
927 dioprintk((KERN_INFO MYNAM "/receive_post_reply: %d buckets "
928 "remaining, %d received back since sod\n",
929 atomic_read(&priv->buckets_out), priv->total_received));
934 /*=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=*/
/*
 * mpt_lan_receive_post_turbo - handle one received packet signalled by
 * a turbo reply.
 *
 * Extracts bucket context and packet length from @tmsg. For short
 * packets (< MPT_LAN_RX_COPYBREAK) the data is copied into a freshly
 * allocated skb so the original bucket can stay mapped; otherwise the
 * bucket's skb itself is unmapped and handed up. The context is then
 * returned to the Rx free stack and the skb passed to
 * mpt_lan_receive_skb().
 */
937 mpt_lan_receive_post_turbo(struct net_device *dev, u32 tmsg)
939 struct mpt_lan_priv *priv = dev->priv;
940 MPT_ADAPTER *mpt_dev = priv->mpt_dev;
941 struct sk_buff *skb, *old_skb;
945 ctx = GET_LAN_BUCKET_CONTEXT(tmsg);
946 skb = priv->RcvCtl[ctx].skb;
948 len = GET_LAN_PACKET_LENGTH(tmsg);
950 if (len < MPT_LAN_RX_COPYBREAK) {
953 skb = (struct sk_buff *)dev_alloc_skb(len);
955 printk (KERN_ERR MYNAM ": %s/%s: ERROR - Can't allocate skb! (%s@%d)\n",
956 IOC_AND_NETDEV_NAMES_s_s(dev),
/* Sync the bucket for CPU access, copy, then hand it back to HW. */
961 pci_dma_sync_single_for_cpu(mpt_dev->pcidev, priv->RcvCtl[ctx].dma,
962 priv->RcvCtl[ctx].len, PCI_DMA_FROMDEVICE);
964 memcpy(skb_put(skb, len), old_skb->data, len);
966 pci_dma_sync_single_for_device(mpt_dev->pcidev, priv->RcvCtl[ctx].dma,
967 priv->RcvCtl[ctx].len, PCI_DMA_FROMDEVICE);
/* Large packet: surrender the bucket's skb itself to the stack. */
973 priv->RcvCtl[ctx].skb = NULL;
975 pci_unmap_single(mpt_dev->pcidev, priv->RcvCtl[ctx].dma,
976 priv->RcvCtl[ctx].len, PCI_DMA_FROMDEVICE);
979 spin_lock_irqsave(&priv->rxfidx_lock, flags);
980 priv->mpt_rxfidx[++priv->mpt_rxfidx_tail] = ctx;
981 spin_unlock_irqrestore(&priv->rxfidx_lock, flags);
983 atomic_dec(&priv->buckets_out);
984 priv->total_received++;
986 return mpt_lan_receive_skb(dev, skb);
989 /*=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=*/
/*
 * mpt_lan_receive_post_free - reclaim buckets the IOC returned unused
 * (e.g. after a LanReset cancels the posted receives).
 *
 * For each context in the reply: unmap its DMA buffer, free the skb,
 * and push the context back on the Rx free stack; then subtract the
 * whole batch from buckets_out.
 */
991 mpt_lan_receive_post_free(struct net_device *dev,
992 LANReceivePostReply_t *pRecvRep)
994 struct mpt_lan_priv *priv = dev->priv;
995 MPT_ADAPTER *mpt_dev = priv->mpt_dev;
1002 count = pRecvRep->NumberOfContexts;
1004 /**/ dlprintk((KERN_INFO MYNAM "/receive_post_reply: "
1005 "IOC returned %d buckets, freeing them...\n", count));
1007 spin_lock_irqsave(&priv->rxfidx_lock, flags);
1008 for (i = 0; i < count; i++) {
1009 ctx = le32_to_cpu(pRecvRep->BucketContext[i]);
1011 skb = priv->RcvCtl[ctx].skb;
1013 // dlprintk((KERN_INFO MYNAM ": %s: dev_name = %s\n",
1014 // IOC_AND_NETDEV_NAMES_s_s(dev)));
1015 // dlprintk((KERN_INFO MYNAM "@rpr[2], priv = %p, buckets_out addr = %p",
1016 // priv, &(priv->buckets_out)));
1017 // dlprintk((KERN_INFO MYNAM "@rpr[2] TC + 3\n"));
1019 priv->RcvCtl[ctx].skb = NULL;
1020 pci_unmap_single(mpt_dev->pcidev, priv->RcvCtl[ctx].dma,
1021 priv->RcvCtl[ctx].len, PCI_DMA_FROMDEVICE);
1022 dev_kfree_skb_any(skb);
1024 priv->mpt_rxfidx[++priv->mpt_rxfidx_tail] = ctx;
1026 spin_unlock_irqrestore(&priv->rxfidx_lock, flags);
1028 atomic_sub(count, &priv->buckets_out);
1030 // for (i = 0; i < priv->max_buckets_out; i++)
1031 // if (priv->RcvCtl[i].skb != NULL)
1032 // dlprintk((KERN_INFO MYNAM "@rpr: bucket %03x "
1033 // "is still out\n", i));
1035 /* dlprintk((KERN_INFO MYNAM "/receive_post_reply: freed %d buckets\n",
1038 /**/ dlprintk((KERN_INFO MYNAM "@receive_post_reply: %d buckets "
1039 /**/ "remaining, %d received back since sod.\n",
1040 /**/ atomic_read(&priv->buckets_out), priv->total_received));
1044 /*=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=*/
/*
 * mpt_lan_receive_post_reply - handle a full (non-turbo) LAN receive
 * reply, which may describe a packet spread over multiple buckets.
 *
 * Three delivery paths:
 *   - multi-bucket packet: allocate a new skb and concatenate each
 *     bucket's data into it (buckets stay mapped, contexts recycled);
 *   - short single-bucket packet (< MPT_LAN_RX_COPYBREAK): copy into a
 *     new skb, recycle the bucket;
 *   - otherwise: unmap and hand up the bucket's skb directly.
 * Afterwards, sanity-check the free-stack depth and the FW's
 * BucketsRemaining bookkeeping, optionally issue a LanReset on a large
 * mismatch, request bucket reposting, and deliver the skb.
 *
 * NOTE(review): cancel handling at the top routes the reply to
 * mpt_lan_receive_post_free() when IOCStatus says LAN_CANCELED.
 */
1046 mpt_lan_receive_post_reply(struct net_device *dev,
1047 LANReceivePostReply_t *pRecvRep)
1049 struct mpt_lan_priv *priv = dev->priv;
1050 MPT_ADAPTER *mpt_dev = priv->mpt_dev;
1051 struct sk_buff *skb, *old_skb;
1052 unsigned long flags;
1053 u32 len, ctx, offset;
1054 u32 remaining = le32_to_cpu(pRecvRep->BucketsRemaining);
1058 dioprintk((KERN_INFO MYNAM ": mpt_lan_receive_post_reply called\n"));
1059 dioprintk((KERN_INFO MYNAM ": receive_post_reply: IOCStatus: %04x\n",
1060 le16_to_cpu(pRecvRep->IOCStatus)));
1062 if ((le16_to_cpu(pRecvRep->IOCStatus) & MPI_IOCSTATUS_MASK) ==
1063 MPI_IOCSTATUS_LAN_CANCELED)
1064 return mpt_lan_receive_post_free(dev, pRecvRep);
1066 len = le32_to_cpu(pRecvRep->PacketLength);
/* A zero-length non-turbo receive should not happen; log and bail. */
1068 printk (KERN_ERR MYNAM ": %s/%s: ERROR - Got a non-TURBO "
1069 "ReceivePostReply w/ PacketLength zero!\n",
1070 IOC_AND_NETDEV_NAMES_s_s(dev));
1071 printk (KERN_ERR MYNAM ": MsgFlags = %02x, IOCStatus = %04x\n",
1072 pRecvRep->MsgFlags, le16_to_cpu(pRecvRep->IOCStatus));
1076 ctx = le32_to_cpu(pRecvRep->BucketContext[0]);
1077 count = pRecvRep->NumberOfContexts;
1078 skb = priv->RcvCtl[ctx].skb;
1080 offset = le32_to_cpu(pRecvRep->PacketOffset);
1081 // if (offset != 0) {
1082 // printk (KERN_INFO MYNAM ": %s/%s: Got a ReceivePostReply "
1083 // "w/ PacketOffset %u\n",
1084 // IOC_AND_NETDEV_NAMES_s_s(dev),
1088 dioprintk((KERN_INFO MYNAM ": %s/%s: @rpr, offset = %d, len = %d\n",
1089 IOC_AND_NETDEV_NAMES_s_s(dev),
/* Multi-bucket case: gather all fragments into one fresh skb. */
1095 // dioprintk((KERN_INFO MYNAM ": %s/%s: Multiple buckets returned "
1096 // "for single packet, concatenating...\n",
1097 // IOC_AND_NETDEV_NAMES_s_s(dev)));
1099 skb = (struct sk_buff *)dev_alloc_skb(len);
1101 printk (KERN_ERR MYNAM ": %s/%s: ERROR - Can't allocate skb! (%s@%d)\n",
1102 IOC_AND_NETDEV_NAMES_s_s(dev),
1103 __FILE__, __LINE__);
1107 spin_lock_irqsave(&priv->rxfidx_lock, flags);
1108 for (i = 0; i < count; i++) {
1110 ctx = le32_to_cpu(pRecvRep->BucketContext[i]);
1111 old_skb = priv->RcvCtl[ctx].skb;
1113 l = priv->RcvCtl[ctx].len;
1117 // dioprintk((KERN_INFO MYNAM ": %s/%s: Buckets = %d, len = %u\n",
1118 // IOC_AND_NETDEV_NAMES_s_s(dev),
1121 pci_dma_sync_single_for_cpu(mpt_dev->pcidev,
1122 priv->RcvCtl[ctx].dma,
1123 priv->RcvCtl[ctx].len,
1124 PCI_DMA_FROMDEVICE);
1125 memcpy(skb_put(skb, l), old_skb->data, l);
1127 pci_dma_sync_single_for_device(mpt_dev->pcidev,
1128 priv->RcvCtl[ctx].dma,
1129 priv->RcvCtl[ctx].len,
1130 PCI_DMA_FROMDEVICE);
1132 priv->mpt_rxfidx[++priv->mpt_rxfidx_tail] = ctx;
1135 spin_unlock_irqrestore(&priv->rxfidx_lock, flags);
/* Short single-bucket packet: copy out, keep the bucket mapped. */
1137 } else if (len < MPT_LAN_RX_COPYBREAK) {
1141 skb = (struct sk_buff *)dev_alloc_skb(len);
1143 printk (KERN_ERR MYNAM ": %s/%s: ERROR - Can't allocate skb! (%s@%d)\n",
1144 IOC_AND_NETDEV_NAMES_s_s(dev),
1145 __FILE__, __LINE__);
1149 pci_dma_sync_single_for_cpu(mpt_dev->pcidev,
1150 priv->RcvCtl[ctx].dma,
1151 priv->RcvCtl[ctx].len,
1152 PCI_DMA_FROMDEVICE);
1154 memcpy(skb_put(skb, len), old_skb->data, len);
1156 pci_dma_sync_single_for_device(mpt_dev->pcidev,
1157 priv->RcvCtl[ctx].dma,
1158 priv->RcvCtl[ctx].len,
1159 PCI_DMA_FROMDEVICE);
1161 spin_lock_irqsave(&priv->rxfidx_lock, flags);
1162 priv->mpt_rxfidx[++priv->mpt_rxfidx_tail] = ctx;
1163 spin_unlock_irqrestore(&priv->rxfidx_lock, flags);
/* Large single-bucket packet: hand the bucket's skb up directly. */
1166 spin_lock_irqsave(&priv->rxfidx_lock, flags);
1168 priv->RcvCtl[ctx].skb = NULL;
1170 pci_unmap_single(mpt_dev->pcidev, priv->RcvCtl[ctx].dma,
1171 priv->RcvCtl[ctx].len, PCI_DMA_FROMDEVICE);
1172 priv->RcvCtl[ctx].dma = 0;
1174 priv->mpt_rxfidx[++priv->mpt_rxfidx_tail] = ctx;
1175 spin_unlock_irqrestore(&priv->rxfidx_lock, flags);
1180 atomic_sub(count, &priv->buckets_out);
1181 priv->total_received += count;
/* Sanity check: the free stack must never exceed its capacity. */
1183 if (priv->mpt_rxfidx_tail >= MPT_LAN_MAX_BUCKETS_OUT) {
1184 printk (KERN_ERR MYNAM ": %s/%s: Yoohoo! mpt_rxfidx_tail = %d, "
1185 "MPT_LAN_MAX_BUCKETS_OUT = %d\n",
1186 IOC_AND_NETDEV_NAMES_s_s(dev),
1187 priv->mpt_rxfidx_tail,
1188 MPT_LAN_MAX_BUCKETS_OUT);
1190 panic("Damn it Jim! I'm a doctor, not a programmer! "
1191 "Oh, wait a sec, I am a programmer. "
1192 "And, who's Jim?!?!\n"
1193 "Arrgghh! We've done it again!\n");
1197 printk (KERN_WARNING MYNAM ": %s/%s: WARNING - IOC out of buckets! "
1198 "(priv->buckets_out = %d)\n",
1199 IOC_AND_NETDEV_NAMES_s_s(dev),
1200 atomic_read(&priv->buckets_out));
1201 else if (remaining < 10)
1202 printk (KERN_INFO MYNAM ": %s/%s: IOC says %d buckets left. "
1203 "(priv->buckets_out = %d)\n",
1204 IOC_AND_NETDEV_NAMES_s_s(dev),
1205 remaining, atomic_read(&priv->buckets_out));
/* Large driver/FW bookkeeping mismatch: reset the FW's hashtable. */
1207 if ((remaining < priv->bucketthresh) &&
1208 ((atomic_read(&priv->buckets_out) - remaining) >
1209 MPT_LAN_BUCKETS_REMAIN_MISMATCH_THRESH)) {
1211 printk (KERN_WARNING MYNAM " Mismatch between driver's "
1212 "buckets_out count and fw's BucketsRemaining "
1213 "count has crossed the threshold, issuing a "
1214 "LanReset to clear the fw's hashtable. You may "
1215 "want to check your /var/log/messages for \"CRC "
1216 "error\" event notifications.\n");
1219 mpt_lan_wake_post_buckets_task(dev, 0);
1222 return mpt_lan_receive_skb(dev, skb);
1225 /*=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=*/
1226 /* Simple SGE's only at the moment */
/*
 * mpt_lan_post_receive_buckets - replenish the IOC's pool of receive
 * buffers ("buckets") by building a LANReceivePost request of simple
 * 64-bit SGEs, one per bucket, and posting it to the firmware.
 *
 * @dev_id: opaque work-callback argument; actually the struct net_device.
 *
 * NOTE(review): this listing is non-contiguous (return type, several
 * declarations such as 'mf'/'i'/'ctx', NULL checks, and closing braces
 * are elided); comments below describe only the visible statements.
 */
1229 mpt_lan_post_receive_buckets(void *dev_id)
1231 struct net_device *dev = dev_id;
1232 struct mpt_lan_priv *priv = dev->priv;
1233 MPT_ADAPTER *mpt_dev = priv->mpt_dev;
1235 LANReceivePostRequest_t *pRecvReq;
1236 SGETransaction32_t *pTrans;
1237 SGESimple64_t *pSimple;
1238 struct sk_buff *skb;
1240 u32 curr, buckets, count, max;
/* Bucket length = MTU + link header + 4 (presumably FC trailer/CRC room
 * — TODO confirm against MPT_LAN_MTU definition). */
1241 u32 len = (dev->mtu + dev->hard_header_len + 4);
1242 unsigned long flags;
/* How many buckets are needed to reach the configured ceiling. */
1245 curr = atomic_read(&priv->buckets_out);
1246 buckets = (priv->max_buckets_out - curr);
1248 dioprintk((KERN_INFO MYNAM ": %s/%s: @%s, Start_buckets = %u, buckets_out = %u\n",
1249 IOC_AND_NETDEV_NAMES_s_s(dev),
1250 __FUNCTION__, buckets, curr));
/* Max SGEs per request frame, bounded by the adapter's request size. */
1252 max = (mpt_dev->req_sz - MPT_LAN_RECEIVE_POST_REQUEST_SIZE) /
1253 (MPT_LAN_TRANSACTION32_SIZE + sizeof(SGESimple64_t));
1256 mf = mpt_get_msg_frame(LanCtx, mpt_dev->id);
1258 printk (KERN_ERR "%s: Unable to alloc request frame\n",
1260 dioprintk((KERN_ERR "%s: %u buckets remaining\n",
1261 __FUNCTION__, buckets));
/* Build the LANReceivePost request header in the message frame. */
1264 pRecvReq = (LANReceivePostRequest_t *) mf;
1270 pRecvReq->Function = MPI_FUNCTION_LAN_RECEIVE;
1271 pRecvReq->ChainOffset = 0;
1272 pRecvReq->MsgFlags = 0;
1273 pRecvReq->PortNumber = priv->pnum;
1275 pTrans = (SGETransaction32_t *) pRecvReq->SG_List;
/* One transaction + simple SGE per bucket, up to 'count' this round. */
1278 for (i = 0; i < count; i++) {
/* Pop a free bucket index; an empty stack aborts this iteration. */
1281 spin_lock_irqsave(&priv->rxfidx_lock, flags);
1282 if (priv->mpt_rxfidx_tail < 0) {
1283 printk (KERN_ERR "%s: Can't alloc context\n",
1285 spin_unlock_irqrestore(&priv->rxfidx_lock,
1290 ctx = priv->mpt_rxfidx[priv->mpt_rxfidx_tail--];
/* A cached skb of the wrong length (e.g. after MTU change) is unmapped
 * and freed so a correctly sized one can be allocated below. */
1292 skb = priv->RcvCtl[ctx].skb;
1293 if (skb && (priv->RcvCtl[ctx].len != len)) {
1294 pci_unmap_single(mpt_dev->pcidev,
1295 priv->RcvCtl[ctx].dma,
1296 priv->RcvCtl[ctx].len,
1297 PCI_DMA_FROMDEVICE);
1298 dev_kfree_skb(priv->RcvCtl[ctx].skb);
1299 skb = priv->RcvCtl[ctx].skb = NULL;
/* Allocate and DMA-map a fresh bucket skb; on allocation failure the
 * index is pushed back on the free stack and the loop is abandoned. */
1303 skb = dev_alloc_skb(len);
1305 printk (KERN_WARNING
1306 MYNAM "/%s: Can't alloc skb\n",
1308 priv->mpt_rxfidx[++priv->mpt_rxfidx_tail] = ctx;
1309 spin_unlock_irqrestore(&priv->rxfidx_lock, flags);
1313 dma = pci_map_single(mpt_dev->pcidev, skb->data,
1314 len, PCI_DMA_FROMDEVICE);
1316 priv->RcvCtl[ctx].skb = skb;
1317 priv->RcvCtl[ctx].dma = dma;
1318 priv->RcvCtl[ctx].len = len;
1321 spin_unlock_irqrestore(&priv->rxfidx_lock, flags);
/* Fill in the transaction context (the bucket index, little-endian). */
1323 pTrans->ContextSize = sizeof(u32);
1324 pTrans->DetailsLength = 0;
1326 pTrans->TransactionContext[0] = cpu_to_le32(ctx);
1328 pSimple = (SGESimple64_t *) pTrans->TransactionDetails;
/* Simple 64-bit SGE: end-of-buffer, length, and split DMA address. */
1330 pSimple->FlagsLength = cpu_to_le32(
1331 ((MPI_SGE_FLAGS_END_OF_BUFFER |
1332 MPI_SGE_FLAGS_SIMPLE_ELEMENT |
1333 MPI_SGE_FLAGS_64_BIT_ADDRESSING) << MPI_SGE_FLAGS_SHIFT) | len);
1334 pSimple->Address.Low = cpu_to_le32((u32) priv->RcvCtl[ctx].dma);
/* High dword only meaningful when dma_addr_t is wider than 32 bits. */
1335 if (sizeof(dma_addr_t) > sizeof(u32))
1336 pSimple->Address.High = cpu_to_le32((u32) ((u64) priv->RcvCtl[ctx].dma >> 32));
1338 pSimple->Address.High = 0;
1340 pTrans = (SGETransaction32_t *) (pSimple + 1);
/* No SGE was built at all: release the request frame instead of posting
 * an empty LANReceivePost. */
1343 if (pSimple == NULL) {
1344 /**/ printk (KERN_WARNING MYNAM "/%s: No buckets posted\n",
1346 mpt_free_msg_frame(LanCtx, mpt_dev->id, mf);
/* Mark the final SGE as end-of-list, record the bucket count, and post. */
1350 pSimple->FlagsLength |= cpu_to_le32(MPI_SGE_FLAGS_END_OF_LIST << MPI_SGE_FLAGS_SHIFT);
1352 pRecvReq->BucketCount = cpu_to_le32(i);
1354 /* printk(KERN_INFO MYNAM ": posting buckets\n ");
1355 * for (i = 0; i < j + 2; i ++)
1356 * printk (" %08x", le32_to_cpu(msg[i]));
1360 mpt_put_msg_frame(LanCtx, mpt_dev->id, mf);
/* Account for the 'i' buckets just handed to the firmware. */
1362 priv->total_posted += i;
1364 atomic_add(i, &priv->buckets_out);
1368 dioprintk((KERN_INFO MYNAM "/%s: End_buckets = %u, priv->buckets_out = %u\n",
1369 __FUNCTION__, buckets, atomic_read(&priv->buckets_out)));
1370 dioprintk((KERN_INFO MYNAM "/%s: Posted %u buckets and received %u back\n",
1371 __FUNCTION__, priv->total_posted, priv->total_received));
/* Allow the wake-task helper to schedule this function again. */
1373 clear_bit(0, &priv->post_buckets_active);
1376 /*=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=*/
/*
 * mpt_register_lan_device - allocate and register one FC LAN net_device
 * for a given adapter port, initializing the driver-private state
 * (bucket accounting, locks, work task, HW address, netdev ops).
 *
 * @mpt_dev: the MPT adapter hosting the port.
 * @pnum:    port number on that adapter.
 *
 * Returns the registered net_device (error-path lines are elided from
 * this listing — TODO confirm NULL is returned on failure).
 *
 * NOTE(review): listing is non-contiguous; allocation-failure checks and
 * the HWaddr byte-extraction lines are among those elided.
 */
1377 static struct net_device *
1378 mpt_register_lan_device (MPT_ADAPTER *mpt_dev, int pnum)
1380 struct net_device *dev = alloc_fcdev(sizeof(struct mpt_lan_priv));
1381 struct mpt_lan_priv *priv = NULL;
1382 u8 HWaddr[FC_ALEN], *a;
1387 dev->mtu = MPT_LAN_MTU;
1389 priv = netdev_priv(dev);
1391 priv->mpt_dev = mpt_dev;
/* Deferred work that reposts receive buckets (runs mpt_lan_post_receive_buckets). */
1394 memset(&priv->post_buckets_task, 0, sizeof(struct mpt_work_struct));
1395 MPT_INIT_WORK(&priv->post_buckets_task, mpt_lan_post_receive_buckets, dev);
1396 priv->post_buckets_active = 0;
1398 dlprintk((KERN_INFO MYNAM "@%d: bucketlen = %d\n",
1399 __LINE__, dev->mtu + dev->hard_header_len + 4));
/* Bucket accounting starts empty; cap the outstanding-bucket ceiling by
 * what the port's firmware reports it can actually hold. */
1401 atomic_set(&priv->buckets_out, 0);
1402 priv->total_posted = 0;
1403 priv->total_received = 0;
1404 priv->max_buckets_out = max_buckets_out;
1405 if (mpt_dev->pfacts[0].MaxLanBuckets < max_buckets_out)
1406 priv->max_buckets_out = mpt_dev->pfacts[0].MaxLanBuckets;
1408 dlprintk((KERN_INFO MYNAM "@%d: MaxLanBuckets=%d, max_buckets_out/priv=%d/%d\n",
1410 mpt_dev->pfacts[0].MaxLanBuckets,
1412 priv->max_buckets_out));
/* Warn/reset threshold at 2/3 of the ceiling (see receive reply path). */
1414 priv->bucketthresh = priv->max_buckets_out * 2 / 3;
1415 priv->txfidx_lock = SPIN_LOCK_UNLOCKED;
1416 priv->rxfidx_lock = SPIN_LOCK_UNLOCKED;
1418 memset(&priv->stats, 0, sizeof(priv->stats));
1420 /* Grab pre-fetched LANPage1 stuff. :-) */
1421 a = (u8 *) &mpt_dev->lan_cnfg_page1.HardwareAddressLow;
/* (HWaddr[] is filled from *a in elided lines — TODO confirm byte order.) */
1430 dev->addr_len = FC_ALEN;
1431 memcpy(dev->dev_addr, HWaddr, FC_ALEN);
1432 memset(dev->broadcast, 0xff, FC_ALEN);
1434 /* The Tx queue is 127 deep on the 909.
1435 * Give ourselves some breathing room.
1437 priv->tx_max_out = (tx_max_out_p <= MPT_TX_MAX_OUT_LIM) ?
1438 tx_max_out_p : MPT_TX_MAX_OUT_LIM;
/* Pre-2.6.29 style net_device ops: function pointers set directly. */
1440 dev->open = mpt_lan_open;
1441 dev->stop = mpt_lan_close;
1442 dev->get_stats = mpt_lan_get_stats;
1443 dev->set_multicast_list = NULL;
1444 dev->change_mtu = mpt_lan_change_mtu;
1445 dev->hard_start_xmit = mpt_lan_sdu_send;
1447 /* Not in 2.3.42. Need 2.3.45+ */
1448 dev->tx_timeout = mpt_lan_tx_timeout;
1449 dev->watchdog_timeo = MPT_LAN_TX_TIMEOUT;
1451 dlprintk((KERN_INFO MYNAM ": Finished registering dev "
1452 "and setting initial values\n"));
1454 SET_MODULE_OWNER(dev);
/* (Failure branch body and the success return are elided.) */
1456 if (register_netdev(dev) != 0) {
1463 /*=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=*/
/*
 * mpt_lan_init - module entry point: register with the MPT base driver,
 * hook IOC-reset notifications, then walk every adapter/port and create
 * a LAN net_device for each port advertising the LAN protocol.
 *
 * NOTE(review): listing is non-contiguous (return statements, brace
 * closes, and the 'j' bookkeeping around mpt_landev[] are elided);
 * comments describe only the visible statements.
 */
1464 static int __init mpt_lan_init (void)
1466 struct net_device *dev;
1470 show_mptmod_ver(LANAME, LANVER);
1472 #ifdef QLOGIC_NAA_WORKAROUND
1473 /* Init the global r/w lock for the bad_naa list. We want to do this
1474 before any boards are initialized and may be used. */
1475 rwlock_init(&bad_naa_lock);
/* Register our reply callback with mptbase; LanCtx is the handle used
 * for all subsequent message-frame traffic. */
1478 if ((LanCtx = mpt_register(lan_reply, MPTLAN_DRIVER)) <= 0) {
1479 printk (KERN_ERR MYNAM ": Failed to register with MPT base driver\n");
1483 /* Set the callback index to be used by driver core for turbo replies */
1484 mpt_lan_index = LanCtx;
1486 dlprintk((KERN_INFO MYNAM ": assigned context of %d\n", LanCtx));
1488 if (mpt_reset_register(LanCtx, mpt_lan_ioc_reset) == 0) {
1489 dlprintk((KERN_INFO MYNAM ": Registered for IOC reset notifications\n"));
1491 printk(KERN_ERR MYNAM ": Eieee! unable to register a reset "
1492 "handler with mptbase! The world is at an end! "
1493 "Everything is fading to black! Goodbye.\n");
/* Clear the device table before populating it below. */
1497 for (j = 0; j < MPT_MAX_ADAPTERS; j++) {
1498 mpt_landev[j] = NULL;
/* Iterate all adapters and their ports, creating a net_device for each
 * port whose ProtocolFlags include LAN support. */
1501 for (p = mpt_adapter_find_first(); p; p = mpt_adapter_find_next(p)) {
1502 for (i = 0; i < p->facts.NumberOfPorts; i++) {
1503 printk (KERN_INFO MYNAM ": %s: PortNum=%x, ProtocolFlags=%02Xh (%c%c%c%c)\n",
1505 p->pfacts[i].PortNumber,
1506 p->pfacts[i].ProtocolFlags,
1507 MPT_PROTOCOL_FLAGS_c_c_c_c(p->pfacts[i].ProtocolFlags));
1509 if (!(p->pfacts[i].ProtocolFlags & MPI_PORTFACTS_PROTOCOL_LAN)) {
1510 printk (KERN_INFO MYNAM ": %s: Hmmm... LAN protocol seems to be disabled on this adapter port!\n",
1515 dev = mpt_register_lan_device (p, i);
1517 printk (KERN_ERR MYNAM ": %s: Unable to register port%d as a LAN device\n",
1519 p->pfacts[i].PortNumber);
1521 printk (KERN_INFO MYNAM ": %s: Fusion MPT LAN device registered as '%s'\n",
1522 p->name, dev->name);
1523 printk (KERN_INFO MYNAM ": %s/%s: LanAddr = %02X:%02X:%02X:%02X:%02X:%02X\n",
1524 IOC_AND_NETDEV_NAMES_s_s(dev),
1525 dev->dev_addr[0], dev->dev_addr[1],
1526 dev->dev_addr[2], dev->dev_addr[3],
1527 dev->dev_addr[4], dev->dev_addr[5]);
1528 // printk (KERN_INFO MYNAM ": %s/%s: Max_TX_outstanding = %d\n",
1529 // IOC_AND_NETDEV_NAMES_s_s(dev),
1530 // NETDEV_TO_LANPRIV_PTR(dev)->tx_max_out);
/* Record the new device in the global table (j's increment is elided). */
1532 mpt_landev[j] = dev;
1533 dlprintk((KERN_INFO MYNAM "/init: dev_addr=%p, mpt_landev[%d]=%p\n",
1534 dev, j, mpt_landev[j]));
1542 /*=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=*/
/*
 * mpt_lan_exit - module teardown: undo mpt_lan_init in reverse order —
 * drop the reset handler, unregister every net_device recorded in
 * mpt_landev[], then deregister from the MPT base driver.
 */
1543 static void __exit mpt_lan_exit(void)
1547 mpt_reset_deregister(LanCtx);
/* Walk the device table up to the first NULL slot (table was built
 * contiguously in mpt_lan_init). */
1549 for (i = 0; mpt_landev[i] != NULL; i++) {
1550 struct net_device *dev = mpt_landev[i];
1552 printk (KERN_INFO ": %s/%s: Fusion MPT LAN device unregistered\n",
1553 IOC_AND_NETDEV_NAMES_s_s(dev),
1554 unregister_netdev(dev);
/* (free_netdev/kfree of dev presumably happens in elided lines — TODO confirm.) */
1556 mpt_landev[i] = NULL;
1560 mpt_deregister(LanCtx);
1565 /* deregister any send/receive handler structs. I2Oism? */
1568 /*=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=*/
/* Legacy module parameters (MODULE_PARM was removed after 2.5.x; this
 * branch only compiles on older kernels). */
1569 #if LINUX_VERSION_CODE < KERNEL_VERSION(2,5,59)
1570 MODULE_PARM(tx_max_out_p, "i");
1571 MODULE_PARM(max_buckets_out, "i"); // Debug stuff. FIXME!
/* Module entry/exit hooks. */
1574 module_init(mpt_lan_init);
1575 module_exit(mpt_lan_exit);
1577 /*=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=*/
/*
 * mpt_lan_type_trans - FC-LAN analogue of eth_type_trans(): strip the
 * mpt_lan_ohdr, classify the packet (broadcast/multicast/otherhost/host),
 * and return the network-order protocol type for skb->protocol.
 *
 * @skb: received frame, data pointing at the mpt_lan_ohdr.
 * @dev: receiving net_device.
 *
 * NOTE(review): listing is non-contiguous (notably inside the broadcast
 * byte-swap fix-up and the QLOGIC_NAA_WORKAROUND list handling); comments
 * describe only the visible statements.
 */
1578 static unsigned short
1579 mpt_lan_type_trans(struct sk_buff *skb, struct net_device *dev)
1581 struct mpt_lan_ohdr *fch = (struct mpt_lan_ohdr *)skb->data;
1582 struct fcllc *fcllc;
/* Record the MAC header position, then advance past it. */
1584 skb->mac.raw = skb->data;
1585 skb_pull(skb, sizeof(struct mpt_lan_ohdr));
/* dtype == 0xffff signals a sender with the byte-swapped-broadcast F/W
 * bug; the in-place repair of *p is in elided lines — TODO confirm. */
1587 if (fch->dtype == htons(0xffff)) {
1588 u32 *p = (u32 *) fch;
1595 printk (KERN_WARNING MYNAM ": %s: WARNING - Broadcast swap F/W bug detected!\n",
1596 NETDEV_PTR_TO_IOC_NAME_s(dev));
1597 printk (KERN_WARNING MYNAM ": Please update sender @ MAC_addr = %02x:%02x:%02x:%02x:%02x:%02x\n",
1598 fch->saddr[0], fch->saddr[1], fch->saddr[2],
1599 fch->saddr[3], fch->saddr[4], fch->saddr[5]);
/* Group bit set => broadcast or multicast; otherwise compare against our
 * own address to distinguish host vs. otherhost (promiscuous case). */
1602 if (*fch->daddr & 1) {
1603 if (!memcmp(fch->daddr, dev->broadcast, FC_ALEN)) {
1604 skb->pkt_type = PACKET_BROADCAST;
1606 skb->pkt_type = PACKET_MULTICAST;
1609 if (memcmp(fch->daddr, dev->dev_addr, FC_ALEN)) {
1610 skb->pkt_type = PACKET_OTHERHOST;
1612 skb->pkt_type = PACKET_HOST;
1616 fcllc = (struct fcllc *)skb->data;
1618 #ifdef QLOGIC_NAA_WORKAROUND
1620 u16 source_naa = fch->stype, found = 0;
1622 /* Workaround for QLogic not following RFC 2625 in regards to the NAA
/* Heuristic: a zero high nibble suggests the NAA arrived byte-swapped. */
1625 if ((source_naa & 0xF000) == 0)
1626 source_naa = swab16(source_naa);
1628 if (fcllc->ethertype == htons(ETH_P_ARP))
1629 dlprintk ((KERN_INFO "mptlan/type_trans: got arp req/rep w/ naa of "
1630 "%04x.\n", source_naa));
/* ARP from a non-RFC-2625 NAA sender: remember the sender's MAC in the
 * global bad_naa list so later traffic can be fixed up. */
1632 if ((fcllc->ethertype == htons(ETH_P_ARP)) &&
1633 ((source_naa >> 12) != MPT_LAN_NAA_RFC2625)){
1634 struct NAA_Hosed *nh, *prevnh;
1637 dlprintk ((KERN_INFO "mptlan/type_trans: ARP Req/Rep from "
1638 "system with non-RFC 2625 NAA value (%04x).\n",
/* Linear search of the singly linked bad_naa list under the writer lock
 * (write lock held even for lookup, since we may append below). */
1641 write_lock_irq(&bad_naa_lock);
1642 for (prevnh = nh = mpt_bad_naa; nh != NULL;
1643 prevnh=nh, nh=nh->next) {
1644 if ((nh->ieee[0] == fch->saddr[0]) &&
1645 (nh->ieee[1] == fch->saddr[1]) &&
1646 (nh->ieee[2] == fch->saddr[2]) &&
1647 (nh->ieee[3] == fch->saddr[3]) &&
1648 (nh->ieee[4] == fch->saddr[4]) &&
1649 (nh->ieee[5] == fch->saddr[5])) {
1651 dlprintk ((KERN_INFO "mptlan/type_trans: ARP Re"
1652 "q/Rep w/ bad NAA from system already"
/* Not found and list exhausted: allocate and append a new entry.
 * NOTE(review): GFP_KERNEL under write_lock_irq looks like a sleeping-
 * allocation-in-atomic-context bug in the original — worth confirming. */
1658 if ((!found) && (nh == NULL)) {
1660 nh = kmalloc(sizeof(struct NAA_Hosed), GFP_KERNEL);
1661 dlprintk ((KERN_INFO "mptlan/type_trans: ARP Req/Rep w/"
1662 " bad NAA from system not yet in DB.\n"));
1671 nh->NAA = source_naa; /* Set the S_NAA value. */
1672 for (i = 0; i < FC_ALEN; i++)
1673 nh->ieee[i] = fch->saddr[i];
1674 dlprintk ((KERN_INFO "Got ARP from %02x:%02x:%02x:%02x:"
1675 "%02x:%02x with non-compliant S_NAA value.\n",
1676 fch->saddr[0], fch->saddr[1], fch->saddr[2],
1677 fch->saddr[3], fch->saddr[4],fch->saddr[5]));
1679 printk (KERN_ERR "mptlan/type_trans: Unable to"
1680 " kmalloc a NAA_Hosed struct.\n");
1682 } else if (!found) {
1683 printk (KERN_ERR "mptlan/type_trans: found not"
1684 " set, but nh isn't null. Evil "
1685 "funkiness abounds.\n");
1687 write_unlock_irq(&bad_naa_lock);
1692 /* Strip the SNAP header from ARP packets since we don't
1693 * pass them through to the 802.2/SNAP layers.
/* SNAP-encapsulated IP/ARP: drop the LLC/SNAP header and return the
 * inner ethertype (already network order). */
1695 if (fcllc->dsap == EXTENDED_SAP &&
1696 (fcllc->ethertype == htons(ETH_P_IP) ||
1697 fcllc->ethertype == htons(ETH_P_ARP))) {
1698 skb_pull(skb, sizeof(struct fcllc));
1699 return fcllc->ethertype;
/* Anything else is handed up as raw 802.2. */
1702 return htons(ETH_P_802_2);
1705 /*=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=*/