2 * linux/drivers/message/fusion/mptlan.c
3 * IP Over Fibre Channel device driver.
4 * For use with PCI chip/adapter(s):
5 * LSIFC9xx/LSI409xx Fibre Channel
6 * running LSI Logic Fusion MPT (Message Passing Technology) firmware.
9 * This driver would not exist if not for Alan Cox's development
10 * of the linux i2o driver.
12 * Special thanks goes to the I2O LAN driver people at the
13 * University of Helsinki, who, unbeknownst to them, provided
14 * the inspiration and initial structure for this driver.
16 * A huge debt of gratitude is owed to David S. Miller (DaveM)
17 * for fixing much of the stupid and broken stuff in the early
18 * driver while porting to sparc64 platform. THANK YOU!
20 * A really huge debt of gratitude is owed to Eddie C. Dost
21 * for gobs of hard work fixing and optimizing LAN code.
24 * (see also mptbase.c)
26 * Copyright (c) 2000-2002 LSI Logic Corporation
27 * Originally By: Noah Romer
29 * $Id: mptlan.c,v 1.1.1.1 2005/04/11 02:50:25 jack Exp $
31 /*=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=*/
33 This program is free software; you can redistribute it and/or modify
34 it under the terms of the GNU General Public License as published by
35 the Free Software Foundation; version 2 of the License.
37 This program is distributed in the hope that it will be useful,
38 but WITHOUT ANY WARRANTY; without even the implied warranty of
39 MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
40 GNU General Public License for more details.
43 THE PROGRAM IS PROVIDED ON AN "AS IS" BASIS, WITHOUT WARRANTIES OR
44 CONDITIONS OF ANY KIND, EITHER EXPRESS OR IMPLIED INCLUDING, WITHOUT
45 LIMITATION, ANY WARRANTIES OR CONDITIONS OF TITLE, NON-INFRINGEMENT,
46 MERCHANTABILITY OR FITNESS FOR A PARTICULAR PURPOSE. Each Recipient is
47 solely responsible for determining the appropriateness of using and
48 distributing the Program and assumes all risks associated with its
49 exercise of rights under this Agreement, including but not limited to
50 the risks and costs of program errors, damage to or loss of data,
51 programs or equipment, and unavailability or interruption of operations.
53 DISCLAIMER OF LIABILITY
54 NEITHER RECIPIENT NOR ANY CONTRIBUTORS SHALL HAVE ANY LIABILITY FOR ANY
55 DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
56 DAMAGES (INCLUDING WITHOUT LIMITATION LOST PROFITS), HOWEVER CAUSED AND
57 ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR
58 TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE
59 USE OR DISTRIBUTION OF THE PROGRAM OR THE EXERCISE OF ANY RIGHTS GRANTED
60 HEREUNDER, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGES
62 You should have received a copy of the GNU General Public License
63 along with this program; if not, write to the Free Software
64 Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
67 /*=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=*/
69 * Define statements used for debugging
71 //#define MPT_LAN_IO_DEBUG
73 /*=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=*/
76 #include <linux/init.h>
77 #include <linux/module.h>
80 #define MYNAM "mptlan"
82 MODULE_LICENSE("GPL");
84 /*=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=*/
86 * MPT LAN message sizes without variable part.
88 #define MPT_LAN_RECEIVE_POST_REQUEST_SIZE \
89 (sizeof(LANReceivePostRequest_t) - sizeof(SGE_MPI_UNION))
91 #define MPT_LAN_TRANSACTION32_SIZE \
92 (sizeof(SGETransaction32_t) - sizeof(u32))
95 * Fusion MPT LAN private structures
101 struct NAA_Hosed *next;
/*
 * Fusion MPT LAN private per-device structures.
 * NOTE(review): this is a partial line-numbered dump — several struct
 * members (e.g. the BufferControl fields, mpt_txfidx_tail/mpt_rxfidx_tail,
 * total_posted/total_received counters used later in this file) are not
 * visible here. Confirm the full definitions against the complete mptlan.c.
 */
104 struct BufferControl {
110 struct mpt_lan_priv {
111 MPT_ADAPTER *mpt_dev;
112 u8 pnum; /* Port number in the IOC. This is not a Unix network port! */
114 atomic_t buckets_out; /* number of unused buckets on IOC */
115 int bucketthresh; /* Send more when this many left */
117 int *mpt_txfidx; /* Free Tx Context list */
119 spinlock_t txfidx_lock;
121 int *mpt_rxfidx; /* Free Rx Context list */
123 spinlock_t rxfidx_lock;
125 struct BufferControl *RcvCtl; /* Receive BufferControl structs */
126 struct BufferControl *SendCtl; /* Send BufferControl structs */
128 int max_buckets_out; /* Max buckets to send to IOC */
129 int tx_max_out; /* IOC's Tx queue len */
133 struct net_device_stats stats; /* Per device statistics */
/* Deferred work used to repost receive buckets outside IRQ context. */
135 struct mpt_work_struct post_buckets_task;
136 unsigned long post_buckets_active;
/* FC LAN on-wire header; field list not visible in this dump. */
139 struct mpt_lan_ohdr {
146 /*=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=*/
/* Forward declarations for this file's static functions. */
151 static int lan_reply (MPT_ADAPTER *ioc, MPT_FRAME_HDR *mf,
152 MPT_FRAME_HDR *reply);
153 static int mpt_lan_open(struct net_device *dev);
154 static int mpt_lan_reset(struct net_device *dev);
155 static int mpt_lan_close(struct net_device *dev);
156 static void mpt_lan_post_receive_buckets(void *dev_id);
157 static void mpt_lan_wake_post_buckets_task(struct net_device *dev,
159 static int mpt_lan_receive_post_turbo(struct net_device *dev, u32 tmsg);
160 static int mpt_lan_receive_post_reply(struct net_device *dev,
161 LANReceivePostReply_t *pRecvRep);
162 static int mpt_lan_send_turbo(struct net_device *dev, u32 tmsg);
163 static int mpt_lan_send_reply(struct net_device *dev,
164 LANSendReply_t *pSendRep);
165 static int mpt_lan_ioc_reset(MPT_ADAPTER *ioc, int reset_phase);
166 static int mpt_lan_event_process(MPT_ADAPTER *ioc, EventNotificationReply_t *pEvReply);
167 static unsigned short mpt_lan_type_trans(struct sk_buff *skb,
168 struct net_device *dev);
170 /*=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=*/
172 * Fusion MPT LAN private data
/* Module-private data.
 * LanCtx is the MPT callback context handle; -1 means "not registered".
 */
174 static int LanCtx = -1;
/* Default queue depths: 127 receive buckets out, 127-16 Tx slots. */
176 static u32 max_buckets_out = 127;
177 static u32 tx_max_out_p = 127 - 16;
/* One net_device per IOC, indexed by ioc->id (see lan_reply). */
179 static struct net_device *mpt_landev[MPT_MAX_ADAPTERS+1];
181 #ifdef QLOGIC_NAA_WORKAROUND
/* List of NAAs needing the QLogic RFC 2625 workaround (see sdu_send). */
182 static struct NAA_Hosed *mpt_bad_naa = NULL;
183 rwlock_t bad_naa_lock;
186 /*=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=*/
188 * Fusion MPT LAN external data
190 extern int mpt_lan_index;
192 /*=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=*/
194 * lan_reply - Handle all data sent from the hardware.
195 * @ioc: Pointer to MPT_ADAPTER structure
196 * @mf: Pointer to original MPT request frame (NULL if TurboReply)
197 * @reply: Pointer to MPT reply frame
199 * Returns 1 indicating original alloc'd request frame ptr
200 * should be freed, or 0 if it shouldn't.
/*
 * lan_reply - dispatch turbo and full reply frames from the IOC.
 * Returns nonzero (via FreeReqFrame) when the caller may free @mf.
 * NOTE(review): partial dump — the turbo/non-turbo if/else, break
 * statements and final return are not visible here; verify against
 * the complete source.
 */
203 lan_reply (MPT_ADAPTER *ioc, MPT_FRAME_HDR *mf, MPT_FRAME_HDR *reply)
205 struct net_device *dev = mpt_landev[ioc->id];
206 int FreeReqFrame = 0;
208 dioprintk((KERN_INFO MYNAM ": %s/%s: Got reply.\n",
209 IOC_AND_NETDEV_NAMES_s_s(dev)));
211 // dioprintk((KERN_INFO MYNAM "@lan_reply: mf = %p, reply = %p\n",
/* Turbo path: the "reply" pointer is actually a 32-bit message. */
215 u32 tmsg = CAST_PTR_TO_U32(reply);
217 dioprintk((KERN_INFO MYNAM ": %s/%s: @lan_reply, tmsg %08x\n",
218 IOC_AND_NETDEV_NAMES_s_s(dev),
221 switch (GET_LAN_FORM(tmsg)) {
223 // NOTE! (Optimization) First case here is now caught in
224 // mptbase.c::mpt_interrupt() routine and callcack here
225 // is now skipped for this case! 20001218 -sralston
227 case LAN_REPLY_FORM_MESSAGE_CONTEXT:
228 // dioprintk((KERN_INFO MYNAM "/lan_reply: "
229 // "MessageContext turbo reply received\n"));
234 case LAN_REPLY_FORM_SEND_SINGLE:
235 // dioprintk((MYNAM "/lan_reply: "
236 // "calling mpt_lan_send_reply (turbo)\n"));
238 // Potential BUG here? -sralston
239 // FreeReqFrame = mpt_lan_send_turbo(dev, tmsg);
240 // If/when mpt_lan_send_turbo would return 1 here,
241 // calling routine (mptbase.c|mpt_interrupt)
242 // would Oops because mf has already been set
243 // to NULL. So after return from this func,
244 // mpt_interrupt() will attempt to put (NULL) mf ptr
245 // item back onto it's adapter FreeQ - Oops!:-(
246 // It's Ok, since mpt_lan_send_turbo() *currently*
247 // always returns 0, but..., just in case:
249 (void) mpt_lan_send_turbo(dev, tmsg);
254 case LAN_REPLY_FORM_RECEIVE_SINGLE:
255 // dioprintk((KERN_INFO MYNAM "@lan_reply: "
256 // "rcv-Turbo = %08x\n", tmsg));
257 mpt_lan_receive_post_turbo(dev, tmsg);
261 printk (KERN_ERR MYNAM "/lan_reply: Got a turbo reply "
262 "that I don't know what to do with\n");
264 /* CHECKME! Hmmm... FreeReqFrame is 0 here; is that right? */
/* Non-turbo path: a full reply frame; dispatch on MPI function. */
272 // msg = (u32 *) reply;
273 // dioprintk((KERN_INFO MYNAM "@lan_reply: msg = %08x %08x %08x %08x\n",
274 // le32_to_cpu(msg[0]), le32_to_cpu(msg[1]),
275 // le32_to_cpu(msg[2]), le32_to_cpu(msg[3])));
276 // dioprintk((KERN_INFO MYNAM "@lan_reply: Function = %02xh\n",
277 // reply->u.hdr.Function));
279 switch (reply->u.hdr.Function) {
281 case MPI_FUNCTION_LAN_SEND:
283 LANSendReply_t *pSendRep;
285 pSendRep = (LANSendReply_t *) reply;
286 FreeReqFrame = mpt_lan_send_reply(dev, pSendRep);
290 case MPI_FUNCTION_LAN_RECEIVE:
292 LANReceivePostReply_t *pRecvRep;
294 pRecvRep = (LANReceivePostReply_t *) reply;
295 if (pRecvRep->NumberOfContexts) {
296 mpt_lan_receive_post_reply(dev, pRecvRep);
/* Non-continuation reply means the request frame is done with. */
297 if (!(pRecvRep->MsgFlags & MPI_MSGFLAGS_CONTINUATION_REPLY))
300 dioprintk((KERN_INFO MYNAM "@lan_reply: zero context "
301 "ReceivePostReply received.\n"));
305 case MPI_FUNCTION_LAN_RESET:
306 /* Just a default reply. Might want to check it to
307 * make sure that everything went ok.
312 case MPI_FUNCTION_EVENT_NOTIFICATION:
313 case MPI_FUNCTION_EVENT_ACK:
314 /* UPDATE! 20010120 -sralston
315 * _EVENT_NOTIFICATION should NOT come down this path any more.
316 * Should be routed to mpt_lan_event_process(), but just in case...
322 printk (KERN_ERR MYNAM "/lan_reply: Got a non-turbo "
323 "reply that I don't know what to do with\n");
325 /* CHECKME! Hmmm... FreeReqFrame is 0 here; is that right? */
334 /*=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=*/
/*
 * mpt_lan_ioc_reset - MPT reset notification hook for the LAN driver.
 * Pre-reset: stop the Tx queue. Post-reset (presumably — the else arm
 * is not visible in this dump): zero buckets_out, rebuild the Rx free
 * context list, repost buckets and wake the queue.
 */
336 mpt_lan_ioc_reset(MPT_ADAPTER *ioc, int reset_phase)
338 struct net_device *dev = mpt_landev[ioc->id];
339 struct mpt_lan_priv *priv = (struct mpt_lan_priv *) dev->priv;
341 dlprintk((KERN_INFO MYNAM ": IOC %s_reset routed to LAN driver!\n",
342 reset_phase==MPT_IOC_PRE_RESET ? "pre" : "post"));
/* Not open (no Rx context list) — nothing to do. */
344 if (priv->mpt_rxfidx == NULL)
347 if (reset_phase == MPT_IOC_PRE_RESET) {
351 netif_stop_queue(dev);
353 dlprintk ((KERN_INFO "mptlan/ioc_reset: called netif_stop_queue for %s.\n", dev->name));
355 atomic_set(&priv->buckets_out, 0);
357 /* Reset Rx Free Tail index and re-populate the queue. */
358 spin_lock_irqsave(&priv->rxfidx_lock, flags);
359 priv->mpt_rxfidx_tail = -1;
360 for (i = 0; i < priv->max_buckets_out; i++)
361 priv->mpt_rxfidx[++priv->mpt_rxfidx_tail] = i;
362 spin_unlock_irqrestore(&priv->rxfidx_lock, flags);
364 mpt_lan_post_receive_buckets(dev);
365 netif_wake_queue(dev);
371 /*=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=*/
/*
 * mpt_lan_event_process - MPT event notification hook.
 * Currently only logs; every listed event falls through with no action
 * (the break/default lines are not visible in this dump).
 */
373 mpt_lan_event_process(MPT_ADAPTER *ioc, EventNotificationReply_t *pEvReply)
375 dlprintk((KERN_INFO MYNAM ": MPT event routed to LAN driver!\n"));
377 switch (le32_to_cpu(pEvReply->Event)) {
378 case MPI_EVENT_NONE: /* 00 */
379 case MPI_EVENT_LOG_DATA: /* 01 */
380 case MPI_EVENT_STATE_CHANGE: /* 02 */
381 case MPI_EVENT_UNIT_ATTENTION: /* 03 */
382 case MPI_EVENT_IOC_BUS_RESET: /* 04 */
383 case MPI_EVENT_EXT_BUS_RESET: /* 05 */
384 case MPI_EVENT_RESCAN: /* 06 */
385 /* Ok, do we need to do anything here? As far as
386 I can tell, this is when a new device gets added
388 case MPI_EVENT_LINK_STATUS_CHANGE: /* 07 */
389 case MPI_EVENT_LOOP_STATE_CHANGE: /* 08 */
390 case MPI_EVENT_LOGOUT: /* 09 */
391 case MPI_EVENT_EVENT_CHANGE: /* 0A */
397 * NOTE: pEvent->AckRequired handling now done in mptbase.c;
398 * Do NOT do it here now!
404 /*=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=*/
/*
 * mpt_lan_open - netdev open: reset the FW LAN port, allocate Tx/Rx
 * context lists and BufferControl arrays, post initial receive buckets,
 * register for MPT events and start the queue.
 * Error paths unwind allocations in reverse order (the goto labels are
 * not visible in this dump — lines between kmalloc checks and the
 * cleanup code at the bottom are missing).
 */
406 mpt_lan_open(struct net_device *dev)
408 struct mpt_lan_priv *priv = (struct mpt_lan_priv *) dev->priv;
411 if (mpt_lan_reset(dev) != 0) {
412 MPT_ADAPTER *mpt_dev = priv->mpt_dev;
414 printk (KERN_WARNING MYNAM "/lan_open: lan_reset failed.");
417 printk ("The ioc is active. Perhaps it needs to be"
420 printk ("The ioc in inactive, most likely in the "
421 "process of being reset. Please try again in "
/* Tx free-context stack: indices 0..tx_max_out-1, tail = top. */
425 priv->mpt_txfidx = kmalloc(priv->tx_max_out * sizeof(int), GFP_KERNEL);
426 if (priv->mpt_txfidx == NULL)
428 priv->mpt_txfidx_tail = -1;
430 priv->SendCtl = kmalloc(priv->tx_max_out * sizeof(struct BufferControl),
432 if (priv->SendCtl == NULL)
434 for (i = 0; i < priv->tx_max_out; i++) {
435 memset(&priv->SendCtl[i], 0, sizeof(struct BufferControl));
436 priv->mpt_txfidx[++priv->mpt_txfidx_tail] = i;
439 dlprintk((KERN_INFO MYNAM "@lo: Finished initializing SendCtl\n"));
/* Rx free-context stack, same scheme as Tx above. */
441 priv->mpt_rxfidx = kmalloc(priv->max_buckets_out * sizeof(int),
443 if (priv->mpt_rxfidx == NULL)
445 priv->mpt_rxfidx_tail = -1;
447 priv->RcvCtl = kmalloc(priv->max_buckets_out *
448 sizeof(struct BufferControl),
450 if (priv->RcvCtl == NULL)
452 for (i = 0; i < priv->max_buckets_out; i++) {
453 memset(&priv->RcvCtl[i], 0, sizeof(struct BufferControl));
454 priv->mpt_rxfidx[++priv->mpt_rxfidx_tail] = i;
457 /**/ dlprintk((KERN_INFO MYNAM "/lo: txfidx contains - "));
458 /**/ for (i = 0; i < priv->tx_max_out; i++)
459 /**/ dlprintk((" %xh", priv->mpt_txfidx[i]));
460 /**/ dlprintk(("\n"));
462 dlprintk((KERN_INFO MYNAM "/lo: Finished initializing RcvCtl\n"));
464 mpt_lan_post_receive_buckets(dev);
465 printk(KERN_INFO MYNAM ": %s/%s: interface up & active\n",
466 IOC_AND_NETDEV_NAMES_s_s(dev));
468 if (mpt_event_register(LanCtx, mpt_lan_event_process) != 0) {
469 printk (KERN_WARNING MYNAM "/lo: Unable to register for Event"
470 " Notifications. This is a bad thing! We're not going "
471 "to go ahead, but I'd be leery of system stability at "
475 netif_start_queue(dev);
476 dlprintk((KERN_INFO MYNAM "/lo: Done.\n"));
/* Failure unwind: free in reverse order of allocation. */
480 kfree(priv->mpt_rxfidx);
481 priv->mpt_rxfidx = NULL;
483 kfree(priv->SendCtl);
484 priv->SendCtl = NULL;
486 kfree(priv->mpt_txfidx);
487 priv->mpt_txfidx = NULL;
491 /*=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=*/
492 /* Send a LanReset message to the FW. This should result in the FW returning
493 any buckets it still has. */
/*
 * mpt_lan_reset - post a LanReset request to the firmware so it returns
 * any receive buckets it still holds. Fire-and-forget: the reply is
 * handled in lan_reply (MPI_FUNCTION_LAN_RESET case).
 */
495 mpt_lan_reset(struct net_device *dev)
498 LANResetRequest_t *pResetReq;
499 struct mpt_lan_priv *priv = (struct mpt_lan_priv *)dev->priv;
501 mf = mpt_get_msg_frame(LanCtx, priv->mpt_dev->id);
/* Frame allocation failure path; the return is not visible here. */
504 /* dlprintk((KERN_ERR MYNAM "/reset: Evil funkiness abounds! "
505 "Unable to allocate a request frame.\n"));
510 pResetReq = (LANResetRequest_t *) mf;
512 pResetReq->Function = MPI_FUNCTION_LAN_RESET;
513 pResetReq->ChainOffset = 0;
514 pResetReq->Reserved = 0;
515 pResetReq->PortNumber = priv->pnum;
516 pResetReq->MsgFlags = 0;
517 pResetReq->Reserved2 = 0;
519 mpt_put_msg_frame(LanCtx, priv->mpt_dev->id, mf);
524 /*=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=*/
/*
 * mpt_lan_close - netdev stop: deregister events, stop the queue, wait
 * (bounded by `timeout`, initial value not visible in this dump) for
 * outstanding buckets to drain, then unmap and free every Rx/Tx skb
 * still held, plus the context lists and BufferControl arrays.
 */
526 mpt_lan_close(struct net_device *dev)
528 struct mpt_lan_priv *priv = (struct mpt_lan_priv *) dev->priv;
529 MPT_ADAPTER *mpt_dev = priv->mpt_dev;
530 unsigned int timeout;
533 dlprintk((KERN_INFO MYNAM ": mpt_lan_close called\n"));
535 mpt_event_deregister(LanCtx);
537 dlprintk((KERN_INFO MYNAM ":lan_close: Posted %d buckets "
538 "since driver was loaded, %d still out\n",
539 priv->total_posted,atomic_read(&priv->buckets_out)));
541 netif_stop_queue(dev);
/* Sleep-wait for the IOC to hand back outstanding buckets. */
546 while (atomic_read(&priv->buckets_out) && --timeout) {
547 set_current_state(TASK_INTERRUPTIBLE);
/* Reclaim any receive buckets the IOC never returned. */
551 for (i = 0; i < priv->max_buckets_out; i++) {
552 if (priv->RcvCtl[i].skb != NULL) {
553 /**/ dlprintk((KERN_INFO MYNAM "/lan_close: bucket %05x "
554 /**/ "is still out\n", i));
555 pci_unmap_single(mpt_dev->pcidev, priv->RcvCtl[i].dma,
558 dev_kfree_skb(priv->RcvCtl[i].skb);
562 kfree (priv->RcvCtl);
563 kfree (priv->mpt_rxfidx);
/* Reclaim any transmit skbs still mapped for DMA. */
565 for (i = 0; i < priv->tx_max_out; i++) {
566 if (priv->SendCtl[i].skb != NULL) {
567 pci_unmap_single(mpt_dev->pcidev, priv->SendCtl[i].dma,
568 priv->SendCtl[i].len,
570 dev_kfree_skb(priv->SendCtl[i].skb);
574 kfree(priv->SendCtl);
575 kfree(priv->mpt_txfidx);
577 atomic_set(&priv->buckets_out, 0);
579 printk(KERN_INFO MYNAM ": %s/%s: interface down & inactive\n",
580 IOC_AND_NETDEV_NAMES_s_s(dev));
585 /*=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=*/
/* mpt_lan_get_stats - netdev get_stats hook; returns the per-device
 * counters embedded in mpt_lan_priv. */
586 static struct net_device_stats *
587 mpt_lan_get_stats(struct net_device *dev)
589 struct mpt_lan_priv *priv = (struct mpt_lan_priv *)dev->priv;
591 return (struct net_device_stats *) &priv->stats;
594 /*=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=*/
/* mpt_lan_change_mtu - reject MTUs outside [MPT_LAN_MIN_MTU,
 * MPT_LAN_MAX_MTU]; the accept/reject return lines are not visible
 * in this dump. */
596 mpt_lan_change_mtu(struct net_device *dev, int new_mtu)
598 if ((new_mtu < MPT_LAN_MIN_MTU) || (new_mtu > MPT_LAN_MAX_MTU))
604 /*=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=*/
605 /* Tx timeout handler. */
/* Tx timeout handler: if the IOC is still active, just kick the queue
 * awake and let pending completions drain. */
607 mpt_lan_tx_timeout(struct net_device *dev)
609 struct mpt_lan_priv *priv = (struct mpt_lan_priv *) dev->priv;
610 MPT_ADAPTER *mpt_dev = priv->mpt_dev;
612 if (mpt_dev->active) {
613 dlprintk (("mptlan/tx_timeout: calling netif_wake_queue for %s.\n", dev->name));
614 netif_wake_queue(dev);
618 /*=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=*/
/*
 * mpt_lan_send_turbo - complete one transmit from a turbo reply:
 * bump stats, unmap the DMA buffer, free the skb, push the context
 * index back on the Tx free stack, and wake the queue.
 * Always returns 0 per the comment in lan_reply (return not visible).
 */
621 mpt_lan_send_turbo(struct net_device *dev, u32 tmsg)
623 struct mpt_lan_priv *priv = (struct mpt_lan_priv *) dev->priv;
624 MPT_ADAPTER *mpt_dev = priv->mpt_dev;
625 struct sk_buff *sent;
629 ctx = GET_LAN_BUFFER_CONTEXT(tmsg);
630 sent = priv->SendCtl[ctx].skb;
632 priv->stats.tx_packets++;
633 priv->stats.tx_bytes += sent->len;
635 dioprintk((KERN_INFO MYNAM ": %s/%s: @%s, skb %p sent.\n",
636 IOC_AND_NETDEV_NAMES_s_s(dev),
637 __FUNCTION__, sent));
639 priv->SendCtl[ctx].skb = NULL;
640 pci_unmap_single(mpt_dev->pcidev, priv->SendCtl[ctx].dma,
641 priv->SendCtl[ctx].len, PCI_DMA_TODEVICE);
642 dev_kfree_skb_irq(sent);
/* Return the Tx context to the free stack under the lock. */
644 spin_lock_irqsave(&priv->txfidx_lock, flags);
645 priv->mpt_txfidx[++priv->mpt_txfidx_tail] = ctx;
646 spin_unlock_irqrestore(&priv->txfidx_lock, flags);
648 netif_wake_queue(dev);
652 /*=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=*/
/*
 * mpt_lan_send_reply - complete one or more transmits from a full
 * LANSendReply frame. Classifies IOCStatus into stats, then for each
 * returned BufferContext unmaps/frees the skb and recycles the Tx
 * context index. Returns FreeReqFrame (set when the reply is not a
 * continuation) so lan_reply can free the request frame.
 * NOTE(review): the per-context loop construct around line 696-710 is
 * not visible in this dump.
 */
654 mpt_lan_send_reply(struct net_device *dev, LANSendReply_t *pSendRep)
656 struct mpt_lan_priv *priv = (struct mpt_lan_priv *) dev->priv;
657 MPT_ADAPTER *mpt_dev = priv->mpt_dev;
658 struct sk_buff *sent;
660 int FreeReqFrame = 0;
665 count = pSendRep->NumberOfContexts;
667 dioprintk((KERN_INFO MYNAM ": send_reply: IOCStatus: %04x\n",
668 le16_to_cpu(pSendRep->IOCStatus)));
670 /* Add check for Loginfo Flag in IOCStatus */
672 switch (le16_to_cpu(pSendRep->IOCStatus) & MPI_IOCSTATUS_MASK) {
673 case MPI_IOCSTATUS_SUCCESS:
674 priv->stats.tx_packets += count;
677 case MPI_IOCSTATUS_LAN_CANCELED:
678 case MPI_IOCSTATUS_LAN_TRANSMIT_ABORTED:
681 case MPI_IOCSTATUS_INVALID_SGL:
682 priv->stats.tx_errors += count;
683 printk (KERN_ERR MYNAM ": %s/%s: ERROR - Invalid SGL sent to IOC!\n",
684 IOC_AND_NETDEV_NAMES_s_s(dev));
/* Any other status counts as a Tx error for all contexts. */
688 priv->stats.tx_errors += count;
692 pContext = &pSendRep->BufferContext;
694 spin_lock_irqsave(&priv->txfidx_lock, flags);
696 ctx = GET_LAN_BUFFER_CONTEXT(le32_to_cpu(*pContext));
698 sent = priv->SendCtl[ctx].skb;
699 priv->stats.tx_bytes += sent->len;
701 dioprintk((KERN_INFO MYNAM ": %s/%s: @%s, skb %p sent.\n",
702 IOC_AND_NETDEV_NAMES_s_s(dev),
703 __FUNCTION__, sent));
705 priv->SendCtl[ctx].skb = NULL;
706 pci_unmap_single(mpt_dev->pcidev, priv->SendCtl[ctx].dma,
707 priv->SendCtl[ctx].len, PCI_DMA_TODEVICE);
708 dev_kfree_skb_irq(sent);
710 priv->mpt_txfidx[++priv->mpt_txfidx_tail] = ctx;
715 spin_unlock_irqrestore(&priv->txfidx_lock, flags);
/* Continuation replies keep the request frame alive. */
718 if (!(pSendRep->MsgFlags & MPI_MSGFLAGS_CONTINUATION_REPLY))
721 netif_wake_queue(dev);
725 /*=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=*/
/*
 * mpt_lan_sdu_send - netdev hard_start_xmit: wrap the skb in a
 * LANSendRequest (transaction context element carrying the Tx context
 * and destination NAA/MAC, plus one 64-bit simple SGE) and post it to
 * the IOC. Stops the queue when Tx contexts or request frames run out.
 * NOTE(review): several lines (returns, closing braces, the FlagsLength
 * length term after line 850) are missing from this dump.
 */
727 mpt_lan_sdu_send (struct sk_buff *skb, struct net_device *dev)
729 struct mpt_lan_priv *priv = (struct mpt_lan_priv *) dev->priv;
730 MPT_ADAPTER *mpt_dev = priv->mpt_dev;
732 LANSendRequest_t *pSendReq;
733 SGETransaction32_t *pTrans;
734 SGESimple64_t *pSimple;
/* Default NAA for RFC 2625 encapsulation; may be overridden below. */
738 u16 cur_naa = 0x1000;
740 dioprintk((KERN_INFO MYNAM ": %s called, skb_addr = %p\n",
743 spin_lock_irqsave(&priv->txfidx_lock, flags);
744 if (priv->mpt_txfidx_tail < 0) {
745 netif_stop_queue(dev);
746 spin_unlock_irqrestore(&priv->txfidx_lock, flags);
748 printk (KERN_ERR "%s: no tx context available: %u\n",
749 __FUNCTION__, priv->mpt_txfidx_tail);
753 mf = mpt_get_msg_frame(LanCtx, mpt_dev->id);
755 netif_stop_queue(dev);
756 spin_unlock_irqrestore(&priv->txfidx_lock, flags);
758 printk (KERN_ERR "%s: Unable to alloc request frame\n",
/* Pop a Tx context while still holding the lock. */
763 ctx = priv->mpt_txfidx[priv->mpt_txfidx_tail--];
764 spin_unlock_irqrestore(&priv->txfidx_lock, flags);
766 // dioprintk((KERN_INFO MYNAM ": %s/%s: Creating new msg frame (send).\n",
767 // IOC_AND_NETDEV_NAMES_s_s(dev)));
769 pSendReq = (LANSendRequest_t *) mf;
771 /* Set the mac.raw pointer, since this apparently isn't getting
772 * done before we get the skb. Pull the data pointer past the mac data.
774 skb->mac.raw = skb->data;
/* Map the whole skb for device DMA and remember it for completion. */
777 dma = pci_map_single(mpt_dev->pcidev, skb->data, skb->len,
780 priv->SendCtl[ctx].skb = skb;
781 priv->SendCtl[ctx].dma = dma;
782 priv->SendCtl[ctx].len = skb->len;
785 pSendReq->Reserved = 0;
786 pSendReq->Function = MPI_FUNCTION_LAN_SEND;
787 pSendReq->ChainOffset = 0;
788 pSendReq->Reserved2 = 0;
789 pSendReq->MsgFlags = 0;
790 pSendReq->PortNumber = priv->pnum;
792 /* Transaction Context Element */
793 pTrans = (SGETransaction32_t *) pSendReq->SG_List;
795 /* No Flags, 8 bytes of Details, 32bit Context (bloody turbo replies) */
796 pTrans->ContextSize = sizeof(u32);
797 pTrans->DetailsLength = 2 * sizeof(u32);
799 pTrans->TransactionContext[0] = cpu_to_le32(ctx);
801 // dioprintk((KERN_INFO MYNAM ": %s/%s: BC = %08x, skb = %p, buff = %p\n",
802 // IOC_AND_NETDEV_NAMES_s_s(dev),
803 // ctx, skb, skb->data));
805 #ifdef QLOGIC_NAA_WORKAROUND
807 struct NAA_Hosed *nh;
809 /* Munge the NAA for Tx packets to QLogic boards, which don't follow
810 RFC 2625. The longer I look at this, the more my opinion of Qlogic
812 read_lock_irq(&bad_naa_lock);
813 for (nh = mpt_bad_naa; nh != NULL; nh=nh->next) {
814 if ((nh->ieee[0] == skb->mac.raw[0]) &&
815 (nh->ieee[1] == skb->mac.raw[1]) &&
816 (nh->ieee[2] == skb->mac.raw[2]) &&
817 (nh->ieee[3] == skb->mac.raw[3]) &&
818 (nh->ieee[4] == skb->mac.raw[4]) &&
819 (nh->ieee[5] == skb->mac.raw[5])) {
821 dlprintk ((KERN_INFO "mptlan/sdu_send: using NAA value "
822 "= %04x.\n", cur_naa));
826 read_unlock_irq(&bad_naa_lock);
/* Details words: NAA + destination MAC packed big-endian-style. */
830 pTrans->TransactionDetails[0] = cpu_to_le32((cur_naa << 16) |
831 (skb->mac.raw[0] << 8) |
832 (skb->mac.raw[1] << 0));
833 pTrans->TransactionDetails[1] = cpu_to_le32((skb->mac.raw[2] << 24) |
834 (skb->mac.raw[3] << 16) |
835 (skb->mac.raw[4] << 8) |
836 (skb->mac.raw[5] << 0));
838 pSimple = (SGESimple64_t *) &pTrans->TransactionDetails[2];
840 /* If we ever decide to send more than one Simple SGE per LANSend, then
841 we will need to make sure that LAST_ELEMENT only gets set on the
842 last one. Otherwise, bad voodoo and evil funkiness will commence. */
843 pSimple->FlagsLength = cpu_to_le32(
844 ((MPI_SGE_FLAGS_LAST_ELEMENT |
845 MPI_SGE_FLAGS_END_OF_BUFFER |
846 MPI_SGE_FLAGS_SIMPLE_ELEMENT |
847 MPI_SGE_FLAGS_SYSTEM_ADDRESS |
848 MPI_SGE_FLAGS_HOST_TO_IOC |
849 MPI_SGE_FLAGS_64_BIT_ADDRESSING |
850 MPI_SGE_FLAGS_END_OF_LIST) << MPI_SGE_FLAGS_SHIFT) |
852 pSimple->Address.Low = cpu_to_le32((u32) dma);
853 if (sizeof(dma_addr_t) > sizeof(u32))
854 pSimple->Address.High = cpu_to_le32((u32) ((u64) dma >> 32));
856 pSimple->Address.High = 0;
858 mpt_put_msg_frame (LanCtx, mpt_dev->id, mf);
859 dev->trans_start = jiffies;
861 dioprintk((KERN_INFO MYNAM ": %s/%s: Sending packet. FlagsLength = %08x.\n",
862 IOC_AND_NETDEV_NAMES_s_s(dev),
863 le32_to_cpu(pSimple->FlagsLength)));
868 /*=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=*/
/*
 * mpt_lan_wake_post_buckets_task - queue post_buckets_task exactly once
 * (guarded by the post_buckets_active bit). priority selects immediate
 * vs. delayed scheduling, with per-kernel-version work-queue APIs.
 */
870 mpt_lan_wake_post_buckets_task(struct net_device *dev, int priority)
872 * @priority: 0 = put it on the timer queue, 1 = put it on the immediate queue
875 struct mpt_lan_priv *priv = dev->priv;
/* Only one poster at a time; bit cleared by the task itself (not
 * visible in this dump — confirm). */
877 if (test_and_set_bit(0, &priv->post_buckets_active) == 0) {
879 #if LINUX_VERSION_CODE >= KERNEL_VERSION(2,5,41)
880 schedule_work(&priv->post_buckets_task);
881 #elif LINUX_VERSION_CODE >= KERNEL_VERSION(2,5,40)
882 schedule_task(&priv->post_buckets_task);
884 queue_task(&priv->post_buckets_task, &tq_immediate);
885 mark_bh(IMMEDIATE_BH);
888 #if LINUX_VERSION_CODE >= KERNEL_VERSION(2,5,41)
889 schedule_delayed_work(&priv->post_buckets_task, 1);
890 #elif LINUX_VERSION_CODE >= KERNEL_VERSION(2,5,40)
891 schedule_task(&priv->post_buckets_task);
893 queue_task(&priv->post_buckets_task, &tq_timer);
895 dioprintk((KERN_INFO MYNAM ": post_buckets queued on "
898 dioprintk((KERN_INFO MYNAM ": %s/%s: Queued post_buckets task.\n",
899 IOC_AND_NETDEV_NAMES_s_s(dev) ));
903 /*=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=*/
/*
 * mpt_lan_receive_skb - hand a completed receive skb to the network
 * stack: set protocol, bump rx stats, and (code not visible here,
 * presumably netif_rx) deliver it; repost buckets if we've dropped
 * below the threshold.
 */
905 mpt_lan_receive_skb(struct net_device *dev, struct sk_buff *skb)
907 struct mpt_lan_priv *priv = dev->priv;
909 skb->protocol = mpt_lan_type_trans(skb, dev);
911 dioprintk((KERN_INFO MYNAM ": %s/%s: Incoming packet (%d bytes) "
912 "delivered to upper level.\n",
913 IOC_AND_NETDEV_NAMES_s_s(dev), skb->len));
915 priv->stats.rx_bytes += skb->len;
916 priv->stats.rx_packets++;
921 dioprintk((MYNAM "/receive_skb: %d buckets remaining\n",
922 atomic_read(&priv->buckets_out)));
/* Running low on posted buckets — schedule an immediate repost. */
924 if (atomic_read(&priv->buckets_out) < priv->bucketthresh)
925 mpt_lan_wake_post_buckets_task(dev, 1);
927 dioprintk((KERN_INFO MYNAM "/receive_post_reply: %d buckets "
928 "remaining, %d received back since sod\n",
929 atomic_read(&priv->buckets_out), priv->total_received));
934 /*=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=*/
/*
 * mpt_lan_receive_post_turbo - complete one receive from a turbo reply.
 * Small packets (< MPT_LAN_RX_COPYBREAK) are copied into a fresh skb so
 * the original bucket can be reused; otherwise (path not fully visible)
 * the bucket skb itself is unmapped and handed up. The context index is
 * recycled and buckets_out decremented either way.
 */
937 mpt_lan_receive_post_turbo(struct net_device *dev, u32 tmsg)
939 struct mpt_lan_priv *priv = dev->priv;
940 MPT_ADAPTER *mpt_dev = priv->mpt_dev;
941 struct sk_buff *skb, *old_skb;
945 ctx = GET_LAN_BUCKET_CONTEXT(tmsg);
946 skb = priv->RcvCtl[ctx].skb;
948 len = GET_LAN_PACKET_LENGTH(tmsg);
950 if (len < MPT_LAN_RX_COPYBREAK) {
953 skb = (struct sk_buff *)dev_alloc_skb(len);
955 printk (KERN_ERR MYNAM ": %s/%s: ERROR - Can't allocate skb! (%s@%d)\n",
956 IOC_AND_NETDEV_NAMES_s_s(dev),
/* Sync the bucket for CPU access before copying out of it. */
961 pci_dma_sync_single(mpt_dev->pcidev, priv->RcvCtl[ctx].dma,
962 priv->RcvCtl[ctx].len, PCI_DMA_FROMDEVICE);
964 memcpy(skb_put(skb, len), old_skb->data, len);
/* Large-packet path: give up the bucket skb entirely. */
971 priv->RcvCtl[ctx].skb = NULL;
973 pci_unmap_single(mpt_dev->pcidev, priv->RcvCtl[ctx].dma,
974 priv->RcvCtl[ctx].len, PCI_DMA_FROMDEVICE);
977 spin_lock_irqsave(&priv->rxfidx_lock, flags);
978 priv->mpt_rxfidx[++priv->mpt_rxfidx_tail] = ctx;
979 spin_unlock_irqrestore(&priv->rxfidx_lock, flags);
981 atomic_dec(&priv->buckets_out);
982 priv->total_received++;
984 return mpt_lan_receive_skb(dev, skb);
987 /*=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=*/
/*
 * mpt_lan_receive_post_free - handle a canceled ReceivePost: the IOC is
 * returning buckets unused (e.g. after a LanReset). Unmap and free each
 * returned bucket's skb and push its context back on the Rx free stack.
 */
989 mpt_lan_receive_post_free(struct net_device *dev,
990 LANReceivePostReply_t *pRecvRep)
992 struct mpt_lan_priv *priv = dev->priv;
993 MPT_ADAPTER *mpt_dev = priv->mpt_dev;
1000 count = pRecvRep->NumberOfContexts;
1002 /**/ dlprintk((KERN_INFO MYNAM "/receive_post_reply: "
1003 "IOC returned %d buckets, freeing them...\n", count));
1005 spin_lock_irqsave(&priv->rxfidx_lock, flags);
1006 for (i = 0; i < count; i++) {
1007 ctx = le32_to_cpu(pRecvRep->BucketContext[i]);
1009 skb = priv->RcvCtl[ctx].skb;
1011 // dlprintk((KERN_INFO MYNAM ": %s: dev_name = %s\n",
1012 // IOC_AND_NETDEV_NAMES_s_s(dev)));
1013 // dlprintk((KERN_INFO MYNAM "@rpr[2], priv = %p, buckets_out addr = %p",
1014 // priv, &(priv->buckets_out)));
1015 // dlprintk((KERN_INFO MYNAM "@rpr[2] TC + 3\n"));
1017 priv->RcvCtl[ctx].skb = NULL;
1018 pci_unmap_single(mpt_dev->pcidev, priv->RcvCtl[ctx].dma,
1019 priv->RcvCtl[ctx].len, PCI_DMA_FROMDEVICE);
1020 dev_kfree_skb_any(skb);
1022 priv->mpt_rxfidx[++priv->mpt_rxfidx_tail] = ctx;
1024 spin_unlock_irqrestore(&priv->rxfidx_lock, flags);
1026 atomic_sub(count, &priv->buckets_out);
1028 // for (i = 0; i < priv->max_buckets_out; i++)
1029 // if (priv->RcvCtl[i].skb != NULL)
1030 // dlprintk((KERN_INFO MYNAM "@rpr: bucket %03x "
1031 // "is still out\n", i));
1033 /* dlprintk((KERN_INFO MYNAM "/receive_post_reply: freed %d buckets\n",
1036 /**/ dlprintk((KERN_INFO MYNAM "@receive_post_reply: %d buckets "
1037 /**/ "remaining, %d received back since sod.\n",
1038 /**/ atomic_read(&priv->buckets_out), priv->total_received));
1042 /*=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=*/
/*
 * mpt_lan_receive_post_reply - complete a receive from a full
 * LANReceivePostReply. Three paths: (1) multi-bucket packet — allocate
 * a new skb and concatenate the fragments out of each bucket;
 * (2) small single-bucket packet — copybreak into a fresh skb so the
 * bucket is reused; (3) large single-bucket packet — unmap and hand the
 * bucket skb up directly. LAN_CANCELED status is diverted to
 * mpt_lan_receive_post_free. Also sanity-checks the driver's
 * buckets_out count against the firmware's BucketsRemaining and
 * triggers a repost when below threshold.
 * NOTE(review): several branch/else/closing lines are absent from this
 * dump; the path structure above follows the visible copybreak and
 * concatenation code — confirm against full source.
 */
1044 mpt_lan_receive_post_reply(struct net_device *dev,
1045 LANReceivePostReply_t *pRecvRep)
1047 struct mpt_lan_priv *priv = dev->priv;
1048 MPT_ADAPTER *mpt_dev = priv->mpt_dev;
1049 struct sk_buff *skb, *old_skb;
1050 unsigned long flags;
1051 u32 len, ctx, offset;
1052 u32 remaining = le32_to_cpu(pRecvRep->BucketsRemaining);
1056 dioprintk((KERN_INFO MYNAM ": mpt_lan_receive_post_reply called\n"));
1057 dioprintk((KERN_INFO MYNAM ": receive_post_reply: IOCStatus: %04x\n",
1058 le16_to_cpu(pRecvRep->IOCStatus)));
1060 if ((le16_to_cpu(pRecvRep->IOCStatus) & MPI_IOCSTATUS_MASK) ==
1061 MPI_IOCSTATUS_LAN_CANCELED)
1062 return mpt_lan_receive_post_free(dev, pRecvRep);
1064 len = le32_to_cpu(pRecvRep->PacketLength);
1066 printk (KERN_ERR MYNAM ": %s/%s: ERROR - Got a non-TURBO "
1067 "ReceivePostReply w/ PacketLength zero!\n",
1068 IOC_AND_NETDEV_NAMES_s_s(dev));
1069 printk (KERN_ERR MYNAM ": MsgFlags = %02x, IOCStatus = %04x\n",
1070 pRecvRep->MsgFlags, le16_to_cpu(pRecvRep->IOCStatus));
1074 ctx = le32_to_cpu(pRecvRep->BucketContext[0]);
1075 count = pRecvRep->NumberOfContexts;
1076 skb = priv->RcvCtl[ctx].skb;
1078 offset = le32_to_cpu(pRecvRep->PacketOffset);
1079 // if (offset != 0) {
1080 // printk (KERN_INFO MYNAM ": %s/%s: Got a ReceivePostReply "
1081 // "w/ PacketOffset %u\n",
1082 // IOC_AND_NETDEV_NAMES_s_s(dev),
1086 dioprintk((KERN_INFO MYNAM ": %s/%s: @rpr, offset = %d, len = %d\n",
1087 IOC_AND_NETDEV_NAMES_s_s(dev),
/* Path (1): packet spans multiple buckets — concatenate. */
1093 // dioprintk((KERN_INFO MYNAM ": %s/%s: Multiple buckets returned "
1094 // "for single packet, concatenating...\n",
1095 // IOC_AND_NETDEV_NAMES_s_s(dev)));
1097 skb = (struct sk_buff *)dev_alloc_skb(len);
1099 printk (KERN_ERR MYNAM ": %s/%s: ERROR - Can't allocate skb! (%s@%d)\n",
1100 IOC_AND_NETDEV_NAMES_s_s(dev),
1101 __FILE__, __LINE__);
1105 spin_lock_irqsave(&priv->rxfidx_lock, flags);
1106 for (i = 0; i < count; i++) {
1108 ctx = le32_to_cpu(pRecvRep->BucketContext[i]);
1109 old_skb = priv->RcvCtl[ctx].skb;
1111 l = priv->RcvCtl[ctx].len;
1115 // dioprintk((KERN_INFO MYNAM ": %s/%s: Buckets = %d, len = %u\n",
1116 // IOC_AND_NETDEV_NAMES_s_s(dev),
1119 pci_dma_sync_single(mpt_dev->pcidev,
1120 priv->RcvCtl[ctx].dma,
1121 priv->RcvCtl[ctx].len,
1122 PCI_DMA_FROMDEVICE);
1123 memcpy(skb_put(skb, l), old_skb->data, l);
1125 priv->mpt_rxfidx[++priv->mpt_rxfidx_tail] = ctx;
1128 spin_unlock_irqrestore(&priv->rxfidx_lock, flags);
/* Path (2): small single-bucket packet — copybreak. */
1130 } else if (len < MPT_LAN_RX_COPYBREAK) {
1134 skb = (struct sk_buff *)dev_alloc_skb(len);
1136 printk (KERN_ERR MYNAM ": %s/%s: ERROR - Can't allocate skb! (%s@%d)\n",
1137 IOC_AND_NETDEV_NAMES_s_s(dev),
1138 __FILE__, __LINE__);
1142 pci_dma_sync_single(mpt_dev->pcidev, priv->RcvCtl[ctx].dma,
1143 priv->RcvCtl[ctx].len, PCI_DMA_FROMDEVICE);
1145 memcpy(skb_put(skb, len), old_skb->data, len);
1147 spin_lock_irqsave(&priv->rxfidx_lock, flags);
1148 priv->mpt_rxfidx[++priv->mpt_rxfidx_tail] = ctx;
1149 spin_unlock_irqrestore(&priv->rxfidx_lock, flags);
/* Path (3): large single-bucket packet — hand the bucket up. */
1152 spin_lock_irqsave(&priv->rxfidx_lock, flags);
1154 priv->RcvCtl[ctx].skb = NULL;
1156 pci_unmap_single(mpt_dev->pcidev, priv->RcvCtl[ctx].dma,
1157 priv->RcvCtl[ctx].len, PCI_DMA_FROMDEVICE);
1158 priv->RcvCtl[ctx].dma = 0;
1160 priv->mpt_rxfidx[++priv->mpt_rxfidx_tail] = ctx;
1161 spin_unlock_irqrestore(&priv->rxfidx_lock, flags);
1166 atomic_sub(count, &priv->buckets_out);
1167 priv->total_received += count;
/* Sanity check: free-stack overflow means bookkeeping is corrupt. */
1169 if (priv->mpt_rxfidx_tail >= MPT_LAN_MAX_BUCKETS_OUT) {
1170 printk (KERN_ERR MYNAM ": %s/%s: Yoohoo! mpt_rxfidx_tail = %d, "
1171 "MPT_LAN_MAX_BUCKETS_OUT = %d\n",
1172 IOC_AND_NETDEV_NAMES_s_s(dev),
1173 priv->mpt_rxfidx_tail,
1174 MPT_LAN_MAX_BUCKETS_OUT);
1176 panic("Damn it Jim! I'm a doctor, not a programmer! "
1177 "Oh, wait a sec, I am a programmer. "
1178 "And, who's Jim?!?!\n"
1179 "Arrgghh! We've done it again!\n");
1183 printk (KERN_WARNING MYNAM ": %s/%s: WARNING - IOC out of buckets! "
1184 "(priv->buckets_out = %d)\n",
1185 IOC_AND_NETDEV_NAMES_s_s(dev),
1186 atomic_read(&priv->buckets_out));
1187 else if (remaining < 10)
1188 printk (KERN_INFO MYNAM ": %s/%s: IOC says %d buckets left. "
1189 "(priv->buckets_out = %d)\n",
1190 IOC_AND_NETDEV_NAMES_s_s(dev),
1191 remaining, atomic_read(&priv->buckets_out));
/* Large drift between our count and FW's count — reset FW hashtable. */
1193 if ((remaining < priv->bucketthresh) &&
1194 ((atomic_read(&priv->buckets_out) - remaining) >
1195 MPT_LAN_BUCKETS_REMAIN_MISMATCH_THRESH)) {
1197 printk (KERN_WARNING MYNAM " Mismatch between driver's "
1198 "buckets_out count and fw's BucketsRemaining "
1199 "count has crossed the threshold, issuing a "
1200 "LanReset to clear the fw's hashtable. You may "
1201 "want to check your /var/log/messages for \"CRC "
1202 "error\" event notifications.\n");
1205 mpt_lan_wake_post_buckets_task(dev, 0);
1208 return mpt_lan_receive_skb(dev, skb);
1211 /*=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=*/
1212 /* Simple SGE's only at the moment */
/*
 * mpt_lan_post_receive_buckets - replenish the IOC's pool of posted
 * receive buffers ("buckets").
 * @dev_id: the struct net_device for this LAN port, passed as void *
 *          because this routine also serves as the deferred-task
 *          callback installed by MPT_INIT_WORK in mpt_register_lan_device.
 *
 * Builds one LANReceivePostRequest message frame holding a 32-bit
 * context transaction plus a 64-bit simple SGE per bucket, mapping a
 * new (or recycled) skb for each, and posts the frame to the firmware.
 *
 * NOTE(review): several lines of this function (the mf declaration,
 * its NULL check, the loop-count setup, closing braces) are elided
 * from this chunk; comments below describe only the visible code.
 */
1215 mpt_lan_post_receive_buckets(void *dev_id)
1217 struct net_device *dev = dev_id;
1218 struct mpt_lan_priv *priv = dev->priv;
1219 MPT_ADAPTER *mpt_dev = priv->mpt_dev;
1221 LANReceivePostRequest_t *pRecvReq;
1222 SGETransaction32_t *pTrans;
1223 SGESimple64_t *pSimple;
1224 struct sk_buff *skb;
1226 u32 curr, buckets, count, max;
/* Bucket size: MTU plus the FC hard header plus 4 bytes of slack. */
1227 u32 len = (dev->mtu + dev->hard_header_len + 4);
1228 unsigned long flags;
/* How far below the configured maximum the IOC currently is. */
1231 curr = atomic_read(&priv->buckets_out);
1232 buckets = (priv->max_buckets_out - curr);
1234 dioprintk((KERN_INFO MYNAM ": %s/%s: @%s, Start_buckets = %u, buckets_out = %u\n",
1235 IOC_AND_NETDEV_NAMES_s_s(dev),
1236 __FUNCTION__, buckets, curr));
/* Max transaction+SGE pairs that fit in one request frame after the
 * fixed LANReceivePostRequest header. */
1238 max = (mpt_dev->req_sz - MPT_LAN_RECEIVE_POST_REQUEST_SIZE) /
1239 (MPT_LAN_TRANSACTION32_SIZE + sizeof(SGESimple64_t));
/* Grab a message frame from the adapter pool (NULL check elided). */
1242 mf = mpt_get_msg_frame(LanCtx, mpt_dev->id);
1244 printk (KERN_ERR "%s: Unable to alloc request frame\n",
1246 dioprintk((KERN_ERR "%s: %u buckets remaining\n",
1247 __FUNCTION__, buckets));
1250 pRecvReq = (LANReceivePostRequest_t *) mf;
/* Fill in the fixed part of the LAN receive-post request. */
1256 pRecvReq->Function = MPI_FUNCTION_LAN_RECEIVE;
1257 pRecvReq->ChainOffset = 0;
1258 pRecvReq->MsgFlags = 0;
1259 pRecvReq->PortNumber = priv->pnum;
1261 pTrans = (SGETransaction32_t *) pRecvReq->SG_List;
/* One transaction + SGE per bucket; count is set in elided code
 * (presumably min(buckets, max) -- TODO confirm in full source). */
1264 for (i = 0; i < count; i++) {
/* Pop a free receive-context index under the rxfidx lock. */
1267 spin_lock_irqsave(&priv->rxfidx_lock, flags);
1268 if (priv->mpt_rxfidx_tail < 0) {
1269 printk (KERN_ERR "%s: Can't alloc context\n",
1271 spin_unlock_irqrestore(&priv->rxfidx_lock,
1276 ctx = priv->mpt_rxfidx[priv->mpt_rxfidx_tail--];
1278 skb = priv->RcvCtl[ctx].skb;
/* A leftover skb of the wrong size (e.g. after an MTU change)
 * cannot be recycled: unmap it, free it, then allocate fresh. */
1279 if (skb && (priv->RcvCtl[ctx].len != len)) {
1280 pci_unmap_single(mpt_dev->pcidev,
1281 priv->RcvCtl[ctx].dma,
1282 priv->RcvCtl[ctx].len,
1283 PCI_DMA_FROMDEVICE);
1284 dev_kfree_skb(priv->RcvCtl[ctx].skb);
1285 skb = priv->RcvCtl[ctx].skb = NULL;
/* No usable skb in this slot: allocate and DMA-map a new one.
 * On allocation failure, return the context index and bail. */
1289 skb = dev_alloc_skb(len);
1291 printk (KERN_WARNING
1292 MYNAM "/%s: Can't alloc skb\n",
1294 priv->mpt_rxfidx[++priv->mpt_rxfidx_tail] = ctx;
1295 spin_unlock_irqrestore(&priv->rxfidx_lock, flags);
1299 dma = pci_map_single(mpt_dev->pcidev, skb->data,
1300 len, PCI_DMA_FROMDEVICE);
1302 priv->RcvCtl[ctx].skb = skb;
1303 priv->RcvCtl[ctx].dma = dma;
1304 priv->RcvCtl[ctx].len = len;
1307 spin_unlock_irqrestore(&priv->rxfidx_lock, flags);
/* Transaction element carries the bucket's context id so the reply
 * handler can find the matching RcvCtl slot. */
1309 pTrans->ContextSize = sizeof(u32);
1310 pTrans->DetailsLength = 0;
1312 pTrans->TransactionContext[0] = cpu_to_le32(ctx);
1314 pSimple = (SGESimple64_t *) pTrans->TransactionDetails;
/* 64-bit simple SGE pointing at the DMA-mapped bucket buffer. */
1316 pSimple->FlagsLength = cpu_to_le32(
1317 ((MPI_SGE_FLAGS_END_OF_BUFFER |
1318 MPI_SGE_FLAGS_SIMPLE_ELEMENT |
1319 MPI_SGE_FLAGS_64_BIT_ADDRESSING) << MPI_SGE_FLAGS_SHIFT) | len);
1320 pSimple->Address.Low = cpu_to_le32((u32) priv->RcvCtl[ctx].dma);
/* Only fill the high dword when dma_addr_t is wider than 32 bits. */
1321 if (sizeof(dma_addr_t) > sizeof(u32))
1322 pSimple->Address.High = cpu_to_le32((u32) ((u64) priv->RcvCtl[ctx].dma >> 32));
1324 pSimple->Address.High = 0;
/* The next transaction starts immediately after this SGE. */
1326 pTrans = (SGETransaction32_t *) (pSimple + 1);
/* No SGE was ever written: nothing posted, give the frame back. */
1329 if (pSimple == NULL) {
1330 /**/ printk (KERN_WARNING MYNAM "/%s: No buckets posted\n",
1332 mpt_free_msg_frame(LanCtx, mpt_dev->id, mf);
/* Mark the final SGE end-of-list and record how many we posted. */
1336 pSimple->FlagsLength |= cpu_to_le32(MPI_SGE_FLAGS_END_OF_LIST << MPI_SGE_FLAGS_SHIFT);
1338 pRecvReq->BucketCount = cpu_to_le32(i);
1340 /* printk(KERN_INFO MYNAM ": posting buckets\n ");
1341 * for (i = 0; i < j + 2; i ++)
1342 * printk (" %08x", le32_to_cpu(msg[i]));
/* Hand the request to the IOC and account for the posted buckets. */
1346 mpt_put_msg_frame(LanCtx, mpt_dev->id, mf);
1348 priv->total_posted += i;
1350 atomic_add(i, &priv->buckets_out);
1354 dioprintk((KERN_INFO MYNAM "/%s: End_buckets = %u, priv->buckets_out = %u\n",
1355 __FUNCTION__, buckets, atomic_read(&priv->buckets_out)));
1356 dioprintk((KERN_INFO MYNAM "/%s: Posted %u buckets and received %u back\n",
1357 __FUNCTION__, priv->total_posted, priv->total_received));
/* Allow the next deferred bucket-post task to be scheduled. */
1359 clear_bit(0, &priv->post_buckets_active);
1362 /*=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=*/
/*
 * mpt_register_lan_device - create and initialize the net_device for
 * one LAN-capable port of an MPT adapter.
 * @mpt_dev: the adapter owning the port.
 * @pnum:    port number on that adapter.
 *
 * Returns the new net_device (error paths are elided from this chunk).
 * NOTE(review): several lines (return statements, some dlprintk
 * arguments, the HWaddr byte extraction) are not visible here;
 * comments describe only the visible code.
 */
1364 mpt_register_lan_device (MPT_ADAPTER *mpt_dev, int pnum)
1366 struct net_device *dev = NULL;
1367 struct mpt_lan_priv *priv = NULL;
1368 u8 HWaddr[FC_ALEN], *a;
/* Allocate the FC net_device with our private area appended. */
1370 dev = init_fcdev(NULL, sizeof(struct mpt_lan_priv));
1373 dev->mtu = MPT_LAN_MTU;
1375 priv = (struct mpt_lan_priv *) dev->priv;
1377 priv->mpt_dev = mpt_dev;
/* Deferred task that refills the IOC's receive buckets. */
1380 memset(&priv->post_buckets_task, 0, sizeof(struct mpt_work_struct));
1381 MPT_INIT_WORK(&priv->post_buckets_task, mpt_lan_post_receive_buckets, dev);
1382 priv->post_buckets_active = 0;
1384 dlprintk((KERN_INFO MYNAM "@%d: bucketlen = %d\n",
1385 __LINE__, dev->mtu + dev->hard_header_len + 4));
/* Bucket accounting, clamped to what the firmware says it supports. */
1387 atomic_set(&priv->buckets_out, 0);
1388 priv->total_posted = 0;
1389 priv->total_received = 0;
1390 priv->max_buckets_out = max_buckets_out;
1391 if (mpt_dev->pfacts[0].MaxLanBuckets < max_buckets_out)
1392 priv->max_buckets_out = mpt_dev->pfacts[0].MaxLanBuckets;
1394 dlprintk((KERN_INFO MYNAM "@%d: MaxLanBuckets=%d, max_buckets_out/priv=%d/%d\n",
1396 mpt_dev->pfacts[0].MaxLanBuckets,
1398 priv->max_buckets_out));
/* Refill threshold: two thirds of the maximum outstanding buckets. */
1400 priv->bucketthresh = priv->max_buckets_out * 2 / 3;
/* NOTE(review): SPIN_LOCK_UNLOCKED assignment is the old-style
 * initializer; later kernels require spin_lock_init() instead. */
1401 priv->txfidx_lock = SPIN_LOCK_UNLOCKED;
1402 priv->rxfidx_lock = SPIN_LOCK_UNLOCKED;
1404 memset(&priv->stats, 0, sizeof(priv->stats));
1406 /* Grab pre-fetched LANPage1 stuff. :-) */
1407 a = (u8 *) &mpt_dev->lan_cnfg_page1.HardwareAddressLow;
/* Install the FC hardware address (the byte extraction into HWaddr
 * is elided from this chunk) and an all-ones broadcast address. */
1416 dev->addr_len = FC_ALEN;
1417 memcpy(dev->dev_addr, HWaddr, FC_ALEN);
1418 memset(dev->broadcast, 0xff, FC_ALEN);
1420 /* The Tx queue is 127 deep on the 909.
1421 * Give ourselves some breathing room.
/* Cap the outstanding-TX limit at the compile-time maximum. */
1423 priv->tx_max_out = (tx_max_out_p <= MPT_TX_MAX_OUT_LIM) ?
1424 tx_max_out_p : MPT_TX_MAX_OUT_LIM;
/* Wire up the net_device operations (old-style member assignment,
 * pre-net_device_ops kernels). */
1426 dev->open = mpt_lan_open;
1427 dev->stop = mpt_lan_close;
1428 dev->get_stats = mpt_lan_get_stats;
1429 dev->set_multicast_list = NULL;
1430 dev->change_mtu = mpt_lan_change_mtu;
1431 dev->hard_start_xmit = mpt_lan_sdu_send;
1433 /* Not in 2.3.42. Need 2.3.45+ */
1434 dev->tx_timeout = mpt_lan_tx_timeout;
1435 dev->watchdog_timeo = MPT_LAN_TX_TIMEOUT;
1437 dlprintk((KERN_INFO MYNAM ": Finished registering dev "
1438 "and setting initial values\n"));
1440 SET_MODULE_OWNER(dev);
1445 /*=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=*/
/* Module init body.  The signature line is elided from this chunk;
 * module_init() at the bottom of the file registers mpt_lan_init, so
 * this is presumably that routine -- TODO confirm against full source.
 * Registers with the MPT base driver, hooks IOC-reset notification,
 * then walks every adapter and registers a LAN net_device for each
 * port that advertises the LAN protocol. */
1449 struct net_device *dev;
1450 MPT_ADAPTER *curadapter;
1453 show_mptmod_ver(LANAME, LANVER);
1455 #ifdef QLOGIC_NAA_WORKAROUND
1456 /* Init the global r/w lock for the bad_naa list. We want to do this
1457 before any boards are initialized and may be used. */
1458 rwlock_init(&bad_naa_lock);
/* Obtain a callback context from the MPT base driver. */
1461 if ((LanCtx = mpt_register(lan_reply, MPTLAN_DRIVER)) <= 0) {
1462 printk (KERN_ERR MYNAM ": Failed to register with MPT base driver\n");
1466 /* Set the callback index to be used by driver core for turbo replies */
1467 mpt_lan_index = LanCtx;
1469 dlprintk((KERN_INFO MYNAM ": assigned context of %d\n", LanCtx));
1471 if (mpt_reset_register(LanCtx, mpt_lan_ioc_reset) == 0) {
1472 dlprintk((KERN_INFO MYNAM ": Registered for IOC reset notifications\n"));
1474 printk(KERN_ERR MYNAM ": Eieee! unable to register a reset "
1475 "handler with mptbase! The world is at an end! "
1476 "Everything is fading to black! Goodbye.\n");
/* Start with an empty device table. */
1480 for (j = 0; j < MPT_MAX_ADAPTERS; j++) {
1481 mpt_landev[j] = NULL;
/* Iterate over every known adapter and each of its ports. */
1484 curadapter = mpt_adapter_find_first();
1485 while (curadapter != NULL) {
1486 for (i = 0; i < curadapter->facts.NumberOfPorts; i++) {
1487 printk (KERN_INFO MYNAM ": %s: PortNum=%x, ProtocolFlags=%02Xh (%c%c%c%c)\n",
1489 curadapter->pfacts[i].PortNumber,
1490 curadapter->pfacts[i].ProtocolFlags,
1491 MPT_PROTOCOL_FLAGS_c_c_c_c(curadapter->pfacts[i].ProtocolFlags));
/* Only ports that advertise the LAN protocol get a net_device. */
1493 if (curadapter->pfacts[i].ProtocolFlags & MPI_PORTFACTS_PROTOCOL_LAN) {
1494 dev = mpt_register_lan_device (curadapter, i);
1496 printk (KERN_INFO MYNAM ": %s: Fusion MPT LAN device registered as '%s'\n",
1497 curadapter->name, dev->name);
1498 printk (KERN_INFO MYNAM ": %s/%s: LanAddr = %02X:%02X:%02X:%02X:%02X:%02X\n",
1499 IOC_AND_NETDEV_NAMES_s_s(dev),
1500 dev->dev_addr[0], dev->dev_addr[1],
1501 dev->dev_addr[2], dev->dev_addr[3],
1502 dev->dev_addr[4], dev->dev_addr[5]);
1503 // printk (KERN_INFO MYNAM ": %s/%s: Max_TX_outstanding = %d\n",
1504 // IOC_AND_NETDEV_NAMES_s_s(dev),
1505 // NETDEV_TO_LANPRIV_PTR(dev)->tx_max_out);
/* Record the device (j's increment is in code elided here). */
1507 mpt_landev[j] = dev;
1508 dlprintk((KERN_INFO MYNAM "/init: dev_addr=%p, mpt_landev[%d]=%p\n",
1509 dev, j, mpt_landev[j]));
1512 printk (KERN_ERR MYNAM ": %s: Unable to register port%d as a LAN device\n",
1514 curadapter->pfacts[i].PortNumber);
1517 printk (KERN_INFO MYNAM ": %s: Hmmm... LAN protocol seems to be disabled on this adapter port!\n",
1521 curadapter = mpt_adapter_find_next(curadapter);
1527 /*=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=*/
/*
 * mpt_lan_exit - module unload: unregister every LAN net_device and
 * detach from the MPT base driver.
 *
 * BUGFIX: this routine was annotated __init, which places it in the
 * init text section the kernel discards after boot-time initialization.
 * It is registered via module_exit() below, so invoking it at rmmod
 * would jump into discarded memory.  An exit handler must be __exit.
 */
1528 void __exit mpt_lan_exit(void)
/* Stop receiving IOC-reset notifications first. */
1532 mpt_reset_deregister(LanCtx);
/* Walk the contiguous prefix of registered devices; the table was
 * NULL-filled at init, so the first NULL terminates the scan. */
1534 for (i = 0; mpt_landev[i] != NULL; i++) {
1535 struct net_device *dev = mpt_landev[i];
1537 printk (KERN_INFO MYNAM ": %s/%s: Fusion MPT LAN device unregistered\n",
1538 IOC_AND_NETDEV_NAMES_s_s(dev));
1539 unregister_fcdev(dev);
/* Poison the slot so any stale use shows up as an obvious bad ptr. */
1540 mpt_landev[i] = (struct net_device *) 0xdeadbeef; /* Debug */
/* Release our callback context with the MPT base driver. */
1544 mpt_deregister(LanCtx);
1552 /*=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=*/
/* Module parameters, using the old 2.4-style MODULE_PARM interface:
 * tx_max_out_p caps outstanding transmits per device (clamped to
 * MPT_TX_MAX_OUT_LIM in mpt_register_lan_device); max_buckets_out
 * bounds posted receive buckets. */
1554 MODULE_PARM(tx_max_out_p, "i");
1555 MODULE_PARM(max_buckets_out, "i"); // Debug stuff. FIXME!
/* Module load/unload entry points. */
1557 module_init(mpt_lan_init);
1558 module_exit(mpt_lan_exit);
1560 /*=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=*/
/*
 * mpt_lan_type_trans - classify a received FC LAN frame and return the
 * value to store in skb->protocol (network byte order).
 * @skb: received frame; data points at the mpt_lan_ohdr on entry.
 * @dev: the net_device the frame arrived on.
 *
 * Sets skb->mac.raw and skb->pkt_type, strips the FC LAN header (and,
 * for IP/ARP over SNAP, the LLC/SNAP header too), and returns the
 * ethertype, or ETH_P_802_2 for anything else.
 * NOTE(review): several lines are elided from this chunk (the
 * byte-swap repair body, various else arms and braces); comments
 * describe only the visible code.
 */
1561 static unsigned short
1562 mpt_lan_type_trans(struct sk_buff *skb, struct net_device *dev)
1564 struct mpt_lan_ohdr *fch = (struct mpt_lan_ohdr *)skb->data;
1565 struct fcllc *fcllc;
/* Record where the link-level header starts, then strip it. */
1567 skb->mac.raw = skb->data;
1568 skb_pull(skb, sizeof(struct mpt_lan_ohdr));
/* dtype of 0xffff flags the known sender-side byte-swap firmware
 * bug; the in-place repair code is elided from this chunk. */
1570 if (fch->dtype == htons(0xffff)) {
1571 u32 *p = (u32 *) fch;
1578 printk (KERN_WARNING MYNAM ": %s: WARNING - Broadcast swap F/W bug detected!\n",
1579 NETDEV_PTR_TO_IOC_NAME_s(dev));
1580 printk (KERN_WARNING MYNAM ": Please update sender @ MAC_addr = %02x:%02x:%02x:%02x:%02x:%02x\n",
1581 fch->saddr[0], fch->saddr[1], fch->saddr[2],
1582 fch->saddr[3], fch->saddr[4], fch->saddr[5]);
/* Classify by destination address: group bit set in the first byte
 * means broadcast or multicast; otherwise host vs. otherhost. */
1585 if (*fch->daddr & 1) {
1586 if (!memcmp(fch->daddr, dev->broadcast, FC_ALEN)) {
1587 skb->pkt_type = PACKET_BROADCAST;
1589 skb->pkt_type = PACKET_MULTICAST;
1592 if (memcmp(fch->daddr, dev->dev_addr, FC_ALEN)) {
1593 skb->pkt_type = PACKET_OTHERHOST;
1595 skb->pkt_type = PACKET_HOST;
/* skb->data now points at the LLC/SNAP header. */
1599 fcllc = (struct fcllc *)skb->data;
1601 #ifdef QLOGIC_NAA_WORKAROUND
1603 u16 source_naa = fch->stype, found = 0;
1605 /* Workaround for QLogic not following RFC 2625 in regards to the NAA
/* A zero high nibble suggests the NAA arrived byte-swapped;
 * normalize it before checking compliance. */
1608 if ((source_naa & 0xF000) == 0)
1609 source_naa = swab16(source_naa);
1611 if (fcllc->ethertype == htons(ETH_P_ARP))
1612 dlprintk ((KERN_INFO "mptlan/type_trans: got arp req/rep w/ naa of "
1613 "%04x.\n", source_naa));
/* ARP from a sender whose NAA is not RFC 2625 compliant: remember
 * the sender in the global bad_naa list for later fix-ups. */
1615 if ((fcllc->ethertype == htons(ETH_P_ARP)) &&
1616 ((source_naa >> 12) != MPT_LAN_NAA_RFC2625)){
1617 struct NAA_Hosed *nh, *prevnh;
1620 dlprintk ((KERN_INFO "mptlan/type_trans: ARP Req/Rep from "
1621 "system with non-RFC 2625 NAA value (%04x).\n",
/* Search the list for this source MAC under the writer lock. */
1624 write_lock_irq(&bad_naa_lock);
1625 for (prevnh = nh = mpt_bad_naa; nh != NULL;
1626 prevnh=nh, nh=nh->next) {
1627 if ((nh->ieee[0] == fch->saddr[0]) &&
1628 (nh->ieee[1] == fch->saddr[1]) &&
1629 (nh->ieee[2] == fch->saddr[2]) &&
1630 (nh->ieee[3] == fch->saddr[3]) &&
1631 (nh->ieee[4] == fch->saddr[4]) &&
1632 (nh->ieee[5] == fch->saddr[5])) {
1634 dlprintk ((KERN_INFO "mptlan/type_trans: ARP Re"
1635 "q/Rep w/ bad NAA from system already"
/* Not found: allocate and append a new entry.
 * NOTE(review): kmalloc(GFP_KERNEL) while holding bad_naa_lock
 * via write_lock_irq (can sleep with irqs disabled) looks
 * unsafe -- worth confirming against the full source. */
1641 if ((!found) && (nh == NULL)) {
1643 nh = kmalloc(sizeof(struct NAA_Hosed), GFP_KERNEL);
1644 dlprintk ((KERN_INFO "mptlan/type_trans: ARP Req/Rep w/"
1645 " bad NAA from system not yet in DB.\n"));
1654 nh->NAA = source_naa; /* Set the S_NAA value. */
1655 for (i = 0; i < FC_ALEN; i++)
1656 nh->ieee[i] = fch->saddr[i];
1657 dlprintk ((KERN_INFO "Got ARP from %02x:%02x:%02x:%02x:"
1658 "%02x:%02x with non-compliant S_NAA value.\n",
1659 fch->saddr[0], fch->saddr[1], fch->saddr[2],
1660 fch->saddr[3], fch->saddr[4],fch->saddr[5]));
1662 printk (KERN_ERR "mptlan/type_trans: Unable to"
1663 " kmalloc a NAA_Hosed struct.\n");
1665 } else if (!found) {
1666 printk (KERN_ERR "mptlan/type_trans: found not"
1667 " set, but nh isn't null. Evil "
1668 "funkiness abounds.\n");
1670 write_unlock_irq(&bad_naa_lock);
1675 /* Strip the SNAP header from ARP packets since we don't
1676 * pass them through to the 802.2/SNAP layers.
1678 if (fcllc->dsap == EXTENDED_SAP &&
1679 (fcllc->ethertype == htons(ETH_P_IP) ||
1680 fcllc->ethertype == htons(ETH_P_ARP))) {
1681 skb_pull(skb, sizeof(struct fcllc));
1682 return fcllc->ethertype;
/* Everything else is handed up as raw 802.2. */
1685 return htons(ETH_P_802_2);
1688 /*=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=*/