/*
 *  linux/drivers/message/fusion/mptlan.c
 *      IP Over Fibre Channel device driver.
 *      For use with LSI Fibre Channel PCI chip/adapters
 *      running LSI Fusion MPT (Message Passing Technology) firmware.
 *
 *  Copyright (c) 2000-2008 LSI Corporation
 *  (mailto:DL-MPTFusionLinux@lsi.com)
 *
 */
/*=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=*/
/*
    This program is free software; you can redistribute it and/or modify
    it under the terms of the GNU General Public License as published by
    the Free Software Foundation; version 2 of the License.

    This program is distributed in the hope that it will be useful,
    but WITHOUT ANY WARRANTY; without even the implied warranty of
    MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
    GNU General Public License for more details.

    NO WARRANTY
    THE PROGRAM IS PROVIDED ON AN "AS IS" BASIS, WITHOUT WARRANTIES OR
    CONDITIONS OF ANY KIND, EITHER EXPRESS OR IMPLIED INCLUDING, WITHOUT
    LIMITATION, ANY WARRANTIES OR CONDITIONS OF TITLE, NON-INFRINGEMENT,
    MERCHANTABILITY OR FITNESS FOR A PARTICULAR PURPOSE. Each Recipient is
    solely responsible for determining the appropriateness of using and
    distributing the Program and assumes all risks associated with its
    exercise of rights under this Agreement, including but not limited to
    the risks and costs of program errors, damage to or loss of data,
    programs or equipment, and unavailability or interruption of operations.

    DISCLAIMER OF LIABILITY
    NEITHER RECIPIENT NOR ANY CONTRIBUTORS SHALL HAVE ANY LIABILITY FOR ANY
    DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
    DAMAGES (INCLUDING WITHOUT LIMITATION LOST PROFITS), HOWEVER CAUSED AND
    ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR
    TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE
    USE OR DISTRIBUTION OF THE PROGRAM OR THE EXERCISE OF ANY RIGHTS GRANTED
    HEREUNDER, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGES

    You should have received a copy of the GNU General Public License
    along with this program; if not, write to the Free Software
    Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA  02111-1307  USA
*/

/*=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=*/
/*
 * Define statements used for debugging
 */
//#define MPT_LAN_IO_DEBUG

/*=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=*/

#include "mptlan.h"
#include <linux/init.h>
#include <linux/module.h>
#include <linux/fs.h>
#include <linux/sched.h>

#define my_VERSION	MPT_LINUX_VERSION_COMMON
#define MYNAM		"mptlan"

MODULE_LICENSE("GPL");
MODULE_VERSION(my_VERSION);

/*=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=*/
/*
 * MPT LAN message sizes without variable part.
 */
#define MPT_LAN_RECEIVE_POST_REQUEST_SIZE \
	(sizeof(LANReceivePostRequest_t) - sizeof(SGE_MPI_UNION))

#define MPT_LAN_TRANSACTION32_SIZE \
	(sizeof(SGETransaction32_t) - sizeof(u32))

/*
 *  Fusion MPT LAN private structures
 */

struct BufferControl {
	struct sk_buff	*skb;
	dma_addr_t	dma;
	unsigned int	len;
};

struct mpt_lan_priv {
	MPT_ADAPTER *mpt_dev;
	u8 pnum; /* Port number in the IOC. This is not a Unix network port! */

	atomic_t buckets_out;		/* number of unused buckets on IOC */
	int bucketthresh;		/* Send more when this many left */

	int *mpt_txfidx; /* Free Tx Context list */
	int mpt_txfidx_tail;
	spinlock_t txfidx_lock;

	int *mpt_rxfidx; /* Free Rx Context list */
	int mpt_rxfidx_tail;
	spinlock_t rxfidx_lock;

	struct BufferControl *RcvCtl;	/* Receive BufferControl structs */
	struct BufferControl *SendCtl;	/* Send BufferControl structs */

	int max_buckets_out;		/* Max buckets to send to IOC */
	int tx_max_out;			/* IOC's Tx queue len */

	u32 total_posted;
	u32 total_received;

	struct delayed_work post_buckets_task;
	struct net_device *dev;
	unsigned long post_buckets_active;
};

struct mpt_lan_ohdr {
	u16	dtype;
	u8	daddr[FC_ALEN];
	u16	stype;
	u8	saddr[FC_ALEN];
};

/*=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=*/

/*
 *  Forward protos...
 */
static int  lan_reply (MPT_ADAPTER *ioc, MPT_FRAME_HDR *mf,
		       MPT_FRAME_HDR *reply);
static int  mpt_lan_open(struct net_device *dev);
static int  mpt_lan_reset(struct net_device *dev);
static int  mpt_lan_close(struct net_device *dev);
static void mpt_lan_post_receive_buckets(struct mpt_lan_priv *priv);
static void mpt_lan_wake_post_buckets_task(struct net_device *dev,
					   int priority);
static int  mpt_lan_receive_post_turbo(struct net_device *dev, u32 tmsg);
static int  mpt_lan_receive_post_reply(struct net_device *dev,
				       LANReceivePostReply_t *pRecvRep);
static int  mpt_lan_send_turbo(struct net_device *dev, u32 tmsg);
static int  mpt_lan_send_reply(struct net_device *dev,
			       LANSendReply_t *pSendRep);
static int  mpt_lan_ioc_reset(MPT_ADAPTER *ioc, int reset_phase);
static int  mpt_lan_event_process(MPT_ADAPTER *ioc, EventNotificationReply_t *pEvReply);
static unsigned short mpt_lan_type_trans(struct sk_buff *skb,
					 struct net_device *dev);

/*=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=*/
/*
 *  Fusion MPT LAN private data
 */
static u8 LanCtx = MPT_MAX_PROTOCOL_DRIVERS;

static u32 max_buckets_out = 127;
static u32 tx_max_out_p = 127 - 16;

/*=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=*/
/**
 *	lan_reply - Handle all data sent from the hardware.
 *	@ioc: Pointer to MPT_ADAPTER structure
 *	@mf: Pointer to original MPT request frame (NULL if TurboReply)
 *	@reply: Pointer to MPT reply frame
 *
 *	Returns 1 indicating original alloc'd request frame ptr
 *	should be freed, or 0 if it shouldn't.
 */
static int
lan_reply (MPT_ADAPTER *ioc, MPT_FRAME_HDR *mf, MPT_FRAME_HDR *reply)
{
	struct net_device *dev = ioc->netdev;
	int FreeReqFrame = 0;

	dioprintk((KERN_INFO MYNAM ": %s/%s: Got reply.\n",
		  IOC_AND_NETDEV_NAMES_s_s(dev)));

//	dioprintk((KERN_INFO MYNAM "@lan_reply: mf = %p, reply = %p\n",
//			mf, reply));

	if (mf == NULL) {
		u32 tmsg = CAST_PTR_TO_U32(reply);

		dioprintk((KERN_INFO MYNAM ": %s/%s: @lan_reply, tmsg %08x\n",
				IOC_AND_NETDEV_NAMES_s_s(dev),
				tmsg));

		switch (GET_LAN_FORM(tmsg)) {

		// NOTE!  (Optimization) First case here is now caught in
		//  mptbase.c::mpt_interrupt() routine and callback here
		//  is now skipped for this case!
#if 0
		case LAN_REPLY_FORM_MESSAGE_CONTEXT:
//			dioprintk((KERN_INFO MYNAM "/lan_reply: "
//				  "MessageContext turbo reply received\n"));
			FreeReqFrame = 1;
			break;
#endif

		case LAN_REPLY_FORM_SEND_SINGLE:
//			dioprintk((MYNAM "/lan_reply: "
//				  "calling mpt_lan_send_reply (turbo)\n"));

			// Potential BUG here?
			//	FreeReqFrame = mpt_lan_send_turbo(dev, tmsg);
			//  If/when mpt_lan_send_turbo would return 1 here,
			//  calling routine (mptbase.c|mpt_interrupt)
			//  would Oops because mf has already been set
			//  to NULL.  So after return from this func,
			//  mpt_interrupt() will attempt to put (NULL) mf ptr
			//  item back onto its adapter FreeQ - Oops!:-(
			//  It's Ok, since mpt_lan_send_turbo() *currently*
			//  always returns 0, but..., just in case:

			(void) mpt_lan_send_turbo(dev, tmsg);
			FreeReqFrame = 0;

			break;

		case LAN_REPLY_FORM_RECEIVE_SINGLE:
//			dioprintk((KERN_INFO MYNAM "@lan_reply: "
//				  "rcv-Turbo = %08x\n", tmsg));
			mpt_lan_receive_post_turbo(dev, tmsg);
			break;

		default:
			printk (KERN_ERR MYNAM "/lan_reply: Got a turbo reply "
				"that I don't know what to do with\n");

			/* CHECKME!  Hmmm...  FreeReqFrame is 0 here; is that right? */

			break;
		}

		return FreeReqFrame;
	}

//	msg = (u32 *) reply;
//	dioprintk((KERN_INFO MYNAM "@lan_reply: msg = %08x %08x %08x %08x\n",
//		  le32_to_cpu(msg[0]), le32_to_cpu(msg[1]),
//		  le32_to_cpu(msg[2]), le32_to_cpu(msg[3])));
//	dioprintk((KERN_INFO MYNAM "@lan_reply: Function = %02xh\n",
//		  reply->u.hdr.Function));

	switch (reply->u.hdr.Function) {

	case MPI_FUNCTION_LAN_SEND:
	{
		LANSendReply_t *pSendRep;

		pSendRep = (LANSendReply_t *) reply;
		FreeReqFrame = mpt_lan_send_reply(dev, pSendRep);
		break;
	}

	case MPI_FUNCTION_LAN_RECEIVE:
	{
		LANReceivePostReply_t *pRecvRep;

		pRecvRep = (LANReceivePostReply_t *) reply;
		if (pRecvRep->NumberOfContexts) {
			mpt_lan_receive_post_reply(dev, pRecvRep);
			if (!(pRecvRep->MsgFlags & MPI_MSGFLAGS_CONTINUATION_REPLY))
				FreeReqFrame = 1;
		} else
			dioprintk((KERN_INFO MYNAM "@lan_reply: zero context "
				  "ReceivePostReply received.\n"));
		break;
	}

	case MPI_FUNCTION_LAN_RESET:
		/* Just a default reply. Might want to check it to
		 * make sure that everything went ok.
		 */
		FreeReqFrame = 1;
		break;

	case MPI_FUNCTION_EVENT_NOTIFICATION:
	case MPI_FUNCTION_EVENT_ACK:
		/*  _EVENT_NOTIFICATION should NOT come down this path any more.
		 *  Should be routed to mpt_lan_event_process(), but just in case...
		 */
		FreeReqFrame = 1;
		break;

	default:
		printk (KERN_ERR MYNAM "/lan_reply: Got a non-turbo "
			"reply that I don't know what to do with\n");

		/* CHECKME!  Hmmm...  FreeReqFrame is 0 here; is that right? */
		FreeReqFrame = 1;

		break;
	}

	return FreeReqFrame;
}

/*=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=*/
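/**
 *	mpt_lan_ioc_reset - Handle IOC reset notifications from mptbase.
 *	@ioc: Pointer to MPT_ADAPTER structure
 *	@reset_phase: MPT_IOC_SETUP_RESET, MPT_IOC_PRE_RESET or MPT_IOC_POST_RESET
 *
 *	On pre-reset, stops the Tx queue, zeroes buckets_out and recycles
 *	every Rx context back onto the free list; on post-reset, reposts
 *	the receive buckets and wakes the Tx queue.  Always returns 1.
 */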
static int
mpt_lan_ioc_reset(MPT_ADAPTER *ioc, int reset_phase)
{
	struct net_device *dev = ioc->netdev;
	struct mpt_lan_priv *priv;

	if (dev == NULL)
		return 1;
	else
		priv = netdev_priv(dev);

	dlprintk((KERN_INFO MYNAM ": IOC %s_reset routed to LAN driver!\n",
			reset_phase==MPT_IOC_SETUP_RESET ? "setup" : (
			reset_phase==MPT_IOC_PRE_RESET ? "pre" : "post")));

	if (priv->mpt_rxfidx == NULL)
		return 1;

	if (reset_phase == MPT_IOC_SETUP_RESET) {
		;
	} else if (reset_phase == MPT_IOC_PRE_RESET) {
		int i;
		unsigned long flags;

		netif_stop_queue(dev);

		dlprintk ((KERN_INFO "mptlan/ioc_reset: called netif_stop_queue for %s.\n", dev->name));

		atomic_set(&priv->buckets_out, 0);

		/* Reset Rx Free Tail index and re-populate the queue. */
		spin_lock_irqsave(&priv->rxfidx_lock, flags);
		priv->mpt_rxfidx_tail = -1;
		for (i = 0; i < priv->max_buckets_out; i++)
			priv->mpt_rxfidx[++priv->mpt_rxfidx_tail] = i;
		spin_unlock_irqrestore(&priv->rxfidx_lock, flags);
	} else {
		mpt_lan_post_receive_buckets(priv);
		netif_wake_queue(dev);
	}

	return 1;
}

/*=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=*/
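/**
 *	mpt_lan_event_process - Handle MPT event notifications routed to LAN.
 *	@ioc: Pointer to MPT_ADAPTER structure
 *	@pEvReply: Pointer to the event notification reply frame
 *
 *	Currently a no-op for every event type; AckRequired handling is
 *	done in mptbase.c.  Always returns 1.
 */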
static int
mpt_lan_event_process(MPT_ADAPTER *ioc, EventNotificationReply_t *pEvReply)
{
	dlprintk((KERN_INFO MYNAM ": MPT event routed to LAN driver!\n"));

	switch (le32_to_cpu(pEvReply->Event)) {
	case MPI_EVENT_NONE:				/* 00 */
	case MPI_EVENT_LOG_DATA:			/* 01 */
	case MPI_EVENT_STATE_CHANGE:			/* 02 */
	case MPI_EVENT_UNIT_ATTENTION:			/* 03 */
	case MPI_EVENT_IOC_BUS_RESET:			/* 04 */
	case MPI_EVENT_EXT_BUS_RESET:			/* 05 */
	case MPI_EVENT_RESCAN:				/* 06 */
		/* Ok, do we need to do anything here? As far as
		   I can tell, this is when a new device gets added
		   to the loop. */
	case MPI_EVENT_LINK_STATUS_CHANGE:		/* 07 */
	case MPI_EVENT_LOOP_STATE_CHANGE:		/* 08 */
	case MPI_EVENT_LOGOUT:				/* 09 */
	case MPI_EVENT_EVENT_CHANGE:			/* 0A */
	default:
		break;
	}

	/*
	 *  NOTE: pEvent->AckRequired handling now done in mptbase.c;
	 *  Do NOT do it here now!
	 */

	return 1;
}

/*=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=*/
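/**
 *	mpt_lan_open - net_device open handler.
 *	@dev: Pointer to the network device
 *
 *	Resets the FW LAN port, allocates the Tx/Rx free-context lists and
 *	BufferControl arrays, posts the initial receive buckets, registers
 *	for event notifications and starts the Tx queue.
 *	Returns 0 on success or -ENOMEM if an allocation fails.
 */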
static int
mpt_lan_open(struct net_device *dev)
{
	struct mpt_lan_priv *priv = netdev_priv(dev);
	int i;

	if (mpt_lan_reset(dev) != 0) {
		MPT_ADAPTER *mpt_dev = priv->mpt_dev;

		printk (KERN_WARNING MYNAM "/lan_open: lan_reset failed.");

		if (mpt_dev->active)
			printk ("The ioc is active. Perhaps it needs to be"
				" reset?\n");
		else
			printk ("The ioc is inactive, most likely in the "
				"process of being reset. Please try again in "
				"a moment.\n");
	}

	priv->mpt_txfidx = kmalloc(priv->tx_max_out * sizeof(int), GFP_KERNEL);
	if (priv->mpt_txfidx == NULL)
		goto out;
	priv->mpt_txfidx_tail = -1;

	priv->SendCtl = kcalloc(priv->tx_max_out, sizeof(struct BufferControl),
				GFP_KERNEL);
	if (priv->SendCtl == NULL)
		goto out_mpt_txfidx;
	for (i = 0; i < priv->tx_max_out; i++)
		priv->mpt_txfidx[++priv->mpt_txfidx_tail] = i;

	dlprintk((KERN_INFO MYNAM "@lo: Finished initializing SendCtl\n"));

	priv->mpt_rxfidx = kmalloc(priv->max_buckets_out * sizeof(int),
				   GFP_KERNEL);
	if (priv->mpt_rxfidx == NULL)
		goto out_SendCtl;
	priv->mpt_rxfidx_tail = -1;

	priv->RcvCtl = kcalloc(priv->max_buckets_out,
			       sizeof(struct BufferControl),
			       GFP_KERNEL);
	if (priv->RcvCtl == NULL)
		goto out_mpt_rxfidx;
	for (i = 0; i < priv->max_buckets_out; i++)
		priv->mpt_rxfidx[++priv->mpt_rxfidx_tail] = i;

/**/	dlprintk((KERN_INFO MYNAM "/lo: txfidx contains - "));
/**/	for (i = 0; i < priv->tx_max_out; i++)
/**/		dlprintk((" %xh", priv->mpt_txfidx[i]));
/**/	dlprintk(("\n"));

	dlprintk((KERN_INFO MYNAM "/lo: Finished initializing RcvCtl\n"));

	mpt_lan_post_receive_buckets(priv);
	printk(KERN_INFO MYNAM ": %s/%s: interface up & active\n",
			IOC_AND_NETDEV_NAMES_s_s(dev));

	if (mpt_event_register(LanCtx, mpt_lan_event_process) != 0) {
		printk (KERN_WARNING MYNAM "/lo: Unable to register for Event"
			" Notifications. This is a bad thing! We're not going "
			"to go ahead, but I'd be leery of system stability at "
			"this point.\n");
	}

	netif_start_queue(dev);
	dlprintk((KERN_INFO MYNAM "/lo: Done.\n"));

	return 0;
out_mpt_rxfidx:
	kfree(priv->mpt_rxfidx);
	priv->mpt_rxfidx = NULL;
out_SendCtl:
	kfree(priv->SendCtl);
	priv->SendCtl = NULL;
out_mpt_txfidx:
	kfree(priv->mpt_txfidx);
	priv->mpt_txfidx = NULL;
out:	return -ENOMEM;
}

/*=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=*/
/* Send a LanReset message to the FW. This should result in the FW returning
   any buckets it still has. */
static int
mpt_lan_reset(struct net_device *dev)
{
	MPT_FRAME_HDR *mf;
	LANResetRequest_t *pResetReq;
	struct mpt_lan_priv *priv = netdev_priv(dev);

	mf = mpt_get_msg_frame(LanCtx, priv->mpt_dev);

	if (mf == NULL) {
/*		dlprintk((KERN_ERR MYNAM "/reset: Evil funkiness abounds! "
		"Unable to allocate a request frame.\n"));
*/
		return -1;
	}

	pResetReq = (LANResetRequest_t *) mf;

	pResetReq->Function	= MPI_FUNCTION_LAN_RESET;
	pResetReq->ChainOffset	= 0;
	pResetReq->Reserved	= 0;
	pResetReq->PortNumber	= priv->pnum;
	pResetReq->MsgFlags	= 0;
	pResetReq->Reserved2	= 0;

	mpt_put_msg_frame(LanCtx, priv->mpt_dev, mf);

	return 0;
}

/*=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=*/
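/**
 *	mpt_lan_close - net_device stop handler.
 *	@dev: Pointer to the network device
 *
 *	Deregisters for events, resets the FW LAN port, waits up to two
 *	seconds for outstanding buckets to be returned, then unmaps and
 *	frees every Rx and Tx buffer still held.  Returns 0.
 */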
static int
mpt_lan_close(struct net_device *dev)
{
	struct mpt_lan_priv *priv = netdev_priv(dev);
	MPT_ADAPTER *mpt_dev = priv->mpt_dev;
	unsigned long timeout;
	int i;

	dlprintk((KERN_INFO MYNAM ": mpt_lan_close called\n"));

	mpt_event_deregister(LanCtx);

	dlprintk((KERN_INFO MYNAM ":lan_close: Posted %d buckets "
		  "since driver was loaded, %d still out\n",
		  priv->total_posted, atomic_read(&priv->buckets_out)));

	netif_stop_queue(dev);

	mpt_lan_reset(dev);

	timeout = jiffies + 2 * HZ;
	while (atomic_read(&priv->buckets_out) && time_before(jiffies, timeout))
		schedule_timeout_interruptible(1);

	for (i = 0; i < priv->max_buckets_out; i++) {
		if (priv->RcvCtl[i].skb != NULL) {
/**/			dlprintk((KERN_INFO MYNAM "/lan_close: bucket %05x "
/**/				  "is still out\n", i));
			pci_unmap_single(mpt_dev->pcidev, priv->RcvCtl[i].dma,
					 priv->RcvCtl[i].len,
					 PCI_DMA_FROMDEVICE);
			dev_kfree_skb(priv->RcvCtl[i].skb);
		}
	}

	kfree(priv->RcvCtl);
	kfree(priv->mpt_rxfidx);

	for (i = 0; i < priv->tx_max_out; i++) {
		if (priv->SendCtl[i].skb != NULL) {
			pci_unmap_single(mpt_dev->pcidev, priv->SendCtl[i].dma,
					 priv->SendCtl[i].len,
					 PCI_DMA_TODEVICE);
			dev_kfree_skb(priv->SendCtl[i].skb);
		}
	}

	kfree(priv->SendCtl);
	kfree(priv->mpt_txfidx);

	atomic_set(&priv->buckets_out, 0);

	printk(KERN_INFO MYNAM ": %s/%s: interface down & inactive\n",
			IOC_AND_NETDEV_NAMES_s_s(dev));

	return 0;
}

/*=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=*/
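/**
 *	mpt_lan_change_mtu - net_device change_mtu handler.
 *	@dev: Pointer to the network device
 *	@new_mtu: Requested MTU
 *
 *	Accepts any value between MPT_LAN_MIN_MTU and MPT_LAN_MAX_MTU;
 *	returns -EINVAL otherwise.
 */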
static int
mpt_lan_change_mtu(struct net_device *dev, int new_mtu)
{
	if ((new_mtu < MPT_LAN_MIN_MTU) || (new_mtu > MPT_LAN_MAX_MTU))
		return -EINVAL;
	dev->mtu = new_mtu;
	return 0;
}

/*=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=*/
/* Tx timeout handler. */
static void
mpt_lan_tx_timeout(struct net_device *dev)
{
	struct mpt_lan_priv *priv = netdev_priv(dev);
	MPT_ADAPTER *mpt_dev = priv->mpt_dev;

	if (mpt_dev->active) {
		dlprintk (("mptlan/tx_timeout: calling netif_wake_queue for %s.\n", dev->name));
		netif_wake_queue(dev);
	}
}

/*=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=*/
//static inline int
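/**
 *	mpt_lan_send_turbo - Complete a single Tx from a turbo reply.
 *	@dev: Pointer to the network device
 *	@tmsg: Turbo reply message containing the buffer context
 *
 *	Unmaps and frees the sent skb, returns its context to the Tx free
 *	list and wakes the queue.  Always returns 0 (the caller must not
 *	free a request frame for a turbo reply).
 */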
static int
mpt_lan_send_turbo(struct net_device *dev, u32 tmsg)
{
	struct mpt_lan_priv *priv = netdev_priv(dev);
	MPT_ADAPTER *mpt_dev = priv->mpt_dev;
	struct sk_buff *sent;
	unsigned long flags;
	u32 ctx;

	ctx = GET_LAN_BUFFER_CONTEXT(tmsg);
	sent = priv->SendCtl[ctx].skb;

	dev->stats.tx_packets++;
	dev->stats.tx_bytes += sent->len;

	dioprintk((KERN_INFO MYNAM ": %s/%s: @%s, skb %p sent.\n",
			IOC_AND_NETDEV_NAMES_s_s(dev),
			__func__, sent));

	priv->SendCtl[ctx].skb = NULL;
	pci_unmap_single(mpt_dev->pcidev, priv->SendCtl[ctx].dma,
			 priv->SendCtl[ctx].len, PCI_DMA_TODEVICE);
	dev_kfree_skb_irq(sent);

	spin_lock_irqsave(&priv->txfidx_lock, flags);
	priv->mpt_txfidx[++priv->mpt_txfidx_tail] = ctx;
	spin_unlock_irqrestore(&priv->txfidx_lock, flags);

	netif_wake_queue(dev);
	return 0;
}

/*=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=*/
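/**
 *	mpt_lan_send_reply - Complete one or more Tx's from a full reply.
 *	@dev: Pointer to the network device
 *	@pSendRep: Pointer to the LANSendReply frame
 *
 *	Updates the Tx statistics from IOCStatus, then unmaps and frees
 *	every skb named in the reply's context list.  Returns 1 when the
 *	original request frame may be freed (i.e. this is not a
 *	continuation reply).
 */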
static int
mpt_lan_send_reply(struct net_device *dev, LANSendReply_t *pSendRep)
{
	struct mpt_lan_priv *priv = netdev_priv(dev);
	MPT_ADAPTER *mpt_dev = priv->mpt_dev;
	struct sk_buff *sent;
	unsigned long flags;
	int FreeReqFrame = 0;
	u32 *pContext;
	u32 ctx;
	u8 count;

	count = pSendRep->NumberOfContexts;

	dioprintk((KERN_INFO MYNAM ": send_reply: IOCStatus: %04x\n",
		 le16_to_cpu(pSendRep->IOCStatus)));

	/* Add check for Loginfo Flag in IOCStatus */

	switch (le16_to_cpu(pSendRep->IOCStatus) & MPI_IOCSTATUS_MASK) {
	case MPI_IOCSTATUS_SUCCESS:
		dev->stats.tx_packets += count;
		break;

	case MPI_IOCSTATUS_LAN_CANCELED:
	case MPI_IOCSTATUS_LAN_TRANSMIT_ABORTED:
		break;

	case MPI_IOCSTATUS_INVALID_SGL:
		dev->stats.tx_errors += count;
		printk (KERN_ERR MYNAM ": %s/%s: ERROR - Invalid SGL sent to IOC!\n",
				IOC_AND_NETDEV_NAMES_s_s(dev));
		goto out;

	default:
		dev->stats.tx_errors += count;
		break;
	}

	pContext = &pSendRep->BufferContext;

	spin_lock_irqsave(&priv->txfidx_lock, flags);
	while (count > 0) {
		ctx = GET_LAN_BUFFER_CONTEXT(le32_to_cpu(*pContext));

		sent = priv->SendCtl[ctx].skb;
		dev->stats.tx_bytes += sent->len;

		dioprintk((KERN_INFO MYNAM ": %s/%s: @%s, skb %p sent.\n",
				IOC_AND_NETDEV_NAMES_s_s(dev),
				__func__, sent));

		priv->SendCtl[ctx].skb = NULL;
		pci_unmap_single(mpt_dev->pcidev, priv->SendCtl[ctx].dma,
				 priv->SendCtl[ctx].len, PCI_DMA_TODEVICE);
		dev_kfree_skb_irq(sent);

		priv->mpt_txfidx[++priv->mpt_txfidx_tail] = ctx;

		pContext++;
		count--;
	}
	spin_unlock_irqrestore(&priv->txfidx_lock, flags);

out:
	if (!(pSendRep->MsgFlags & MPI_MSGFLAGS_CONTINUATION_REPLY))
		FreeReqFrame = 1;

	netif_wake_queue(dev);
	return FreeReqFrame;
}

/*=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=*/
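/**
 *	mpt_lan_sdu_send - net_device start_xmit handler.
 *	@skb: Packet to transmit
 *	@dev: Pointer to the network device
 *
 *	Pulls a free Tx context and a request frame, DMA-maps the packet
 *	and posts a LANSend request carrying a single 64-bit Simple SGE.
 *	Returns NETDEV_TX_OK, or NETDEV_TX_BUSY (after stopping the queue)
 *	if no context or request frame is available.
 */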
static int
mpt_lan_sdu_send (struct sk_buff *skb, struct net_device *dev)
{
	struct mpt_lan_priv *priv = netdev_priv(dev);
	MPT_ADAPTER *mpt_dev = priv->mpt_dev;
	MPT_FRAME_HDR *mf;
	LANSendRequest_t *pSendReq;
	SGETransaction32_t *pTrans;
	SGESimple64_t *pSimple;
	const unsigned char *mac;
	dma_addr_t dma;
	unsigned long flags;
	int ctx;
	u16 cur_naa = 0x1000;

	dioprintk((KERN_INFO MYNAM ": %s called, skb_addr = %p\n",
			__func__, skb));

	spin_lock_irqsave(&priv->txfidx_lock, flags);
	if (priv->mpt_txfidx_tail < 0) {
		netif_stop_queue(dev);
		spin_unlock_irqrestore(&priv->txfidx_lock, flags);

		printk (KERN_ERR "%s: no tx context available: %u\n",
			__func__, priv->mpt_txfidx_tail);
		return NETDEV_TX_BUSY;
	}

	mf = mpt_get_msg_frame(LanCtx, mpt_dev);
	if (mf == NULL) {
		netif_stop_queue(dev);
		spin_unlock_irqrestore(&priv->txfidx_lock, flags);

		printk (KERN_ERR "%s: Unable to alloc request frame\n",
			__func__);
		return NETDEV_TX_BUSY;
	}

	ctx = priv->mpt_txfidx[priv->mpt_txfidx_tail--];
	spin_unlock_irqrestore(&priv->txfidx_lock, flags);

//	dioprintk((KERN_INFO MYNAM ": %s/%s: Creating new msg frame (send).\n",
//			IOC_AND_NETDEV_NAMES_s_s(dev)));

	pSendReq = (LANSendRequest_t *) mf;

	/* Set the mac.raw pointer, since this apparently isn't getting
	 * done before we get the skb. Pull the data pointer past the mac data.
	 */
	skb_reset_mac_header(skb);
	skb_pull(skb, 12);

	dma = pci_map_single(mpt_dev->pcidev, skb->data, skb->len,
			     PCI_DMA_TODEVICE);

	priv->SendCtl[ctx].skb = skb;
	priv->SendCtl[ctx].dma = dma;
	priv->SendCtl[ctx].len = skb->len;

	/* Message Header */
	pSendReq->Reserved    = 0;
	pSendReq->Function    = MPI_FUNCTION_LAN_SEND;
	pSendReq->ChainOffset = 0;
	pSendReq->Reserved2   = 0;
	pSendReq->MsgFlags    = 0;
	pSendReq->PortNumber  = priv->pnum;

	/* Transaction Context Element */
	pTrans = (SGETransaction32_t *) pSendReq->SG_List;

	/* No Flags, 8 bytes of Details, 32bit Context (bloody turbo replies) */
	pTrans->ContextSize   = sizeof(u32);
	pTrans->DetailsLength = 2 * sizeof(u32);
	pTrans->Flags         = 0;
	pTrans->TransactionContext[0] = cpu_to_le32(ctx);

//	dioprintk((KERN_INFO MYNAM ": %s/%s: BC = %08x, skb = %p, buff = %p\n",
//			IOC_AND_NETDEV_NAMES_s_s(dev),
//			ctx, skb, skb->data));

	mac = skb_mac_header(skb);

	pTrans->TransactionDetails[0] = cpu_to_le32((cur_naa         << 16) |
						    (mac[0] <<  8) |
						    (mac[1] <<  0));
	pTrans->TransactionDetails[1] = cpu_to_le32((mac[2] << 24) |
						    (mac[3] << 16) |
						    (mac[4] <<  8) |
						    (mac[5] <<  0));

	pSimple = (SGESimple64_t *) &pTrans->TransactionDetails[2];

	/* If we ever decide to send more than one Simple SGE per LANSend, then
	   we will need to make sure that LAST_ELEMENT only gets set on the
	   last one. Otherwise, bad voodoo and evil funkiness will commence. */
	pSimple->FlagsLength = cpu_to_le32(
			((MPI_SGE_FLAGS_LAST_ELEMENT |
			  MPI_SGE_FLAGS_END_OF_BUFFER |
			  MPI_SGE_FLAGS_SIMPLE_ELEMENT |
			  MPI_SGE_FLAGS_SYSTEM_ADDRESS |
			  MPI_SGE_FLAGS_HOST_TO_IOC |
			  MPI_SGE_FLAGS_64_BIT_ADDRESSING |
			  MPI_SGE_FLAGS_END_OF_LIST) << MPI_SGE_FLAGS_SHIFT) |
			skb->len);
	pSimple->Address.Low = cpu_to_le32((u32) dma);
	if (sizeof(dma_addr_t) > sizeof(u32))
		pSimple->Address.High = cpu_to_le32((u32) ((u64) dma >> 32));
	else
		pSimple->Address.High = 0;

	mpt_put_msg_frame (LanCtx, mpt_dev, mf);
	dev->trans_start = jiffies;

	dioprintk((KERN_INFO MYNAM ": %s/%s: Sending packet. FlagsLength = %08x.\n",
			IOC_AND_NETDEV_NAMES_s_s(dev),
			le32_to_cpu(pSimple->FlagsLength)));

	return NETDEV_TX_OK;
}

/*=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=*/
/**
 *	mpt_lan_wake_post_buckets_task - Schedule the post_buckets task.
 *	@dev: Pointer to the network device
 *	@priority: 0 = put it on the timer queue, 1 = put it on the immediate queue
 */
static void
mpt_lan_wake_post_buckets_task(struct net_device *dev, int priority)
{
	struct mpt_lan_priv *priv = netdev_priv(dev);

	if (test_and_set_bit(0, &priv->post_buckets_active) == 0) {
		if (priority) {
			schedule_delayed_work(&priv->post_buckets_task, 0);
		} else {
			schedule_delayed_work(&priv->post_buckets_task, 1);
			dioprintk((KERN_INFO MYNAM ": post_buckets queued on "
				   "timer.\n"));
		}
		dioprintk((KERN_INFO MYNAM ": %s/%s: Queued post_buckets task.\n",
			   IOC_AND_NETDEV_NAMES_s_s(dev) ));
	}
}

/*=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=*/
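/**
 *	mpt_lan_receive_skb - Hand a received packet to the stack.
 *	@dev: Pointer to the network device
 *	@skb: Received packet
 *
 *	Sets the protocol via mpt_lan_type_trans(), updates Rx statistics
 *	and calls netif_rx().  If the number of outstanding buckets has
 *	dropped below the threshold, schedules the post_buckets task.
 */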
static int
mpt_lan_receive_skb(struct net_device *dev, struct sk_buff *skb)
{
	struct mpt_lan_priv *priv = netdev_priv(dev);

	skb->protocol = mpt_lan_type_trans(skb, dev);

	dioprintk((KERN_INFO MYNAM ": %s/%s: Incoming packet (%d bytes) "
		 "delivered to upper level.\n",
			IOC_AND_NETDEV_NAMES_s_s(dev), skb->len));

	dev->stats.rx_bytes += skb->len;
	dev->stats.rx_packets++;

	skb->dev = dev;
	netif_rx(skb);

	dioprintk((MYNAM "/receive_skb: %d buckets remaining\n",
		 atomic_read(&priv->buckets_out)));

	if (atomic_read(&priv->buckets_out) < priv->bucketthresh)
		mpt_lan_wake_post_buckets_task(dev, 1);

	dioprintk((KERN_INFO MYNAM "/receive_post_reply: %d buckets "
		  "remaining, %d received back since sod\n",
		  atomic_read(&priv->buckets_out), priv->total_received));

	return 0;
}

/*=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=*/
//static inline int
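/**
 *	mpt_lan_receive_post_turbo - Handle a single-bucket turbo Rx reply.
 *	@dev: Pointer to the network device
 *	@tmsg: Turbo reply message containing bucket context and length
 *
 *	For short packets (under MPT_LAN_RX_COPYBREAK) the data is copied
 *	into a fresh skb and the bucket stays mapped for reuse; otherwise
 *	the bucket's skb is unmapped and passed up directly.
 */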
static int
mpt_lan_receive_post_turbo(struct net_device *dev, u32 tmsg)
{
	struct mpt_lan_priv *priv = netdev_priv(dev);
	MPT_ADAPTER *mpt_dev = priv->mpt_dev;
	struct sk_buff *skb, *old_skb;
	unsigned long flags;
	u32 ctx, len;

	ctx = GET_LAN_BUCKET_CONTEXT(tmsg);
	skb = priv->RcvCtl[ctx].skb;

	len = GET_LAN_PACKET_LENGTH(tmsg);

	if (len < MPT_LAN_RX_COPYBREAK) {
		old_skb = skb;

		skb = (struct sk_buff *)dev_alloc_skb(len);
		if (!skb) {
			printk (KERN_ERR MYNAM ": %s/%s: ERROR - Can't allocate skb! (%s@%d)\n",
					IOC_AND_NETDEV_NAMES_s_s(dev),
					__FILE__, __LINE__);
			return -ENOMEM;
		}

		pci_dma_sync_single_for_cpu(mpt_dev->pcidev, priv->RcvCtl[ctx].dma,
					    priv->RcvCtl[ctx].len, PCI_DMA_FROMDEVICE);

		skb_copy_from_linear_data(old_skb, skb_put(skb, len), len);

		pci_dma_sync_single_for_device(mpt_dev->pcidev, priv->RcvCtl[ctx].dma,
					       priv->RcvCtl[ctx].len, PCI_DMA_FROMDEVICE);
		goto out;
	}

	skb_put(skb, len);

	priv->RcvCtl[ctx].skb = NULL;

	pci_unmap_single(mpt_dev->pcidev, priv->RcvCtl[ctx].dma,
			 priv->RcvCtl[ctx].len, PCI_DMA_FROMDEVICE);

out:
	spin_lock_irqsave(&priv->rxfidx_lock, flags);
	priv->mpt_rxfidx[++priv->mpt_rxfidx_tail] = ctx;
	spin_unlock_irqrestore(&priv->rxfidx_lock, flags);

	atomic_dec(&priv->buckets_out);
	priv->total_received++;

	return mpt_lan_receive_skb(dev, skb);
}

/*=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=*/
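/**
 *	mpt_lan_receive_post_free - Free buckets returned by a LanReset.
 *	@dev: Pointer to the network device
 *	@pRecvRep: Pointer to the LANReceivePostReply frame
 *
 *	Called when the IOC cancels outstanding receive buckets; unmaps
 *	and frees each returned skb and recycles its context.
 */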
static int
mpt_lan_receive_post_free(struct net_device *dev,
			  LANReceivePostReply_t *pRecvRep)
{
	struct mpt_lan_priv *priv = netdev_priv(dev);
	MPT_ADAPTER *mpt_dev = priv->mpt_dev;
	unsigned long flags;
	struct sk_buff *skb;
	u32 ctx;
	int count;
	int i;

	count = pRecvRep->NumberOfContexts;

/**/	dlprintk((KERN_INFO MYNAM "/receive_post_reply: "
		  "IOC returned %d buckets, freeing them...\n", count));

	spin_lock_irqsave(&priv->rxfidx_lock, flags);
	for (i = 0; i < count; i++) {
		ctx = le32_to_cpu(pRecvRep->BucketContext[i]);

		skb = priv->RcvCtl[ctx].skb;

//		dlprintk((KERN_INFO MYNAM ": %s: dev_name = %s\n",
//				IOC_AND_NETDEV_NAMES_s_s(dev)));
//		dlprintk((KERN_INFO MYNAM "@rpr[2], priv = %p, buckets_out addr = %p",
//				priv, &(priv->buckets_out)));
//		dlprintk((KERN_INFO MYNAM "@rpr[2] TC + 3\n"));

		priv->RcvCtl[ctx].skb = NULL;
		pci_unmap_single(mpt_dev->pcidev, priv->RcvCtl[ctx].dma,
				 priv->RcvCtl[ctx].len, PCI_DMA_FROMDEVICE);
		dev_kfree_skb_any(skb);

		priv->mpt_rxfidx[++priv->mpt_rxfidx_tail] = ctx;
	}
	spin_unlock_irqrestore(&priv->rxfidx_lock, flags);

	atomic_sub(count, &priv->buckets_out);

//	for (i = 0; i < priv->max_buckets_out; i++)
//		if (priv->RcvCtl[i].skb != NULL)
//			dlprintk((KERN_INFO MYNAM "@rpr: bucket %03x "
//				  "is still out\n", i));

/*	dlprintk((KERN_INFO MYNAM "/receive_post_reply: freed %d buckets\n",
		  count));
*/
/**/	dlprintk((KERN_INFO MYNAM "@receive_post_reply: %d buckets "
/**/		  "remaining, %d received back since sod.\n",
/**/		  atomic_read(&priv->buckets_out), priv->total_received));
	return 0;
}

/*=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=*/
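/**
 *	mpt_lan_receive_post_reply - Handle a full (non-turbo) Rx reply.
 *	@dev: Pointer to the network device
 *	@pRecvRep: Pointer to the LANReceivePostReply frame
 *
 *	Reassembles a packet that may span several buckets, applying the
 *	copybreak optimization for short single-bucket packets, and checks
 *	the IOC's BucketsRemaining count against the driver's own
 *	accounting, issuing a LanReset if they diverge too far.
 */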
static int
mpt_lan_receive_post_reply(struct net_device *dev,
			   LANReceivePostReply_t *pRecvRep)
{
	struct mpt_lan_priv *priv = netdev_priv(dev);
	MPT_ADAPTER *mpt_dev = priv->mpt_dev;
	struct sk_buff *skb, *old_skb;
	unsigned long flags;
	u32 len, ctx, offset;
	u32 remaining = le32_to_cpu(pRecvRep->BucketsRemaining);
	int count;
	int i, l;

	dioprintk((KERN_INFO MYNAM ": mpt_lan_receive_post_reply called\n"));
	dioprintk((KERN_INFO MYNAM ": receive_post_reply: IOCStatus: %04x\n",
		 le16_to_cpu(pRecvRep->IOCStatus)));

	if ((le16_to_cpu(pRecvRep->IOCStatus) & MPI_IOCSTATUS_MASK) ==
						MPI_IOCSTATUS_LAN_CANCELED)
		return mpt_lan_receive_post_free(dev, pRecvRep);

	len = le32_to_cpu(pRecvRep->PacketLength);
	if (len == 0) {
		printk (KERN_ERR MYNAM ": %s/%s: ERROR - Got a non-TURBO "
			"ReceivePostReply w/ PacketLength zero!\n",
				IOC_AND_NETDEV_NAMES_s_s(dev));
		printk (KERN_ERR MYNAM ": MsgFlags = %02x, IOCStatus = %04x\n",
				pRecvRep->MsgFlags, le16_to_cpu(pRecvRep->IOCStatus));
		return -1;
	}

	ctx    = le32_to_cpu(pRecvRep->BucketContext[0]);
	count  = pRecvRep->NumberOfContexts;
	skb    = priv->RcvCtl[ctx].skb;

	offset = le32_to_cpu(pRecvRep->PacketOffset);
//	if (offset != 0) {
//		printk (KERN_INFO MYNAM ": %s/%s: Got a ReceivePostReply "
//			"w/ PacketOffset %u\n",
//				IOC_AND_NETDEV_NAMES_s_s(dev),
//				offset);
//	}

	dioprintk((KERN_INFO MYNAM ": %s/%s: @rpr, offset = %d, len = %d\n",
			IOC_AND_NETDEV_NAMES_s_s(dev),
			offset, len));

	if (count > 1) {
		int szrem = len;

//		dioprintk((KERN_INFO MYNAM ": %s/%s: Multiple buckets returned "
//			"for single packet, concatenating...\n",
//				IOC_AND_NETDEV_NAMES_s_s(dev)));

		skb = (struct sk_buff *)dev_alloc_skb(len);
		if (!skb) {
			printk (KERN_ERR MYNAM ": %s/%s: ERROR - Can't allocate skb! (%s@%d)\n",
					IOC_AND_NETDEV_NAMES_s_s(dev),
					__FILE__, __LINE__);
			return -ENOMEM;
		}

		spin_lock_irqsave(&priv->rxfidx_lock, flags);
		for (i = 0; i < count; i++) {

			ctx = le32_to_cpu(pRecvRep->BucketContext[i]);
			old_skb = priv->RcvCtl[ctx].skb;

			l = priv->RcvCtl[ctx].len;
			if (szrem < l)
				l = szrem;

//			dioprintk((KERN_INFO MYNAM ": %s/%s: Buckets = %d, len = %u\n",
//					IOC_AND_NETDEV_NAMES_s_s(dev),
//					i, l));

			pci_dma_sync_single_for_cpu(mpt_dev->pcidev,
						    priv->RcvCtl[ctx].dma,
						    priv->RcvCtl[ctx].len,
						    PCI_DMA_FROMDEVICE);
			skb_copy_from_linear_data(old_skb, skb_put(skb, l), l);

			pci_dma_sync_single_for_device(mpt_dev->pcidev,
						       priv->RcvCtl[ctx].dma,
						       priv->RcvCtl[ctx].len,
						       PCI_DMA_FROMDEVICE);

			priv->mpt_rxfidx[++priv->mpt_rxfidx_tail] = ctx;
			szrem -= l;
		}
		spin_unlock_irqrestore(&priv->rxfidx_lock, flags);

	} else if (len < MPT_LAN_RX_COPYBREAK) {

		old_skb = skb;

		skb = (struct sk_buff *)dev_alloc_skb(len);
		if (!skb) {
			printk (KERN_ERR MYNAM ": %s/%s: ERROR - Can't allocate skb! (%s@%d)\n",
					IOC_AND_NETDEV_NAMES_s_s(dev),
					__FILE__, __LINE__);
			return -ENOMEM;
		}

		pci_dma_sync_single_for_cpu(mpt_dev->pcidev,
					    priv->RcvCtl[ctx].dma,
					    priv->RcvCtl[ctx].len,
					    PCI_DMA_FROMDEVICE);

		skb_copy_from_linear_data(old_skb, skb_put(skb, len), len);

		pci_dma_sync_single_for_device(mpt_dev->pcidev,
					       priv->RcvCtl[ctx].dma,
					       priv->RcvCtl[ctx].len,
					       PCI_DMA_FROMDEVICE);

		spin_lock_irqsave(&priv->rxfidx_lock, flags);
		priv->mpt_rxfidx[++priv->mpt_rxfidx_tail] = ctx;
		spin_unlock_irqrestore(&priv->rxfidx_lock, flags);

	} else {
		spin_lock_irqsave(&priv->rxfidx_lock, flags);

		priv->RcvCtl[ctx].skb = NULL;

		pci_unmap_single(mpt_dev->pcidev, priv->RcvCtl[ctx].dma,
				 priv->RcvCtl[ctx].len, PCI_DMA_FROMDEVICE);
		priv->RcvCtl[ctx].dma = 0;

		priv->mpt_rxfidx[++priv->mpt_rxfidx_tail] = ctx;
		spin_unlock_irqrestore(&priv->rxfidx_lock, flags);

		skb_put(skb, len);
	}

	atomic_sub(count, &priv->buckets_out);
	priv->total_received += count;

	if (priv->mpt_rxfidx_tail >= MPT_LAN_MAX_BUCKETS_OUT) {
		printk (KERN_ERR MYNAM ": %s/%s: Yoohoo! mpt_rxfidx_tail = %d, "
			"MPT_LAN_MAX_BUCKETS_OUT = %d\n",
				IOC_AND_NETDEV_NAMES_s_s(dev),
				priv->mpt_rxfidx_tail,
				MPT_LAN_MAX_BUCKETS_OUT);

		return -1;
	}

	if (remaining == 0)
		printk (KERN_WARNING MYNAM ": %s/%s: WARNING - IOC out of buckets! "
			"(priv->buckets_out = %d)\n",
			IOC_AND_NETDEV_NAMES_s_s(dev),
			atomic_read(&priv->buckets_out));
	else if (remaining < 10)
		printk (KERN_INFO MYNAM ": %s/%s: IOC says %d buckets left. "
			"(priv->buckets_out = %d)\n",
			IOC_AND_NETDEV_NAMES_s_s(dev),
			remaining, atomic_read(&priv->buckets_out));

	if ((remaining < priv->bucketthresh) &&
	    ((atomic_read(&priv->buckets_out) - remaining) >
	     MPT_LAN_BUCKETS_REMAIN_MISMATCH_THRESH)) {

		printk (KERN_WARNING MYNAM " Mismatch between driver's "
			"buckets_out count and fw's BucketsRemaining "
			"count has crossed the threshold, issuing a "
			"LanReset to clear the fw's hashtable. You may "
			"want to check your /var/log/messages for \"CRC "
			"error\" event notifications.\n");

		mpt_lan_reset(dev);
		mpt_lan_wake_post_buckets_task(dev, 0);
	}

	return mpt_lan_receive_skb(dev, skb);
}

/*=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=*/
/* Simple SGE's only at the moment */

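/**
 *	mpt_lan_post_receive_buckets - Replenish the IOC's receive buckets.
 *	@priv: Pointer to the device's private data
 *
 *	Posts LANReceivePost requests until max_buckets_out buckets are
 *	outstanding, allocating and DMA-mapping a fresh skb for any free
 *	context that lacks one of the right size.  Each request carries as
 *	many buckets as fit in a request frame.
 */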
static void
mpt_lan_post_receive_buckets(struct mpt_lan_priv *priv)
{
	struct net_device *dev = priv->dev;
	MPT_ADAPTER *mpt_dev = priv->mpt_dev;
	MPT_FRAME_HDR *mf;
	LANReceivePostRequest_t *pRecvReq;
	SGETransaction32_t *pTrans;
	SGESimple64_t *pSimple;
	struct sk_buff *skb;
	dma_addr_t dma;
	u32 curr, buckets, count, max;
	u32 len = (dev->mtu + dev->hard_header_len + 4);
	unsigned long flags;
	int i;

	curr = atomic_read(&priv->buckets_out);
	buckets = (priv->max_buckets_out - curr);

	dioprintk((KERN_INFO MYNAM ": %s/%s: @%s, Start_buckets = %u, buckets_out = %u\n",
			IOC_AND_NETDEV_NAMES_s_s(dev),
			__func__, buckets, curr));

	max = (mpt_dev->req_sz - MPT_LAN_RECEIVE_POST_REQUEST_SIZE) /
			(MPT_LAN_TRANSACTION32_SIZE + sizeof(SGESimple64_t));

	while (buckets) {
		mf = mpt_get_msg_frame(LanCtx, mpt_dev);
		if (mf == NULL) {
			printk (KERN_ERR "%s: Unable to alloc request frame\n",
				__func__);
			dioprintk((KERN_ERR "%s: %u buckets remaining\n",
				 __func__, buckets));
			goto out;
		}
		pRecvReq = (LANReceivePostRequest_t *) mf;

		i = le16_to_cpu(mf->u.frame.hwhdr.msgctxu.fld.req_idx);
		mpt_dev->RequestNB[i] = 0;
		count = buckets;
		if (count > max)
			count = max;

		pRecvReq->Function    = MPI_FUNCTION_LAN_RECEIVE;
		pRecvReq->ChainOffset = 0;
		pRecvReq->MsgFlags    = 0;
		pRecvReq->PortNumber  = priv->pnum;

		pTrans = (SGETransaction32_t *) pRecvReq->SG_List;
		pSimple = NULL;

		for (i = 0; i < count; i++) {
			int ctx;

			spin_lock_irqsave(&priv->rxfidx_lock, flags);
			if (priv->mpt_rxfidx_tail < 0) {
				printk (KERN_ERR "%s: Can't alloc context\n",
					__func__);
				spin_unlock_irqrestore(&priv->rxfidx_lock,
						       flags);
				break;
			}

			ctx = priv->mpt_rxfidx[priv->mpt_rxfidx_tail--];

			skb = priv->RcvCtl[ctx].skb;
			if (skb && (priv->RcvCtl[ctx].len != len)) {
				pci_unmap_single(mpt_dev->pcidev,
						 priv->RcvCtl[ctx].dma,
						 priv->RcvCtl[ctx].len,
						 PCI_DMA_FROMDEVICE);
				dev_kfree_skb(priv->RcvCtl[ctx].skb);
				skb = priv->RcvCtl[ctx].skb = NULL;
			}

			if (skb == NULL) {
				skb = dev_alloc_skb(len);
				if (skb == NULL) {
					printk (KERN_WARNING
						MYNAM "/%s: Can't alloc skb\n",
						__func__);
					priv->mpt_rxfidx[++priv->mpt_rxfidx_tail] = ctx;
					spin_unlock_irqrestore(&priv->rxfidx_lock, flags);
					break;
				}

				dma = pci_map_single(mpt_dev->pcidev, skb->data,
						     len, PCI_DMA_FROMDEVICE);

				priv->RcvCtl[ctx].skb = skb;
				priv->RcvCtl[ctx].dma = dma;
				priv->RcvCtl[ctx].len = len;
			}

			spin_unlock_irqrestore(&priv->rxfidx_lock, flags);

			pTrans->ContextSize   = sizeof(u32);
			pTrans->DetailsLength = 0;
			pTrans->Flags         = 0;
			pTrans->TransactionContext[0] = cpu_to_le32(ctx);

			pSimple = (SGESimple64_t *) pTrans->TransactionDetails;

			pSimple->FlagsLength = cpu_to_le32(
				((MPI_SGE_FLAGS_END_OF_BUFFER |
				  MPI_SGE_FLAGS_SIMPLE_ELEMENT |
				  MPI_SGE_FLAGS_64_BIT_ADDRESSING) << MPI_SGE_FLAGS_SHIFT) | len);
			pSimple->Address.Low = cpu_to_le32((u32) priv->RcvCtl[ctx].dma);
			if (sizeof(dma_addr_t) > sizeof(u32))
				pSimple->Address.High = cpu_to_le32((u32) ((u64) priv->RcvCtl[ctx].dma >> 32));
			else
				pSimple->Address.High = 0;

			pTrans = (SGETransaction32_t *) (pSimple + 1);
		}

		if (pSimple == NULL) {
/**/			printk (KERN_WARNING MYNAM "/%s: No buckets posted\n",
/**/				__func__);
			mpt_free_msg_frame(mpt_dev, mf);
			goto out;
		}

		pSimple->FlagsLength |= cpu_to_le32(MPI_SGE_FLAGS_END_OF_LIST << MPI_SGE_FLAGS_SHIFT);

		pRecvReq->BucketCount = cpu_to_le32(i);

/*	printk(KERN_INFO MYNAM ": posting buckets\n   ");
 *	for (i = 0; i < j + 2; i ++)
 *	    printk (" %08x", le32_to_cpu(msg[i]));
 *	printk ("\n");
 */

		mpt_put_msg_frame(LanCtx, mpt_dev, mf);

		priv->total_posted += i;
		buckets -= i;
		atomic_add(i, &priv->buckets_out);
	}

out:
	dioprintk((KERN_INFO MYNAM "/%s: End_buckets = %u, priv->buckets_out = %u\n",
		  __func__, buckets, atomic_read(&priv->buckets_out)));
	dioprintk((KERN_INFO MYNAM "/%s: Posted %u buckets and received %u back\n",
		  __func__, priv->total_posted, priv->total_received));

	clear_bit(0, &priv->post_buckets_active);
}

static void
mpt_lan_post_receive_buckets_work(struct work_struct *work)
{
	mpt_lan_post_receive_buckets(container_of(work, struct mpt_lan_priv,
						  post_buckets_task.work));
}

static const struct net_device_ops mpt_netdev_ops = {
	.ndo_open       = mpt_lan_open,
	.ndo_stop       = mpt_lan_close,
	.ndo_start_xmit = mpt_lan_sdu_send,
	.ndo_change_mtu = mpt_lan_change_mtu,
	.ndo_tx_timeout = mpt_lan_tx_timeout,
};

/*=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=*/
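/**
 *	mpt_register_lan_device - Allocate and register one LAN netdevice.
 *	@mpt_dev: Pointer to MPT_ADAPTER structure
 *	@pnum: IOC port number to attach to
 *
 *	Allocates an FC netdevice, initializes the private data and queue
 *	limits, derives the MAC address from the prefetched LANPage1 data,
 *	and registers the device.  Returns the net_device or NULL.
 */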
static struct net_device *
mpt_register_lan_device (MPT_ADAPTER *mpt_dev, int pnum)
{
	struct net_device *dev;
	struct mpt_lan_priv *priv;
	u8 HWaddr[FC_ALEN], *a;

	dev = alloc_fcdev(sizeof(struct mpt_lan_priv));
	if (!dev)
		return NULL;

	dev->mtu = MPT_LAN_MTU;

	priv = netdev_priv(dev);

	priv->dev = dev;
	priv->mpt_dev = mpt_dev;
	priv->pnum = pnum;

	INIT_DELAYED_WORK(&priv->post_buckets_task,
			  mpt_lan_post_receive_buckets_work);
	priv->post_buckets_active = 0;

	dlprintk((KERN_INFO MYNAM "@%d: bucketlen = %d\n",
			__LINE__, dev->mtu + dev->hard_header_len + 4));

	atomic_set(&priv->buckets_out, 0);
	priv->total_posted = 0;
	priv->total_received = 0;
	priv->max_buckets_out = max_buckets_out;
	if (mpt_dev->pfacts[0].MaxLanBuckets < max_buckets_out)
		priv->max_buckets_out = mpt_dev->pfacts[0].MaxLanBuckets;

	dlprintk((KERN_INFO MYNAM "@%d: MaxLanBuckets=%d, max_buckets_out/priv=%d/%d\n",
			__LINE__,
			mpt_dev->pfacts[0].MaxLanBuckets,
			max_buckets_out,
			priv->max_buckets_out));

	priv->bucketthresh = priv->max_buckets_out * 2 / 3;
	spin_lock_init(&priv->txfidx_lock);
	spin_lock_init(&priv->rxfidx_lock);

	/*  Grab pre-fetched LANPage1 stuff. :-) */
	a = (u8 *) &mpt_dev->lan_cnfg_page1.HardwareAddressLow;

	HWaddr[0] = a[5];
	HWaddr[1] = a[4];
	HWaddr[2] = a[3];
	HWaddr[3] = a[2];
	HWaddr[4] = a[1];
	HWaddr[5] = a[0];

	dev->addr_len = FC_ALEN;
	memcpy(dev->dev_addr, HWaddr, FC_ALEN);
	memset(dev->broadcast, 0xff, FC_ALEN);

	/* The Tx queue is 127 deep on the 909.
	 * Give ourselves some breathing room.
	 */
	priv->tx_max_out = (tx_max_out_p <= MPT_TX_MAX_OUT_LIM) ?
			    tx_max_out_p : MPT_TX_MAX_OUT_LIM;

	dev->netdev_ops = &mpt_netdev_ops;
	dev->watchdog_timeo = MPT_LAN_TX_TIMEOUT;

	dlprintk((KERN_INFO MYNAM ": Finished registering dev "
		"and setting initial values\n"));

	if (register_netdev(dev) != 0) {
		free_netdev(dev);
		dev = NULL;
	}
	return dev;
}

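/**
 *	mptlan_probe - Attach the LAN driver to a Fusion MPT adapter.
 *	@pdev: PCI device to probe
 *	@id: Matched PCI device ID entry
 *
 *	Walks the IOC's ports and registers a LAN netdevice on the first
 *	port with the LAN protocol enabled.  Returns 0 on success or
 *	-ENODEV if no LAN-capable port is found.
 */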
static int
mptlan_probe(struct pci_dev *pdev, const struct pci_device_id *id)
{
	MPT_ADAPTER		*ioc = pci_get_drvdata(pdev);
	struct net_device	*dev;
	int			i;

	for (i = 0; i < ioc->facts.NumberOfPorts; i++) {
		printk(KERN_INFO MYNAM ": %s: PortNum=%x, "
		       "ProtocolFlags=%02Xh (%c%c%c%c)\n",
		       ioc->name, ioc->pfacts[i].PortNumber,
		       ioc->pfacts[i].ProtocolFlags,
		       MPT_PROTOCOL_FLAGS_c_c_c_c(
			       ioc->pfacts[i].ProtocolFlags));

		if (!(ioc->pfacts[i].ProtocolFlags &
					MPI_PORTFACTS_PROTOCOL_LAN)) {
			printk(KERN_INFO MYNAM ": %s: Hmmm... LAN protocol "
			       "seems to be disabled on this adapter port!\n",
			       ioc->name);
			continue;
		}

		dev = mpt_register_lan_device(ioc, i);
		if (!dev) {
			printk(KERN_ERR MYNAM ": %s: Unable to register "
			       "port%d as a LAN device\n", ioc->name,
			       ioc->pfacts[i].PortNumber);
			continue;
		}

		printk(KERN_INFO MYNAM ": %s: Fusion MPT LAN device "
		       "registered as '%s'\n", ioc->name, dev->name);
		printk(KERN_INFO MYNAM ": %s/%s: "
		       "LanAddr = %pM\n",
		       IOC_AND_NETDEV_NAMES_s_s(dev),
		       dev->dev_addr);

		ioc->netdev = dev;

		return 0;
	}

	return -ENODEV;
}

static void
mptlan_remove(struct pci_dev *pdev)
{
	MPT_ADAPTER		*ioc = pci_get_drvdata(pdev);
	struct net_device	*dev = ioc->netdev;

	if (dev != NULL) {
		unregister_netdev(dev);
		free_netdev(dev);
	}
}

static struct mpt_pci_driver mptlan_driver = {
	.probe		= mptlan_probe,
	.remove		= mptlan_remove,
};

static int __init mpt_lan_init (void)
{
	show_mptmod_ver(LANAME, LANVER);

	if ((LanCtx = mpt_register(lan_reply, MPTLAN_DRIVER)) <= 0) {
		printk (KERN_ERR MYNAM ": Failed to register with MPT base driver\n");
		return -EBUSY;
	}

	dlprintk((KERN_INFO MYNAM ": assigned context of %d\n", LanCtx));

	if (mpt_reset_register(LanCtx, mpt_lan_ioc_reset)) {
		printk(KERN_ERR MYNAM ": Eieee! unable to register a reset "
		       "handler with mptbase! The world is at an end! "
		       "Everything is fading to black! Goodbye.\n");
		return -EBUSY;
	}

	dlprintk((KERN_INFO MYNAM ": Registered for IOC reset notifications\n"));

	mpt_device_driver_register(&mptlan_driver, MPTLAN_DRIVER);
	return 0;
}

static void __exit mpt_lan_exit(void)
{
	mpt_device_driver_deregister(MPTLAN_DRIVER);
	mpt_reset_deregister(LanCtx);

	if (LanCtx) {
		mpt_deregister(LanCtx);
		LanCtx = MPT_MAX_PROTOCOL_DRIVERS;
	}
}

module_init(mpt_lan_init);
module_exit(mpt_lan_exit);

/*=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=*/
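/**
 *	mpt_lan_type_trans - Determine packet type and protocol of an Rx skb.
 *	@skb: Received packet
 *	@dev: Pointer to the network device
 *
 *	Strips the FC LAN header (working around a known broadcast
 *	byte-swap firmware bug), classifies the packet from the destination
 *	address, and strips the SNAP header from IP and ARP packets.
 *	Returns the value for skb->protocol.
 */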
static unsigned short
mpt_lan_type_trans(struct sk_buff *skb, struct net_device *dev)
{
	struct mpt_lan_ohdr *fch = (struct mpt_lan_ohdr *)skb->data;
	struct fcllc *fcllc;

	skb_reset_mac_header(skb);
	skb_pull(skb, sizeof(struct mpt_lan_ohdr));

	if (fch->dtype == htons(0xffff)) {
		u32 *p = (u32 *) fch;

		swab32s(p + 0);
		swab32s(p + 1);
		swab32s(p + 2);
		swab32s(p + 3);

		printk (KERN_WARNING MYNAM ": %s: WARNING - Broadcast swap F/W bug detected!\n",
				NETDEV_PTR_TO_IOC_NAME_s(dev));
		printk (KERN_WARNING MYNAM ": Please update sender @ MAC_addr = %pM\n",
				fch->saddr);
	}

	if (*fch->daddr & 1) {
		if (!memcmp(fch->daddr, dev->broadcast, FC_ALEN)) {
			skb->pkt_type = PACKET_BROADCAST;
		} else {
			skb->pkt_type = PACKET_MULTICAST;
		}
	} else {
		if (memcmp(fch->daddr, dev->dev_addr, FC_ALEN)) {
			skb->pkt_type = PACKET_OTHERHOST;
		} else {
			skb->pkt_type = PACKET_HOST;
		}
	}

	fcllc = (struct fcllc *)skb->data;

	/* Strip the SNAP header from ARP packets since we don't
	 * pass them through to the 802.2/SNAP layers.
	 */
	if (fcllc->dsap == EXTENDED_SAP &&
		(fcllc->ethertype == htons(ETH_P_IP) ||
		 fcllc->ethertype == htons(ETH_P_ARP))) {
		skb_pull(skb, sizeof(struct fcllc));
		return fcllc->ethertype;
	}

	return htons(ETH_P_802_2);
}

/*=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=*/
1542