/*
 *  linux/drivers/message/fusion/mptlan.c
 *      IP Over Fibre Channel device driver.
 *      For use with LSI Fibre Channel PCI chip/adapters
 *      running LSI Fusion MPT (Message Passing Technology) firmware.
 *
 *  Copyright (c) 2000-2008 LSI Corporation
 *  (mailto:DL-MPTFusionLinux@lsi.com)
 *
 */
/*=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=*/
/*
    This program is free software; you can redistribute it and/or modify
    it under the terms of the GNU General Public License as published by
    the Free Software Foundation; version 2 of the License.

    This program is distributed in the hope that it will be useful,
    but WITHOUT ANY WARRANTY; without even the implied warranty of
    MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
    GNU General Public License for more details.

    NO WARRANTY
    THE PROGRAM IS PROVIDED ON AN "AS IS" BASIS, WITHOUT WARRANTIES OR
    CONDITIONS OF ANY KIND, EITHER EXPRESS OR IMPLIED INCLUDING, WITHOUT
    LIMITATION, ANY WARRANTIES OR CONDITIONS OF TITLE, NON-INFRINGEMENT,
    MERCHANTABILITY OR FITNESS FOR A PARTICULAR PURPOSE. Each Recipient is
    solely responsible for determining the appropriateness of using and
    distributing the Program and assumes all risks associated with its
    exercise of rights under this Agreement, including but not limited to
    the risks and costs of program errors, damage to or loss of data,
    programs or equipment, and unavailability or interruption of operations.

    DISCLAIMER OF LIABILITY
    NEITHER RECIPIENT NOR ANY CONTRIBUTORS SHALL HAVE ANY LIABILITY FOR ANY
    DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
    DAMAGES (INCLUDING WITHOUT LIMITATION LOST PROFITS), HOWEVER CAUSED AND
    ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR
    TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE
    USE OR DISTRIBUTION OF THE PROGRAM OR THE EXERCISE OF ANY RIGHTS GRANTED
    HEREUNDER, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGES

    You should have received a copy of the GNU General Public License
    along with this program; if not, write to the Free Software
    Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA  02111-1307  USA
*/

/*=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=*/
/*
 * Define statements used for debugging
 */
//#define MPT_LAN_IO_DEBUG

/*=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=*/

#include "mptlan.h"
#include <linux/init.h>
#include <linux/module.h>
#include <linux/fs.h>

#define my_VERSION	MPT_LINUX_VERSION_COMMON
#define MYNAM		"mptlan"

MODULE_LICENSE("GPL");
MODULE_VERSION(my_VERSION);

/*=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=*/
/*
 * MPT LAN message sizes without variable part.
 */
#define MPT_LAN_RECEIVE_POST_REQUEST_SIZE \
	(sizeof(LANReceivePostRequest_t) - sizeof(SGE_MPI_UNION))

#define MPT_LAN_TRANSACTION32_SIZE \
	(sizeof(SGETransaction32_t) - sizeof(u32))

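/*
 * The fixed sizes above are the structure size minus the variable tail
 * (the SGE union of the receive post request, and the trailing 32-bit
 * word of the transaction element).  mpt_lan_post_receive_buckets()
 * uses them to work out how many buckets fit into one request frame.
 */
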
/*
 *  Fusion MPT LAN private structures
 */

struct BufferControl {
	struct sk_buff	*skb;
	dma_addr_t	dma;
	unsigned int	len;
};

struct mpt_lan_priv {
	MPT_ADAPTER *mpt_dev;
	u8 pnum; /* Port number in the IOC. This is not a Unix network port! */

	atomic_t buckets_out;		/* number of unused buckets on IOC */
	int bucketthresh;		/* Send more when this many left */

	int *mpt_txfidx; /* Free Tx Context list */
	int mpt_txfidx_tail;
	spinlock_t txfidx_lock;

	int *mpt_rxfidx; /* Free Rx Context list */
	int mpt_rxfidx_tail;
	spinlock_t rxfidx_lock;

	struct BufferControl *RcvCtl;	/* Receive BufferControl structs */
	struct BufferControl *SendCtl;	/* Send BufferControl structs */

	int max_buckets_out;		/* Max buckets to send to IOC */
	int tx_max_out;			/* IOC's Tx queue len */

	u32 total_posted;
	u32 total_received;

	struct delayed_work post_buckets_task;
	struct net_device *dev;
	unsigned long post_buckets_active;
};
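
/*
 * The free-context arrays above are treated as LIFO stacks indexed by
 * their *_tail fields, with -1 meaning empty.  The idiom, always taken
 * under the matching lock, is:
 *
 *	ctx = priv->mpt_txfidx[priv->mpt_txfidx_tail--];	- pop
 *	priv->mpt_txfidx[++priv->mpt_txfidx_tail] = ctx;	- push
 */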

struct mpt_lan_ohdr {
	u16	dtype;
	u8	daddr[FC_ALEN];
	u16	stype;
	u8	saddr[FC_ALEN];
};
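
/*
 * On-wire encapsulation header found at the front of each packet: a
 * 16-bit type word and an FC_ALEN-byte address, for the destination and
 * the source respectively.  mpt_lan_type_trans() parses this header to
 * set skb->pkt_type and the protocol on receive.
 */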

/*=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=*/

/*
 *  Forward protos...
 */
static int  lan_reply (MPT_ADAPTER *ioc, MPT_FRAME_HDR *mf,
		       MPT_FRAME_HDR *reply);
static int  mpt_lan_open(struct net_device *dev);
static int  mpt_lan_reset(struct net_device *dev);
static int  mpt_lan_close(struct net_device *dev);
static void mpt_lan_post_receive_buckets(struct mpt_lan_priv *priv);
static void mpt_lan_wake_post_buckets_task(struct net_device *dev,
					   int priority);
static int  mpt_lan_receive_post_turbo(struct net_device *dev, u32 tmsg);
static int  mpt_lan_receive_post_reply(struct net_device *dev,
				       LANReceivePostReply_t *pRecvRep);
static int  mpt_lan_send_turbo(struct net_device *dev, u32 tmsg);
static int  mpt_lan_send_reply(struct net_device *dev,
			       LANSendReply_t *pSendRep);
static int  mpt_lan_ioc_reset(MPT_ADAPTER *ioc, int reset_phase);
static int  mpt_lan_event_process(MPT_ADAPTER *ioc, EventNotificationReply_t *pEvReply);
static unsigned short mpt_lan_type_trans(struct sk_buff *skb,
					 struct net_device *dev);

/*=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=*/
/*
 *  Fusion MPT LAN private data
 */
static u8 LanCtx = MPT_MAX_PROTOCOL_DRIVERS;

static u32 max_buckets_out = 127;
static u32 tx_max_out_p = 127 - 16;
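
/*
 * Defaults: 127 matches the 909's Tx queue depth (see the comment in
 * mpt_register_lan_device()), and tx_max_out_p leaves 16 frames of
 * headroom below it.  max_buckets_out is further capped by the port's
 * MaxLanBuckets at registration time, and tx_max_out_p by
 * MPT_TX_MAX_OUT_LIM.
 */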

/*=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=*/
/**
 *	lan_reply - Handle all data sent from the hardware.
 *	@ioc: Pointer to MPT_ADAPTER structure
 *	@mf: Pointer to original MPT request frame (NULL if TurboReply)
 *	@reply: Pointer to MPT reply frame
 *
 *	Returns 1 indicating original alloc'd request frame ptr
 *	should be freed, or 0 if it shouldn't.
 */
static int
lan_reply (MPT_ADAPTER *ioc, MPT_FRAME_HDR *mf, MPT_FRAME_HDR *reply)
{
	struct net_device *dev = ioc->netdev;
	int FreeReqFrame = 0;

	dioprintk((KERN_INFO MYNAM ": %s/%s: Got reply.\n",
		  IOC_AND_NETDEV_NAMES_s_s(dev)));

//	dioprintk((KERN_INFO MYNAM "@lan_reply: mf = %p, reply = %p\n",
//			mf, reply));

	if (mf == NULL) {
		u32 tmsg = CAST_PTR_TO_U32(reply);

		dioprintk((KERN_INFO MYNAM ": %s/%s: @lan_reply, tmsg %08x\n",
				IOC_AND_NETDEV_NAMES_s_s(dev),
				tmsg));

		switch (GET_LAN_FORM(tmsg)) {

		// NOTE!  (Optimization) First case here is now caught in
		//  mptbase.c::mpt_interrupt() routine and callback here
		//  is now skipped for this case!
#if 0
		case LAN_REPLY_FORM_MESSAGE_CONTEXT:
//			dioprintk((KERN_INFO MYNAM "/lan_reply: "
//				  "MessageContext turbo reply received\n"));
			FreeReqFrame = 1;
			break;
#endif

		case LAN_REPLY_FORM_SEND_SINGLE:
//			dioprintk((MYNAM "/lan_reply: "
//				  "calling mpt_lan_send_reply (turbo)\n"));

			// Potential BUG here?
			//	FreeReqFrame = mpt_lan_send_turbo(dev, tmsg);
			//  If/when mpt_lan_send_turbo would return 1 here,
			//  calling routine (mptbase.c|mpt_interrupt)
			//  would Oops because mf has already been set
			//  to NULL.  So after return from this func,
			//  mpt_interrupt() will attempt to put (NULL) mf ptr
			//  item back onto its adapter FreeQ - Oops!:-(
			//  It's Ok, since mpt_lan_send_turbo() *currently*
			//  always returns 0, but..., just in case:

			(void) mpt_lan_send_turbo(dev, tmsg);
			FreeReqFrame = 0;

			break;

		case LAN_REPLY_FORM_RECEIVE_SINGLE:
//			dioprintk((KERN_INFO MYNAM "@lan_reply: "
//				  "rcv-Turbo = %08x\n", tmsg));
			mpt_lan_receive_post_turbo(dev, tmsg);
			break;

		default:
			printk (KERN_ERR MYNAM "/lan_reply: Got a turbo reply "
				"that I don't know what to do with\n");

			/* CHECKME!  Hmmm...  FreeReqFrame is 0 here; is that right? */

			break;
		}

		return FreeReqFrame;
	}

//	msg = (u32 *) reply;
//	dioprintk((KERN_INFO MYNAM "@lan_reply: msg = %08x %08x %08x %08x\n",
//		  le32_to_cpu(msg[0]), le32_to_cpu(msg[1]),
//		  le32_to_cpu(msg[2]), le32_to_cpu(msg[3])));
//	dioprintk((KERN_INFO MYNAM "@lan_reply: Function = %02xh\n",
//		  reply->u.hdr.Function));

	switch (reply->u.hdr.Function) {

	case MPI_FUNCTION_LAN_SEND:
	{
		LANSendReply_t *pSendRep;

		pSendRep = (LANSendReply_t *) reply;
		FreeReqFrame = mpt_lan_send_reply(dev, pSendRep);
		break;
	}

	case MPI_FUNCTION_LAN_RECEIVE:
	{
		LANReceivePostReply_t *pRecvRep;

		pRecvRep = (LANReceivePostReply_t *) reply;
		if (pRecvRep->NumberOfContexts) {
			mpt_lan_receive_post_reply(dev, pRecvRep);
			if (!(pRecvRep->MsgFlags & MPI_MSGFLAGS_CONTINUATION_REPLY))
				FreeReqFrame = 1;
		} else
			dioprintk((KERN_INFO MYNAM "@lan_reply: zero context "
				  "ReceivePostReply received.\n"));
		break;
	}

	case MPI_FUNCTION_LAN_RESET:
		/* Just a default reply. Might want to check it to
		 * make sure that everything went ok.
		 */
		FreeReqFrame = 1;
		break;

	case MPI_FUNCTION_EVENT_NOTIFICATION:
	case MPI_FUNCTION_EVENT_ACK:
		/*  _EVENT_NOTIFICATION should NOT come down this path any more.
		 *  Should be routed to mpt_lan_event_process(), but just in case...
		 */
		FreeReqFrame = 1;
		break;

	default:
		printk (KERN_ERR MYNAM "/lan_reply: Got a non-turbo "
			"reply that I don't know what to do with\n");

		/* CHECKME!  Hmmm...  FreeReqFrame is 0 here; is that right? */
		FreeReqFrame = 1;

		break;
	}

	return FreeReqFrame;
}

/*=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=*/
static int
mpt_lan_ioc_reset(MPT_ADAPTER *ioc, int reset_phase)
{
	struct net_device *dev = ioc->netdev;
	struct mpt_lan_priv *priv;

	if (dev == NULL)
		return(1);
	else
		priv = netdev_priv(dev);

	dlprintk((KERN_INFO MYNAM ": IOC %s_reset routed to LAN driver!\n",
			reset_phase==MPT_IOC_SETUP_RESET ? "setup" : (
			reset_phase==MPT_IOC_PRE_RESET ? "pre" : "post")));

	if (priv->mpt_rxfidx == NULL)
		return (1);

	if (reset_phase == MPT_IOC_SETUP_RESET) {
		;
	} else if (reset_phase == MPT_IOC_PRE_RESET) {
		int i;
		unsigned long flags;

		netif_stop_queue(dev);

		dlprintk ((KERN_INFO "mptlan/ioc_reset: called netif_stop_queue for %s.\n", dev->name));

		atomic_set(&priv->buckets_out, 0);

		/* Reset Rx Free Tail index and re-populate the queue. */
		spin_lock_irqsave(&priv->rxfidx_lock, flags);
		priv->mpt_rxfidx_tail = -1;
		for (i = 0; i < priv->max_buckets_out; i++)
			priv->mpt_rxfidx[++priv->mpt_rxfidx_tail] = i;
		spin_unlock_irqrestore(&priv->rxfidx_lock, flags);
	} else {
		mpt_lan_post_receive_buckets(priv);
		netif_wake_queue(dev);
	}

	return 1;
}

/*=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=*/
static int
mpt_lan_event_process(MPT_ADAPTER *ioc, EventNotificationReply_t *pEvReply)
{
	dlprintk((KERN_INFO MYNAM ": MPT event routed to LAN driver!\n"));

	switch (le32_to_cpu(pEvReply->Event)) {
	case MPI_EVENT_NONE:				/* 00 */
	case MPI_EVENT_LOG_DATA:			/* 01 */
	case MPI_EVENT_STATE_CHANGE:			/* 02 */
	case MPI_EVENT_UNIT_ATTENTION:			/* 03 */
	case MPI_EVENT_IOC_BUS_RESET:			/* 04 */
	case MPI_EVENT_EXT_BUS_RESET:			/* 05 */
	case MPI_EVENT_RESCAN:				/* 06 */
		/* Ok, do we need to do anything here? As far as
		   I can tell, this is when a new device gets added
		   to the loop. */
	case MPI_EVENT_LINK_STATUS_CHANGE:		/* 07 */
	case MPI_EVENT_LOOP_STATE_CHANGE:		/* 08 */
	case MPI_EVENT_LOGOUT:				/* 09 */
	case MPI_EVENT_EVENT_CHANGE:			/* 0A */
	default:
		break;
	}

	/*
	 *  NOTE: pEvent->AckRequired handling now done in mptbase.c;
	 *  Do NOT do it here now!
	 */

	return 1;
}

/*=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=*/
static int
mpt_lan_open(struct net_device *dev)
{
	struct mpt_lan_priv *priv = netdev_priv(dev);
	int i;

	if (mpt_lan_reset(dev) != 0) {
		MPT_ADAPTER *mpt_dev = priv->mpt_dev;

		printk (KERN_WARNING MYNAM "/lan_open: lan_reset failed.");

		if (mpt_dev->active)
			printk ("The ioc is active. Perhaps it needs to be"
				" reset?\n");
		else
			printk ("The ioc is inactive, most likely in the "
				"process of being reset. Please try again in "
				"a moment.\n");
	}

	priv->mpt_txfidx = kmalloc(priv->tx_max_out * sizeof(int), GFP_KERNEL);
	if (priv->mpt_txfidx == NULL)
		goto out;
	priv->mpt_txfidx_tail = -1;

	priv->SendCtl = kcalloc(priv->tx_max_out, sizeof(struct BufferControl),
				GFP_KERNEL);
	if (priv->SendCtl == NULL)
		goto out_mpt_txfidx;
	for (i = 0; i < priv->tx_max_out; i++)
		priv->mpt_txfidx[++priv->mpt_txfidx_tail] = i;

	dlprintk((KERN_INFO MYNAM "@lo: Finished initializing SendCtl\n"));

	priv->mpt_rxfidx = kmalloc(priv->max_buckets_out * sizeof(int),
				   GFP_KERNEL);
	if (priv->mpt_rxfidx == NULL)
		goto out_SendCtl;
	priv->mpt_rxfidx_tail = -1;

	priv->RcvCtl = kcalloc(priv->max_buckets_out,
			       sizeof(struct BufferControl),
			       GFP_KERNEL);
	if (priv->RcvCtl == NULL)
		goto out_mpt_rxfidx;
	for (i = 0; i < priv->max_buckets_out; i++)
		priv->mpt_rxfidx[++priv->mpt_rxfidx_tail] = i;

/**/	dlprintk((KERN_INFO MYNAM "/lo: txfidx contains - "));
/**/	for (i = 0; i < priv->tx_max_out; i++)
/**/		dlprintk((" %xh", priv->mpt_txfidx[i]));
/**/	dlprintk(("\n"));

	dlprintk((KERN_INFO MYNAM "/lo: Finished initializing RcvCtl\n"));

	mpt_lan_post_receive_buckets(priv);
	printk(KERN_INFO MYNAM ": %s/%s: interface up & active\n",
			IOC_AND_NETDEV_NAMES_s_s(dev));

	if (mpt_event_register(LanCtx, mpt_lan_event_process) != 0) {
		printk (KERN_WARNING MYNAM "/lo: Unable to register for Event"
			" Notifications. This is a bad thing! We're not going "
			"to go ahead, but I'd be leery of system stability at "
			"this point.\n");
	}

	netif_start_queue(dev);
	dlprintk((KERN_INFO MYNAM "/lo: Done.\n"));

	return 0;
out_mpt_rxfidx:
	kfree(priv->mpt_rxfidx);
	priv->mpt_rxfidx = NULL;
out_SendCtl:
	kfree(priv->SendCtl);
	priv->SendCtl = NULL;
out_mpt_txfidx:
	kfree(priv->mpt_txfidx);
	priv->mpt_txfidx = NULL;
out:	return -ENOMEM;
}

/*=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=*/
/* Send a LanReset message to the FW. This should result in the FW returning
   any buckets it still has. */
static int
mpt_lan_reset(struct net_device *dev)
{
	MPT_FRAME_HDR *mf;
	LANResetRequest_t *pResetReq;
	struct mpt_lan_priv *priv = netdev_priv(dev);

	mf = mpt_get_msg_frame(LanCtx, priv->mpt_dev);

	if (mf == NULL) {
/*		dlprintk((KERN_ERR MYNAM "/reset: Evil funkiness abounds! "
		"Unable to allocate a request frame.\n"));
*/
		return -1;
	}

	pResetReq = (LANResetRequest_t *) mf;

	pResetReq->Function	= MPI_FUNCTION_LAN_RESET;
	pResetReq->ChainOffset	= 0;
	pResetReq->Reserved	= 0;
	pResetReq->PortNumber	= priv->pnum;
	pResetReq->MsgFlags	= 0;
	pResetReq->Reserved2	= 0;

	mpt_put_msg_frame(LanCtx, priv->mpt_dev, mf);

	return 0;
}

/*=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=*/
static int
mpt_lan_close(struct net_device *dev)
{
	struct mpt_lan_priv *priv = netdev_priv(dev);
	MPT_ADAPTER *mpt_dev = priv->mpt_dev;
	unsigned long timeout;
	int i;

	dlprintk((KERN_INFO MYNAM ": mpt_lan_close called\n"));

	mpt_event_deregister(LanCtx);

	dlprintk((KERN_INFO MYNAM ":lan_close: Posted %d buckets "
		  "since driver was loaded, %d still out\n",
		  priv->total_posted, atomic_read(&priv->buckets_out)));

	netif_stop_queue(dev);

	mpt_lan_reset(dev);

	timeout = jiffies + 2 * HZ;
	while (atomic_read(&priv->buckets_out) && time_before(jiffies, timeout))
		schedule_timeout_interruptible(1);

	for (i = 0; i < priv->max_buckets_out; i++) {
		if (priv->RcvCtl[i].skb != NULL) {
/**/			dlprintk((KERN_INFO MYNAM "/lan_close: bucket %05x "
/**/				  "is still out\n", i));
			pci_unmap_single(mpt_dev->pcidev, priv->RcvCtl[i].dma,
					 priv->RcvCtl[i].len,
					 PCI_DMA_FROMDEVICE);
			dev_kfree_skb(priv->RcvCtl[i].skb);
		}
	}

	kfree(priv->RcvCtl);
	kfree(priv->mpt_rxfidx);

	for (i = 0; i < priv->tx_max_out; i++) {
		if (priv->SendCtl[i].skb != NULL) {
			pci_unmap_single(mpt_dev->pcidev, priv->SendCtl[i].dma,
					 priv->SendCtl[i].len,
					 PCI_DMA_TODEVICE);
			dev_kfree_skb(priv->SendCtl[i].skb);
		}
	}

	kfree(priv->SendCtl);
	kfree(priv->mpt_txfidx);

	atomic_set(&priv->buckets_out, 0);

	printk(KERN_INFO MYNAM ": %s/%s: interface down & inactive\n",
			IOC_AND_NETDEV_NAMES_s_s(dev));

	return 0;
}

/*=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=*/
static int
mpt_lan_change_mtu(struct net_device *dev, int new_mtu)
{
	if ((new_mtu < MPT_LAN_MIN_MTU) || (new_mtu > MPT_LAN_MAX_MTU))
		return -EINVAL;
	dev->mtu = new_mtu;
	return 0;
}

/*=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=*/
/* Tx timeout handler. */
static void
mpt_lan_tx_timeout(struct net_device *dev)
{
	struct mpt_lan_priv *priv = netdev_priv(dev);
	MPT_ADAPTER *mpt_dev = priv->mpt_dev;

	if (mpt_dev->active) {
		dlprintk (("mptlan/tx_timeout: calling netif_wake_queue for %s.\n", dev->name));
		netif_wake_queue(dev);
	}
}

/*=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=*/
//static inline int
static int
mpt_lan_send_turbo(struct net_device *dev, u32 tmsg)
{
	struct mpt_lan_priv *priv = netdev_priv(dev);
	MPT_ADAPTER *mpt_dev = priv->mpt_dev;
	struct sk_buff *sent;
	unsigned long flags;
	u32 ctx;

	ctx = GET_LAN_BUFFER_CONTEXT(tmsg);
	sent = priv->SendCtl[ctx].skb;

	dev->stats.tx_packets++;
	dev->stats.tx_bytes += sent->len;

	dioprintk((KERN_INFO MYNAM ": %s/%s: @%s, skb %p sent.\n",
			IOC_AND_NETDEV_NAMES_s_s(dev),
			__func__, sent));

	priv->SendCtl[ctx].skb = NULL;
	pci_unmap_single(mpt_dev->pcidev, priv->SendCtl[ctx].dma,
			 priv->SendCtl[ctx].len, PCI_DMA_TODEVICE);
	dev_kfree_skb_irq(sent);

	spin_lock_irqsave(&priv->txfidx_lock, flags);
	priv->mpt_txfidx[++priv->mpt_txfidx_tail] = ctx;
	spin_unlock_irqrestore(&priv->txfidx_lock, flags);

	netif_wake_queue(dev);
	return 0;
}

/*=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=*/
static int
mpt_lan_send_reply(struct net_device *dev, LANSendReply_t *pSendRep)
{
	struct mpt_lan_priv *priv = netdev_priv(dev);
	MPT_ADAPTER *mpt_dev = priv->mpt_dev;
	struct sk_buff *sent;
	unsigned long flags;
	int FreeReqFrame = 0;
	u32 *pContext;
	u32 ctx;
	u8 count;

	count = pSendRep->NumberOfContexts;

	dioprintk((KERN_INFO MYNAM ": send_reply: IOCStatus: %04x\n",
		 le16_to_cpu(pSendRep->IOCStatus)));

	/* Add check for Loginfo Flag in IOCStatus */

	switch (le16_to_cpu(pSendRep->IOCStatus) & MPI_IOCSTATUS_MASK) {
	case MPI_IOCSTATUS_SUCCESS:
		dev->stats.tx_packets += count;
		break;

	case MPI_IOCSTATUS_LAN_CANCELED:
	case MPI_IOCSTATUS_LAN_TRANSMIT_ABORTED:
		break;

	case MPI_IOCSTATUS_INVALID_SGL:
		dev->stats.tx_errors += count;
		printk (KERN_ERR MYNAM ": %s/%s: ERROR - Invalid SGL sent to IOC!\n",
				IOC_AND_NETDEV_NAMES_s_s(dev));
		goto out;

	default:
		dev->stats.tx_errors += count;
		break;
	}

	pContext = &pSendRep->BufferContext;

	spin_lock_irqsave(&priv->txfidx_lock, flags);
	while (count > 0) {
		ctx = GET_LAN_BUFFER_CONTEXT(le32_to_cpu(*pContext));

		sent = priv->SendCtl[ctx].skb;
		dev->stats.tx_bytes += sent->len;

		dioprintk((KERN_INFO MYNAM ": %s/%s: @%s, skb %p sent.\n",
				IOC_AND_NETDEV_NAMES_s_s(dev),
				__func__, sent));

		priv->SendCtl[ctx].skb = NULL;
		pci_unmap_single(mpt_dev->pcidev, priv->SendCtl[ctx].dma,
				 priv->SendCtl[ctx].len, PCI_DMA_TODEVICE);
		dev_kfree_skb_irq(sent);

		priv->mpt_txfidx[++priv->mpt_txfidx_tail] = ctx;

		pContext++;
		count--;
	}
	spin_unlock_irqrestore(&priv->txfidx_lock, flags);

out:
	if (!(pSendRep->MsgFlags & MPI_MSGFLAGS_CONTINUATION_REPLY))
		FreeReqFrame = 1;

	netif_wake_queue(dev);
	return FreeReqFrame;
}

/*=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=*/
static int
mpt_lan_sdu_send (struct sk_buff *skb, struct net_device *dev)
{
	struct mpt_lan_priv *priv = netdev_priv(dev);
	MPT_ADAPTER *mpt_dev = priv->mpt_dev;
	MPT_FRAME_HDR *mf;
	LANSendRequest_t *pSendReq;
	SGETransaction32_t *pTrans;
	SGESimple64_t *pSimple;
	const unsigned char *mac;
	dma_addr_t dma;
	unsigned long flags;
	int ctx;
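	/* NAA nibble for the 16-bit type word built below; 0x1000 is
	 * apparently the IEEE 48-bit MAC form of the FC network address. */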
	u16 cur_naa = 0x1000;

	dioprintk((KERN_INFO MYNAM ": %s called, skb_addr = %p\n",
			__func__, skb));

	spin_lock_irqsave(&priv->txfidx_lock, flags);
	if (priv->mpt_txfidx_tail < 0) {
		netif_stop_queue(dev);
		spin_unlock_irqrestore(&priv->txfidx_lock, flags);

		printk (KERN_ERR "%s: no tx context available: %u\n",
			__func__, priv->mpt_txfidx_tail);
		return 1;
	}

	mf = mpt_get_msg_frame(LanCtx, mpt_dev);
	if (mf == NULL) {
		netif_stop_queue(dev);
		spin_unlock_irqrestore(&priv->txfidx_lock, flags);

		printk (KERN_ERR "%s: Unable to alloc request frame\n",
			__func__);
		return 1;
	}

	ctx = priv->mpt_txfidx[priv->mpt_txfidx_tail--];
	spin_unlock_irqrestore(&priv->txfidx_lock, flags);

//	dioprintk((KERN_INFO MYNAM ": %s/%s: Creating new msg frame (send).\n",
//			IOC_AND_NETDEV_NAMES_s_s(dev)));

	pSendReq = (LANSendRequest_t *) mf;

	/* Set the mac.raw pointer, since this apparently isn't getting
	 * done before we get the skb. Pull the data pointer past the mac data.
	 */
	skb_reset_mac_header(skb);
	skb_pull(skb, 12);

	dma = pci_map_single(mpt_dev->pcidev, skb->data, skb->len,
			     PCI_DMA_TODEVICE);

	priv->SendCtl[ctx].skb = skb;
	priv->SendCtl[ctx].dma = dma;
	priv->SendCtl[ctx].len = skb->len;

	/* Message Header */
	pSendReq->Reserved    = 0;
	pSendReq->Function    = MPI_FUNCTION_LAN_SEND;
	pSendReq->ChainOffset = 0;
	pSendReq->Reserved2   = 0;
	pSendReq->MsgFlags    = 0;
	pSendReq->PortNumber  = priv->pnum;

	/* Transaction Context Element */
	pTrans = (SGETransaction32_t *) pSendReq->SG_List;

	/* No Flags, 8 bytes of Details, 32bit Context (bloody turbo replies) */
	pTrans->ContextSize   = sizeof(u32);
	pTrans->DetailsLength = 2 * sizeof(u32);
	pTrans->Flags         = 0;
	pTrans->TransactionContext[0] = cpu_to_le32(ctx);

//	dioprintk((KERN_INFO MYNAM ": %s/%s: BC = %08x, skb = %p, buff = %p\n",
//			IOC_AND_NETDEV_NAMES_s_s(dev),
//			ctx, skb, skb->data));

	mac = skb_mac_header(skb);

	pTrans->TransactionDetails[0] = cpu_to_le32((cur_naa         << 16) |
						    (mac[0] <<  8) |
						    (mac[1] <<  0));
	pTrans->TransactionDetails[1] = cpu_to_le32((mac[2] << 24) |
						    (mac[3] << 16) |
						    (mac[4] <<  8) |
						    (mac[5] <<  0));

	pSimple = (SGESimple64_t *) &pTrans->TransactionDetails[2];

	/* If we ever decide to send more than one Simple SGE per LANSend, then
	   we will need to make sure that LAST_ELEMENT only gets set on the
	   last one. Otherwise, bad voodoo and evil funkiness will commence. */
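	/* Sketch of what that would look like: a middle element would carry
	 * only SIMPLE_ELEMENT | SYSTEM_ADDRESS | HOST_TO_IOC |
	 * 64_BIT_ADDRESSING, with LAST_ELEMENT, END_OF_BUFFER and
	 * END_OF_LIST reserved for the final SGE, i.e. the full set used
	 * on the single element below. */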
	pSimple->FlagsLength = cpu_to_le32(
			((MPI_SGE_FLAGS_LAST_ELEMENT |
			  MPI_SGE_FLAGS_END_OF_BUFFER |
			  MPI_SGE_FLAGS_SIMPLE_ELEMENT |
			  MPI_SGE_FLAGS_SYSTEM_ADDRESS |
			  MPI_SGE_FLAGS_HOST_TO_IOC |
			  MPI_SGE_FLAGS_64_BIT_ADDRESSING |
			  MPI_SGE_FLAGS_END_OF_LIST) << MPI_SGE_FLAGS_SHIFT) |
			skb->len);
	pSimple->Address.Low = cpu_to_le32((u32) dma);
	if (sizeof(dma_addr_t) > sizeof(u32))
		pSimple->Address.High = cpu_to_le32((u32) ((u64) dma >> 32));
	else
		pSimple->Address.High = 0;

	mpt_put_msg_frame (LanCtx, mpt_dev, mf);
	dev->trans_start = jiffies;

	dioprintk((KERN_INFO MYNAM ": %s/%s: Sending packet. FlagsLength = %08x.\n",
			IOC_AND_NETDEV_NAMES_s_s(dev),
			le32_to_cpu(pSimple->FlagsLength)));

	return 0;
}

/*=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=*/
/*
 * @priority: 0 = put it on the timer queue, 1 = put it on the immediate queue
 */
static void
mpt_lan_wake_post_buckets_task(struct net_device *dev, int priority)
{
	struct mpt_lan_priv *priv = netdev_priv(dev);

	if (test_and_set_bit(0, &priv->post_buckets_active) == 0) {
		if (priority) {
			schedule_delayed_work(&priv->post_buckets_task, 0);
		} else {
			schedule_delayed_work(&priv->post_buckets_task, 1);
			dioprintk((KERN_INFO MYNAM ": post_buckets queued on "
				   "timer.\n"));
		}
		dioprintk((KERN_INFO MYNAM ": %s/%s: Queued post_buckets task.\n",
			   IOC_AND_NETDEV_NAMES_s_s(dev) ));
	}
}
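
/*
 * post_buckets_active acts as a single-flight guard: the
 * test_and_set_bit() above ensures at most one posting task is queued,
 * and mpt_lan_post_receive_buckets() clears the bit once it has run.
 */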

/*=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=*/
static int
mpt_lan_receive_skb(struct net_device *dev, struct sk_buff *skb)
{
	struct mpt_lan_priv *priv = netdev_priv(dev);

	skb->protocol = mpt_lan_type_trans(skb, dev);

	dioprintk((KERN_INFO MYNAM ": %s/%s: Incoming packet (%d bytes) "
		 "delivered to upper level.\n",
			IOC_AND_NETDEV_NAMES_s_s(dev), skb->len));

	dev->stats.rx_bytes += skb->len;
	dev->stats.rx_packets++;

	skb->dev = dev;
	netif_rx(skb);

	dioprintk((MYNAM "/receive_skb: %d buckets remaining\n",
		 atomic_read(&priv->buckets_out)));

	if (atomic_read(&priv->buckets_out) < priv->bucketthresh)
		mpt_lan_wake_post_buckets_task(dev, 1);

	dioprintk((KERN_INFO MYNAM "/receive_post_reply: %d buckets "
		  "remaining, %d received back since sod\n",
		  atomic_read(&priv->buckets_out), priv->total_received));

	return 0;
}

/*=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=*/
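/*
 * Turbo (single-bucket) receive path.  Packets shorter than
 * MPT_LAN_RX_COPYBREAK are copied into a fresh skb so the still-mapped
 * bucket can be reposted as-is; longer packets hand the bucket's skb up
 * the stack, unmap it, and leave the bucket to be refilled later by
 * mpt_lan_post_receive_buckets().
 */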
//static inline int
static int
mpt_lan_receive_post_turbo(struct net_device *dev, u32 tmsg)
{
	struct mpt_lan_priv *priv = netdev_priv(dev);
	MPT_ADAPTER *mpt_dev = priv->mpt_dev;
	struct sk_buff *skb, *old_skb;
	unsigned long flags;
	u32 ctx, len;

	ctx = GET_LAN_BUCKET_CONTEXT(tmsg);
	skb = priv->RcvCtl[ctx].skb;

	len = GET_LAN_PACKET_LENGTH(tmsg);

	if (len < MPT_LAN_RX_COPYBREAK) {
		old_skb = skb;

		skb = (struct sk_buff *)dev_alloc_skb(len);
		if (!skb) {
			printk (KERN_ERR MYNAM ": %s/%s: ERROR - Can't allocate skb! (%s@%d)\n",
					IOC_AND_NETDEV_NAMES_s_s(dev),
					__FILE__, __LINE__);
			return -ENOMEM;
		}

		pci_dma_sync_single_for_cpu(mpt_dev->pcidev, priv->RcvCtl[ctx].dma,
					    priv->RcvCtl[ctx].len, PCI_DMA_FROMDEVICE);

		skb_copy_from_linear_data(old_skb, skb_put(skb, len), len);

		pci_dma_sync_single_for_device(mpt_dev->pcidev, priv->RcvCtl[ctx].dma,
					       priv->RcvCtl[ctx].len, PCI_DMA_FROMDEVICE);
		goto out;
	}

	skb_put(skb, len);

	priv->RcvCtl[ctx].skb = NULL;

	pci_unmap_single(mpt_dev->pcidev, priv->RcvCtl[ctx].dma,
			 priv->RcvCtl[ctx].len, PCI_DMA_FROMDEVICE);

out:
	spin_lock_irqsave(&priv->rxfidx_lock, flags);
	priv->mpt_rxfidx[++priv->mpt_rxfidx_tail] = ctx;
	spin_unlock_irqrestore(&priv->rxfidx_lock, flags);

	atomic_dec(&priv->buckets_out);
	priv->total_received++;

	return mpt_lan_receive_skb(dev, skb);
}

/*=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=*/
static int
mpt_lan_receive_post_free(struct net_device *dev,
			  LANReceivePostReply_t *pRecvRep)
{
	struct mpt_lan_priv *priv = netdev_priv(dev);
	MPT_ADAPTER *mpt_dev = priv->mpt_dev;
	unsigned long flags;
	struct sk_buff *skb;
	u32 ctx;
	int count;
	int i;

	count = pRecvRep->NumberOfContexts;

/**/	dlprintk((KERN_INFO MYNAM "/receive_post_reply: "
		  "IOC returned %d buckets, freeing them...\n", count));

	spin_lock_irqsave(&priv->rxfidx_lock, flags);
	for (i = 0; i < count; i++) {
		ctx = le32_to_cpu(pRecvRep->BucketContext[i]);

		skb = priv->RcvCtl[ctx].skb;

//		dlprintk((KERN_INFO MYNAM ": %s: dev_name = %s\n",
//				IOC_AND_NETDEV_NAMES_s_s(dev)));
//		dlprintk((KERN_INFO MYNAM "@rpr[2], priv = %p, buckets_out addr = %p",
//				priv, &(priv->buckets_out)));
//		dlprintk((KERN_INFO MYNAM "@rpr[2] TC + 3\n"));

		priv->RcvCtl[ctx].skb = NULL;
		pci_unmap_single(mpt_dev->pcidev, priv->RcvCtl[ctx].dma,
				 priv->RcvCtl[ctx].len, PCI_DMA_FROMDEVICE);
		dev_kfree_skb_any(skb);

		priv->mpt_rxfidx[++priv->mpt_rxfidx_tail] = ctx;
	}
	spin_unlock_irqrestore(&priv->rxfidx_lock, flags);

	atomic_sub(count, &priv->buckets_out);

//	for (i = 0; i < priv->max_buckets_out; i++)
//		if (priv->RcvCtl[i].skb != NULL)
//			dlprintk((KERN_INFO MYNAM "@rpr: bucket %03x "
//				  "is still out\n", i));

/*	dlprintk((KERN_INFO MYNAM "/receive_post_reply: freed %d buckets\n",
		  count));
*/
/**/	dlprintk((KERN_INFO MYNAM "@receive_post_reply: %d buckets "
/**/		  "remaining, %d received back since sod.\n",
/**/		  atomic_read(&priv->buckets_out), priv->total_received));
	return 0;
}

/*=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=*/
static int
mpt_lan_receive_post_reply(struct net_device *dev,
			   LANReceivePostReply_t *pRecvRep)
{
	struct mpt_lan_priv *priv = netdev_priv(dev);
	MPT_ADAPTER *mpt_dev = priv->mpt_dev;
	struct sk_buff *skb, *old_skb;
	unsigned long flags;
	u32 len, ctx, offset;
	u32 remaining = le32_to_cpu(pRecvRep->BucketsRemaining);
	int count;
	int i, l;

	dioprintk((KERN_INFO MYNAM ": mpt_lan_receive_post_reply called\n"));
	dioprintk((KERN_INFO MYNAM ": receive_post_reply: IOCStatus: %04x\n",
		 le16_to_cpu(pRecvRep->IOCStatus)));

	if ((le16_to_cpu(pRecvRep->IOCStatus) & MPI_IOCSTATUS_MASK) ==
						MPI_IOCSTATUS_LAN_CANCELED)
		return mpt_lan_receive_post_free(dev, pRecvRep);

	len = le32_to_cpu(pRecvRep->PacketLength);
	if (len == 0) {
		printk (KERN_ERR MYNAM ": %s/%s: ERROR - Got a non-TURBO "
			"ReceivePostReply w/ PacketLength zero!\n",
				IOC_AND_NETDEV_NAMES_s_s(dev));
		printk (KERN_ERR MYNAM ": MsgFlags = %02x, IOCStatus = %04x\n",
				pRecvRep->MsgFlags, le16_to_cpu(pRecvRep->IOCStatus));
		return -1;
	}

	ctx    = le32_to_cpu(pRecvRep->BucketContext[0]);
	count  = pRecvRep->NumberOfContexts;
	skb    = priv->RcvCtl[ctx].skb;

	offset = le32_to_cpu(pRecvRep->PacketOffset);
//	if (offset != 0) {
//		printk (KERN_INFO MYNAM ": %s/%s: Got a ReceivePostReply "
//			"w/ PacketOffset %u\n",
//				IOC_AND_NETDEV_NAMES_s_s(dev),
//				offset);
//	}

	dioprintk((KERN_INFO MYNAM ": %s/%s: @rpr, offset = %d, len = %d\n",
			IOC_AND_NETDEV_NAMES_s_s(dev),
			offset, len));

	if (count > 1) {
		int szrem = len;

//		dioprintk((KERN_INFO MYNAM ": %s/%s: Multiple buckets returned "
//			"for single packet, concatenating...\n",
//				IOC_AND_NETDEV_NAMES_s_s(dev)));

		skb = (struct sk_buff *)dev_alloc_skb(len);
		if (!skb) {
			printk (KERN_ERR MYNAM ": %s/%s: ERROR - Can't allocate skb! (%s@%d)\n",
					IOC_AND_NETDEV_NAMES_s_s(dev),
					__FILE__, __LINE__);
			return -ENOMEM;
		}

		spin_lock_irqsave(&priv->rxfidx_lock, flags);
		for (i = 0; i < count; i++) {

			ctx = le32_to_cpu(pRecvRep->BucketContext[i]);
			old_skb = priv->RcvCtl[ctx].skb;

			l = priv->RcvCtl[ctx].len;
			if (szrem < l)
				l = szrem;

//			dioprintk((KERN_INFO MYNAM ": %s/%s: Buckets = %d, len = %u\n",
//					IOC_AND_NETDEV_NAMES_s_s(dev),
//					i, l));

			pci_dma_sync_single_for_cpu(mpt_dev->pcidev,
						    priv->RcvCtl[ctx].dma,
						    priv->RcvCtl[ctx].len,
						    PCI_DMA_FROMDEVICE);
			skb_copy_from_linear_data(old_skb, skb_put(skb, l), l);

			pci_dma_sync_single_for_device(mpt_dev->pcidev,
						       priv->RcvCtl[ctx].dma,
						       priv->RcvCtl[ctx].len,
						       PCI_DMA_FROMDEVICE);

			priv->mpt_rxfidx[++priv->mpt_rxfidx_tail] = ctx;
			szrem -= l;
		}
		spin_unlock_irqrestore(&priv->rxfidx_lock, flags);

	} else if (len < MPT_LAN_RX_COPYBREAK) {

		old_skb = skb;

		skb = (struct sk_buff *)dev_alloc_skb(len);
		if (!skb) {
			printk (KERN_ERR MYNAM ": %s/%s: ERROR - Can't allocate skb! (%s@%d)\n",
					IOC_AND_NETDEV_NAMES_s_s(dev),
					__FILE__, __LINE__);
			return -ENOMEM;
		}

		pci_dma_sync_single_for_cpu(mpt_dev->pcidev,
					    priv->RcvCtl[ctx].dma,
					    priv->RcvCtl[ctx].len,
					    PCI_DMA_FROMDEVICE);

		skb_copy_from_linear_data(old_skb, skb_put(skb, len), len);

		pci_dma_sync_single_for_device(mpt_dev->pcidev,
					       priv->RcvCtl[ctx].dma,
					       priv->RcvCtl[ctx].len,
					       PCI_DMA_FROMDEVICE);

		spin_lock_irqsave(&priv->rxfidx_lock, flags);
		priv->mpt_rxfidx[++priv->mpt_rxfidx_tail] = ctx;
		spin_unlock_irqrestore(&priv->rxfidx_lock, flags);

	} else {
		spin_lock_irqsave(&priv->rxfidx_lock, flags);

		priv->RcvCtl[ctx].skb = NULL;

		pci_unmap_single(mpt_dev->pcidev, priv->RcvCtl[ctx].dma,
				 priv->RcvCtl[ctx].len, PCI_DMA_FROMDEVICE);
		priv->RcvCtl[ctx].dma = 0;

		priv->mpt_rxfidx[++priv->mpt_rxfidx_tail] = ctx;
		spin_unlock_irqrestore(&priv->rxfidx_lock, flags);

		skb_put(skb, len);
	}

	atomic_sub(count, &priv->buckets_out);
	priv->total_received += count;

	if (priv->mpt_rxfidx_tail >= MPT_LAN_MAX_BUCKETS_OUT) {
		printk (KERN_ERR MYNAM ": %s/%s: Yoohoo! mpt_rxfidx_tail = %d, "
			"MPT_LAN_MAX_BUCKETS_OUT = %d\n",
				IOC_AND_NETDEV_NAMES_s_s(dev),
				priv->mpt_rxfidx_tail,
				MPT_LAN_MAX_BUCKETS_OUT);

		return -1;
	}

	if (remaining == 0)
		printk (KERN_WARNING MYNAM ": %s/%s: WARNING - IOC out of buckets! "
			"(priv->buckets_out = %d)\n",
			IOC_AND_NETDEV_NAMES_s_s(dev),
			atomic_read(&priv->buckets_out));
	else if (remaining < 10)
		printk (KERN_INFO MYNAM ": %s/%s: IOC says %d buckets left. "
			"(priv->buckets_out = %d)\n",
			IOC_AND_NETDEV_NAMES_s_s(dev),
			remaining, atomic_read(&priv->buckets_out));

	if ((remaining < priv->bucketthresh) &&
	    ((atomic_read(&priv->buckets_out) - remaining) >
	     MPT_LAN_BUCKETS_REMAIN_MISMATCH_THRESH)) {

		printk (KERN_WARNING MYNAM " Mismatch between driver's "
			"buckets_out count and fw's BucketsRemaining "
			"count has crossed the threshold, issuing a "
			"LanReset to clear the fw's hashtable. You may "
			"want to check your /var/log/messages for \"CRC "
			"error\" event notifications.\n");

		mpt_lan_reset(dev);
		mpt_lan_wake_post_buckets_task(dev, 0);
	}

	return mpt_lan_receive_skb(dev, skb);
}

/*=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=*/
/* Simple SGE's only at the moment */

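/*
 * Refill the IOC's supply of receive buckets.  Each request frame holds
 * up to `max' buckets, where a bucket costs one 32-bit-context
 * transaction element plus one 64-bit Simple SGE -- hence the req_sz
 * arithmetic below.  Buckets are sized for a full MTU frame plus the
 * hardware header and 4 extra bytes.
 */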
static void
mpt_lan_post_receive_buckets(struct mpt_lan_priv *priv)
{
	struct net_device *dev = priv->dev;
	MPT_ADAPTER *mpt_dev = priv->mpt_dev;
	MPT_FRAME_HDR *mf;
	LANReceivePostRequest_t *pRecvReq;
	SGETransaction32_t *pTrans;
	SGESimple64_t *pSimple;
	struct sk_buff *skb;
	dma_addr_t dma;
	u32 curr, buckets, count, max;
	u32 len = (dev->mtu + dev->hard_header_len + 4);
	unsigned long flags;
	int i;

	curr = atomic_read(&priv->buckets_out);
	buckets = (priv->max_buckets_out - curr);

	dioprintk((KERN_INFO MYNAM ": %s/%s: @%s, Start_buckets = %u, buckets_out = %u\n",
			IOC_AND_NETDEV_NAMES_s_s(dev),
			__func__, buckets, curr));

	max = (mpt_dev->req_sz - MPT_LAN_RECEIVE_POST_REQUEST_SIZE) /
			(MPT_LAN_TRANSACTION32_SIZE + sizeof(SGESimple64_t));

	while (buckets) {
		mf = mpt_get_msg_frame(LanCtx, mpt_dev);
		if (mf == NULL) {
			printk (KERN_ERR "%s: Unable to alloc request frame\n",
				__func__);
			dioprintk((KERN_ERR "%s: %u buckets remaining\n",
				 __func__, buckets));
			goto out;
		}
		pRecvReq = (LANReceivePostRequest_t *) mf;

		i = le16_to_cpu(mf->u.frame.hwhdr.msgctxu.fld.req_idx);
		mpt_dev->RequestNB[i] = 0;
		count = buckets;
		if (count > max)
			count = max;

		pRecvReq->Function    = MPI_FUNCTION_LAN_RECEIVE;
		pRecvReq->ChainOffset = 0;
		pRecvReq->MsgFlags    = 0;
		pRecvReq->PortNumber  = priv->pnum;

		pTrans = (SGETransaction32_t *) pRecvReq->SG_List;
		pSimple = NULL;

		for (i = 0; i < count; i++) {
			int ctx;

			spin_lock_irqsave(&priv->rxfidx_lock, flags);
			if (priv->mpt_rxfidx_tail < 0) {
				printk (KERN_ERR "%s: Can't alloc context\n",
					__func__);
				spin_unlock_irqrestore(&priv->rxfidx_lock,
						       flags);
				break;
			}

			ctx = priv->mpt_rxfidx[priv->mpt_rxfidx_tail--];

			skb = priv->RcvCtl[ctx].skb;
			if (skb && (priv->RcvCtl[ctx].len != len)) {
				pci_unmap_single(mpt_dev->pcidev,
						 priv->RcvCtl[ctx].dma,
						 priv->RcvCtl[ctx].len,
						 PCI_DMA_FROMDEVICE);
				dev_kfree_skb(priv->RcvCtl[ctx].skb);
				skb = priv->RcvCtl[ctx].skb = NULL;
			}

			if (skb == NULL) {
				skb = dev_alloc_skb(len);
				if (skb == NULL) {
					printk (KERN_WARNING
						MYNAM "/%s: Can't alloc skb\n",
						__func__);
					priv->mpt_rxfidx[++priv->mpt_rxfidx_tail] = ctx;
					spin_unlock_irqrestore(&priv->rxfidx_lock, flags);
					break;
				}

				dma = pci_map_single(mpt_dev->pcidev, skb->data,
						     len, PCI_DMA_FROMDEVICE);

				priv->RcvCtl[ctx].skb = skb;
				priv->RcvCtl[ctx].dma = dma;
				priv->RcvCtl[ctx].len = len;
			}

			spin_unlock_irqrestore(&priv->rxfidx_lock, flags);

			pTrans->ContextSize   = sizeof(u32);
			pTrans->DetailsLength = 0;
			pTrans->Flags         = 0;
			pTrans->TransactionContext[0] = cpu_to_le32(ctx);

			pSimple = (SGESimple64_t *) pTrans->TransactionDetails;

			pSimple->FlagsLength = cpu_to_le32(
				((MPI_SGE_FLAGS_END_OF_BUFFER |
				  MPI_SGE_FLAGS_SIMPLE_ELEMENT |
				  MPI_SGE_FLAGS_64_BIT_ADDRESSING) << MPI_SGE_FLAGS_SHIFT) | len);
			pSimple->Address.Low = cpu_to_le32((u32) priv->RcvCtl[ctx].dma);
			if (sizeof(dma_addr_t) > sizeof(u32))
				pSimple->Address.High = cpu_to_le32((u32) ((u64) priv->RcvCtl[ctx].dma >> 32));
			else
				pSimple->Address.High = 0;

			pTrans = (SGETransaction32_t *) (pSimple + 1);
		}

		if (pSimple == NULL) {
/**/			printk (KERN_WARNING MYNAM "/%s: No buckets posted\n",
/**/				__func__);
			mpt_free_msg_frame(mpt_dev, mf);
			goto out;
		}

		pSimple->FlagsLength |= cpu_to_le32(MPI_SGE_FLAGS_END_OF_LIST << MPI_SGE_FLAGS_SHIFT);

		pRecvReq->BucketCount = cpu_to_le32(i);

/*	printk(KERN_INFO MYNAM ": posting buckets\n   ");
 *	for (i = 0; i < j + 2; i ++)
 *	    printk (" %08x", le32_to_cpu(msg[i]));
 *	printk ("\n");
 */

		mpt_put_msg_frame(LanCtx, mpt_dev, mf);

		priv->total_posted += i;
		buckets -= i;
		atomic_add(i, &priv->buckets_out);
	}

out:
	dioprintk((KERN_INFO MYNAM "/%s: End_buckets = %u, priv->buckets_out = %u\n",
		  __func__, buckets, atomic_read(&priv->buckets_out)));
	dioprintk((KERN_INFO MYNAM "/%s: Posted %u buckets and received %u back\n",
	__func__, priv->total_posted, priv->total_received));

	clear_bit(0, &priv->post_buckets_active);
}

static void
mpt_lan_post_receive_buckets_work(struct work_struct *work)
{
	mpt_lan_post_receive_buckets(container_of(work, struct mpt_lan_priv,
						  post_buckets_task.work));
}

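/*
 * Standard net_device callbacks: the networking core calls these for
 * ifup/ifdown, packet transmission, MTU changes and Tx watchdog
 * timeouts respectively.
 */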
static const struct net_device_ops mpt_netdev_ops = {
	.ndo_open       = mpt_lan_open,
	.ndo_stop       = mpt_lan_close,
	.ndo_start_xmit = mpt_lan_sdu_send,
	.ndo_change_mtu = mpt_lan_change_mtu,
	.ndo_tx_timeout = mpt_lan_tx_timeout,
};

/*=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=*/
static struct net_device *
mpt_register_lan_device (MPT_ADAPTER *mpt_dev, int pnum)
{
	struct net_device *dev;
	struct mpt_lan_priv *priv;
	u8 HWaddr[FC_ALEN], *a;

	dev = alloc_fcdev(sizeof(struct mpt_lan_priv));
	if (!dev)
		return NULL;

	dev->mtu = MPT_LAN_MTU;

	priv = netdev_priv(dev);

	priv->dev = dev;
	priv->mpt_dev = mpt_dev;
	priv->pnum = pnum;

	INIT_DELAYED_WORK(&priv->post_buckets_task,
			  mpt_lan_post_receive_buckets_work);
	priv->post_buckets_active = 0;

	dlprintk((KERN_INFO MYNAM "@%d: bucketlen = %d\n",
			__LINE__, dev->mtu + dev->hard_header_len + 4));

	atomic_set(&priv->buckets_out, 0);
	priv->total_posted = 0;
	priv->total_received = 0;
	priv->max_buckets_out = max_buckets_out;
	if (mpt_dev->pfacts[0].MaxLanBuckets < max_buckets_out)
		priv->max_buckets_out = mpt_dev->pfacts[0].MaxLanBuckets;

	dlprintk((KERN_INFO MYNAM "@%d: MaxLanBuckets=%d, max_buckets_out/priv=%d/%d\n",
			__LINE__,
			mpt_dev->pfacts[0].MaxLanBuckets,
			max_buckets_out,
			priv->max_buckets_out));

	priv->bucketthresh = priv->max_buckets_out * 2 / 3;
	spin_lock_init(&priv->txfidx_lock);
	spin_lock_init(&priv->rxfidx_lock);

	/*  Grab pre-fetched LANPage1 stuff. :-) */
	a = (u8 *) &mpt_dev->lan_cnfg_page1.HardwareAddressLow;

	HWaddr[0] = a[5];
	HWaddr[1] = a[4];
	HWaddr[2] = a[3];
	HWaddr[3] = a[2];
	HWaddr[4] = a[1];
	HWaddr[5] = a[0];

	dev->addr_len = FC_ALEN;
	memcpy(dev->dev_addr, HWaddr, FC_ALEN);
	memset(dev->broadcast, 0xff, FC_ALEN);

	/* The Tx queue is 127 deep on the 909.
	 * Give ourselves some breathing room.
	 */
	priv->tx_max_out = (tx_max_out_p <= MPT_TX_MAX_OUT_LIM) ?
			    tx_max_out_p : MPT_TX_MAX_OUT_LIM;

	dev->netdev_ops = &mpt_netdev_ops;
	dev->watchdog_timeo = MPT_LAN_TX_TIMEOUT;

	dlprintk((KERN_INFO MYNAM ": Finished registering dev "
		"and setting initial values\n"));

	if (register_netdev(dev) != 0) {
		free_netdev(dev);
		dev = NULL;
	}
	return dev;
}

static int
mptlan_probe(struct pci_dev *pdev, const struct pci_device_id *id)
{
	MPT_ADAPTER		*ioc = pci_get_drvdata(pdev);
	struct net_device	*dev;
	int			i;

	for (i = 0; i < ioc->facts.NumberOfPorts; i++) {
		printk(KERN_INFO MYNAM ": %s: PortNum=%x, "
		       "ProtocolFlags=%02Xh (%c%c%c%c)\n",
		       ioc->name, ioc->pfacts[i].PortNumber,
		       ioc->pfacts[i].ProtocolFlags,
		       MPT_PROTOCOL_FLAGS_c_c_c_c(
			       ioc->pfacts[i].ProtocolFlags));

		if (!(ioc->pfacts[i].ProtocolFlags &
					MPI_PORTFACTS_PROTOCOL_LAN)) {
			printk(KERN_INFO MYNAM ": %s: Hmmm... LAN protocol "
			       "seems to be disabled on this adapter port!\n",
			       ioc->name);
			continue;
		}

		dev = mpt_register_lan_device(ioc, i);
		if (!dev) {
			printk(KERN_ERR MYNAM ": %s: Unable to register "
			       "port%d as a LAN device\n", ioc->name,
			       ioc->pfacts[i].PortNumber);
			continue;
		}

		printk(KERN_INFO MYNAM ": %s: Fusion MPT LAN device "
		       "registered as '%s'\n", ioc->name, dev->name);
		printk(KERN_INFO MYNAM ": %s/%s: "
		       "LanAddr = %pM\n",
		       IOC_AND_NETDEV_NAMES_s_s(dev),
		       dev->dev_addr);

		ioc->netdev = dev;

		return 0;
	}

	return -ENODEV;
}

static void
mptlan_remove(struct pci_dev *pdev)
{
	MPT_ADAPTER		*ioc = pci_get_drvdata(pdev);
	struct net_device	*dev = ioc->netdev;

	if (dev != NULL) {
		unregister_netdev(dev);
		free_netdev(dev);
	}
}

static struct mpt_pci_driver mptlan_driver = {
	.probe		= mptlan_probe,
	.remove		= mptlan_remove,
};

static int __init mpt_lan_init (void)
{
	show_mptmod_ver(LANAME, LANVER);

	if ((LanCtx = mpt_register(lan_reply, MPTLAN_DRIVER)) <= 0) {
		printk (KERN_ERR MYNAM ": Failed to register with MPT base driver\n");
		return -EBUSY;
	}

	dlprintk((KERN_INFO MYNAM ": assigned context of %d\n", LanCtx));

	if (mpt_reset_register(LanCtx, mpt_lan_ioc_reset)) {
		printk(KERN_ERR MYNAM ": Eieee! unable to register a reset "
		       "handler with mptbase! The world is at an end! "
		       "Everything is fading to black! Goodbye.\n");
		return -EBUSY;
	}

	dlprintk((KERN_INFO MYNAM ": Registered for IOC reset notifications\n"));

	mpt_device_driver_register(&mptlan_driver, MPTLAN_DRIVER);
	return 0;
}

static void __exit mpt_lan_exit(void)
{
	mpt_device_driver_deregister(MPTLAN_DRIVER);
	mpt_reset_deregister(LanCtx);

	if (LanCtx) {
		mpt_deregister(LanCtx);
		LanCtx = MPT_MAX_PROTOCOL_DRIVERS;
	}
}

module_init(mpt_lan_init);
module_exit(mpt_lan_exit);

/*=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=*/
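/*
 * FC analogue of eth_type_trans(): strip the encapsulation header,
 * classify the packet from the destination address (group bit set means
 * broadcast or multicast), and peel the 802.2/SNAP header off IP and
 * ARP frames so they surface with the right protocol value.
 */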
static unsigned short
mpt_lan_type_trans(struct sk_buff *skb, struct net_device *dev)
{
	struct mpt_lan_ohdr *fch = (struct mpt_lan_ohdr *)skb->data;
	struct fcllc *fcllc;

	skb_reset_mac_header(skb);
	skb_pull(skb, sizeof(struct mpt_lan_ohdr));

	if (fch->dtype == htons(0xffff)) {
		u32 *p = (u32 *) fch;

		swab32s(p + 0);
		swab32s(p + 1);
		swab32s(p + 2);
		swab32s(p + 3);

		printk (KERN_WARNING MYNAM ": %s: WARNING - Broadcast swap F/W bug detected!\n",
				NETDEV_PTR_TO_IOC_NAME_s(dev));
		printk (KERN_WARNING MYNAM ": Please update sender @ MAC_addr = %pM\n",
				fch->saddr);
	}

	if (*fch->daddr & 1) {
		if (!memcmp(fch->daddr, dev->broadcast, FC_ALEN)) {
			skb->pkt_type = PACKET_BROADCAST;
		} else {
			skb->pkt_type = PACKET_MULTICAST;
		}
	} else {
		if (memcmp(fch->daddr, dev->dev_addr, FC_ALEN)) {
			skb->pkt_type = PACKET_OTHERHOST;
		} else {
			skb->pkt_type = PACKET_HOST;
		}
	}

	fcllc = (struct fcllc *)skb->data;

	/* Strip the SNAP header from ARP packets since we don't
	 * pass them through to the 802.2/SNAP layers.
	 */
	if (fcllc->dsap == EXTENDED_SAP &&
		(fcllc->ethertype == htons(ETH_P_IP) ||
		 fcllc->ethertype == htons(ETH_P_ARP))) {
		skb_pull(skb, sizeof(struct fcllc));
		return fcllc->ethertype;
	}

	return htons(ETH_P_802_2);
}

/*=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=*/
1541