/*
 *  linux/drivers/message/fusion/mptlan.c
 *      IP Over Fibre Channel device driver.
 *      For use with LSI Fibre Channel PCI chip/adapters
 *      running LSI Fusion MPT (Message Passing Technology) firmware.
 *
 *  Copyright (c) 2000-2008 LSI Corporation
 *  (mailto:DL-MPTFusionLinux@lsi.com)
 *
 */
/*=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=*/
/*
    This program is free software; you can redistribute it and/or modify
    it under the terms of the GNU General Public License as published by
    the Free Software Foundation; version 2 of the License.

    This program is distributed in the hope that it will be useful,
    but WITHOUT ANY WARRANTY; without even the implied warranty of
    MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
    GNU General Public License for more details.

    NO WARRANTY
    THE PROGRAM IS PROVIDED ON AN "AS IS" BASIS, WITHOUT WARRANTIES OR
    CONDITIONS OF ANY KIND, EITHER EXPRESS OR IMPLIED INCLUDING, WITHOUT
    LIMITATION, ANY WARRANTIES OR CONDITIONS OF TITLE, NON-INFRINGEMENT,
    MERCHANTABILITY OR FITNESS FOR A PARTICULAR PURPOSE. Each Recipient is
    solely responsible for determining the appropriateness of using and
    distributing the Program and assumes all risks associated with its
    exercise of rights under this Agreement, including but not limited to
    the risks and costs of program errors, damage to or loss of data,
    programs or equipment, and unavailability or interruption of operations.

    DISCLAIMER OF LIABILITY
    NEITHER RECIPIENT NOR ANY CONTRIBUTORS SHALL HAVE ANY LIABILITY FOR ANY
    DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
    DAMAGES (INCLUDING WITHOUT LIMITATION LOST PROFITS), HOWEVER CAUSED AND
    ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR
    TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE
    USE OR DISTRIBUTION OF THE PROGRAM OR THE EXERCISE OF ANY RIGHTS GRANTED
    HEREUNDER, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGES

    You should have received a copy of the GNU General Public License
    along with this program; if not, write to the Free Software
    Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA  02111-1307  USA
*/

/*=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=*/
/*
 * Define statements used for debugging
 */
//#define MPT_LAN_IO_DEBUG

/*=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=*/

#include "mptlan.h"
#include <linux/init.h>
#include <linux/module.h>
#include <linux/fs.h>
#include <linux/sched.h>
#include <linux/slab.h>

#define my_VERSION	MPT_LINUX_VERSION_COMMON
#define MYNAM		"mptlan"

MODULE_LICENSE("GPL");
MODULE_VERSION(my_VERSION);

/*=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=*/
/*
 * MPT LAN message sizes without variable part.
 */
#define MPT_LAN_RECEIVE_POST_REQUEST_SIZE \
	(sizeof(LANReceivePostRequest_t) - sizeof(SGE_MPI_UNION))
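
/*
 * MPT_LAN_RECEIVE_POST_REQUEST_SIZE is the fixed part of a
 * LANReceivePostRequest, i.e. the header without its trailing SGE
 * union.  mpt_lan_post_receive_buckets() subtracts it from the
 * adapter's request frame size to work out how many bucket SGEs fit
 * into a single request frame.
 */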

/*
 *  Fusion MPT LAN private structures
 */

struct BufferControl {
	struct sk_buff	*skb;
	dma_addr_t	dma;
	unsigned int	len;
};

struct mpt_lan_priv {
	MPT_ADAPTER *mpt_dev;
	u8 pnum; /* Port number in the IOC. This is not a Unix network port! */

	atomic_t buckets_out;		/* number of unused buckets on IOC */
	int bucketthresh;		/* Send more when this many left */

	int *mpt_txfidx; /* Free Tx Context list */
	int mpt_txfidx_tail;
	spinlock_t txfidx_lock;

	int *mpt_rxfidx; /* Free Rx Context list */
	int mpt_rxfidx_tail;
	spinlock_t rxfidx_lock;

	struct BufferControl *RcvCtl;	/* Receive BufferControl structs */
	struct BufferControl *SendCtl;	/* Send BufferControl structs */

	int max_buckets_out;		/* Max buckets to send to IOC */
	int tx_max_out;			/* IOC's Tx queue len */

	u32 total_posted;
	u32 total_received;

	struct delayed_work post_buckets_task;
	struct net_device *dev;
	unsigned long post_buckets_active;
};
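
/*
 * mpt_txfidx/mpt_rxfidx are LIFO stacks of free context indices, each
 * guarded by its spinlock.  A minimal sketch of the discipline used
 * throughout this file (illustration only, not driver code):
 *
 *	spin_lock_irqsave(&priv->txfidx_lock, flags);
 *	ctx = priv->mpt_txfidx[priv->mpt_txfidx_tail--];	// pop
 *	...
 *	priv->mpt_txfidx[++priv->mpt_txfidx_tail] = ctx;	// push
 *	spin_unlock_irqrestore(&priv->txfidx_lock, flags);
 *
 * A tail of -1 means the stack is empty, i.e. no free contexts left.
 */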

struct mpt_lan_ohdr {
	u16	dtype;
	u8	daddr[FC_ALEN];
	u16	stype;
	u8	saddr[FC_ALEN];
};

/*=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=*/

/*
 *  Forward protos...
 */
static int  lan_reply (MPT_ADAPTER *ioc, MPT_FRAME_HDR *mf,
		       MPT_FRAME_HDR *reply);
static int  mpt_lan_open(struct net_device *dev);
static int  mpt_lan_reset(struct net_device *dev);
static int  mpt_lan_close(struct net_device *dev);
static void mpt_lan_post_receive_buckets(struct mpt_lan_priv *priv);
static void mpt_lan_wake_post_buckets_task(struct net_device *dev,
					   int priority);
static int  mpt_lan_receive_post_turbo(struct net_device *dev, u32 tmsg);
static int  mpt_lan_receive_post_reply(struct net_device *dev,
				       LANReceivePostReply_t *pRecvRep);
static int  mpt_lan_send_turbo(struct net_device *dev, u32 tmsg);
static int  mpt_lan_send_reply(struct net_device *dev,
			       LANSendReply_t *pSendRep);
static int  mpt_lan_ioc_reset(MPT_ADAPTER *ioc, int reset_phase);
static int  mpt_lan_event_process(MPT_ADAPTER *ioc, EventNotificationReply_t *pEvReply);
static unsigned short mpt_lan_type_trans(struct sk_buff *skb,
					 struct net_device *dev);

/*=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=*/
/*
 *  Fusion MPT LAN private data
 */
static u8 LanCtx = MPT_MAX_PROTOCOL_DRIVERS;

static u32 max_buckets_out = 127;
static u32 tx_max_out_p = 127 - 16;
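
/*
 * These are compile-time defaults; the per-port values are clamped
 * against the IOC's PortFacts in mpt_register_lan_device().  They are
 * not module parameters today; a sketch of exposing them (hypothetical,
 * not part of this driver) would be:
 *
 *	module_param(max_buckets_out, uint, 0444);
 *	MODULE_PARM_DESC(max_buckets_out, "Max Rx buckets posted to the IOC");
 */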

/*=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=*/
/**
 *	lan_reply - Handle all data sent from the hardware.
 *	@ioc: Pointer to MPT_ADAPTER structure
 *	@mf: Pointer to original MPT request frame (NULL if TurboReply)
 *	@reply: Pointer to MPT reply frame
 *
 *	Returns 1 indicating original alloc'd request frame ptr
 *	should be freed, or 0 if it shouldn't.
 */
static int
lan_reply (MPT_ADAPTER *ioc, MPT_FRAME_HDR *mf, MPT_FRAME_HDR *reply)
{
	struct net_device *dev = ioc->netdev;
	int FreeReqFrame = 0;

	dioprintk((KERN_INFO MYNAM ": %s/%s: Got reply.\n",
		  IOC_AND_NETDEV_NAMES_s_s(dev)));

//	dioprintk((KERN_INFO MYNAM "@lan_reply: mf = %p, reply = %p\n",
//			mf, reply));

	if (mf == NULL) {
		u32 tmsg = CAST_PTR_TO_U32(reply);

		dioprintk((KERN_INFO MYNAM ": %s/%s: @lan_reply, tmsg %08x\n",
				IOC_AND_NETDEV_NAMES_s_s(dev),
				tmsg));

		switch (GET_LAN_FORM(tmsg)) {

		// NOTE!  (Optimization) First case here is now caught in
		//  mptbase.c::mpt_interrupt() routine and callback here
		//  is now skipped for this case!
#if 0
		case LAN_REPLY_FORM_MESSAGE_CONTEXT:
//			dioprintk((KERN_INFO MYNAM "/lan_reply: "
//				  "MessageContext turbo reply received\n"));
			FreeReqFrame = 1;
			break;
#endif

		case LAN_REPLY_FORM_SEND_SINGLE:
//			dioprintk((MYNAM "/lan_reply: "
//				  "calling mpt_lan_send_reply (turbo)\n"));

			// Potential BUG here?
			//	FreeReqFrame = mpt_lan_send_turbo(dev, tmsg);
			//  If/when mpt_lan_send_turbo would return 1 here,
			//  calling routine (mptbase.c|mpt_interrupt)
			//  would Oops because mf has already been set
			//  to NULL.  So after return from this func,
			//  mpt_interrupt() will attempt to put (NULL) mf ptr
			//  item back onto its adapter FreeQ - Oops!:-(
			//  It's Ok, since mpt_lan_send_turbo() *currently*
			//  always returns 0, but..., just in case:

			(void) mpt_lan_send_turbo(dev, tmsg);
			FreeReqFrame = 0;

			break;

		case LAN_REPLY_FORM_RECEIVE_SINGLE:
//			dioprintk((KERN_INFO MYNAM "@lan_reply: "
//				  "rcv-Turbo = %08x\n", tmsg));
			mpt_lan_receive_post_turbo(dev, tmsg);
			break;

		default:
			printk (KERN_ERR MYNAM "/lan_reply: Got a turbo reply "
				"that I don't know what to do with\n");

			/* CHECKME!  Hmmm...  FreeReqFrame is 0 here; is that right? */

			break;
		}

		return FreeReqFrame;
	}

//	msg = (u32 *) reply;
//	dioprintk((KERN_INFO MYNAM "@lan_reply: msg = %08x %08x %08x %08x\n",
//		  le32_to_cpu(msg[0]), le32_to_cpu(msg[1]),
//		  le32_to_cpu(msg[2]), le32_to_cpu(msg[3])));
//	dioprintk((KERN_INFO MYNAM "@lan_reply: Function = %02xh\n",
//		  reply->u.hdr.Function));

	switch (reply->u.hdr.Function) {

	case MPI_FUNCTION_LAN_SEND:
	{
		LANSendReply_t *pSendRep;

		pSendRep = (LANSendReply_t *) reply;
		FreeReqFrame = mpt_lan_send_reply(dev, pSendRep);
		break;
	}

	case MPI_FUNCTION_LAN_RECEIVE:
	{
		LANReceivePostReply_t *pRecvRep;

		pRecvRep = (LANReceivePostReply_t *) reply;
		if (pRecvRep->NumberOfContexts) {
			mpt_lan_receive_post_reply(dev, pRecvRep);
			if (!(pRecvRep->MsgFlags & MPI_MSGFLAGS_CONTINUATION_REPLY))
				FreeReqFrame = 1;
		} else
			dioprintk((KERN_INFO MYNAM "@lan_reply: zero context "
				  "ReceivePostReply received.\n"));
		break;
	}

	case MPI_FUNCTION_LAN_RESET:
		/* Just a default reply. Might want to check it to
		 * make sure that everything went ok.
		 */
		FreeReqFrame = 1;
		break;

	case MPI_FUNCTION_EVENT_NOTIFICATION:
	case MPI_FUNCTION_EVENT_ACK:
		/*  _EVENT_NOTIFICATION should NOT come down this path any more.
		 *  Should be routed to mpt_lan_event_process(), but just in case...
		 */
		FreeReqFrame = 1;
		break;

	default:
		printk (KERN_ERR MYNAM "/lan_reply: Got a non-turbo "
			"reply that I don't know what to do with\n");

		/* CHECKME!  Hmmm...  FreeReqFrame is 0 here; is that right? */
		FreeReqFrame = 1;

		break;
	}

	return FreeReqFrame;
}

/*=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=*/
static int
mpt_lan_ioc_reset(MPT_ADAPTER *ioc, int reset_phase)
{
	struct net_device *dev = ioc->netdev;
	struct mpt_lan_priv *priv;

	if (dev == NULL)
		return(1);
	else
		priv = netdev_priv(dev);

	dlprintk((KERN_INFO MYNAM ": IOC %s_reset routed to LAN driver!\n",
			reset_phase==MPT_IOC_SETUP_RESET ? "setup" : (
			reset_phase==MPT_IOC_PRE_RESET ? "pre" : "post")));

	if (priv->mpt_rxfidx == NULL)
		return (1);

	if (reset_phase == MPT_IOC_SETUP_RESET) {
		;
	} else if (reset_phase == MPT_IOC_PRE_RESET) {
		int i;
		unsigned long flags;

		netif_stop_queue(dev);

		dlprintk ((KERN_INFO "mptlan/ioc_reset: called netif_stop_queue for %s.\n", dev->name));

		atomic_set(&priv->buckets_out, 0);

		/* Reset Rx Free Tail index and re-populate the queue. */
		spin_lock_irqsave(&priv->rxfidx_lock, flags);
		priv->mpt_rxfidx_tail = -1;
		for (i = 0; i < priv->max_buckets_out; i++)
			priv->mpt_rxfidx[++priv->mpt_rxfidx_tail] = i;
		spin_unlock_irqrestore(&priv->rxfidx_lock, flags);
	} else {
		mpt_lan_post_receive_buckets(priv);
		netif_wake_queue(dev);
	}

	return 1;
}

/*=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=*/
static int
mpt_lan_event_process(MPT_ADAPTER *ioc, EventNotificationReply_t *pEvReply)
{
	dlprintk((KERN_INFO MYNAM ": MPT event routed to LAN driver!\n"));

	switch (le32_to_cpu(pEvReply->Event)) {
	case MPI_EVENT_NONE:				/* 00 */
	case MPI_EVENT_LOG_DATA:			/* 01 */
	case MPI_EVENT_STATE_CHANGE:			/* 02 */
	case MPI_EVENT_UNIT_ATTENTION:			/* 03 */
	case MPI_EVENT_IOC_BUS_RESET:			/* 04 */
	case MPI_EVENT_EXT_BUS_RESET:			/* 05 */
	case MPI_EVENT_RESCAN:				/* 06 */
		/* Ok, do we need to do anything here? As far as
		   I can tell, this is when a new device gets added
		   to the loop. */
	case MPI_EVENT_LINK_STATUS_CHANGE:		/* 07 */
	case MPI_EVENT_LOOP_STATE_CHANGE:		/* 08 */
	case MPI_EVENT_LOGOUT:				/* 09 */
	case MPI_EVENT_EVENT_CHANGE:			/* 0A */
	default:
		break;
	}

	/*
	 *  NOTE: pEvent->AckRequired handling now done in mptbase.c;
	 *  Do NOT do it here now!
	 */

	return 1;
}

/*=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=*/
static int
mpt_lan_open(struct net_device *dev)
{
	struct mpt_lan_priv *priv = netdev_priv(dev);
	int i;

	if (mpt_lan_reset(dev) != 0) {
		MPT_ADAPTER *mpt_dev = priv->mpt_dev;

		printk (KERN_WARNING MYNAM "/lan_open: lan_reset failed.");

		if (mpt_dev->active)
			printk ("The ioc is active. Perhaps it needs to be"
				" reset?\n");
		else
			printk ("The ioc is inactive, most likely in the "
				"process of being reset. Please try again in "
				"a moment.\n");
	}

	priv->mpt_txfidx = kmalloc_array(priv->tx_max_out, sizeof(int),
					 GFP_KERNEL);
	if (priv->mpt_txfidx == NULL)
		goto out;
	priv->mpt_txfidx_tail = -1;

	priv->SendCtl = kcalloc(priv->tx_max_out, sizeof(struct BufferControl),
				GFP_KERNEL);
	if (priv->SendCtl == NULL)
		goto out_mpt_txfidx;
	for (i = 0; i < priv->tx_max_out; i++)
		priv->mpt_txfidx[++priv->mpt_txfidx_tail] = i;

	dlprintk((KERN_INFO MYNAM "@lo: Finished initializing SendCtl\n"));

	priv->mpt_rxfidx = kmalloc_array(priv->max_buckets_out, sizeof(int),
					 GFP_KERNEL);
	if (priv->mpt_rxfidx == NULL)
		goto out_SendCtl;
	priv->mpt_rxfidx_tail = -1;

	priv->RcvCtl = kcalloc(priv->max_buckets_out,
			       sizeof(struct BufferControl),
			       GFP_KERNEL);
	if (priv->RcvCtl == NULL)
		goto out_mpt_rxfidx;
	for (i = 0; i < priv->max_buckets_out; i++)
		priv->mpt_rxfidx[++priv->mpt_rxfidx_tail] = i;

/**/	dlprintk((KERN_INFO MYNAM "/lo: txfidx contains - "));
/**/	for (i = 0; i < priv->tx_max_out; i++)
/**/		dlprintk((" %xh", priv->mpt_txfidx[i]));
/**/	dlprintk(("\n"));

	dlprintk((KERN_INFO MYNAM "/lo: Finished initializing RcvCtl\n"));

	mpt_lan_post_receive_buckets(priv);
	printk(KERN_INFO MYNAM ": %s/%s: interface up & active\n",
			IOC_AND_NETDEV_NAMES_s_s(dev));

	if (mpt_event_register(LanCtx, mpt_lan_event_process) != 0) {
		printk (KERN_WARNING MYNAM "/lo: Unable to register for Event"
			" Notifications. This is a bad thing! We're going "
			"ahead anyway, but I'd be leery of system stability "
			"at this point.\n");
	}

	netif_start_queue(dev);
	dlprintk((KERN_INFO MYNAM "/lo: Done.\n"));

	return 0;
out_mpt_rxfidx:
	kfree(priv->mpt_rxfidx);
	priv->mpt_rxfidx = NULL;
out_SendCtl:
	kfree(priv->SendCtl);
	priv->SendCtl = NULL;
out_mpt_txfidx:
	kfree(priv->mpt_txfidx);
	priv->mpt_txfidx = NULL;
out:	return -ENOMEM;
}

/*=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=*/
/* Send a LanReset message to the FW. This should result in the FW returning
   any buckets it still has. */
static int
mpt_lan_reset(struct net_device *dev)
{
	MPT_FRAME_HDR *mf;
	LANResetRequest_t *pResetReq;
	struct mpt_lan_priv *priv = netdev_priv(dev);

	mf = mpt_get_msg_frame(LanCtx, priv->mpt_dev);

	if (mf == NULL) {
/*		dlprintk((KERN_ERR MYNAM "/reset: Evil funkiness abounds! "
		"Unable to allocate a request frame.\n"));
*/
		return -1;
	}

	pResetReq = (LANResetRequest_t *) mf;

	pResetReq->Function	= MPI_FUNCTION_LAN_RESET;
	pResetReq->ChainOffset	= 0;
	pResetReq->Reserved	= 0;
	pResetReq->PortNumber	= priv->pnum;
	pResetReq->MsgFlags	= 0;
	pResetReq->Reserved2	= 0;

	mpt_put_msg_frame(LanCtx, priv->mpt_dev, mf);

	return 0;
}

/*=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=*/
static int
mpt_lan_close(struct net_device *dev)
{
	struct mpt_lan_priv *priv = netdev_priv(dev);
	MPT_ADAPTER *mpt_dev = priv->mpt_dev;
	unsigned long timeout;
	int i;

	dlprintk((KERN_INFO MYNAM ": mpt_lan_close called\n"));

	mpt_event_deregister(LanCtx);

	dlprintk((KERN_INFO MYNAM ":lan_close: Posted %d buckets "
		  "since driver was loaded, %d still out\n",
		  priv->total_posted, atomic_read(&priv->buckets_out)));

	netif_stop_queue(dev);

	mpt_lan_reset(dev);

	timeout = jiffies + 2 * HZ;
	while (atomic_read(&priv->buckets_out) && time_before(jiffies, timeout))
		schedule_timeout_interruptible(1);

	for (i = 0; i < priv->max_buckets_out; i++) {
		if (priv->RcvCtl[i].skb != NULL) {
/**/			dlprintk((KERN_INFO MYNAM "/lan_close: bucket %05x "
/**/				  "is still out\n", i));
			pci_unmap_single(mpt_dev->pcidev, priv->RcvCtl[i].dma,
					 priv->RcvCtl[i].len,
					 PCI_DMA_FROMDEVICE);
			dev_kfree_skb(priv->RcvCtl[i].skb);
		}
	}

	kfree(priv->RcvCtl);
	kfree(priv->mpt_rxfidx);

	for (i = 0; i < priv->tx_max_out; i++) {
		if (priv->SendCtl[i].skb != NULL) {
			pci_unmap_single(mpt_dev->pcidev, priv->SendCtl[i].dma,
					 priv->SendCtl[i].len,
					 PCI_DMA_TODEVICE);
			dev_kfree_skb(priv->SendCtl[i].skb);
		}
	}

	kfree(priv->SendCtl);
	kfree(priv->mpt_txfidx);

	atomic_set(&priv->buckets_out, 0);

	printk(KERN_INFO MYNAM ": %s/%s: interface down & inactive\n",
			IOC_AND_NETDEV_NAMES_s_s(dev));

	return 0;
}
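
/*
 * Note on the close path above: mpt_lan_reset() asks the FW to hand
 * back any buckets it still holds, and the 2*HZ poll loop gives it a
 * bounded window to do so.  Anything still outstanding after the
 * timeout is unmapped and freed by hand in the loops over RcvCtl and
 * SendCtl.
 */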

/*=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=*/
/* Tx timeout handler. */
static void
mpt_lan_tx_timeout(struct net_device *dev, unsigned int txqueue)
{
	struct mpt_lan_priv *priv = netdev_priv(dev);
	MPT_ADAPTER *mpt_dev = priv->mpt_dev;

	if (mpt_dev->active) {
		dlprintk (("mptlan/tx_timeout: calling netif_wake_queue for %s.\n", dev->name));
		netif_wake_queue(dev);
	}
}

/*=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=*/
//static inline int
static int
mpt_lan_send_turbo(struct net_device *dev, u32 tmsg)
{
	struct mpt_lan_priv *priv = netdev_priv(dev);
	MPT_ADAPTER *mpt_dev = priv->mpt_dev;
	struct sk_buff *sent;
	unsigned long flags;
	u32 ctx;

	ctx = GET_LAN_BUFFER_CONTEXT(tmsg);
	sent = priv->SendCtl[ctx].skb;

	dev->stats.tx_packets++;
	dev->stats.tx_bytes += sent->len;

	dioprintk((KERN_INFO MYNAM ": %s/%s: @%s, skb %p sent.\n",
			IOC_AND_NETDEV_NAMES_s_s(dev),
			__func__, sent));

	priv->SendCtl[ctx].skb = NULL;
	pci_unmap_single(mpt_dev->pcidev, priv->SendCtl[ctx].dma,
			 priv->SendCtl[ctx].len, PCI_DMA_TODEVICE);
	dev_kfree_skb_irq(sent);

	spin_lock_irqsave(&priv->txfidx_lock, flags);
	priv->mpt_txfidx[++priv->mpt_txfidx_tail] = ctx;
	spin_unlock_irqrestore(&priv->txfidx_lock, flags);

	netif_wake_queue(dev);
	return 0;
}

/*=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=*/
static int
mpt_lan_send_reply(struct net_device *dev, LANSendReply_t *pSendRep)
{
	struct mpt_lan_priv *priv = netdev_priv(dev);
	MPT_ADAPTER *mpt_dev = priv->mpt_dev;
	struct sk_buff *sent;
	unsigned long flags;
	int FreeReqFrame = 0;
	u32 *pContext;
	u32 ctx;
	u8 count;

	count = pSendRep->NumberOfContexts;

	dioprintk((KERN_INFO MYNAM ": send_reply: IOCStatus: %04x\n",
		 le16_to_cpu(pSendRep->IOCStatus)));

	/* Add check for Loginfo Flag in IOCStatus */

	switch (le16_to_cpu(pSendRep->IOCStatus) & MPI_IOCSTATUS_MASK) {
	case MPI_IOCSTATUS_SUCCESS:
		dev->stats.tx_packets += count;
		break;

	case MPI_IOCSTATUS_LAN_CANCELED:
	case MPI_IOCSTATUS_LAN_TRANSMIT_ABORTED:
		break;

	case MPI_IOCSTATUS_INVALID_SGL:
		dev->stats.tx_errors += count;
		printk (KERN_ERR MYNAM ": %s/%s: ERROR - Invalid SGL sent to IOC!\n",
				IOC_AND_NETDEV_NAMES_s_s(dev));
		goto out;

	default:
		dev->stats.tx_errors += count;
		break;
	}

	pContext = &pSendRep->BufferContext;

	spin_lock_irqsave(&priv->txfidx_lock, flags);
	while (count > 0) {
		ctx = GET_LAN_BUFFER_CONTEXT(le32_to_cpu(*pContext));

		sent = priv->SendCtl[ctx].skb;
		dev->stats.tx_bytes += sent->len;

		dioprintk((KERN_INFO MYNAM ": %s/%s: @%s, skb %p sent.\n",
				IOC_AND_NETDEV_NAMES_s_s(dev),
				__func__, sent));

		priv->SendCtl[ctx].skb = NULL;
		pci_unmap_single(mpt_dev->pcidev, priv->SendCtl[ctx].dma,
				 priv->SendCtl[ctx].len, PCI_DMA_TODEVICE);
		dev_kfree_skb_irq(sent);

		priv->mpt_txfidx[++priv->mpt_txfidx_tail] = ctx;

		pContext++;
		count--;
	}
	spin_unlock_irqrestore(&priv->txfidx_lock, flags);

out:
	if (!(pSendRep->MsgFlags & MPI_MSGFLAGS_CONTINUATION_REPLY))
		FreeReqFrame = 1;

	netif_wake_queue(dev);
	return FreeReqFrame;
}

/*=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=*/
static netdev_tx_t
mpt_lan_sdu_send (struct sk_buff *skb, struct net_device *dev)
{
	struct mpt_lan_priv *priv = netdev_priv(dev);
	MPT_ADAPTER *mpt_dev = priv->mpt_dev;
	MPT_FRAME_HDR *mf;
	LANSendRequest_t *pSendReq;
	SGETransaction32_t *pTrans;
	SGESimple64_t *pSimple;
	const unsigned char *mac;
	dma_addr_t dma;
	unsigned long flags;
	int ctx;
	u16 cur_naa = 0x1000;

	dioprintk((KERN_INFO MYNAM ": %s called, skb_addr = %p\n",
			__func__, skb));

	spin_lock_irqsave(&priv->txfidx_lock, flags);
	if (priv->mpt_txfidx_tail < 0) {
		netif_stop_queue(dev);
		spin_unlock_irqrestore(&priv->txfidx_lock, flags);

		printk (KERN_ERR "%s: no tx context available: %u\n",
			__func__, priv->mpt_txfidx_tail);
		return NETDEV_TX_BUSY;
	}

	mf = mpt_get_msg_frame(LanCtx, mpt_dev);
	if (mf == NULL) {
		netif_stop_queue(dev);
		spin_unlock_irqrestore(&priv->txfidx_lock, flags);

		printk (KERN_ERR "%s: Unable to alloc request frame\n",
			__func__);
		return NETDEV_TX_BUSY;
	}

	ctx = priv->mpt_txfidx[priv->mpt_txfidx_tail--];
	spin_unlock_irqrestore(&priv->txfidx_lock, flags);

//	dioprintk((KERN_INFO MYNAM ": %s/%s: Creating new msg frame (send).\n",
//			IOC_AND_NETDEV_NAMES_s_s(dev)));

	pSendReq = (LANSendRequest_t *) mf;

	/* Set the mac.raw pointer, since this apparently isn't getting
	 * done before we get the skb. Pull the data pointer past the mac data.
	 */
	skb_reset_mac_header(skb);
	skb_pull(skb, 12);

	dma = pci_map_single(mpt_dev->pcidev, skb->data, skb->len,
			     PCI_DMA_TODEVICE);

	priv->SendCtl[ctx].skb = skb;
	priv->SendCtl[ctx].dma = dma;
	priv->SendCtl[ctx].len = skb->len;

	/* Message Header */
	pSendReq->Reserved    = 0;
	pSendReq->Function    = MPI_FUNCTION_LAN_SEND;
	pSendReq->ChainOffset = 0;
	pSendReq->Reserved2   = 0;
	pSendReq->MsgFlags    = 0;
	pSendReq->PortNumber  = priv->pnum;

	/* Transaction Context Element */
	pTrans = (SGETransaction32_t *) pSendReq->SG_List;

	/* No Flags, 8 bytes of Details, 32-bit Context (bloody turbo replies) */
	pTrans->ContextSize   = sizeof(u32);
	pTrans->DetailsLength = 2 * sizeof(u32);
	pTrans->Flags         = 0;
	pTrans->TransactionContext = cpu_to_le32(ctx);

//	dioprintk((KERN_INFO MYNAM ": %s/%s: BC = %08x, skb = %p, buff = %p\n",
//			IOC_AND_NETDEV_NAMES_s_s(dev),
//			ctx, skb, skb->data));

	mac = skb_mac_header(skb);

	pTrans->TransactionDetails[0] = cpu_to_le32((cur_naa         << 16) |
						    (mac[0] <<  8) |
						    (mac[1] <<  0));
	pTrans->TransactionDetails[1] = cpu_to_le32((mac[2] << 24) |
						    (mac[3] << 16) |
						    (mac[4] <<  8) |
						    (mac[5] <<  0));

	pSimple = (SGESimple64_t *) &pTrans->TransactionDetails[2];

	/* If we ever decide to send more than one Simple SGE per LANSend, then
	   we will need to make sure that LAST_ELEMENT only gets set on the
	   last one. Otherwise, bad voodoo and evil funkiness will commence. */
	pSimple->FlagsLength = cpu_to_le32(
			((MPI_SGE_FLAGS_LAST_ELEMENT |
			  MPI_SGE_FLAGS_END_OF_BUFFER |
			  MPI_SGE_FLAGS_SIMPLE_ELEMENT |
			  MPI_SGE_FLAGS_SYSTEM_ADDRESS |
			  MPI_SGE_FLAGS_HOST_TO_IOC |
			  MPI_SGE_FLAGS_64_BIT_ADDRESSING |
			  MPI_SGE_FLAGS_END_OF_LIST) << MPI_SGE_FLAGS_SHIFT) |
			skb->len);
	pSimple->Address.Low = cpu_to_le32((u32) dma);
	if (sizeof(dma_addr_t) > sizeof(u32))
		pSimple->Address.High = cpu_to_le32((u32) ((u64) dma >> 32));
	else
		pSimple->Address.High = 0;

	mpt_put_msg_frame (LanCtx, mpt_dev, mf);
	netif_trans_update(dev);

	dioprintk((KERN_INFO MYNAM ": %s/%s: Sending packet. FlagsLength = %08x.\n",
			IOC_AND_NETDEV_NAMES_s_s(dev),
			le32_to_cpu(pSimple->FlagsLength)));

	return NETDEV_TX_OK;
}
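
/*
 * Both mpt_lan_sdu_send() above and mpt_lan_post_receive_buckets()
 * below open-code the same split of a dma_addr_t into the Low/High
 * halves of an SGESimple64_t.  A helper capturing that pattern might
 * look like the sketch below (the name is hypothetical; the driver
 * does not define it):
 *
 *	static inline void mpt_lan_sge_set_addr64(SGESimple64_t *sge,
 *						  dma_addr_t dma)
 *	{
 *		sge->Address.Low  = cpu_to_le32(lower_32_bits(dma));
 *		sge->Address.High = cpu_to_le32(upper_32_bits(dma));
 *	}
 */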

/*=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=*/
static void
mpt_lan_wake_post_buckets_task(struct net_device *dev, int priority)
/*
 * @priority: 0 = put it on the timer queue, 1 = put it on the immediate queue
 */
{
	struct mpt_lan_priv *priv = netdev_priv(dev);

	if (test_and_set_bit(0, &priv->post_buckets_active) == 0) {
		if (priority) {
			schedule_delayed_work(&priv->post_buckets_task, 0);
		} else {
			schedule_delayed_work(&priv->post_buckets_task, 1);
			dioprintk((KERN_INFO MYNAM ": post_buckets queued on "
				   "timer.\n"));
		}
		dioprintk((KERN_INFO MYNAM ": %s/%s: Queued post_buckets task.\n",
			   IOC_AND_NETDEV_NAMES_s_s(dev) ));
	}
}
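
/*
 * Bit 0 of post_buckets_active is a single-flight gate: it is set here
 * with test_and_set_bit() before queueing the work, and cleared by
 * mpt_lan_post_receive_buckets() once reposting completes, so at most
 * one post_buckets_task is pending at any time.
 */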

/*=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=*/
static int
mpt_lan_receive_skb(struct net_device *dev, struct sk_buff *skb)
{
	struct mpt_lan_priv *priv = netdev_priv(dev);

	skb->protocol = mpt_lan_type_trans(skb, dev);

	dioprintk((KERN_INFO MYNAM ": %s/%s: Incoming packet (%d bytes) "
		 "delivered to upper level.\n",
			IOC_AND_NETDEV_NAMES_s_s(dev), skb->len));

	dev->stats.rx_bytes += skb->len;
	dev->stats.rx_packets++;

	skb->dev = dev;
	netif_rx(skb);

	dioprintk((MYNAM "/receive_skb: %d buckets remaining\n",
		 atomic_read(&priv->buckets_out)));

	if (atomic_read(&priv->buckets_out) < priv->bucketthresh)
		mpt_lan_wake_post_buckets_task(dev, 1);

	dioprintk((KERN_INFO MYNAM "/receive_post_reply: %d buckets "
		  "remaining, %d received back since sod\n",
		  atomic_read(&priv->buckets_out), priv->total_received));

	return 0;
}
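
/*
 * Replenish policy: bucketthresh is two thirds of max_buckets_out (see
 * mpt_register_lan_device()), so with the default of 127 buckets the
 * repost task is kicked once fewer than 84 buckets remain posted to
 * the IOC.
 */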

/*=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=*/
//static inline int
static int
mpt_lan_receive_post_turbo(struct net_device *dev, u32 tmsg)
{
	struct mpt_lan_priv *priv = netdev_priv(dev);
	MPT_ADAPTER *mpt_dev = priv->mpt_dev;
	struct sk_buff *skb, *old_skb;
	unsigned long flags;
	u32 ctx, len;

	ctx = GET_LAN_BUCKET_CONTEXT(tmsg);
	skb = priv->RcvCtl[ctx].skb;

	len = GET_LAN_PACKET_LENGTH(tmsg);

	if (len < MPT_LAN_RX_COPYBREAK) {
		old_skb = skb;

		skb = (struct sk_buff *)dev_alloc_skb(len);
		if (!skb) {
			printk (KERN_ERR MYNAM ": %s/%s: ERROR - Can't allocate skb! (%s@%d)\n",
					IOC_AND_NETDEV_NAMES_s_s(dev),
					__FILE__, __LINE__);
			return -ENOMEM;
		}

		pci_dma_sync_single_for_cpu(mpt_dev->pcidev, priv->RcvCtl[ctx].dma,
					    priv->RcvCtl[ctx].len, PCI_DMA_FROMDEVICE);

		skb_copy_from_linear_data(old_skb, skb_put(skb, len), len);

		pci_dma_sync_single_for_device(mpt_dev->pcidev, priv->RcvCtl[ctx].dma,
					       priv->RcvCtl[ctx].len, PCI_DMA_FROMDEVICE);
		goto out;
	}

	skb_put(skb, len);

	priv->RcvCtl[ctx].skb = NULL;

	pci_unmap_single(mpt_dev->pcidev, priv->RcvCtl[ctx].dma,
			 priv->RcvCtl[ctx].len, PCI_DMA_FROMDEVICE);

out:
	spin_lock_irqsave(&priv->rxfidx_lock, flags);
	priv->mpt_rxfidx[++priv->mpt_rxfidx_tail] = ctx;
	spin_unlock_irqrestore(&priv->rxfidx_lock, flags);

	atomic_dec(&priv->buckets_out);
	priv->total_received++;

	return mpt_lan_receive_skb(dev, skb);
}

/*=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=*/
static int
mpt_lan_receive_post_free(struct net_device *dev,
			  LANReceivePostReply_t *pRecvRep)
{
	struct mpt_lan_priv *priv = netdev_priv(dev);
	MPT_ADAPTER *mpt_dev = priv->mpt_dev;
	unsigned long flags;
	struct sk_buff *skb;
	u32 ctx;
	int count;
	int i;

	count = pRecvRep->NumberOfContexts;

/**/	dlprintk((KERN_INFO MYNAM "/receive_post_reply: "
		  "IOC returned %d buckets, freeing them...\n", count));

	spin_lock_irqsave(&priv->rxfidx_lock, flags);
	for (i = 0; i < count; i++) {
		ctx = le32_to_cpu(pRecvRep->BucketContext[i]);

		skb = priv->RcvCtl[ctx].skb;

//		dlprintk((KERN_INFO MYNAM ": %s: dev_name = %s\n",
//				IOC_AND_NETDEV_NAMES_s_s(dev)));
//		dlprintk((KERN_INFO MYNAM "@rpr[2], priv = %p, buckets_out addr = %p",
//				priv, &(priv->buckets_out)));
//		dlprintk((KERN_INFO MYNAM "@rpr[2] TC + 3\n"));

		priv->RcvCtl[ctx].skb = NULL;
		pci_unmap_single(mpt_dev->pcidev, priv->RcvCtl[ctx].dma,
				 priv->RcvCtl[ctx].len, PCI_DMA_FROMDEVICE);
		dev_kfree_skb_any(skb);

		priv->mpt_rxfidx[++priv->mpt_rxfidx_tail] = ctx;
	}
	spin_unlock_irqrestore(&priv->rxfidx_lock, flags);

	atomic_sub(count, &priv->buckets_out);

//	for (i = 0; i < priv->max_buckets_out; i++)
//		if (priv->RcvCtl[i].skb != NULL)
//			dlprintk((KERN_INFO MYNAM "@rpr: bucket %03x "
//				  "is still out\n", i));

/*	dlprintk((KERN_INFO MYNAM "/receive_post_reply: freed %d buckets\n",
		  count));
*/
/**/	dlprintk((KERN_INFO MYNAM "@receive_post_reply: %d buckets "
/**/		  "remaining, %d received back since sod.\n",
/**/		  atomic_read(&priv->buckets_out), priv->total_received));
	return 0;
}

/*=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=*/
static int
mpt_lan_receive_post_reply(struct net_device *dev,
			   LANReceivePostReply_t *pRecvRep)
{
	struct mpt_lan_priv *priv = netdev_priv(dev);
	MPT_ADAPTER *mpt_dev = priv->mpt_dev;
	struct sk_buff *skb, *old_skb;
	unsigned long flags;
	u32 len, ctx, offset;
	u32 remaining = le32_to_cpu(pRecvRep->BucketsRemaining);
	int count;
	int i, l;

	dioprintk((KERN_INFO MYNAM ": mpt_lan_receive_post_reply called\n"));
	dioprintk((KERN_INFO MYNAM ": receive_post_reply: IOCStatus: %04x\n",
		 le16_to_cpu(pRecvRep->IOCStatus)));

	if ((le16_to_cpu(pRecvRep->IOCStatus) & MPI_IOCSTATUS_MASK) ==
						MPI_IOCSTATUS_LAN_CANCELED)
		return mpt_lan_receive_post_free(dev, pRecvRep);

	len = le32_to_cpu(pRecvRep->PacketLength);
	if (len == 0) {
		printk (KERN_ERR MYNAM ": %s/%s: ERROR - Got a non-TURBO "
			"ReceivePostReply w/ PacketLength zero!\n",
				IOC_AND_NETDEV_NAMES_s_s(dev));
		printk (KERN_ERR MYNAM ": MsgFlags = %02x, IOCStatus = %04x\n",
				pRecvRep->MsgFlags, le16_to_cpu(pRecvRep->IOCStatus));
		return -1;
	}

	ctx    = le32_to_cpu(pRecvRep->BucketContext[0]);
	count  = pRecvRep->NumberOfContexts;
	skb    = priv->RcvCtl[ctx].skb;

	offset = le32_to_cpu(pRecvRep->PacketOffset);
//	if (offset != 0) {
//		printk (KERN_INFO MYNAM ": %s/%s: Got a ReceivePostReply "
//			"w/ PacketOffset %u\n",
//				IOC_AND_NETDEV_NAMES_s_s(dev),
//				offset);
//	}

	dioprintk((KERN_INFO MYNAM ": %s/%s: @rpr, offset = %d, len = %d\n",
			IOC_AND_NETDEV_NAMES_s_s(dev),
			offset, len));

	if (count > 1) {
		int szrem = len;

//		dioprintk((KERN_INFO MYNAM ": %s/%s: Multiple buckets returned "
//			"for single packet, concatenating...\n",
//				IOC_AND_NETDEV_NAMES_s_s(dev)));

		skb = (struct sk_buff *)dev_alloc_skb(len);
		if (!skb) {
			printk (KERN_ERR MYNAM ": %s/%s: ERROR - Can't allocate skb! (%s@%d)\n",
					IOC_AND_NETDEV_NAMES_s_s(dev),
					__FILE__, __LINE__);
			return -ENOMEM;
		}

		spin_lock_irqsave(&priv->rxfidx_lock, flags);
		for (i = 0; i < count; i++) {

			ctx = le32_to_cpu(pRecvRep->BucketContext[i]);
			old_skb = priv->RcvCtl[ctx].skb;

			l = priv->RcvCtl[ctx].len;
			if (szrem < l)
				l = szrem;

//			dioprintk((KERN_INFO MYNAM ": %s/%s: Buckets = %d, len = %u\n",
//					IOC_AND_NETDEV_NAMES_s_s(dev),
//					i, l));

			pci_dma_sync_single_for_cpu(mpt_dev->pcidev,
						    priv->RcvCtl[ctx].dma,
						    priv->RcvCtl[ctx].len,
						    PCI_DMA_FROMDEVICE);
			skb_copy_from_linear_data(old_skb, skb_put(skb, l), l);

			pci_dma_sync_single_for_device(mpt_dev->pcidev,
						       priv->RcvCtl[ctx].dma,
						       priv->RcvCtl[ctx].len,
						       PCI_DMA_FROMDEVICE);

			priv->mpt_rxfidx[++priv->mpt_rxfidx_tail] = ctx;
			szrem -= l;
		}
		spin_unlock_irqrestore(&priv->rxfidx_lock, flags);

	} else if (len < MPT_LAN_RX_COPYBREAK) {

		old_skb = skb;

		skb = (struct sk_buff *)dev_alloc_skb(len);
		if (!skb) {
			printk (KERN_ERR MYNAM ": %s/%s: ERROR - Can't allocate skb! (%s@%d)\n",
					IOC_AND_NETDEV_NAMES_s_s(dev),
					__FILE__, __LINE__);
			return -ENOMEM;
		}

		pci_dma_sync_single_for_cpu(mpt_dev->pcidev,
					    priv->RcvCtl[ctx].dma,
					    priv->RcvCtl[ctx].len,
					    PCI_DMA_FROMDEVICE);

		skb_copy_from_linear_data(old_skb, skb_put(skb, len), len);

		pci_dma_sync_single_for_device(mpt_dev->pcidev,
					       priv->RcvCtl[ctx].dma,
					       priv->RcvCtl[ctx].len,
					       PCI_DMA_FROMDEVICE);

		spin_lock_irqsave(&priv->rxfidx_lock, flags);
		priv->mpt_rxfidx[++priv->mpt_rxfidx_tail] = ctx;
		spin_unlock_irqrestore(&priv->rxfidx_lock, flags);

	} else {
		spin_lock_irqsave(&priv->rxfidx_lock, flags);

		priv->RcvCtl[ctx].skb = NULL;

		pci_unmap_single(mpt_dev->pcidev, priv->RcvCtl[ctx].dma,
				 priv->RcvCtl[ctx].len, PCI_DMA_FROMDEVICE);
		priv->RcvCtl[ctx].dma = 0;

		priv->mpt_rxfidx[++priv->mpt_rxfidx_tail] = ctx;
		spin_unlock_irqrestore(&priv->rxfidx_lock, flags);

		skb_put(skb, len);
	}

	atomic_sub(count, &priv->buckets_out);
	priv->total_received += count;

	if (priv->mpt_rxfidx_tail >= MPT_LAN_MAX_BUCKETS_OUT) {
		printk (KERN_ERR MYNAM ": %s/%s: Yoohoo! mpt_rxfidx_tail = %d, "
			"MPT_LAN_MAX_BUCKETS_OUT = %d\n",
				IOC_AND_NETDEV_NAMES_s_s(dev),
				priv->mpt_rxfidx_tail,
				MPT_LAN_MAX_BUCKETS_OUT);

		return -1;
	}

	if (remaining == 0)
		printk (KERN_WARNING MYNAM ": %s/%s: WARNING - IOC out of buckets! "
			"(priv->buckets_out = %d)\n",
			IOC_AND_NETDEV_NAMES_s_s(dev),
			atomic_read(&priv->buckets_out));
	else if (remaining < 10)
		printk (KERN_INFO MYNAM ": %s/%s: IOC says %d buckets left. "
			"(priv->buckets_out = %d)\n",
			IOC_AND_NETDEV_NAMES_s_s(dev),
			remaining, atomic_read(&priv->buckets_out));

	if ((remaining < priv->bucketthresh) &&
	    ((atomic_read(&priv->buckets_out) - remaining) >
	     MPT_LAN_BUCKETS_REMAIN_MISMATCH_THRESH)) {

		printk (KERN_WARNING MYNAM " Mismatch between driver's "
			"buckets_out count and fw's BucketsRemaining "
			"count has crossed the threshold, issuing a "
			"LanReset to clear the fw's hashtable. You may "
			"want to check your /var/log/messages for \"CRC "
			"error\" event notifications.\n");

		mpt_lan_reset(dev);
		mpt_lan_wake_post_buckets_task(dev, 0);
	}

	return mpt_lan_receive_skb(dev, skb);
}

/*=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=*/
/* Simple SGE's only at the moment */

static void
mpt_lan_post_receive_buckets(struct mpt_lan_priv *priv)
{
	struct net_device *dev = priv->dev;
	MPT_ADAPTER *mpt_dev = priv->mpt_dev;
	MPT_FRAME_HDR *mf;
	LANReceivePostRequest_t *pRecvReq;
	SGETransaction32_t *pTrans;
	SGESimple64_t *pSimple;
	struct sk_buff *skb;
	dma_addr_t dma;
	u32 curr, buckets, count, max;
	u32 len = (dev->mtu + dev->hard_header_len + 4);
	unsigned long flags;
	int i;

	curr = atomic_read(&priv->buckets_out);
	buckets = (priv->max_buckets_out - curr);

	dioprintk((KERN_INFO MYNAM ": %s/%s: @%s, Start_buckets = %u, buckets_out = %u\n",
			IOC_AND_NETDEV_NAMES_s_s(dev),
			__func__, buckets, curr));

	max = (mpt_dev->req_sz - MPT_LAN_RECEIVE_POST_REQUEST_SIZE) /
			(sizeof(SGETransaction32_t) + sizeof(SGESimple64_t));

	while (buckets) {
		mf = mpt_get_msg_frame(LanCtx, mpt_dev);
		if (mf == NULL) {
			printk (KERN_ERR "%s: Unable to alloc request frame\n",
				__func__);
			dioprintk((KERN_ERR "%s: %u buckets remaining\n",
				 __func__, buckets));
			goto out;
		}
		pRecvReq = (LANReceivePostRequest_t *) mf;

		i = le16_to_cpu(mf->u.frame.hwhdr.msgctxu.fld.req_idx);
		mpt_dev->RequestNB[i] = 0;
		count = buckets;
		if (count > max)
			count = max;

		pRecvReq->Function    = MPI_FUNCTION_LAN_RECEIVE;
		pRecvReq->ChainOffset = 0;
		pRecvReq->MsgFlags    = 0;
		pRecvReq->PortNumber  = priv->pnum;

		pTrans = (SGETransaction32_t *) pRecvReq->SG_List;
		pSimple = NULL;

		for (i = 0; i < count; i++) {
			int ctx;

			spin_lock_irqsave(&priv->rxfidx_lock, flags);
			if (priv->mpt_rxfidx_tail < 0) {
				printk (KERN_ERR "%s: Can't alloc context\n",
					__func__);
				spin_unlock_irqrestore(&priv->rxfidx_lock,
						       flags);
				break;
			}

			ctx = priv->mpt_rxfidx[priv->mpt_rxfidx_tail--];

			skb = priv->RcvCtl[ctx].skb;
			if (skb && (priv->RcvCtl[ctx].len != len)) {
				pci_unmap_single(mpt_dev->pcidev,
						 priv->RcvCtl[ctx].dma,
						 priv->RcvCtl[ctx].len,
						 PCI_DMA_FROMDEVICE);
				dev_kfree_skb(priv->RcvCtl[ctx].skb);
				skb = priv->RcvCtl[ctx].skb = NULL;
			}

			if (skb == NULL) {
				skb = dev_alloc_skb(len);
				if (skb == NULL) {
					printk (KERN_WARNING
						MYNAM "/%s: Can't alloc skb\n",
						__func__);
					priv->mpt_rxfidx[++priv->mpt_rxfidx_tail] = ctx;
					spin_unlock_irqrestore(&priv->rxfidx_lock, flags);
					break;
				}

				dma = pci_map_single(mpt_dev->pcidev, skb->data,
						     len, PCI_DMA_FROMDEVICE);

				priv->RcvCtl[ctx].skb = skb;
				priv->RcvCtl[ctx].dma = dma;
				priv->RcvCtl[ctx].len = len;
			}

			spin_unlock_irqrestore(&priv->rxfidx_lock, flags);

			pTrans->ContextSize   = sizeof(u32);
			pTrans->DetailsLength = 0;
			pTrans->Flags         = 0;
			pTrans->TransactionContext = cpu_to_le32(ctx);

			pSimple = (SGESimple64_t *) pTrans->TransactionDetails;

			pSimple->FlagsLength = cpu_to_le32(
				((MPI_SGE_FLAGS_END_OF_BUFFER |
				  MPI_SGE_FLAGS_SIMPLE_ELEMENT |
				  MPI_SGE_FLAGS_64_BIT_ADDRESSING) << MPI_SGE_FLAGS_SHIFT) | len);
			pSimple->Address.Low = cpu_to_le32((u32) priv->RcvCtl[ctx].dma);
			if (sizeof(dma_addr_t) > sizeof(u32))
				pSimple->Address.High = cpu_to_le32((u32) ((u64) priv->RcvCtl[ctx].dma >> 32));
			else
				pSimple->Address.High = 0;

			pTrans = (SGETransaction32_t *) (pSimple + 1);
		}

		if (pSimple == NULL) {
/**/			printk (KERN_WARNING MYNAM "/%s: No buckets posted\n",
/**/				__func__);
			mpt_free_msg_frame(mpt_dev, mf);
			goto out;
		}

		pSimple->FlagsLength |= cpu_to_le32(MPI_SGE_FLAGS_END_OF_LIST << MPI_SGE_FLAGS_SHIFT);

		pRecvReq->BucketCount = cpu_to_le32(i);

/*	printk(KERN_INFO MYNAM ": posting buckets\n   ");
 *	for (i = 0; i < j + 2; i ++)
 *	    printk (" %08x", le32_to_cpu(msg[i]));
 *	printk ("\n");
 */

		mpt_put_msg_frame(LanCtx, mpt_dev, mf);

		priv->total_posted += i;
		buckets -= i;
		atomic_add(i, &priv->buckets_out);
	}

out:
	dioprintk((KERN_INFO MYNAM "/%s: End_buckets = %u, priv->buckets_out = %u\n",
		  __func__, buckets, atomic_read(&priv->buckets_out)));
	dioprintk((KERN_INFO MYNAM "/%s: Posted %u buckets and received %u back\n",
		  __func__, priv->total_posted, priv->total_received));

	clear_bit(0, &priv->post_buckets_active);
}
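
/*
 * Sizing note for the loop above: each posted bucket consumes one
 * SGETransaction32_t plus one SGESimple64_t in the request frame, so
 * the buckets per LANReceivePost request are bounded by
 *
 *	max = (req_sz - MPT_LAN_RECEIVE_POST_REQUEST_SIZE) /
 *	      (sizeof(SGETransaction32_t) + sizeof(SGESimple64_t));
 *
 * and the while loop issues as many request frames as needed to bring
 * buckets_out back up to max_buckets_out.
 */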

static void
mpt_lan_post_receive_buckets_work(struct work_struct *work)
{
	mpt_lan_post_receive_buckets(container_of(work, struct mpt_lan_priv,
						  post_buckets_task.work));
}

static const struct net_device_ops mpt_netdev_ops = {
	.ndo_open       = mpt_lan_open,
	.ndo_stop       = mpt_lan_close,
	.ndo_start_xmit = mpt_lan_sdu_send,
	.ndo_tx_timeout = mpt_lan_tx_timeout,
};

/*=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=*/
static struct net_device *
mpt_register_lan_device (MPT_ADAPTER *mpt_dev, int pnum)
{
	struct net_device *dev;
	struct mpt_lan_priv *priv;
	u8 HWaddr[FC_ALEN], *a;

	dev = alloc_fcdev(sizeof(struct mpt_lan_priv));
	if (!dev)
		return NULL;

	dev->mtu = MPT_LAN_MTU;

	priv = netdev_priv(dev);

	priv->dev = dev;
	priv->mpt_dev = mpt_dev;
	priv->pnum = pnum;

	INIT_DELAYED_WORK(&priv->post_buckets_task,
			  mpt_lan_post_receive_buckets_work);
	priv->post_buckets_active = 0;

	dlprintk((KERN_INFO MYNAM "@%d: bucketlen = %d\n",
			__LINE__, dev->mtu + dev->hard_header_len + 4));

	atomic_set(&priv->buckets_out, 0);
	priv->total_posted = 0;
	priv->total_received = 0;
	priv->max_buckets_out = max_buckets_out;
	if (mpt_dev->pfacts[0].MaxLanBuckets < max_buckets_out)
		priv->max_buckets_out = mpt_dev->pfacts[0].MaxLanBuckets;

	dlprintk((KERN_INFO MYNAM "@%d: MaxLanBuckets=%d, max_buckets_out/priv=%d/%d\n",
			__LINE__,
			mpt_dev->pfacts[0].MaxLanBuckets,
			max_buckets_out,
			priv->max_buckets_out));

	priv->bucketthresh = priv->max_buckets_out * 2 / 3;
	spin_lock_init(&priv->txfidx_lock);
	spin_lock_init(&priv->rxfidx_lock);

	/*  Grab pre-fetched LANPage1 stuff. :-) */
	a = (u8 *) &mpt_dev->lan_cnfg_page1.HardwareAddressLow;

	HWaddr[0] = a[5];
	HWaddr[1] = a[4];
	HWaddr[2] = a[3];
	HWaddr[3] = a[2];
	HWaddr[4] = a[1];
	HWaddr[5] = a[0];

	dev->addr_len = FC_ALEN;
	dev_addr_set(dev, HWaddr);
	memset(dev->broadcast, 0xff, FC_ALEN);

	/* The Tx queue is 127 deep on the 909.
	 * Give ourselves some breathing room.
	 */
	priv->tx_max_out = (tx_max_out_p <= MPT_TX_MAX_OUT_LIM) ?
			    tx_max_out_p : MPT_TX_MAX_OUT_LIM;

	dev->netdev_ops = &mpt_netdev_ops;
	dev->watchdog_timeo = MPT_LAN_TX_TIMEOUT;

	/* MTU range: 96 - 65280 */
	dev->min_mtu = MPT_LAN_MIN_MTU;
	dev->max_mtu = MPT_LAN_MAX_MTU;

	dlprintk((KERN_INFO MYNAM ": Finished registering dev "
		"and setting initial values\n"));

	if (register_netdev(dev) != 0) {
		free_netdev(dev);
		dev = NULL;
	}
	return dev;
}
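
/*
 * Note on the HWaddr setup above: the six address bytes are copied out
 * of LANPage1's HardwareAddressLow/High words in reverse order, which
 * appears intended to turn the little-endian config-page layout into
 * the network byte order expected in dev->dev_addr.
 */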

static int
mptlan_probe(struct pci_dev *pdev)
{
	MPT_ADAPTER		*ioc = pci_get_drvdata(pdev);
	struct net_device	*dev;
	int			i;

	for (i = 0; i < ioc->facts.NumberOfPorts; i++) {
		printk(KERN_INFO MYNAM ": %s: PortNum=%x, "
		       "ProtocolFlags=%02Xh (%c%c%c%c)\n",
		       ioc->name, ioc->pfacts[i].PortNumber,
		       ioc->pfacts[i].ProtocolFlags,
		       MPT_PROTOCOL_FLAGS_c_c_c_c(
			       ioc->pfacts[i].ProtocolFlags));

		if (!(ioc->pfacts[i].ProtocolFlags &
					MPI_PORTFACTS_PROTOCOL_LAN)) {
			printk(KERN_INFO MYNAM ": %s: Hmmm... LAN protocol "
			       "seems to be disabled on this adapter port!\n",
			       ioc->name);
			continue;
		}

		dev = mpt_register_lan_device(ioc, i);
		if (!dev) {
			printk(KERN_ERR MYNAM ": %s: Unable to register "
			       "port%d as a LAN device\n", ioc->name,
			       ioc->pfacts[i].PortNumber);
			continue;
		}

		printk(KERN_INFO MYNAM ": %s: Fusion MPT LAN device "
		       "registered as '%s'\n", ioc->name, dev->name);
		printk(KERN_INFO MYNAM ": %s/%s: "
		       "LanAddr = %pM\n",
		       IOC_AND_NETDEV_NAMES_s_s(dev),
		       dev->dev_addr);

		ioc->netdev = dev;

		return 0;
	}

	return -ENODEV;
}

static void
mptlan_remove(struct pci_dev *pdev)
{
	MPT_ADAPTER		*ioc = pci_get_drvdata(pdev);
	struct net_device	*dev = ioc->netdev;

	if (dev != NULL) {
		unregister_netdev(dev);
		free_netdev(dev);
	}
}

static struct mpt_pci_driver mptlan_driver = {
	.probe		= mptlan_probe,
	.remove		= mptlan_remove,
};

static int __init mpt_lan_init (void)
{
	show_mptmod_ver(LANAME, LANVER);

	LanCtx = mpt_register(lan_reply, MPTLAN_DRIVER,
				"lan_reply");
	if (LanCtx <= 0) {
		printk (KERN_ERR MYNAM ": Failed to register with MPT base driver\n");
		return -EBUSY;
	}

	dlprintk((KERN_INFO MYNAM ": assigned context of %d\n", LanCtx));

	if (mpt_reset_register(LanCtx, mpt_lan_ioc_reset)) {
		printk(KERN_ERR MYNAM ": Eieee! unable to register a reset "
		       "handler with mptbase! The world is at an end! "
		       "Everything is fading to black! Goodbye.\n");
		return -EBUSY;
	}

	dlprintk((KERN_INFO MYNAM ": Registered for IOC reset notifications\n"));

	mpt_device_driver_register(&mptlan_driver, MPTLAN_DRIVER);
	return 0;
}

static void __exit mpt_lan_exit(void)
{
	mpt_device_driver_deregister(MPTLAN_DRIVER);
	mpt_reset_deregister(LanCtx);

	if (LanCtx) {
		mpt_deregister(LanCtx);
		LanCtx = MPT_MAX_PROTOCOL_DRIVERS;
	}
}

module_init(mpt_lan_init);
module_exit(mpt_lan_exit);

/*=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=*/
static unsigned short
mpt_lan_type_trans(struct sk_buff *skb, struct net_device *dev)
{
	struct mpt_lan_ohdr *fch = (struct mpt_lan_ohdr *)skb->data;
	struct fcllc *fcllc;

	skb_reset_mac_header(skb);
	skb_pull(skb, sizeof(struct mpt_lan_ohdr));

	if (fch->dtype == htons(0xffff)) {
		u32 *p = (u32 *) fch;

		swab32s(p + 0);
		swab32s(p + 1);
		swab32s(p + 2);
		swab32s(p + 3);

		printk (KERN_WARNING MYNAM ": %s: WARNING - Broadcast swap F/W bug detected!\n",
				NETDEV_PTR_TO_IOC_NAME_s(dev));
		printk (KERN_WARNING MYNAM ": Please update sender @ MAC_addr = %pM\n",
				fch->saddr);
	}

	if (*fch->daddr & 1) {
		if (!memcmp(fch->daddr, dev->broadcast, FC_ALEN)) {
			skb->pkt_type = PACKET_BROADCAST;
		} else {
			skb->pkt_type = PACKET_MULTICAST;
		}
	} else {
		if (memcmp(fch->daddr, dev->dev_addr, FC_ALEN)) {
			skb->pkt_type = PACKET_OTHERHOST;
		} else {
			skb->pkt_type = PACKET_HOST;
		}
	}

	fcllc = (struct fcllc *)skb->data;

	/* Strip the SNAP header from ARP packets since we don't
	 * pass them through to the 802.2/SNAP layers.
	 */
	if (fcllc->dsap == EXTENDED_SAP &&
		(fcllc->ethertype == htons(ETH_P_IP) ||
		 fcllc->ethertype == htons(ETH_P_ARP))) {
		skb_pull(skb, sizeof(struct fcllc));
		return fcllc->ethertype;
	}

	return htons(ETH_P_802_2);
}
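
/*
 * Classification summary for mpt_lan_type_trans(): the low bit of the
 * first destination-address byte distinguishes unicast from group
 * addresses (all-ones = broadcast, otherwise multicast), mirroring
 * eth_type_trans().  For IP and ARP frames the 802.2/SNAP header is
 * stripped and the embedded ethertype returned, so the stack sees
 * ordinary ETH_P_IP/ETH_P_ARP packets.
 */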
1534 
1535 /*=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=*/
1536