/*
 *  linux/drivers/message/fusion/mptlan.c
 *      IP Over Fibre Channel device driver.
 *      For use with LSI Fibre Channel PCI chip/adapters
 *      running LSI Fusion MPT (Message Passing Technology) firmware.
 *
 *  Copyright (c) 2000-2008 LSI Corporation
 *  (mailto:DL-MPTFusionLinux@lsi.com)
 *
 */
/*=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=*/
/*
    This program is free software; you can redistribute it and/or modify
    it under the terms of the GNU General Public License as published by
    the Free Software Foundation; version 2 of the License.

    This program is distributed in the hope that it will be useful,
    but WITHOUT ANY WARRANTY; without even the implied warranty of
    MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
    GNU General Public License for more details.

    NO WARRANTY
    THE PROGRAM IS PROVIDED ON AN "AS IS" BASIS, WITHOUT WARRANTIES OR
    CONDITIONS OF ANY KIND, EITHER EXPRESS OR IMPLIED INCLUDING, WITHOUT
    LIMITATION, ANY WARRANTIES OR CONDITIONS OF TITLE, NON-INFRINGEMENT,
    MERCHANTABILITY OR FITNESS FOR A PARTICULAR PURPOSE. Each Recipient is
    solely responsible for determining the appropriateness of using and
    distributing the Program and assumes all risks associated with its
    exercise of rights under this Agreement, including but not limited to
    the risks and costs of program errors, damage to or loss of data,
    programs or equipment, and unavailability or interruption of operations.

    DISCLAIMER OF LIABILITY
    NEITHER RECIPIENT NOR ANY CONTRIBUTORS SHALL HAVE ANY LIABILITY FOR ANY
    DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
    DAMAGES (INCLUDING WITHOUT LIMITATION LOST PROFITS), HOWEVER CAUSED AND
    ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR
    TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE
    USE OR DISTRIBUTION OF THE PROGRAM OR THE EXERCISE OF ANY RIGHTS GRANTED
    HEREUNDER, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGES

    You should have received a copy of the GNU General Public License
    along with this program; if not, write to the Free Software
    Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA  02111-1307  USA
*/

/*=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=*/
/*
 * Define statements used for debugging
 */
//#define MPT_LAN_IO_DEBUG

/*=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=*/

#include "mptlan.h"
#include <linux/init.h>
#include <linux/module.h>
#include <linux/fs.h>
#include <linux/sched.h>
#include <linux/slab.h>

#define my_VERSION	MPT_LINUX_VERSION_COMMON
#define MYNAM		"mptlan"

MODULE_LICENSE("GPL");
MODULE_VERSION(my_VERSION);

/*=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=*/
/*
 * MPT LAN message sizes without variable part.
 */
#define MPT_LAN_RECEIVE_POST_REQUEST_SIZE \
	(sizeof(LANReceivePostRequest_t) - sizeof(SGE_MPI_UNION))

#define MPT_LAN_TRANSACTION32_SIZE \
	(sizeof(SGETransaction32_t) - sizeof(u32))

/*
 *  Fusion MPT LAN private structures
 */

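/*
 * Reviewer note: receive-side flow control works on "buckets": skbs that
 * the driver DMA-maps and posts to the IOC in advance (see
 * mpt_lan_post_receive_buckets() below).  The firmware lands each inbound
 * frame in a bucket and returns it by context index in a (turbo) reply.
 * A BufferControl entry tracks one such mapped buffer, for either the Rx
 * bucket pool (RcvCtl) or the Tx queue (SendCtl).
 */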
struct BufferControl {
	struct sk_buff	*skb;
	dma_addr_t	dma;
	unsigned int	len;
};

struct mpt_lan_priv {
	MPT_ADAPTER *mpt_dev;
	u8 pnum; /* Port number in the IOC. This is not a Unix network port! */

	atomic_t buckets_out;		/* number of unused buckets on IOC */
	int bucketthresh;		/* Send more when this many left */

	int *mpt_txfidx; /* Free Tx Context list */
	int mpt_txfidx_tail;
	spinlock_t txfidx_lock;

	int *mpt_rxfidx; /* Free Rx Context list */
	int mpt_rxfidx_tail;
	spinlock_t rxfidx_lock;

	struct BufferControl *RcvCtl;	/* Receive BufferControl structs */
	struct BufferControl *SendCtl;	/* Send BufferControl structs */

	int max_buckets_out;		/* Max buckets to send to IOC */
	int tx_max_out;			/* IOC's Tx queue len */

	u32 total_posted;
	u32 total_received;

	struct delayed_work post_buckets_task;
	struct net_device *dev;
	unsigned long post_buckets_active;
};

struct mpt_lan_ohdr {
	u16	dtype;
	u8	daddr[FC_ALEN];
	u16	stype;
	u8	saddr[FC_ALEN];
};

/*=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=*/

/*
 *  Forward protos...
 */
static int  lan_reply (MPT_ADAPTER *ioc, MPT_FRAME_HDR *mf,
		       MPT_FRAME_HDR *reply);
static int  mpt_lan_open(struct net_device *dev);
static int  mpt_lan_reset(struct net_device *dev);
static int  mpt_lan_close(struct net_device *dev);
static void mpt_lan_post_receive_buckets(struct mpt_lan_priv *priv);
static void mpt_lan_wake_post_buckets_task(struct net_device *dev,
					   int priority);
static int  mpt_lan_receive_post_turbo(struct net_device *dev, u32 tmsg);
static int  mpt_lan_receive_post_reply(struct net_device *dev,
				       LANReceivePostReply_t *pRecvRep);
static int  mpt_lan_send_turbo(struct net_device *dev, u32 tmsg);
static int  mpt_lan_send_reply(struct net_device *dev,
			       LANSendReply_t *pSendRep);
static int  mpt_lan_ioc_reset(MPT_ADAPTER *ioc, int reset_phase);
static int  mpt_lan_event_process(MPT_ADAPTER *ioc, EventNotificationReply_t *pEvReply);
static unsigned short mpt_lan_type_trans(struct sk_buff *skb,
					 struct net_device *dev);

/*=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=*/
/*
 *  Fusion MPT LAN private data
 */
static u8 LanCtx = MPT_MAX_PROTOCOL_DRIVERS;

static u32 max_buckets_out = 127;
static u32 tx_max_out_p = 127 - 16;
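
/*
 * Reviewer note: the defaults above track the 909's Tx queue depth of 127
 * mentioned in mpt_register_lan_device() below; tx_max_out_p leaves 16
 * frames of headroom.  Both values are clamped per adapter at registration
 * time (pfacts[0].MaxLanBuckets and MPT_TX_MAX_OUT_LIM respectively).
 */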

/*=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=*/
/**
 *	lan_reply - Handle all data sent from the hardware.
 *	@ioc: Pointer to MPT_ADAPTER structure
 *	@mf: Pointer to original MPT request frame (NULL if TurboReply)
 *	@reply: Pointer to MPT reply frame
 *
 *	Returns 1 indicating original alloc'd request frame ptr
 *	should be freed, or 0 if it shouldn't.
 */
static int
lan_reply (MPT_ADAPTER *ioc, MPT_FRAME_HDR *mf, MPT_FRAME_HDR *reply)
{
	struct net_device *dev = ioc->netdev;
	int FreeReqFrame = 0;

	dioprintk((KERN_INFO MYNAM ": %s/%s: Got reply.\n",
		  IOC_AND_NETDEV_NAMES_s_s(dev)));

//	dioprintk((KERN_INFO MYNAM "@lan_reply: mf = %p, reply = %p\n",
//			mf, reply));

	if (mf == NULL) {
		u32 tmsg = CAST_PTR_TO_U32(reply);

		dioprintk((KERN_INFO MYNAM ": %s/%s: @lan_reply, tmsg %08x\n",
				IOC_AND_NETDEV_NAMES_s_s(dev),
				tmsg));

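		/*
		 * Reviewer note: a turbo reply packs everything into the
		 * 32-bit token itself: GET_LAN_FORM() yields the reply form
		 * (send vs. receive single), and the remaining bits carry
		 * the buffer/bucket context decoded by the handlers below.
		 */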
		switch (GET_LAN_FORM(tmsg)) {

		// NOTE!  (Optimization) First case here is now caught in
		//  mptbase.c::mpt_interrupt() routine and callback here
		//  is now skipped for this case!
#if 0
		case LAN_REPLY_FORM_MESSAGE_CONTEXT:
//			dioprintk((KERN_INFO MYNAM "/lan_reply: "
//				  "MessageContext turbo reply received\n"));
			FreeReqFrame = 1;
			break;
#endif

		case LAN_REPLY_FORM_SEND_SINGLE:
//			dioprintk((MYNAM "/lan_reply: "
//				  "calling mpt_lan_send_reply (turbo)\n"));

			// Potential BUG here?
			//	FreeReqFrame = mpt_lan_send_turbo(dev, tmsg);
			//  If/when mpt_lan_send_turbo would return 1 here,
			//  calling routine (mptbase.c|mpt_interrupt)
			//  would Oops because mf has already been set
			//  to NULL.  So after return from this func,
			//  mpt_interrupt() will attempt to put (NULL) mf ptr
			//  item back onto its adapter FreeQ - Oops!:-(
			//  It's Ok, since mpt_lan_send_turbo() *currently*
			//  always returns 0, but..., just in case:

			(void) mpt_lan_send_turbo(dev, tmsg);
			FreeReqFrame = 0;

			break;

		case LAN_REPLY_FORM_RECEIVE_SINGLE:
//			dioprintk((KERN_INFO MYNAM "@lan_reply: "
//				  "rcv-Turbo = %08x\n", tmsg));
			mpt_lan_receive_post_turbo(dev, tmsg);
			break;

		default:
			printk (KERN_ERR MYNAM "/lan_reply: Got a turbo reply "
				"that I don't know what to do with\n");

			/* CHECKME!  Hmmm...  FreeReqFrame is 0 here; is that right? */

			break;
		}

		return FreeReqFrame;
	}

//	msg = (u32 *) reply;
//	dioprintk((KERN_INFO MYNAM "@lan_reply: msg = %08x %08x %08x %08x\n",
//		  le32_to_cpu(msg[0]), le32_to_cpu(msg[1]),
//		  le32_to_cpu(msg[2]), le32_to_cpu(msg[3])));
//	dioprintk((KERN_INFO MYNAM "@lan_reply: Function = %02xh\n",
//		  reply->u.hdr.Function));

	switch (reply->u.hdr.Function) {

	case MPI_FUNCTION_LAN_SEND:
	{
		LANSendReply_t *pSendRep;

		pSendRep = (LANSendReply_t *) reply;
		FreeReqFrame = mpt_lan_send_reply(dev, pSendRep);
		break;
	}

	case MPI_FUNCTION_LAN_RECEIVE:
	{
		LANReceivePostReply_t *pRecvRep;

		pRecvRep = (LANReceivePostReply_t *) reply;
		if (pRecvRep->NumberOfContexts) {
			mpt_lan_receive_post_reply(dev, pRecvRep);
			if (!(pRecvRep->MsgFlags & MPI_MSGFLAGS_CONTINUATION_REPLY))
				FreeReqFrame = 1;
		} else
			dioprintk((KERN_INFO MYNAM "@lan_reply: zero context "
				  "ReceivePostReply received.\n"));
		break;
	}

	case MPI_FUNCTION_LAN_RESET:
		/* Just a default reply. Might want to check it to
		 * make sure that everything went ok.
		 */
		FreeReqFrame = 1;
		break;

	case MPI_FUNCTION_EVENT_NOTIFICATION:
	case MPI_FUNCTION_EVENT_ACK:
		/*  _EVENT_NOTIFICATION should NOT come down this path any more.
		 *  Should be routed to mpt_lan_event_process(), but just in case...
		 */
		FreeReqFrame = 1;
		break;

	default:
		printk (KERN_ERR MYNAM "/lan_reply: Got a non-turbo "
			"reply that I don't know what to do with\n");

		/* CHECKME!  Hmmm...  FreeReqFrame is 0 here; is that right? */
		FreeReqFrame = 1;

		break;
	}

	return FreeReqFrame;
}

/*=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=*/
static int
mpt_lan_ioc_reset(MPT_ADAPTER *ioc, int reset_phase)
{
	struct net_device *dev = ioc->netdev;
	struct mpt_lan_priv *priv;

	if (dev == NULL)
		return(1);
	else
		priv = netdev_priv(dev);

	dlprintk((KERN_INFO MYNAM ": IOC %s_reset routed to LAN driver!\n",
			reset_phase==MPT_IOC_SETUP_RESET ? "setup" : (
			reset_phase==MPT_IOC_PRE_RESET ? "pre" : "post")));

	if (priv->mpt_rxfidx == NULL)
		return (1);

	if (reset_phase == MPT_IOC_SETUP_RESET) {
		;
	} else if (reset_phase == MPT_IOC_PRE_RESET) {
		int i;
		unsigned long flags;

		netif_stop_queue(dev);

		dlprintk ((KERN_INFO "mptlan/ioc_reset: called netif_stop_queue for %s.\n", dev->name));

		atomic_set(&priv->buckets_out, 0);

		/* Reset Rx Free Tail index and re-populate the queue. */
		spin_lock_irqsave(&priv->rxfidx_lock, flags);
		priv->mpt_rxfidx_tail = -1;
		for (i = 0; i < priv->max_buckets_out; i++)
			priv->mpt_rxfidx[++priv->mpt_rxfidx_tail] = i;
		spin_unlock_irqrestore(&priv->rxfidx_lock, flags);
	} else {
		mpt_lan_post_receive_buckets(priv);
		netif_wake_queue(dev);
	}

	return 1;
}

342 
343 /*=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=*/
344 static int
345 mpt_lan_event_process(MPT_ADAPTER *ioc, EventNotificationReply_t *pEvReply)
346 {
347 	dlprintk((KERN_INFO MYNAM ": MPT event routed to LAN driver!\n"));
348 
349 	switch (le32_to_cpu(pEvReply->Event)) {
350 	case MPI_EVENT_NONE:				/* 00 */
351 	case MPI_EVENT_LOG_DATA:			/* 01 */
352 	case MPI_EVENT_STATE_CHANGE:			/* 02 */
353 	case MPI_EVENT_UNIT_ATTENTION:			/* 03 */
354 	case MPI_EVENT_IOC_BUS_RESET:			/* 04 */
355 	case MPI_EVENT_EXT_BUS_RESET:			/* 05 */
356 	case MPI_EVENT_RESCAN:				/* 06 */
357 		/* Ok, do we need to do anything here? As far as
358 		   I can tell, this is when a new device gets added
359 		   to the loop. */
360 	case MPI_EVENT_LINK_STATUS_CHANGE:		/* 07 */
361 	case MPI_EVENT_LOOP_STATE_CHANGE:		/* 08 */
362 	case MPI_EVENT_LOGOUT:				/* 09 */
363 	case MPI_EVENT_EVENT_CHANGE:			/* 0A */
364 	default:
365 		break;
366 	}
367 
368 	/*
369 	 *  NOTE: pEvent->AckRequired handling now done in mptbase.c;
370 	 *  Do NOT do it here now!
371 	 */
372 
373 	return 1;
374 }
375 
376 /*=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=*/
377 static int
378 mpt_lan_open(struct net_device *dev)
379 {
380 	struct mpt_lan_priv *priv = netdev_priv(dev);
381 	int i;
382 
383 	if (mpt_lan_reset(dev) != 0) {
384 		MPT_ADAPTER *mpt_dev = priv->mpt_dev;
385 
386 		printk (KERN_WARNING MYNAM "/lan_open: lan_reset failed.");
387 
388 		if (mpt_dev->active)
389 			printk ("The ioc is active. Perhaps it needs to be"
390 				" reset?\n");
391 		else
392 			printk ("The ioc in inactive, most likely in the "
393 				"process of being reset. Please try again in "
394 				"a moment.\n");
395 	}
396 
397 	priv->mpt_txfidx = kmalloc(priv->tx_max_out * sizeof(int), GFP_KERNEL);
398 	if (priv->mpt_txfidx == NULL)
399 		goto out;
400 	priv->mpt_txfidx_tail = -1;
401 
402 	priv->SendCtl = kcalloc(priv->tx_max_out, sizeof(struct BufferControl),
403 				GFP_KERNEL);
404 	if (priv->SendCtl == NULL)
405 		goto out_mpt_txfidx;
406 	for (i = 0; i < priv->tx_max_out; i++)
407 		priv->mpt_txfidx[++priv->mpt_txfidx_tail] = i;
408 
409 	dlprintk((KERN_INFO MYNAM "@lo: Finished initializing SendCtl\n"));
410 
411 	priv->mpt_rxfidx = kmalloc(priv->max_buckets_out * sizeof(int),
412 				   GFP_KERNEL);
413 	if (priv->mpt_rxfidx == NULL)
414 		goto out_SendCtl;
415 	priv->mpt_rxfidx_tail = -1;
416 
417 	priv->RcvCtl = kcalloc(priv->max_buckets_out,
418 			       sizeof(struct BufferControl),
419 			       GFP_KERNEL);
420 	if (priv->RcvCtl == NULL)
421 		goto out_mpt_rxfidx;
422 	for (i = 0; i < priv->max_buckets_out; i++)
423 		priv->mpt_rxfidx[++priv->mpt_rxfidx_tail] = i;
424 
425 /**/	dlprintk((KERN_INFO MYNAM "/lo: txfidx contains - "));
426 /**/	for (i = 0; i < priv->tx_max_out; i++)
427 /**/		dlprintk((" %xh", priv->mpt_txfidx[i]));
428 /**/	dlprintk(("\n"));
429 
430 	dlprintk((KERN_INFO MYNAM "/lo: Finished initializing RcvCtl\n"));
431 
432 	mpt_lan_post_receive_buckets(priv);
433 	printk(KERN_INFO MYNAM ": %s/%s: interface up & active\n",
434 			IOC_AND_NETDEV_NAMES_s_s(dev));
435 
436 	if (mpt_event_register(LanCtx, mpt_lan_event_process) != 0) {
437 		printk (KERN_WARNING MYNAM "/lo: Unable to register for Event"
438 			" Notifications. This is a bad thing! We're not going "
439 			"to go ahead, but I'd be leery of system stability at "
440 			"this point.\n");
441 	}
442 
443 	netif_start_queue(dev);
444 	dlprintk((KERN_INFO MYNAM "/lo: Done.\n"));
445 
446 	return 0;
447 out_mpt_rxfidx:
448 	kfree(priv->mpt_rxfidx);
449 	priv->mpt_rxfidx = NULL;
450 out_SendCtl:
451 	kfree(priv->SendCtl);
452 	priv->SendCtl = NULL;
453 out_mpt_txfidx:
454 	kfree(priv->mpt_txfidx);
455 	priv->mpt_txfidx = NULL;
456 out:	return -ENOMEM;
457 }

/*=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=*/
/* Send a LanReset message to the FW. This should result in the FW returning
   any buckets it still has. */
static int
mpt_lan_reset(struct net_device *dev)
{
	MPT_FRAME_HDR *mf;
	LANResetRequest_t *pResetReq;
	struct mpt_lan_priv *priv = netdev_priv(dev);

	mf = mpt_get_msg_frame(LanCtx, priv->mpt_dev);

	if (mf == NULL) {
/*		dlprintk((KERN_ERR MYNAM "/reset: Evil funkiness abounds! "
		"Unable to allocate a request frame.\n"));
*/
		return -1;
	}

	pResetReq = (LANResetRequest_t *) mf;

	pResetReq->Function	= MPI_FUNCTION_LAN_RESET;
	pResetReq->ChainOffset	= 0;
	pResetReq->Reserved	= 0;
	pResetReq->PortNumber	= priv->pnum;
	pResetReq->MsgFlags	= 0;
	pResetReq->Reserved2	= 0;

	mpt_put_msg_frame(LanCtx, priv->mpt_dev, mf);

	return 0;
}

/*=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=*/
static int
mpt_lan_close(struct net_device *dev)
{
	struct mpt_lan_priv *priv = netdev_priv(dev);
	MPT_ADAPTER *mpt_dev = priv->mpt_dev;
	unsigned long timeout;
	int i;

	dlprintk((KERN_INFO MYNAM ": mpt_lan_close called\n"));

	mpt_event_deregister(LanCtx);

	dlprintk((KERN_INFO MYNAM ":lan_close: Posted %d buckets "
		  "since driver was loaded, %d still out\n",
		  priv->total_posted, atomic_read(&priv->buckets_out)));

	netif_stop_queue(dev);

	mpt_lan_reset(dev);

	timeout = jiffies + 2 * HZ;
	while (atomic_read(&priv->buckets_out) && time_before(jiffies, timeout))
		schedule_timeout_interruptible(1);

	for (i = 0; i < priv->max_buckets_out; i++) {
		if (priv->RcvCtl[i].skb != NULL) {
/**/			dlprintk((KERN_INFO MYNAM "/lan_close: bucket %05x "
/**/				  "is still out\n", i));
			pci_unmap_single(mpt_dev->pcidev, priv->RcvCtl[i].dma,
					 priv->RcvCtl[i].len,
					 PCI_DMA_FROMDEVICE);
			dev_kfree_skb(priv->RcvCtl[i].skb);
		}
	}

	kfree(priv->RcvCtl);
	kfree(priv->mpt_rxfidx);

	for (i = 0; i < priv->tx_max_out; i++) {
		if (priv->SendCtl[i].skb != NULL) {
			pci_unmap_single(mpt_dev->pcidev, priv->SendCtl[i].dma,
					 priv->SendCtl[i].len,
					 PCI_DMA_TODEVICE);
			dev_kfree_skb(priv->SendCtl[i].skb);
		}
	}

	kfree(priv->SendCtl);
	kfree(priv->mpt_txfidx);

	atomic_set(&priv->buckets_out, 0);

	printk(KERN_INFO MYNAM ": %s/%s: interface down & inactive\n",
			IOC_AND_NETDEV_NAMES_s_s(dev));

	return 0;
}

/*=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=*/
/* Tx timeout handler. */
static void
mpt_lan_tx_timeout(struct net_device *dev)
{
	struct mpt_lan_priv *priv = netdev_priv(dev);
	MPT_ADAPTER *mpt_dev = priv->mpt_dev;

	if (mpt_dev->active) {
		dlprintk (("mptlan/tx_timeout: calling netif_wake_queue for %s.\n", dev->name));
		netif_wake_queue(dev);
	}
}

/*=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=*/
//static inline int
static int
mpt_lan_send_turbo(struct net_device *dev, u32 tmsg)
{
	struct mpt_lan_priv *priv = netdev_priv(dev);
	MPT_ADAPTER *mpt_dev = priv->mpt_dev;
	struct sk_buff *sent;
	unsigned long flags;
	u32 ctx;

	ctx = GET_LAN_BUFFER_CONTEXT(tmsg);
	sent = priv->SendCtl[ctx].skb;

	dev->stats.tx_packets++;
	dev->stats.tx_bytes += sent->len;

	dioprintk((KERN_INFO MYNAM ": %s/%s: @%s, skb %p sent.\n",
			IOC_AND_NETDEV_NAMES_s_s(dev),
			__func__, sent));

	priv->SendCtl[ctx].skb = NULL;
	pci_unmap_single(mpt_dev->pcidev, priv->SendCtl[ctx].dma,
			 priv->SendCtl[ctx].len, PCI_DMA_TODEVICE);
	dev_kfree_skb_irq(sent);

	spin_lock_irqsave(&priv->txfidx_lock, flags);
	priv->mpt_txfidx[++priv->mpt_txfidx_tail] = ctx;
	spin_unlock_irqrestore(&priv->txfidx_lock, flags);

	netif_wake_queue(dev);
	return 0;
}

/*=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=*/
static int
mpt_lan_send_reply(struct net_device *dev, LANSendReply_t *pSendRep)
{
	struct mpt_lan_priv *priv = netdev_priv(dev);
	MPT_ADAPTER *mpt_dev = priv->mpt_dev;
	struct sk_buff *sent;
	unsigned long flags;
	int FreeReqFrame = 0;
	u32 *pContext;
	u32 ctx;
	u8 count;

	count = pSendRep->NumberOfContexts;

	dioprintk((KERN_INFO MYNAM ": send_reply: IOCStatus: %04x\n",
		 le16_to_cpu(pSendRep->IOCStatus)));

	/* Add check for Loginfo Flag in IOCStatus */

	switch (le16_to_cpu(pSendRep->IOCStatus) & MPI_IOCSTATUS_MASK) {
	case MPI_IOCSTATUS_SUCCESS:
		dev->stats.tx_packets += count;
		break;

	case MPI_IOCSTATUS_LAN_CANCELED:
	case MPI_IOCSTATUS_LAN_TRANSMIT_ABORTED:
		break;

	case MPI_IOCSTATUS_INVALID_SGL:
		dev->stats.tx_errors += count;
		printk (KERN_ERR MYNAM ": %s/%s: ERROR - Invalid SGL sent to IOC!\n",
				IOC_AND_NETDEV_NAMES_s_s(dev));
		goto out;

	default:
		dev->stats.tx_errors += count;
		break;
	}

	pContext = &pSendRep->BufferContext;

	spin_lock_irqsave(&priv->txfidx_lock, flags);
	while (count > 0) {
		ctx = GET_LAN_BUFFER_CONTEXT(le32_to_cpu(*pContext));

		sent = priv->SendCtl[ctx].skb;
		dev->stats.tx_bytes += sent->len;

		dioprintk((KERN_INFO MYNAM ": %s/%s: @%s, skb %p sent.\n",
				IOC_AND_NETDEV_NAMES_s_s(dev),
				__func__, sent));

		priv->SendCtl[ctx].skb = NULL;
		pci_unmap_single(mpt_dev->pcidev, priv->SendCtl[ctx].dma,
				 priv->SendCtl[ctx].len, PCI_DMA_TODEVICE);
		dev_kfree_skb_irq(sent);

		priv->mpt_txfidx[++priv->mpt_txfidx_tail] = ctx;

		pContext++;
		count--;
	}
	spin_unlock_irqrestore(&priv->txfidx_lock, flags);

out:
	if (!(pSendRep->MsgFlags & MPI_MSGFLAGS_CONTINUATION_REPLY))
		FreeReqFrame = 1;

	netif_wake_queue(dev);
	return FreeReqFrame;
}

/*=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=*/
static int
mpt_lan_sdu_send (struct sk_buff *skb, struct net_device *dev)
{
	struct mpt_lan_priv *priv = netdev_priv(dev);
	MPT_ADAPTER *mpt_dev = priv->mpt_dev;
	MPT_FRAME_HDR *mf;
	LANSendRequest_t *pSendReq;
	SGETransaction32_t *pTrans;
	SGESimple64_t *pSimple;
	const unsigned char *mac;
	dma_addr_t dma;
	unsigned long flags;
	int ctx;
	u16 cur_naa = 0x1000;

	dioprintk((KERN_INFO MYNAM ": %s called, skb_addr = %p\n",
			__func__, skb));

	spin_lock_irqsave(&priv->txfidx_lock, flags);
	if (priv->mpt_txfidx_tail < 0) {
		netif_stop_queue(dev);
		spin_unlock_irqrestore(&priv->txfidx_lock, flags);

		printk (KERN_ERR "%s: no tx context available: %u\n",
			__func__, priv->mpt_txfidx_tail);
		return NETDEV_TX_BUSY;
	}

	mf = mpt_get_msg_frame(LanCtx, mpt_dev);
	if (mf == NULL) {
		netif_stop_queue(dev);
		spin_unlock_irqrestore(&priv->txfidx_lock, flags);

		printk (KERN_ERR "%s: Unable to alloc request frame\n",
			__func__);
		return NETDEV_TX_BUSY;
	}

	ctx = priv->mpt_txfidx[priv->mpt_txfidx_tail--];
	spin_unlock_irqrestore(&priv->txfidx_lock, flags);

//	dioprintk((KERN_INFO MYNAM ": %s/%s: Creating new msg frame (send).\n",
//			IOC_AND_NETDEV_NAMES_s_s(dev)));

	pSendReq = (LANSendRequest_t *) mf;

	/* Set the mac.raw pointer, since this apparently isn't getting
	 * done before we get the skb. Pull the data pointer past the mac data.
	 */
	skb_reset_mac_header(skb);
	skb_pull(skb, 12);

	dma = pci_map_single(mpt_dev->pcidev, skb->data, skb->len,
			     PCI_DMA_TODEVICE);

	priv->SendCtl[ctx].skb = skb;
	priv->SendCtl[ctx].dma = dma;
	priv->SendCtl[ctx].len = skb->len;

	/* Message Header */
	pSendReq->Reserved    = 0;
	pSendReq->Function    = MPI_FUNCTION_LAN_SEND;
	pSendReq->ChainOffset = 0;
	pSendReq->Reserved2   = 0;
	pSendReq->MsgFlags    = 0;
	pSendReq->PortNumber  = priv->pnum;

	/* Transaction Context Element */
	pTrans = (SGETransaction32_t *) pSendReq->SG_List;

	/* No Flags, 8 bytes of Details, 32bit Context (bloody turbo replies) */
	pTrans->ContextSize   = sizeof(u32);
	pTrans->DetailsLength = 2 * sizeof(u32);
	pTrans->Flags         = 0;
	pTrans->TransactionContext[0] = cpu_to_le32(ctx);

//	dioprintk((KERN_INFO MYNAM ": %s/%s: BC = %08x, skb = %p, buff = %p\n",
//			IOC_AND_NETDEV_NAMES_s_s(dev),
//			ctx, skb, skb->data));

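	/*
	 * Reviewer note: the two TransactionDetails words carry the 8 bytes
	 * of detail declared above: the NAA value in the top half of word 0,
	 * then the six destination MAC bytes packed in big-endian order.
	 */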
	mac = skb_mac_header(skb);

	pTrans->TransactionDetails[0] = cpu_to_le32((cur_naa << 16) |
						    (mac[0] <<  8) |
						    (mac[1] <<  0));
	pTrans->TransactionDetails[1] = cpu_to_le32((mac[2] << 24) |
						    (mac[3] << 16) |
						    (mac[4] <<  8) |
						    (mac[5] <<  0));

	pSimple = (SGESimple64_t *) &pTrans->TransactionDetails[2];

	/* If we ever decide to send more than one Simple SGE per LANSend, then
	   we will need to make sure that LAST_ELEMENT only gets set on the
	   last one. Otherwise, bad voodoo and evil funkiness will commence. */
	pSimple->FlagsLength = cpu_to_le32(
			((MPI_SGE_FLAGS_LAST_ELEMENT |
			  MPI_SGE_FLAGS_END_OF_BUFFER |
			  MPI_SGE_FLAGS_SIMPLE_ELEMENT |
			  MPI_SGE_FLAGS_SYSTEM_ADDRESS |
			  MPI_SGE_FLAGS_HOST_TO_IOC |
			  MPI_SGE_FLAGS_64_BIT_ADDRESSING |
			  MPI_SGE_FLAGS_END_OF_LIST) << MPI_SGE_FLAGS_SHIFT) |
			skb->len);
	pSimple->Address.Low = cpu_to_le32((u32) dma);
	if (sizeof(dma_addr_t) > sizeof(u32))
		pSimple->Address.High = cpu_to_le32((u32) ((u64) dma >> 32));
	else
		pSimple->Address.High = 0;

	mpt_put_msg_frame (LanCtx, mpt_dev, mf);
	netif_trans_update(dev);

	dioprintk((KERN_INFO MYNAM ": %s/%s: Sending packet. FlagsLength = %08x.\n",
			IOC_AND_NETDEV_NAMES_s_s(dev),
			le32_to_cpu(pSimple->FlagsLength)));

	return NETDEV_TX_OK;
}

/*=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=*/
/*
 * @priority: 0 = put it on the timer queue, 1 = put it on the immediate queue
 */
static void
mpt_lan_wake_post_buckets_task(struct net_device *dev, int priority)
{
	struct mpt_lan_priv *priv = netdev_priv(dev);

	if (test_and_set_bit(0, &priv->post_buckets_active) == 0) {
		if (priority) {
			schedule_delayed_work(&priv->post_buckets_task, 0);
		} else {
			schedule_delayed_work(&priv->post_buckets_task, 1);
			dioprintk((KERN_INFO MYNAM ": post_buckets queued on "
				   "timer.\n"));
		}
		dioprintk((KERN_INFO MYNAM ": %s/%s: Queued post_buckets task.\n",
			   IOC_AND_NETDEV_NAMES_s_s(dev)));
	}
}

/*=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=*/
static int
mpt_lan_receive_skb(struct net_device *dev, struct sk_buff *skb)
{
	struct mpt_lan_priv *priv = netdev_priv(dev);

	skb->protocol = mpt_lan_type_trans(skb, dev);

	dioprintk((KERN_INFO MYNAM ": %s/%s: Incoming packet (%d bytes) "
		 "delivered to upper level.\n",
			IOC_AND_NETDEV_NAMES_s_s(dev), skb->len));

	dev->stats.rx_bytes += skb->len;
	dev->stats.rx_packets++;

	skb->dev = dev;
	netif_rx(skb);

	dioprintk((MYNAM "/receive_skb: %d buckets remaining\n",
		 atomic_read(&priv->buckets_out)));

	if (atomic_read(&priv->buckets_out) < priv->bucketthresh)
		mpt_lan_wake_post_buckets_task(dev, 1);

	dioprintk((KERN_INFO MYNAM "/receive_post_reply: %d buckets "
		  "remaining, %d received back since sod\n",
		  atomic_read(&priv->buckets_out), priv->total_received));

	return 0;
}

/*=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=*/
//static inline int
static int
mpt_lan_receive_post_turbo(struct net_device *dev, u32 tmsg)
{
	struct mpt_lan_priv *priv = netdev_priv(dev);
	MPT_ADAPTER *mpt_dev = priv->mpt_dev;
	struct sk_buff *skb, *old_skb;
	unsigned long flags;
	u32 ctx, len;

	ctx = GET_LAN_BUCKET_CONTEXT(tmsg);
	skb = priv->RcvCtl[ctx].skb;

	len = GET_LAN_PACKET_LENGTH(tmsg);

	if (len < MPT_LAN_RX_COPYBREAK) {
		old_skb = skb;

		skb = (struct sk_buff *)dev_alloc_skb(len);
		if (!skb) {
			printk (KERN_ERR MYNAM ": %s/%s: ERROR - Can't allocate skb! (%s@%d)\n",
					IOC_AND_NETDEV_NAMES_s_s(dev),
					__FILE__, __LINE__);
			return -ENOMEM;
		}

		pci_dma_sync_single_for_cpu(mpt_dev->pcidev, priv->RcvCtl[ctx].dma,
					    priv->RcvCtl[ctx].len, PCI_DMA_FROMDEVICE);

		skb_copy_from_linear_data(old_skb, skb_put(skb, len), len);

		pci_dma_sync_single_for_device(mpt_dev->pcidev, priv->RcvCtl[ctx].dma,
					       priv->RcvCtl[ctx].len, PCI_DMA_FROMDEVICE);
		goto out;
	}

	skb_put(skb, len);

	priv->RcvCtl[ctx].skb = NULL;

	pci_unmap_single(mpt_dev->pcidev, priv->RcvCtl[ctx].dma,
			 priv->RcvCtl[ctx].len, PCI_DMA_FROMDEVICE);

out:
	spin_lock_irqsave(&priv->rxfidx_lock, flags);
	priv->mpt_rxfidx[++priv->mpt_rxfidx_tail] = ctx;
	spin_unlock_irqrestore(&priv->rxfidx_lock, flags);

	atomic_dec(&priv->buckets_out);
	priv->total_received++;

	return mpt_lan_receive_skb(dev, skb);
}

/*=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=*/
static int
mpt_lan_receive_post_free(struct net_device *dev,
			  LANReceivePostReply_t *pRecvRep)
{
	struct mpt_lan_priv *priv = netdev_priv(dev);
	MPT_ADAPTER *mpt_dev = priv->mpt_dev;
	unsigned long flags;
	struct sk_buff *skb;
	u32 ctx;
	int count;
	int i;

	count = pRecvRep->NumberOfContexts;

/**/	dlprintk((KERN_INFO MYNAM "/receive_post_reply: "
		  "IOC returned %d buckets, freeing them...\n", count));

	spin_lock_irqsave(&priv->rxfidx_lock, flags);
	for (i = 0; i < count; i++) {
		ctx = le32_to_cpu(pRecvRep->BucketContext[i]);

		skb = priv->RcvCtl[ctx].skb;

//		dlprintk((KERN_INFO MYNAM ": %s: dev_name = %s\n",
//				IOC_AND_NETDEV_NAMES_s_s(dev)));
//		dlprintk((KERN_INFO MYNAM "@rpr[2], priv = %p, buckets_out addr = %p",
//				priv, &(priv->buckets_out)));
//		dlprintk((KERN_INFO MYNAM "@rpr[2] TC + 3\n"));

		priv->RcvCtl[ctx].skb = NULL;
		pci_unmap_single(mpt_dev->pcidev, priv->RcvCtl[ctx].dma,
				 priv->RcvCtl[ctx].len, PCI_DMA_FROMDEVICE);
		dev_kfree_skb_any(skb);

		priv->mpt_rxfidx[++priv->mpt_rxfidx_tail] = ctx;
	}
	spin_unlock_irqrestore(&priv->rxfidx_lock, flags);

	atomic_sub(count, &priv->buckets_out);

//	for (i = 0; i < priv->max_buckets_out; i++)
//		if (priv->RcvCtl[i].skb != NULL)
//			dlprintk((KERN_INFO MYNAM "@rpr: bucket %03x "
//				  "is still out\n", i));

/*	dlprintk((KERN_INFO MYNAM "/receive_post_reply: freed %d buckets\n",
		  count));
*/
/**/	dlprintk((KERN_INFO MYNAM "@receive_post_reply: %d buckets "
/**/		  "remaining, %d received back since sod.\n",
/**/		  atomic_read(&priv->buckets_out), priv->total_received));
	return 0;
}

/*=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=*/
static int
mpt_lan_receive_post_reply(struct net_device *dev,
			   LANReceivePostReply_t *pRecvRep)
{
	struct mpt_lan_priv *priv = netdev_priv(dev);
	MPT_ADAPTER *mpt_dev = priv->mpt_dev;
	struct sk_buff *skb, *old_skb;
	unsigned long flags;
	u32 len, ctx, offset;
	u32 remaining = le32_to_cpu(pRecvRep->BucketsRemaining);
	int count;
	int i, l;

	dioprintk((KERN_INFO MYNAM ": mpt_lan_receive_post_reply called\n"));
	dioprintk((KERN_INFO MYNAM ": receive_post_reply: IOCStatus: %04x\n",
		 le16_to_cpu(pRecvRep->IOCStatus)));

	if ((le16_to_cpu(pRecvRep->IOCStatus) & MPI_IOCSTATUS_MASK) ==
						MPI_IOCSTATUS_LAN_CANCELED)
		return mpt_lan_receive_post_free(dev, pRecvRep);

	len = le32_to_cpu(pRecvRep->PacketLength);
	if (len == 0) {
		printk (KERN_ERR MYNAM ": %s/%s: ERROR - Got a non-TURBO "
			"ReceivePostReply w/ PacketLength zero!\n",
				IOC_AND_NETDEV_NAMES_s_s(dev));
		printk (KERN_ERR MYNAM ": MsgFlags = %02x, IOCStatus = %04x\n",
				pRecvRep->MsgFlags, le16_to_cpu(pRecvRep->IOCStatus));
		return -1;
	}

	ctx    = le32_to_cpu(pRecvRep->BucketContext[0]);
	count  = pRecvRep->NumberOfContexts;
	skb    = priv->RcvCtl[ctx].skb;

	offset = le32_to_cpu(pRecvRep->PacketOffset);
//	if (offset != 0) {
//		printk (KERN_INFO MYNAM ": %s/%s: Got a ReceivePostReply "
//			"w/ PacketOffset %u\n",
//				IOC_AND_NETDEV_NAMES_s_s(dev),
//				offset);
//	}

	dioprintk((KERN_INFO MYNAM ": %s/%s: @rpr, offset = %d, len = %d\n",
			IOC_AND_NETDEV_NAMES_s_s(dev),
			offset, len));

	if (count > 1) {
		int szrem = len;

//		dioprintk((KERN_INFO MYNAM ": %s/%s: Multiple buckets returned "
//			"for single packet, concatenating...\n",
//				IOC_AND_NETDEV_NAMES_s_s(dev)));

		skb = (struct sk_buff *)dev_alloc_skb(len);
		if (!skb) {
			printk (KERN_ERR MYNAM ": %s/%s: ERROR - Can't allocate skb! (%s@%d)\n",
					IOC_AND_NETDEV_NAMES_s_s(dev),
					__FILE__, __LINE__);
			return -ENOMEM;
		}

		spin_lock_irqsave(&priv->rxfidx_lock, flags);
		for (i = 0; i < count; i++) {

			ctx = le32_to_cpu(pRecvRep->BucketContext[i]);
			old_skb = priv->RcvCtl[ctx].skb;

			l = priv->RcvCtl[ctx].len;
			if (szrem < l)
				l = szrem;

//			dioprintk((KERN_INFO MYNAM ": %s/%s: Buckets = %d, len = %u\n",
//					IOC_AND_NETDEV_NAMES_s_s(dev),
//					i, l));

			pci_dma_sync_single_for_cpu(mpt_dev->pcidev,
						    priv->RcvCtl[ctx].dma,
						    priv->RcvCtl[ctx].len,
						    PCI_DMA_FROMDEVICE);
			skb_copy_from_linear_data(old_skb, skb_put(skb, l), l);

			pci_dma_sync_single_for_device(mpt_dev->pcidev,
						       priv->RcvCtl[ctx].dma,
						       priv->RcvCtl[ctx].len,
						       PCI_DMA_FROMDEVICE);

			priv->mpt_rxfidx[++priv->mpt_rxfidx_tail] = ctx;
			szrem -= l;
		}
		spin_unlock_irqrestore(&priv->rxfidx_lock, flags);

	} else if (len < MPT_LAN_RX_COPYBREAK) {

		old_skb = skb;

		skb = (struct sk_buff *)dev_alloc_skb(len);
		if (!skb) {
			printk (KERN_ERR MYNAM ": %s/%s: ERROR - Can't allocate skb! (%s@%d)\n",
					IOC_AND_NETDEV_NAMES_s_s(dev),
					__FILE__, __LINE__);
			return -ENOMEM;
		}

		pci_dma_sync_single_for_cpu(mpt_dev->pcidev,
					    priv->RcvCtl[ctx].dma,
					    priv->RcvCtl[ctx].len,
					    PCI_DMA_FROMDEVICE);

		skb_copy_from_linear_data(old_skb, skb_put(skb, len), len);

		pci_dma_sync_single_for_device(mpt_dev->pcidev,
					       priv->RcvCtl[ctx].dma,
					       priv->RcvCtl[ctx].len,
					       PCI_DMA_FROMDEVICE);

		spin_lock_irqsave(&priv->rxfidx_lock, flags);
		priv->mpt_rxfidx[++priv->mpt_rxfidx_tail] = ctx;
		spin_unlock_irqrestore(&priv->rxfidx_lock, flags);

	} else {
		spin_lock_irqsave(&priv->rxfidx_lock, flags);

		priv->RcvCtl[ctx].skb = NULL;

		pci_unmap_single(mpt_dev->pcidev, priv->RcvCtl[ctx].dma,
				 priv->RcvCtl[ctx].len, PCI_DMA_FROMDEVICE);
		priv->RcvCtl[ctx].dma = 0;

		priv->mpt_rxfidx[++priv->mpt_rxfidx_tail] = ctx;
		spin_unlock_irqrestore(&priv->rxfidx_lock, flags);

		skb_put(skb, len);
	}

	atomic_sub(count, &priv->buckets_out);
	priv->total_received += count;

	if (priv->mpt_rxfidx_tail >= MPT_LAN_MAX_BUCKETS_OUT) {
		printk (KERN_ERR MYNAM ": %s/%s: Yoohoo! mpt_rxfidx_tail = %d, "
			"MPT_LAN_MAX_BUCKETS_OUT = %d\n",
				IOC_AND_NETDEV_NAMES_s_s(dev),
				priv->mpt_rxfidx_tail,
				MPT_LAN_MAX_BUCKETS_OUT);

		return -1;
	}

	if (remaining == 0)
		printk (KERN_WARNING MYNAM ": %s/%s: WARNING - IOC out of buckets! "
			"(priv->buckets_out = %d)\n",
			IOC_AND_NETDEV_NAMES_s_s(dev),
			atomic_read(&priv->buckets_out));
	else if (remaining < 10)
		printk (KERN_INFO MYNAM ": %s/%s: IOC says %d buckets left. "
			"(priv->buckets_out = %d)\n",
			IOC_AND_NETDEV_NAMES_s_s(dev),
			remaining, atomic_read(&priv->buckets_out));

	if ((remaining < priv->bucketthresh) &&
	    ((atomic_read(&priv->buckets_out) - remaining) >
	     MPT_LAN_BUCKETS_REMAIN_MISMATCH_THRESH)) {

		printk (KERN_WARNING MYNAM ": Mismatch between driver's "
			"buckets_out count and fw's BucketsRemaining "
			"count has crossed the threshold, issuing a "
			"LanReset to clear the fw's hashtable. You may "
			"want to check your /var/log/messages for \"CRC "
			"error\" event notifications.\n");

		mpt_lan_reset(dev);
		mpt_lan_wake_post_buckets_task(dev, 0);
	}

	return mpt_lan_receive_skb(dev, skb);
}

/*=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=*/
/* Simple SGE's only at the moment */

static void
mpt_lan_post_receive_buckets(struct mpt_lan_priv *priv)
{
	struct net_device *dev = priv->dev;
	MPT_ADAPTER *mpt_dev = priv->mpt_dev;
	MPT_FRAME_HDR *mf;
	LANReceivePostRequest_t *pRecvReq;
	SGETransaction32_t *pTrans;
	SGESimple64_t *pSimple;
	struct sk_buff *skb;
	dma_addr_t dma;
	u32 curr, buckets, count, max;
	u32 len = (dev->mtu + dev->hard_header_len + 4);
	unsigned long flags;
	int i;

	curr = atomic_read(&priv->buckets_out);
	buckets = (priv->max_buckets_out - curr);

	dioprintk((KERN_INFO MYNAM ": %s/%s: @%s, Start_buckets = %u, buckets_out = %u\n",
			IOC_AND_NETDEV_NAMES_s_s(dev),
			__func__, buckets, curr));

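	/*
	 * Reviewer note: each request frame holds the fixed LANReceivePost
	 * header plus one 32-bit transaction element and one 64-bit simple
	 * SGE per bucket, so 'max' is the most buckets a single frame can
	 * post.
	 */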
	max = (mpt_dev->req_sz - MPT_LAN_RECEIVE_POST_REQUEST_SIZE) /
			(MPT_LAN_TRANSACTION32_SIZE + sizeof(SGESimple64_t));

	while (buckets) {
		mf = mpt_get_msg_frame(LanCtx, mpt_dev);
		if (mf == NULL) {
			printk (KERN_ERR "%s: Unable to alloc request frame\n",
				__func__);
			dioprintk((KERN_ERR "%s: %u buckets remaining\n",
				 __func__, buckets));
			goto out;
		}
		pRecvReq = (LANReceivePostRequest_t *) mf;

		i = le16_to_cpu(mf->u.frame.hwhdr.msgctxu.fld.req_idx);
		mpt_dev->RequestNB[i] = 0;
		count = buckets;
		if (count > max)
			count = max;

		pRecvReq->Function    = MPI_FUNCTION_LAN_RECEIVE;
		pRecvReq->ChainOffset = 0;
		pRecvReq->MsgFlags    = 0;
		pRecvReq->PortNumber  = priv->pnum;

		pTrans = (SGETransaction32_t *) pRecvReq->SG_List;
		pSimple = NULL;

		for (i = 0; i < count; i++) {
			int ctx;

			spin_lock_irqsave(&priv->rxfidx_lock, flags);
			if (priv->mpt_rxfidx_tail < 0) {
				printk (KERN_ERR "%s: Can't alloc context\n",
					__func__);
				spin_unlock_irqrestore(&priv->rxfidx_lock,
						       flags);
				break;
			}

			ctx = priv->mpt_rxfidx[priv->mpt_rxfidx_tail--];

			skb = priv->RcvCtl[ctx].skb;
			if (skb && (priv->RcvCtl[ctx].len != len)) {
				pci_unmap_single(mpt_dev->pcidev,
						 priv->RcvCtl[ctx].dma,
						 priv->RcvCtl[ctx].len,
						 PCI_DMA_FROMDEVICE);
				dev_kfree_skb(priv->RcvCtl[ctx].skb);
				skb = priv->RcvCtl[ctx].skb = NULL;
			}

			if (skb == NULL) {
				skb = dev_alloc_skb(len);
				if (skb == NULL) {
					printk (KERN_WARNING
						MYNAM "/%s: Can't alloc skb\n",
						__func__);
					priv->mpt_rxfidx[++priv->mpt_rxfidx_tail] = ctx;
					spin_unlock_irqrestore(&priv->rxfidx_lock, flags);
					break;
				}

				dma = pci_map_single(mpt_dev->pcidev, skb->data,
						     len, PCI_DMA_FROMDEVICE);

				priv->RcvCtl[ctx].skb = skb;
				priv->RcvCtl[ctx].dma = dma;
				priv->RcvCtl[ctx].len = len;
			}

			spin_unlock_irqrestore(&priv->rxfidx_lock, flags);

			pTrans->ContextSize   = sizeof(u32);
			pTrans->DetailsLength = 0;
			pTrans->Flags         = 0;
			pTrans->TransactionContext[0] = cpu_to_le32(ctx);

			pSimple = (SGESimple64_t *) pTrans->TransactionDetails;

			pSimple->FlagsLength = cpu_to_le32(
				((MPI_SGE_FLAGS_END_OF_BUFFER |
				  MPI_SGE_FLAGS_SIMPLE_ELEMENT |
				  MPI_SGE_FLAGS_64_BIT_ADDRESSING) << MPI_SGE_FLAGS_SHIFT) | len);
			pSimple->Address.Low = cpu_to_le32((u32) priv->RcvCtl[ctx].dma);
			if (sizeof(dma_addr_t) > sizeof(u32))
				pSimple->Address.High = cpu_to_le32((u32) ((u64) priv->RcvCtl[ctx].dma >> 32));
			else
				pSimple->Address.High = 0;

			pTrans = (SGETransaction32_t *) (pSimple + 1);
		}

		if (pSimple == NULL) {
/**/			printk (KERN_WARNING MYNAM "/%s: No buckets posted\n",
/**/				__func__);
			mpt_free_msg_frame(mpt_dev, mf);
			goto out;
		}

		pSimple->FlagsLength |= cpu_to_le32(MPI_SGE_FLAGS_END_OF_LIST << MPI_SGE_FLAGS_SHIFT);

		pRecvReq->BucketCount = cpu_to_le32(i);

/*	printk(KERN_INFO MYNAM ": posting buckets\n   ");
 *	for (i = 0; i < j + 2; i ++)
 *	    printk (" %08x", le32_to_cpu(msg[i]));
 *	printk ("\n");
 */

		mpt_put_msg_frame(LanCtx, mpt_dev, mf);

		priv->total_posted += i;
		buckets -= i;
		atomic_add(i, &priv->buckets_out);
	}

out:
	dioprintk((KERN_INFO MYNAM "/%s: End_buckets = %u, priv->buckets_out = %u\n",
		  __func__, buckets, atomic_read(&priv->buckets_out)));
	dioprintk((KERN_INFO MYNAM "/%s: Posted %u buckets and received %u back\n",
		  __func__, priv->total_posted, priv->total_received));

	clear_bit(0, &priv->post_buckets_active);
}

static void
mpt_lan_post_receive_buckets_work(struct work_struct *work)
{
	mpt_lan_post_receive_buckets(container_of(work, struct mpt_lan_priv,
						  post_buckets_task.work));
}

static const struct net_device_ops mpt_netdev_ops = {
	.ndo_open       = mpt_lan_open,
	.ndo_stop       = mpt_lan_close,
	.ndo_start_xmit = mpt_lan_sdu_send,
	.ndo_tx_timeout = mpt_lan_tx_timeout,
};

/*=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=*/
static struct net_device *
mpt_register_lan_device (MPT_ADAPTER *mpt_dev, int pnum)
{
	struct net_device *dev;
	struct mpt_lan_priv *priv;
	u8 HWaddr[FC_ALEN], *a;

	dev = alloc_fcdev(sizeof(struct mpt_lan_priv));
	if (!dev)
		return NULL;

	dev->mtu = MPT_LAN_MTU;

	priv = netdev_priv(dev);

	priv->dev = dev;
	priv->mpt_dev = mpt_dev;
	priv->pnum = pnum;

	INIT_DELAYED_WORK(&priv->post_buckets_task,
			  mpt_lan_post_receive_buckets_work);
	priv->post_buckets_active = 0;

	dlprintk((KERN_INFO MYNAM "@%d: bucketlen = %d\n",
			__LINE__, dev->mtu + dev->hard_header_len + 4));

	atomic_set(&priv->buckets_out, 0);
	priv->total_posted = 0;
	priv->total_received = 0;
	priv->max_buckets_out = max_buckets_out;
	if (mpt_dev->pfacts[0].MaxLanBuckets < max_buckets_out)
		priv->max_buckets_out = mpt_dev->pfacts[0].MaxLanBuckets;

	dlprintk((KERN_INFO MYNAM "@%d: MaxLanBuckets=%d, max_buckets_out/priv=%d/%d\n",
			__LINE__,
			mpt_dev->pfacts[0].MaxLanBuckets,
			max_buckets_out,
			priv->max_buckets_out));

	priv->bucketthresh = priv->max_buckets_out * 2 / 3;
	spin_lock_init(&priv->txfidx_lock);
	spin_lock_init(&priv->rxfidx_lock);

	/*  Grab pre-fetched LANPage1 stuff. :-) */
	a = (u8 *) &mpt_dev->lan_cnfg_page1.HardwareAddressLow;

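	/*
	 * Reviewer note (assumption): the config page appears to store the
	 * address low byte first, so the six bytes are reversed here to get
	 * the FC_ALEN hardware address in wire order.
	 */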
	HWaddr[0] = a[5];
	HWaddr[1] = a[4];
	HWaddr[2] = a[3];
	HWaddr[3] = a[2];
	HWaddr[4] = a[1];
	HWaddr[5] = a[0];

	dev->addr_len = FC_ALEN;
	memcpy(dev->dev_addr, HWaddr, FC_ALEN);
	memset(dev->broadcast, 0xff, FC_ALEN);

	/* The Tx queue is 127 deep on the 909.
	 * Give ourselves some breathing room.
	 */
	priv->tx_max_out = (tx_max_out_p <= MPT_TX_MAX_OUT_LIM) ?
			    tx_max_out_p : MPT_TX_MAX_OUT_LIM;

	dev->netdev_ops = &mpt_netdev_ops;
	dev->watchdog_timeo = MPT_LAN_TX_TIMEOUT;

	/* MTU range: 96 - 65280 */
	dev->min_mtu = MPT_LAN_MIN_MTU;
	dev->max_mtu = MPT_LAN_MAX_MTU;

	dlprintk((KERN_INFO MYNAM ": Finished registering dev "
		"and setting initial values\n"));

	if (register_netdev(dev) != 0) {
		free_netdev(dev);
		dev = NULL;
	}
	return dev;
}

static int
mptlan_probe(struct pci_dev *pdev, const struct pci_device_id *id)
{
	MPT_ADAPTER		*ioc = pci_get_drvdata(pdev);
	struct net_device	*dev;
	int			i;

	for (i = 0; i < ioc->facts.NumberOfPorts; i++) {
		printk(KERN_INFO MYNAM ": %s: PortNum=%x, "
		       "ProtocolFlags=%02Xh (%c%c%c%c)\n",
		       ioc->name, ioc->pfacts[i].PortNumber,
		       ioc->pfacts[i].ProtocolFlags,
		       MPT_PROTOCOL_FLAGS_c_c_c_c(
			       ioc->pfacts[i].ProtocolFlags));

		if (!(ioc->pfacts[i].ProtocolFlags &
					MPI_PORTFACTS_PROTOCOL_LAN)) {
			printk(KERN_INFO MYNAM ": %s: Hmmm... LAN protocol "
			       "seems to be disabled on this adapter port!\n",
			       ioc->name);
			continue;
		}

		dev = mpt_register_lan_device(ioc, i);
		if (!dev) {
			printk(KERN_ERR MYNAM ": %s: Unable to register "
			       "port%d as a LAN device\n", ioc->name,
			       ioc->pfacts[i].PortNumber);
			continue;
		}

		printk(KERN_INFO MYNAM ": %s: Fusion MPT LAN device "
		       "registered as '%s'\n", ioc->name, dev->name);
		printk(KERN_INFO MYNAM ": %s/%s: "
		       "LanAddr = %pM\n",
		       IOC_AND_NETDEV_NAMES_s_s(dev),
		       dev->dev_addr);

		ioc->netdev = dev;

		return 0;
	}

	return -ENODEV;
}

static void
mptlan_remove(struct pci_dev *pdev)
{
	MPT_ADAPTER		*ioc = pci_get_drvdata(pdev);
	struct net_device	*dev = ioc->netdev;

	if (dev != NULL) {
		unregister_netdev(dev);
		free_netdev(dev);
	}
}

static struct mpt_pci_driver mptlan_driver = {
	.probe		= mptlan_probe,
	.remove		= mptlan_remove,
};

static int __init mpt_lan_init (void)
{
	show_mptmod_ver(LANAME, LANVER);

	LanCtx = mpt_register(lan_reply, MPTLAN_DRIVER,
				"lan_reply");
	if (LanCtx <= 0) {
		printk (KERN_ERR MYNAM ": Failed to register with MPT base driver\n");
		return -EBUSY;
	}

	dlprintk((KERN_INFO MYNAM ": assigned context of %d\n", LanCtx));

	if (mpt_reset_register(LanCtx, mpt_lan_ioc_reset)) {
		printk(KERN_ERR MYNAM ": Eieee! unable to register a reset "
		       "handler with mptbase! The world is at an end! "
		       "Everything is fading to black! Goodbye.\n");
		return -EBUSY;
	}

	dlprintk((KERN_INFO MYNAM ": Registered for IOC reset notifications\n"));

	mpt_device_driver_register(&mptlan_driver, MPTLAN_DRIVER);
	return 0;
}

static void __exit mpt_lan_exit(void)
{
	mpt_device_driver_deregister(MPTLAN_DRIVER);
	mpt_reset_deregister(LanCtx);

	if (LanCtx) {
		mpt_deregister(LanCtx);
		LanCtx = MPT_MAX_PROTOCOL_DRIVERS;
	}
}

module_init(mpt_lan_init);
module_exit(mpt_lan_exit);

/*=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=*/
static unsigned short
mpt_lan_type_trans(struct sk_buff *skb, struct net_device *dev)
{
	struct mpt_lan_ohdr *fch = (struct mpt_lan_ohdr *)skb->data;
	struct fcllc *fcllc;

	skb_reset_mac_header(skb);
	skb_pull(skb, sizeof(struct mpt_lan_ohdr));

	if (fch->dtype == htons(0xffff)) {
		u32 *p = (u32 *) fch;

		swab32s(p + 0);
		swab32s(p + 1);
		swab32s(p + 2);
		swab32s(p + 3);

		printk (KERN_WARNING MYNAM ": %s: WARNING - Broadcast swap F/W bug detected!\n",
				NETDEV_PTR_TO_IOC_NAME_s(dev));
		printk (KERN_WARNING MYNAM ": Please update sender @ MAC_addr = %pM\n",
				fch->saddr);
	}

	if (*fch->daddr & 1) {
		if (!memcmp(fch->daddr, dev->broadcast, FC_ALEN)) {
			skb->pkt_type = PACKET_BROADCAST;
		} else {
			skb->pkt_type = PACKET_MULTICAST;
		}
	} else {
		if (memcmp(fch->daddr, dev->dev_addr, FC_ALEN)) {
			skb->pkt_type = PACKET_OTHERHOST;
		} else {
			skb->pkt_type = PACKET_HOST;
		}
	}

	fcllc = (struct fcllc *)skb->data;

	/* Strip the SNAP header from ARP packets since we don't
	 * pass them through to the 802.2/SNAP layers.
	 */
	if (fcllc->dsap == EXTENDED_SAP &&
		(fcllc->ethertype == htons(ETH_P_IP) ||
		 fcllc->ethertype == htons(ETH_P_ARP))) {
		skb_pull(skb, sizeof(struct fcllc));
		return fcllc->ethertype;
	}

	return htons(ETH_P_802_2);
}

/*=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=*/
1538