/*
 *  linux/drivers/message/fusion/mptlan.c
 *      IP Over Fibre Channel device driver.
 *      For use with PCI chip/adapter(s):
 *          LSIFC9xx/LSI409xx Fibre Channel
 *      running LSI Logic Fusion MPT (Message Passing Technology) firmware.
 *
 *  Credits:
 *      This driver would not exist if not for Alan Cox's development
 *      of the linux i2o driver.
 *
 *      Special thanks goes to the I2O LAN driver people at the
 *      University of Helsinki, who, unbeknownst to them, provided
 *      the inspiration and initial structure for this driver.
 *
 *      A huge debt of gratitude is owed to David S. Miller (DaveM)
 *      for fixing much of the stupid and broken stuff in the early
 *      driver while porting to sparc64 platform.  THANK YOU!
 *
 *      A really huge debt of gratitude is owed to Eddie C. Dost
 *      for gobs of hard work fixing and optimizing LAN code.
 *      THANK YOU!
 *
 *      (see also mptbase.c)
 *
 *  Copyright (c) 2000-2004 LSI Logic Corporation
 *  Originally By: Noah Romer
 *  (mailto:mpt_linux_developer@lsil.com)
 *
 *  $Id: mptlan.c,v 1.53 2002/10/17 20:15:58 pdelaney Exp $
 */
/*=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=*/
/*
    This program is free software; you can redistribute it and/or modify
    it under the terms of the GNU General Public License as published by
    the Free Software Foundation; version 2 of the License.

    This program is distributed in the hope that it will be useful,
    but WITHOUT ANY WARRANTY; without even the implied warranty of
    MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
    GNU General Public License for more details.

    NO WARRANTY
    THE PROGRAM IS PROVIDED ON AN "AS IS" BASIS, WITHOUT WARRANTIES OR
    CONDITIONS OF ANY KIND, EITHER EXPRESS OR IMPLIED INCLUDING, WITHOUT
    LIMITATION, ANY WARRANTIES OR CONDITIONS OF TITLE, NON-INFRINGEMENT,
    MERCHANTABILITY OR FITNESS FOR A PARTICULAR PURPOSE. Each Recipient is
    solely responsible for determining the appropriateness of using and
    distributing the Program and assumes all risks associated with its
    exercise of rights under this Agreement, including but not limited to
    the risks and costs of program errors, damage to or loss of data,
    programs or equipment, and unavailability or interruption of operations.

    DISCLAIMER OF LIABILITY
    NEITHER RECIPIENT NOR ANY CONTRIBUTORS SHALL HAVE ANY LIABILITY FOR ANY
    DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
    DAMAGES (INCLUDING WITHOUT LIMITATION LOST PROFITS), HOWEVER CAUSED AND
    ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR
    TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE
    USE OR DISTRIBUTION OF THE PROGRAM OR THE EXERCISE OF ANY RIGHTS GRANTED
    HEREUNDER, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGES.

    You should have received a copy of the GNU General Public License
    along with this program; if not, write to the Free Software
    Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA  02111-1307  USA
*/
/*=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=*/
/*
 * Define statements used for debugging
 */
//#define MPT_LAN_IO_DEBUG

/*=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=*/

#include "mptlan.h"
#include <linux/init.h>
#include <linux/module.h>
#include <linux/fs.h>

#define MYNAM   "mptlan"

MODULE_LICENSE("GPL");

/*=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=*/
/*
 * MPT LAN message sizes without variable part.
 */
#define MPT_LAN_RECEIVE_POST_REQUEST_SIZE \
        (sizeof(LANReceivePostRequest_t) - sizeof(SGE_MPI_UNION))

#define MPT_LAN_TRANSACTION32_SIZE \
        (sizeof(SGETransaction32_t) - sizeof(u32))

/*
 *  Fusion MPT LAN private structures
 */

struct NAA_Hosed {
        u16 NAA;
        u8 ieee[FC_ALEN];
        struct NAA_Hosed *next;
};

struct BufferControl {
        struct sk_buff  *skb;
        dma_addr_t      dma;
        unsigned int    len;
};

struct mpt_lan_priv {
        MPT_ADAPTER *mpt_dev;
        u8 pnum; /* Port number in the IOC. This is not a Unix network port! */

        atomic_t buckets_out;           /* number of unused buckets on IOC */
        int bucketthresh;               /* Send more when this many left */

        int *mpt_txfidx; /* Free Tx Context list */
        int mpt_txfidx_tail;
        spinlock_t txfidx_lock;

        int *mpt_rxfidx; /* Free Rx Context list */
        int mpt_rxfidx_tail;
        spinlock_t rxfidx_lock;

        struct BufferControl *RcvCtl;   /* Receive BufferControl structs */
        struct BufferControl *SendCtl;  /* Send BufferControl structs */

        int max_buckets_out;            /* Max buckets to send to IOC */
        int tx_max_out;                 /* IOC's Tx queue len */

        u32 total_posted;
        u32 total_received;
        struct net_device_stats stats;  /* Per device statistics */

        struct work_struct post_buckets_task;
        unsigned long post_buckets_active;
};

struct mpt_lan_ohdr {
        u16     dtype;
        u8      daddr[FC_ALEN];
        u16     stype;
        u8      saddr[FC_ALEN];
};

/*=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=*/
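/*
 * Implementation note (added commentary, inferred from the code below):
 * mpt_txfidx[] and mpt_rxfidx[] are used as simple LIFO stacks of free
 * context indices, with *_tail as the top-of-stack index (-1 == empty).
 * Under the matching spinlock the idiom throughout the driver is:
 *
 *      fidx[++tail] = ctx;             // push a context back on the stack
 *      ctx = fidx[tail--];             // pop a free context
 *
 * A context is just an index into SendCtl[]/RcvCtl[], so the 32-bit value
 * handed to the IOC in a TransactionContext maps straight back to its
 * BufferControl entry when the reply comes in.
 */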
/*
 *  Forward protos...
 */
static int  lan_reply (MPT_ADAPTER *ioc, MPT_FRAME_HDR *mf,
                       MPT_FRAME_HDR *reply);
static int  mpt_lan_open(struct net_device *dev);
static int  mpt_lan_reset(struct net_device *dev);
static int  mpt_lan_close(struct net_device *dev);
static void mpt_lan_post_receive_buckets(void *dev_id);
static void mpt_lan_wake_post_buckets_task(struct net_device *dev,
                                           int priority);
static int  mpt_lan_receive_post_turbo(struct net_device *dev, u32 tmsg);
static int  mpt_lan_receive_post_reply(struct net_device *dev,
                                       LANReceivePostReply_t *pRecvRep);
static int  mpt_lan_send_turbo(struct net_device *dev, u32 tmsg);
static int  mpt_lan_send_reply(struct net_device *dev,
                               LANSendReply_t *pSendRep);
static int  mpt_lan_ioc_reset(MPT_ADAPTER *ioc, int reset_phase);
static int  mpt_lan_event_process(MPT_ADAPTER *ioc, EventNotificationReply_t *pEvReply);
static unsigned short mpt_lan_type_trans(struct sk_buff *skb,
                                         struct net_device *dev);

/*=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=*/
/*
 *  Fusion MPT LAN private data
 */
static int LanCtx = -1;

static u32 max_buckets_out = 127;
static u32 tx_max_out_p = 127 - 16;

#ifdef QLOGIC_NAA_WORKAROUND
static struct NAA_Hosed *mpt_bad_naa = NULL;
DEFINE_RWLOCK(bad_naa_lock);
#endif

/*=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=*/
/*
 *  Fusion MPT LAN external data
 */
extern int mpt_lan_index;

/*=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=*/
/**
 *      lan_reply - Handle all data sent from the hardware.
 *      @ioc: Pointer to MPT_ADAPTER structure
 *      @mf: Pointer to original MPT request frame (NULL if TurboReply)
 *      @reply: Pointer to MPT reply frame
 *
 *      Returns 1 indicating original alloc'd request frame ptr
 *      should be freed, or 0 if it shouldn't.
 */
static int
lan_reply (MPT_ADAPTER *ioc, MPT_FRAME_HDR *mf, MPT_FRAME_HDR *reply)
{
        struct net_device *dev = ioc->netdev;
        int FreeReqFrame = 0;

        dioprintk((KERN_INFO MYNAM ": %s/%s: Got reply.\n",
                  IOC_AND_NETDEV_NAMES_s_s(dev)));

//      dioprintk((KERN_INFO MYNAM "@lan_reply: mf = %p, reply = %p\n",
//                      mf, reply));

        if (mf == NULL) {
                u32 tmsg = CAST_PTR_TO_U32(reply);

                dioprintk((KERN_INFO MYNAM ": %s/%s: @lan_reply, tmsg %08x\n",
                                IOC_AND_NETDEV_NAMES_s_s(dev),
                                tmsg));

                switch (GET_LAN_FORM(tmsg)) {

                // NOTE!  (Optimization) First case here is now caught in
                //  mptbase.c::mpt_interrupt() routine and callback here
                //  is now skipped for this case!  20001218 -sralston
#if 0
                case LAN_REPLY_FORM_MESSAGE_CONTEXT:
//                      dioprintk((KERN_INFO MYNAM "/lan_reply: "
//                                "MessageContext turbo reply received\n"));
                        FreeReqFrame = 1;
                        break;
#endif

                case LAN_REPLY_FORM_SEND_SINGLE:
//                      dioprintk((MYNAM "/lan_reply: "
//                                "calling mpt_lan_send_reply (turbo)\n"));

                        // Potential BUG here?  -sralston
                        //      FreeReqFrame = mpt_lan_send_turbo(dev, tmsg);
                        //  If/when mpt_lan_send_turbo would return 1 here,
                        //  calling routine (mptbase.c|mpt_interrupt)
                        //  would Oops because mf has already been set
                        //  to NULL.  So after return from this func,
                        //  mpt_interrupt() will attempt to put (NULL) mf ptr
                        //  item back onto its adapter FreeQ - Oops!:-(
                        //  It's Ok, since mpt_lan_send_turbo() *currently*
                        //  always returns 0, but..., just in case:

                        (void) mpt_lan_send_turbo(dev, tmsg);
                        FreeReqFrame = 0;

                        break;

                case LAN_REPLY_FORM_RECEIVE_SINGLE:
//                      dioprintk((KERN_INFO MYNAM "@lan_reply: "
//                                "rcv-Turbo = %08x\n", tmsg));
                        mpt_lan_receive_post_turbo(dev, tmsg);
                        break;

                default:
                        printk (KERN_ERR MYNAM "/lan_reply: Got a turbo reply "
                                "that I don't know what to do with\n");

                        /* CHECKME!  Hmmm...  FreeReqFrame is 0 here; is that right? */

                        break;
                }

                return FreeReqFrame;
        }

//      msg = (u32 *) reply;
//      dioprintk((KERN_INFO MYNAM "@lan_reply: msg = %08x %08x %08x %08x\n",
//                le32_to_cpu(msg[0]), le32_to_cpu(msg[1]),
//                le32_to_cpu(msg[2]), le32_to_cpu(msg[3])));
//      dioprintk((KERN_INFO MYNAM "@lan_reply: Function = %02xh\n",
//                reply->u.hdr.Function));

        switch (reply->u.hdr.Function) {

        case MPI_FUNCTION_LAN_SEND:
        {
                LANSendReply_t *pSendRep;

                pSendRep = (LANSendReply_t *) reply;
                FreeReqFrame = mpt_lan_send_reply(dev, pSendRep);
                break;
        }

        case MPI_FUNCTION_LAN_RECEIVE:
        {
                LANReceivePostReply_t *pRecvRep;

                pRecvRep = (LANReceivePostReply_t *) reply;
                if (pRecvRep->NumberOfContexts) {
                        mpt_lan_receive_post_reply(dev, pRecvRep);
                        if (!(pRecvRep->MsgFlags & MPI_MSGFLAGS_CONTINUATION_REPLY))
                                FreeReqFrame = 1;
                } else
                        dioprintk((KERN_INFO MYNAM "@lan_reply: zero context "
                                  "ReceivePostReply received.\n"));
                break;
        }

        case MPI_FUNCTION_LAN_RESET:
                /* Just a default reply. Might want to check it to
                 * make sure that everything went ok.
                 */
                FreeReqFrame = 1;
                break;

        case MPI_FUNCTION_EVENT_NOTIFICATION:
        case MPI_FUNCTION_EVENT_ACK:
                /*  UPDATE!  20010120 -sralston
                 *  _EVENT_NOTIFICATION should NOT come down this path any more.
                 *  Should be routed to mpt_lan_event_process(), but just in case...
                 */
                FreeReqFrame = 1;
                break;

        default:
                printk (KERN_ERR MYNAM "/lan_reply: Got a non-turbo "
                        "reply that I don't know what to do with\n");

                /* CHECKME!  Hmmm...  FreeReqFrame is 0 here; is that right? */
                FreeReqFrame = 1;

                break;
        }

        return FreeReqFrame;
}

/*=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=*/
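/*
 * Note on reply flavors (summarizing the dispatch logic above): "turbo"
 * replies arrive with mf == NULL and pack everything into the 32-bit reply
 * value itself (form plus context, decoded by the GET_LAN_* macros from
 * mptlan.h), while full replies are real MPT frames dispatched on their
 * Function code.  The return value tells mptbase whether the original
 * request frame may be put back on the adapter's free queue.
 */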
static int
mpt_lan_ioc_reset(MPT_ADAPTER *ioc, int reset_phase)
{
        struct net_device *dev = ioc->netdev;
        struct mpt_lan_priv *priv = netdev_priv(dev);

        dlprintk((KERN_INFO MYNAM ": IOC %s_reset routed to LAN driver!\n",
                        reset_phase==MPT_IOC_SETUP_RESET ? "setup" : (
                        reset_phase==MPT_IOC_PRE_RESET ? "pre" : "post")));

        if (priv->mpt_rxfidx == NULL)
                return (1);

        if (reset_phase == MPT_IOC_SETUP_RESET) {
                ;
        } else if (reset_phase == MPT_IOC_PRE_RESET) {
                int i;
                unsigned long flags;

                netif_stop_queue(dev);

                dlprintk ((KERN_INFO "mptlan/ioc_reset: called netif_stop_queue for %s.\n", dev->name));

                atomic_set(&priv->buckets_out, 0);

                /* Reset Rx Free Tail index and re-populate the queue. */
                spin_lock_irqsave(&priv->rxfidx_lock, flags);
                priv->mpt_rxfidx_tail = -1;
                for (i = 0; i < priv->max_buckets_out; i++)
                        priv->mpt_rxfidx[++priv->mpt_rxfidx_tail] = i;
                spin_unlock_irqrestore(&priv->rxfidx_lock, flags);
        } else {
                mpt_lan_post_receive_buckets(dev);
                netif_wake_queue(dev);
        }

        return 1;
}

/*=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=*/
static int
mpt_lan_event_process(MPT_ADAPTER *ioc, EventNotificationReply_t *pEvReply)
{
        dlprintk((KERN_INFO MYNAM ": MPT event routed to LAN driver!\n"));

        switch (le32_to_cpu(pEvReply->Event)) {
        case MPI_EVENT_NONE:                    /* 00 */
        case MPI_EVENT_LOG_DATA:                /* 01 */
        case MPI_EVENT_STATE_CHANGE:            /* 02 */
        case MPI_EVENT_UNIT_ATTENTION:          /* 03 */
        case MPI_EVENT_IOC_BUS_RESET:           /* 04 */
        case MPI_EVENT_EXT_BUS_RESET:           /* 05 */
        case MPI_EVENT_RESCAN:                  /* 06 */
                /* Ok, do we need to do anything here? As far as
                   I can tell, this is when a new device gets added
                   to the loop. */
        case MPI_EVENT_LINK_STATUS_CHANGE:      /* 07 */
        case MPI_EVENT_LOOP_STATE_CHANGE:       /* 08 */
        case MPI_EVENT_LOGOUT:                  /* 09 */
        case MPI_EVENT_EVENT_CHANGE:            /* 0A */
        default:
                break;
        }

        /*
         *  NOTE: pEvent->AckRequired handling now done in mptbase.c;
         *  Do NOT do it here now!
         */

        return 1;
}

/*=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=*/
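/*
 * Reset-phase summary for mpt_lan_ioc_reset above (added commentary): on
 * PRE_RESET the Tx queue is stopped, buckets_out is zeroed and every Rx
 * context index is pushed back onto the free stack, on the assumption that
 * the IOC drops all posted buckets across a hard reset; on POST_RESET the
 * buckets are re-posted and the queue restarted.
 */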
static int
mpt_lan_open(struct net_device *dev)
{
        struct mpt_lan_priv *priv = netdev_priv(dev);
        int i;

        if (mpt_lan_reset(dev) != 0) {
                MPT_ADAPTER *mpt_dev = priv->mpt_dev;

                printk (KERN_WARNING MYNAM "/lan_open: lan_reset failed.");

                if (mpt_dev->active)
                        printk ("The ioc is active. Perhaps it needs to be"
                                " reset?\n");
                else
                        printk ("The ioc is inactive, most likely in the "
                                "process of being reset. Please try again in "
                                "a moment.\n");
        }

        priv->mpt_txfidx = kmalloc(priv->tx_max_out * sizeof(int), GFP_KERNEL);
        if (priv->mpt_txfidx == NULL)
                goto out;
        priv->mpt_txfidx_tail = -1;

        priv->SendCtl = kmalloc(priv->tx_max_out * sizeof(struct BufferControl),
                                GFP_KERNEL);
        if (priv->SendCtl == NULL)
                goto out_mpt_txfidx;
        for (i = 0; i < priv->tx_max_out; i++) {
                memset(&priv->SendCtl[i], 0, sizeof(struct BufferControl));
                priv->mpt_txfidx[++priv->mpt_txfidx_tail] = i;
        }

        dlprintk((KERN_INFO MYNAM "@lo: Finished initializing SendCtl\n"));

        priv->mpt_rxfidx = kmalloc(priv->max_buckets_out * sizeof(int),
                                   GFP_KERNEL);
        if (priv->mpt_rxfidx == NULL)
                goto out_SendCtl;
        priv->mpt_rxfidx_tail = -1;

        priv->RcvCtl = kmalloc(priv->max_buckets_out *
                               sizeof(struct BufferControl),
                               GFP_KERNEL);
        if (priv->RcvCtl == NULL)
                goto out_mpt_rxfidx;
        for (i = 0; i < priv->max_buckets_out; i++) {
                memset(&priv->RcvCtl[i], 0, sizeof(struct BufferControl));
                priv->mpt_rxfidx[++priv->mpt_rxfidx_tail] = i;
        }

/**/    dlprintk((KERN_INFO MYNAM "/lo: txfidx contains - "));
/**/    for (i = 0; i < priv->tx_max_out; i++)
/**/            dlprintk((" %xh", priv->mpt_txfidx[i]));
/**/    dlprintk(("\n"));

        dlprintk((KERN_INFO MYNAM "/lo: Finished initializing RcvCtl\n"));

        mpt_lan_post_receive_buckets(dev);
        printk(KERN_INFO MYNAM ": %s/%s: interface up & active\n",
                        IOC_AND_NETDEV_NAMES_s_s(dev));

        if (mpt_event_register(LanCtx, mpt_lan_event_process) != 0) {
                printk (KERN_WARNING MYNAM "/lo: Unable to register for Event"
                        " Notifications. This is a bad thing! We're not going "
                        "to go ahead, but I'd be leery of system stability at "
                        "this point.\n");
        }

        netif_start_queue(dev);
        dlprintk((KERN_INFO MYNAM "/lo: Done.\n"));

        return 0;
out_mpt_rxfidx:
        kfree(priv->mpt_rxfidx);
        priv->mpt_rxfidx = NULL;
out_SendCtl:
        kfree(priv->SendCtl);
        priv->SendCtl = NULL;
out_mpt_txfidx:
        kfree(priv->mpt_txfidx);
        priv->mpt_txfidx = NULL;
out:    return -ENOMEM;
}

/*=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=*/
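/*
 * The unwind labels in mpt_lan_open above run in reverse order of
 * allocation, so a failure at any step frees exactly what has been set up
 * so far and leaves each freed pointer NULL for the next open attempt.
 */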
/* Send a LanReset message to the FW. This should result in the FW returning
   any buckets it still has. */
static int
mpt_lan_reset(struct net_device *dev)
{
        MPT_FRAME_HDR *mf;
        LANResetRequest_t *pResetReq;
        struct mpt_lan_priv *priv = netdev_priv(dev);

        mf = mpt_get_msg_frame(LanCtx, priv->mpt_dev);

        if (mf == NULL) {
/*              dlprintk((KERN_ERR MYNAM "/reset: Evil funkiness abounds! "
                "Unable to allocate a request frame.\n"));
*/
                return -1;
        }

        pResetReq = (LANResetRequest_t *) mf;

        pResetReq->Function     = MPI_FUNCTION_LAN_RESET;
        pResetReq->ChainOffset  = 0;
        pResetReq->Reserved     = 0;
        pResetReq->PortNumber   = priv->pnum;
        pResetReq->MsgFlags     = 0;
        pResetReq->Reserved2    = 0;

        mpt_put_msg_frame(LanCtx, priv->mpt_dev, mf);

        return 0;
}

/*=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=*/
static int
mpt_lan_close(struct net_device *dev)
{
        struct mpt_lan_priv *priv = netdev_priv(dev);
        MPT_ADAPTER *mpt_dev = priv->mpt_dev;
        unsigned int timeout;
        int i;

        dlprintk((KERN_INFO MYNAM ": mpt_lan_close called\n"));

        mpt_event_deregister(LanCtx);

        dlprintk((KERN_INFO MYNAM ":lan_close: Posted %d buckets "
                  "since driver was loaded, %d still out\n",
                  priv->total_posted, atomic_read(&priv->buckets_out)));

        netif_stop_queue(dev);

        mpt_lan_reset(dev);

        /* Wait up to ~2 seconds (2*HZ one-jiffy sleeps) for the IOC to
         * hand the outstanding buckets back before tearing them down. */
        timeout = 2 * HZ;
        while (atomic_read(&priv->buckets_out) && --timeout) {
                set_current_state(TASK_INTERRUPTIBLE);
                schedule_timeout(1);
        }

        for (i = 0; i < priv->max_buckets_out; i++) {
                if (priv->RcvCtl[i].skb != NULL) {
/**/                    dlprintk((KERN_INFO MYNAM "/lan_close: bucket %05x "
/**/                              "is still out\n", i));
                        pci_unmap_single(mpt_dev->pcidev, priv->RcvCtl[i].dma,
                                         priv->RcvCtl[i].len,
                                         PCI_DMA_FROMDEVICE);
                        dev_kfree_skb(priv->RcvCtl[i].skb);
                }
        }

        kfree(priv->RcvCtl);
        kfree(priv->mpt_rxfidx);

        for (i = 0; i < priv->tx_max_out; i++) {
                if (priv->SendCtl[i].skb != NULL) {
                        pci_unmap_single(mpt_dev->pcidev, priv->SendCtl[i].dma,
                                         priv->SendCtl[i].len,
                                         PCI_DMA_TODEVICE);
                        dev_kfree_skb(priv->SendCtl[i].skb);
                }
        }

        kfree(priv->SendCtl);
        kfree(priv->mpt_txfidx);

        atomic_set(&priv->buckets_out, 0);

        printk(KERN_INFO MYNAM ": %s/%s: interface down & inactive\n",
               IOC_AND_NETDEV_NAMES_s_s(dev));

        return 0;
}

/*=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=*/
static struct net_device_stats *
mpt_lan_get_stats(struct net_device *dev)
{
        struct mpt_lan_priv *priv = netdev_priv(dev);

        return (struct net_device_stats *) &priv->stats;
}

/*=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=*/
static int
mpt_lan_change_mtu(struct net_device *dev, int new_mtu)
{
        if ((new_mtu < MPT_LAN_MIN_MTU) || (new_mtu > MPT_LAN_MAX_MTU))
                return -EINVAL;
        dev->mtu = new_mtu;
        return 0;
}

/*=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=*/
/* Tx timeout handler. */
static void
mpt_lan_tx_timeout(struct net_device *dev)
{
        struct mpt_lan_priv *priv = netdev_priv(dev);
        MPT_ADAPTER *mpt_dev = priv->mpt_dev;

        if (mpt_dev->active) {
                dlprintk (("mptlan/tx_timeout: calling netif_wake_queue for %s.\n", dev->name));
                netif_wake_queue(dev);
        }
}

/*=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=*/
//static inline int
static int
mpt_lan_send_turbo(struct net_device *dev, u32 tmsg)
{
        struct mpt_lan_priv *priv = netdev_priv(dev);
        MPT_ADAPTER *mpt_dev = priv->mpt_dev;
        struct sk_buff *sent;
        unsigned long flags;
        u32 ctx;

        ctx = GET_LAN_BUFFER_CONTEXT(tmsg);
        sent = priv->SendCtl[ctx].skb;

        priv->stats.tx_packets++;
        priv->stats.tx_bytes += sent->len;

        dioprintk((KERN_INFO MYNAM ": %s/%s: @%s, skb %p sent.\n",
                        IOC_AND_NETDEV_NAMES_s_s(dev),
                        __FUNCTION__, sent));

        priv->SendCtl[ctx].skb = NULL;
        pci_unmap_single(mpt_dev->pcidev, priv->SendCtl[ctx].dma,
                         priv->SendCtl[ctx].len, PCI_DMA_TODEVICE);
        dev_kfree_skb_irq(sent);

        spin_lock_irqsave(&priv->txfidx_lock, flags);
        priv->mpt_txfidx[++priv->mpt_txfidx_tail] = ctx;
        spin_unlock_irqrestore(&priv->txfidx_lock, flags);

        netif_wake_queue(dev);
        return 0;
}

/*=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=*/
static int
mpt_lan_send_reply(struct net_device *dev, LANSendReply_t *pSendRep)
{
        struct mpt_lan_priv *priv = netdev_priv(dev);
        MPT_ADAPTER *mpt_dev = priv->mpt_dev;
        struct sk_buff *sent;
        unsigned long flags;
        int FreeReqFrame = 0;
        u32 *pContext;
        u32 ctx;
        u8 count;

        count = pSendRep->NumberOfContexts;

        dioprintk((KERN_INFO MYNAM ": send_reply: IOCStatus: %04x\n",
                 le16_to_cpu(pSendRep->IOCStatus)));

        /* Add check for Loginfo Flag in IOCStatus */

        switch (le16_to_cpu(pSendRep->IOCStatus) & MPI_IOCSTATUS_MASK) {
        case MPI_IOCSTATUS_SUCCESS:
                priv->stats.tx_packets += count;
                break;

        case MPI_IOCSTATUS_LAN_CANCELED:
        case MPI_IOCSTATUS_LAN_TRANSMIT_ABORTED:
                break;

        case MPI_IOCSTATUS_INVALID_SGL:
                priv->stats.tx_errors += count;
                printk (KERN_ERR MYNAM ": %s/%s: ERROR - Invalid SGL sent to IOC!\n",
                                IOC_AND_NETDEV_NAMES_s_s(dev));
                goto out;

        default:
                priv->stats.tx_errors += count;
                break;
        }

        pContext = &pSendRep->BufferContext;

        spin_lock_irqsave(&priv->txfidx_lock, flags);
        while (count > 0) {
                ctx = GET_LAN_BUFFER_CONTEXT(le32_to_cpu(*pContext));

                sent = priv->SendCtl[ctx].skb;
                priv->stats.tx_bytes += sent->len;

                dioprintk((KERN_INFO MYNAM ": %s/%s: @%s, skb %p sent.\n",
                                IOC_AND_NETDEV_NAMES_s_s(dev),
                                __FUNCTION__, sent));

                priv->SendCtl[ctx].skb = NULL;
                pci_unmap_single(mpt_dev->pcidev, priv->SendCtl[ctx].dma,
                                 priv->SendCtl[ctx].len, PCI_DMA_TODEVICE);
                dev_kfree_skb_irq(sent);

                priv->mpt_txfidx[++priv->mpt_txfidx_tail] = ctx;

                pContext++;
                count--;
        }
        spin_unlock_irqrestore(&priv->txfidx_lock, flags);

out:
        if (!(pSendRep->MsgFlags & MPI_MSGFLAGS_CONTINUATION_REPLY))
                FreeReqFrame = 1;

        netif_wake_queue(dev);
        return FreeReqFrame;
}

/*=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=*/
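/*
 * Send-completion note (summarizing the two paths above): a turbo send
 * completion carries exactly one buffer context and implies success, so
 * mpt_lan_send_turbo only has to unmap, free the skb, and recycle the Tx
 * context; a full LANSendReply can batch several contexts and reports an
 * IOCStatus, so mpt_lan_send_reply walks the context array and accounts
 * errors before recycling.  Both paths finish with netif_wake_queue()
 * because a Tx context has just been freed.
 */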
static int
mpt_lan_sdu_send (struct sk_buff *skb, struct net_device *dev)
{
        struct mpt_lan_priv *priv = netdev_priv(dev);
        MPT_ADAPTER *mpt_dev = priv->mpt_dev;
        MPT_FRAME_HDR *mf;
        LANSendRequest_t *pSendReq;
        SGETransaction32_t *pTrans;
        SGESimple64_t *pSimple;
        dma_addr_t dma;
        unsigned long flags;
        int ctx;
        u16 cur_naa = 0x1000;

        dioprintk((KERN_INFO MYNAM ": %s called, skb_addr = %p\n",
                        __FUNCTION__, skb));

        spin_lock_irqsave(&priv->txfidx_lock, flags);
        if (priv->mpt_txfidx_tail < 0) {
                netif_stop_queue(dev);
                spin_unlock_irqrestore(&priv->txfidx_lock, flags);

                printk (KERN_ERR "%s: no tx context available: %u\n",
                        __FUNCTION__, priv->mpt_txfidx_tail);
                return 1;
        }

        mf = mpt_get_msg_frame(LanCtx, mpt_dev);
        if (mf == NULL) {
                netif_stop_queue(dev);
                spin_unlock_irqrestore(&priv->txfidx_lock, flags);

                printk (KERN_ERR "%s: Unable to alloc request frame\n",
                        __FUNCTION__);
                return 1;
        }

        ctx = priv->mpt_txfidx[priv->mpt_txfidx_tail--];
        spin_unlock_irqrestore(&priv->txfidx_lock, flags);

//      dioprintk((KERN_INFO MYNAM ": %s/%s: Creating new msg frame (send).\n",
//                      IOC_AND_NETDEV_NAMES_s_s(dev)));

        pSendReq = (LANSendRequest_t *) mf;

        /* Set the mac.raw pointer, since this apparently isn't getting
         * done before we get the skb. Pull the data pointer past the mac data.
         */
        skb->mac.raw = skb->data;
        skb_pull(skb, 12);

        dma = pci_map_single(mpt_dev->pcidev, skb->data, skb->len,
                             PCI_DMA_TODEVICE);

        priv->SendCtl[ctx].skb = skb;
        priv->SendCtl[ctx].dma = dma;
        priv->SendCtl[ctx].len = skb->len;

        /* Message Header */
        pSendReq->Reserved    = 0;
        pSendReq->Function    = MPI_FUNCTION_LAN_SEND;
        pSendReq->ChainOffset = 0;
        pSendReq->Reserved2   = 0;
        pSendReq->MsgFlags    = 0;
        pSendReq->PortNumber  = priv->pnum;

        /* Transaction Context Element */
        pTrans = (SGETransaction32_t *) pSendReq->SG_List;

        /* No Flags, 8 bytes of Details, 32bit Context (bloody turbo replies) */
        pTrans->ContextSize   = sizeof(u32);
        pTrans->DetailsLength = 2 * sizeof(u32);
        pTrans->Flags         = 0;
        pTrans->TransactionContext[0] = cpu_to_le32(ctx);

//      dioprintk((KERN_INFO MYNAM ": %s/%s: BC = %08x, skb = %p, buff = %p\n",
//                      IOC_AND_NETDEV_NAMES_s_s(dev),
//                      ctx, skb, skb->data));

#ifdef QLOGIC_NAA_WORKAROUND
{
        struct NAA_Hosed *nh;

        /* Munge the NAA for Tx packets to QLogic boards, which don't follow
           RFC 2625. The longer I look at this, the more my opinion of Qlogic
           drops. */
        read_lock_irq(&bad_naa_lock);
        for (nh = mpt_bad_naa; nh != NULL; nh = nh->next) {
                if ((nh->ieee[0] == skb->mac.raw[0]) &&
                    (nh->ieee[1] == skb->mac.raw[1]) &&
                    (nh->ieee[2] == skb->mac.raw[2]) &&
                    (nh->ieee[3] == skb->mac.raw[3]) &&
                    (nh->ieee[4] == skb->mac.raw[4]) &&
                    (nh->ieee[5] == skb->mac.raw[5])) {
                        cur_naa = nh->NAA;
                        dlprintk ((KERN_INFO "mptlan/sdu_send: using NAA value "
                                  "= %04x.\n", cur_naa));
                        break;
                }
        }
        read_unlock_irq(&bad_naa_lock);
}
#endif

        pTrans->TransactionDetails[0] = cpu_to_le32((cur_naa         << 16) |
                                                    (skb->mac.raw[0] <<  8) |
                                                    (skb->mac.raw[1] <<  0));
        pTrans->TransactionDetails[1] = cpu_to_le32((skb->mac.raw[2] << 24) |
                                                    (skb->mac.raw[3] << 16) |
                                                    (skb->mac.raw[4] <<  8) |
                                                    (skb->mac.raw[5] <<  0));

        pSimple = (SGESimple64_t *) &pTrans->TransactionDetails[2];

        /* If we ever decide to send more than one Simple SGE per LANSend, then
           we will need to make sure that LAST_ELEMENT only gets set on the
           last one. Otherwise, bad voodoo and evil funkiness will commence. */
        pSimple->FlagsLength = cpu_to_le32(
                        ((MPI_SGE_FLAGS_LAST_ELEMENT |
                          MPI_SGE_FLAGS_END_OF_BUFFER |
                          MPI_SGE_FLAGS_SIMPLE_ELEMENT |
                          MPI_SGE_FLAGS_SYSTEM_ADDRESS |
                          MPI_SGE_FLAGS_HOST_TO_IOC |
                          MPI_SGE_FLAGS_64_BIT_ADDRESSING |
                          MPI_SGE_FLAGS_END_OF_LIST) << MPI_SGE_FLAGS_SHIFT) |
                        skb->len);
        pSimple->Address.Low = cpu_to_le32((u32) dma);
        if (sizeof(dma_addr_t) > sizeof(u32))
                pSimple->Address.High = cpu_to_le32((u32) ((u64) dma >> 32));
        else
                pSimple->Address.High = 0;

        mpt_put_msg_frame (LanCtx, mpt_dev, mf);
        dev->trans_start = jiffies;

        dioprintk((KERN_INFO MYNAM ": %s/%s: Sending packet. FlagsLength = %08x.\n",
                        IOC_AND_NETDEV_NAMES_s_s(dev),
                        le32_to_cpu(pSimple->FlagsLength)));

        return 0;
}

/*=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=*/
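/*
 * Layout note for the LANSend transaction element built above (added
 * commentary): skb_pull(skb, 12) strips the two 6-byte FC addresses, and
 * the two TransactionDetails words then carry the destination address
 * prefixed with the NAA, i.e. {NAA, da[0..1]} and {da[2..5]}, which
 * appears to match the RFC 2625 network-header encapsulation.  0x1000 is
 * the default RFC-compliant NAA and is only overridden by the QLogic
 * workaround.
 */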
/*
 * @priority: 0 = put it on the timer queue, 1 = put it on the immediate queue
 */
static inline void
mpt_lan_wake_post_buckets_task(struct net_device *dev, int priority)
{
        struct mpt_lan_priv *priv = netdev_priv(dev);

        if (test_and_set_bit(0, &priv->post_buckets_active) == 0) {
                if (priority) {
                        schedule_work(&priv->post_buckets_task);
                } else {
                        schedule_delayed_work(&priv->post_buckets_task, 1);
                        dioprintk((KERN_INFO MYNAM ": post_buckets queued on "
                                   "timer.\n"));
                }
                dioprintk((KERN_INFO MYNAM ": %s/%s: Queued post_buckets task.\n",
                           IOC_AND_NETDEV_NAMES_s_s(dev) ));
        }
}

/*=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=*/
static inline int
mpt_lan_receive_skb(struct net_device *dev, struct sk_buff *skb)
{
        struct mpt_lan_priv *priv = netdev_priv(dev);

        skb->protocol = mpt_lan_type_trans(skb, dev);

        dioprintk((KERN_INFO MYNAM ": %s/%s: Incoming packet (%d bytes) "
                 "delivered to upper level.\n",
                        IOC_AND_NETDEV_NAMES_s_s(dev), skb->len));

        priv->stats.rx_bytes += skb->len;
        priv->stats.rx_packets++;

        skb->dev = dev;
        netif_rx(skb);

        dioprintk((MYNAM "/receive_skb: %d buckets remaining\n",
                 atomic_read(&priv->buckets_out)));

        if (atomic_read(&priv->buckets_out) < priv->bucketthresh)
                mpt_lan_wake_post_buckets_task(dev, 1);

        dioprintk((KERN_INFO MYNAM "/receive_post_reply: %d buckets "
                  "remaining, %d received back since sod\n",
                  atomic_read(&priv->buckets_out), priv->total_received));

        return 0;
}

/*=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=*/
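/*
 * Rx copybreak note: for frames shorter than MPT_LAN_RX_COPYBREAK the
 * receive paths below copy the data into a freshly allocated skb and leave
 * the original bucket mapped for reuse; larger frames are passed up in the
 * bucket's own skb, which is then unmapped and replaced on the next
 * bucket-posting run.
 */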
//static inline int
static int
mpt_lan_receive_post_turbo(struct net_device *dev, u32 tmsg)
{
        struct mpt_lan_priv *priv = netdev_priv(dev);
        MPT_ADAPTER *mpt_dev = priv->mpt_dev;
        struct sk_buff *skb, *old_skb;
        unsigned long flags;
        u32 ctx, len;

        ctx = GET_LAN_BUCKET_CONTEXT(tmsg);
        skb = priv->RcvCtl[ctx].skb;

        len = GET_LAN_PACKET_LENGTH(tmsg);

        if (len < MPT_LAN_RX_COPYBREAK) {
                old_skb = skb;

                skb = (struct sk_buff *)dev_alloc_skb(len);
                if (!skb) {
                        printk (KERN_ERR MYNAM ": %s/%s: ERROR - Can't allocate skb! (%s@%d)\n",
                                        IOC_AND_NETDEV_NAMES_s_s(dev),
                                        __FILE__, __LINE__);
                        return -ENOMEM;
                }

                pci_dma_sync_single_for_cpu(mpt_dev->pcidev, priv->RcvCtl[ctx].dma,
                                            priv->RcvCtl[ctx].len, PCI_DMA_FROMDEVICE);

                memcpy(skb_put(skb, len), old_skb->data, len);

                pci_dma_sync_single_for_device(mpt_dev->pcidev, priv->RcvCtl[ctx].dma,
                                               priv->RcvCtl[ctx].len, PCI_DMA_FROMDEVICE);
                goto out;
        }

        skb_put(skb, len);

        priv->RcvCtl[ctx].skb = NULL;

        pci_unmap_single(mpt_dev->pcidev, priv->RcvCtl[ctx].dma,
                         priv->RcvCtl[ctx].len, PCI_DMA_FROMDEVICE);

out:
        spin_lock_irqsave(&priv->rxfidx_lock, flags);
        priv->mpt_rxfidx[++priv->mpt_rxfidx_tail] = ctx;
        spin_unlock_irqrestore(&priv->rxfidx_lock, flags);

        atomic_dec(&priv->buckets_out);
        priv->total_received++;

        return mpt_lan_receive_skb(dev, skb);
}

/*=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=*/
static int
mpt_lan_receive_post_free(struct net_device *dev,
                          LANReceivePostReply_t *pRecvRep)
{
        struct mpt_lan_priv *priv = netdev_priv(dev);
        MPT_ADAPTER *mpt_dev = priv->mpt_dev;
        unsigned long flags;
        struct sk_buff *skb;
        u32 ctx;
        int count;
        int i;

        count = pRecvRep->NumberOfContexts;

/**/    dlprintk((KERN_INFO MYNAM "/receive_post_reply: "
                  "IOC returned %d buckets, freeing them...\n", count));

        spin_lock_irqsave(&priv->rxfidx_lock, flags);
        for (i = 0; i < count; i++) {
                ctx = le32_to_cpu(pRecvRep->BucketContext[i]);

                skb = priv->RcvCtl[ctx].skb;

//              dlprintk((KERN_INFO MYNAM ": %s: dev_name = %s\n",
//                              IOC_AND_NETDEV_NAMES_s_s(dev)));
//              dlprintk((KERN_INFO MYNAM "@rpr[2], priv = %p, buckets_out addr = %p",
//                              priv, &(priv->buckets_out)));
//              dlprintk((KERN_INFO MYNAM "@rpr[2] TC + 3\n"));

                priv->RcvCtl[ctx].skb = NULL;
                pci_unmap_single(mpt_dev->pcidev, priv->RcvCtl[ctx].dma,
                                 priv->RcvCtl[ctx].len, PCI_DMA_FROMDEVICE);
                dev_kfree_skb_any(skb);

                priv->mpt_rxfidx[++priv->mpt_rxfidx_tail] = ctx;
        }
        spin_unlock_irqrestore(&priv->rxfidx_lock, flags);

        atomic_sub(count, &priv->buckets_out);

//      for (i = 0; i < priv->max_buckets_out; i++)
//              if (priv->RcvCtl[i].skb != NULL)
//                      dlprintk((KERN_INFO MYNAM "@rpr: bucket %03x "
//                                "is still out\n", i));

/*      dlprintk((KERN_INFO MYNAM "/receive_post_reply: freed %d buckets\n",
                  count));
*/
/**/    dlprintk((KERN_INFO MYNAM "@receive_post_reply: %d buckets "
/**/              "remaining, %d received back since sod.\n",
/**/              atomic_read(&priv->buckets_out), priv->total_received));
        return 0;
}

/*=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=*/
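/*
 * A non-turbo ReceivePostReply can describe a packet that spans several
 * buckets (NumberOfContexts > 1).  In that case the fragments are copied
 * in order into one new skb sized for the full PacketLength, and every
 * donor bucket context goes straight back on the free stack; a CANCELED
 * status instead routes to mpt_lan_receive_post_free above to reclaim the
 * returned buckets without delivering anything.
 */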
static int
mpt_lan_receive_post_reply(struct net_device *dev,
                           LANReceivePostReply_t *pRecvRep)
{
        struct mpt_lan_priv *priv = netdev_priv(dev);
        MPT_ADAPTER *mpt_dev = priv->mpt_dev;
        struct sk_buff *skb, *old_skb;
        unsigned long flags;
        u32 len, ctx, offset;
        u32 remaining = le32_to_cpu(pRecvRep->BucketsRemaining);
        int count;
        int i, l;

        dioprintk((KERN_INFO MYNAM ": mpt_lan_receive_post_reply called\n"));
        dioprintk((KERN_INFO MYNAM ": receive_post_reply: IOCStatus: %04x\n",
                 le16_to_cpu(pRecvRep->IOCStatus)));

        if ((le16_to_cpu(pRecvRep->IOCStatus) & MPI_IOCSTATUS_MASK) ==
                                                MPI_IOCSTATUS_LAN_CANCELED)
                return mpt_lan_receive_post_free(dev, pRecvRep);

        len = le32_to_cpu(pRecvRep->PacketLength);
        if (len == 0) {
                printk (KERN_ERR MYNAM ": %s/%s: ERROR - Got a non-TURBO "
                        "ReceivePostReply w/ PacketLength zero!\n",
                                IOC_AND_NETDEV_NAMES_s_s(dev));
                printk (KERN_ERR MYNAM ": MsgFlags = %02x, IOCStatus = %04x\n",
                                pRecvRep->MsgFlags, le16_to_cpu(pRecvRep->IOCStatus));
                return -1;
        }

        ctx    = le32_to_cpu(pRecvRep->BucketContext[0]);
        count  = pRecvRep->NumberOfContexts;
        skb    = priv->RcvCtl[ctx].skb;

        offset = le32_to_cpu(pRecvRep->PacketOffset);
//      if (offset != 0) {
//              printk (KERN_INFO MYNAM ": %s/%s: Got a ReceivePostReply "
//                      "w/ PacketOffset %u\n",
//                              IOC_AND_NETDEV_NAMES_s_s(dev),
//                              offset);
//      }

        dioprintk((KERN_INFO MYNAM ": %s/%s: @rpr, offset = %d, len = %d\n",
                        IOC_AND_NETDEV_NAMES_s_s(dev),
                        offset, len));

        if (count > 1) {
                int szrem = len;

//              dioprintk((KERN_INFO MYNAM ": %s/%s: Multiple buckets returned "
//                      "for single packet, concatenating...\n",
//                              IOC_AND_NETDEV_NAMES_s_s(dev)));

                skb = (struct sk_buff *)dev_alloc_skb(len);
                if (!skb) {
                        printk (KERN_ERR MYNAM ": %s/%s: ERROR - Can't allocate skb! (%s@%d)\n",
                                        IOC_AND_NETDEV_NAMES_s_s(dev),
                                        __FILE__, __LINE__);
                        return -ENOMEM;
                }

                spin_lock_irqsave(&priv->rxfidx_lock, flags);
                for (i = 0; i < count; i++) {

                        ctx = le32_to_cpu(pRecvRep->BucketContext[i]);
                        old_skb = priv->RcvCtl[ctx].skb;

                        l = priv->RcvCtl[ctx].len;
                        if (szrem < l)
                                l = szrem;

//                      dioprintk((KERN_INFO MYNAM ": %s/%s: Buckets = %d, len = %u\n",
//                                      IOC_AND_NETDEV_NAMES_s_s(dev),
//                                      i, l));

                        pci_dma_sync_single_for_cpu(mpt_dev->pcidev,
                                                    priv->RcvCtl[ctx].dma,
                                                    priv->RcvCtl[ctx].len,
                                                    PCI_DMA_FROMDEVICE);
                        memcpy(skb_put(skb, l), old_skb->data, l);

                        pci_dma_sync_single_for_device(mpt_dev->pcidev,
                                                       priv->RcvCtl[ctx].dma,
                                                       priv->RcvCtl[ctx].len,
                                                       PCI_DMA_FROMDEVICE);

                        priv->mpt_rxfidx[++priv->mpt_rxfidx_tail] = ctx;
                        szrem -= l;
                }
                spin_unlock_irqrestore(&priv->rxfidx_lock, flags);

        } else if (len < MPT_LAN_RX_COPYBREAK) {

                old_skb = skb;

                skb = (struct sk_buff *)dev_alloc_skb(len);
                if (!skb) {
                        printk (KERN_ERR MYNAM ": %s/%s: ERROR - Can't allocate skb! (%s@%d)\n",
                                        IOC_AND_NETDEV_NAMES_s_s(dev),
                                        __FILE__, __LINE__);
                        return -ENOMEM;
                }

                pci_dma_sync_single_for_cpu(mpt_dev->pcidev,
                                            priv->RcvCtl[ctx].dma,
                                            priv->RcvCtl[ctx].len,
                                            PCI_DMA_FROMDEVICE);

                memcpy(skb_put(skb, len), old_skb->data, len);

                pci_dma_sync_single_for_device(mpt_dev->pcidev,
                                               priv->RcvCtl[ctx].dma,
                                               priv->RcvCtl[ctx].len,
                                               PCI_DMA_FROMDEVICE);

                spin_lock_irqsave(&priv->rxfidx_lock, flags);
                priv->mpt_rxfidx[++priv->mpt_rxfidx_tail] = ctx;
                spin_unlock_irqrestore(&priv->rxfidx_lock, flags);

        } else {
                spin_lock_irqsave(&priv->rxfidx_lock, flags);

                priv->RcvCtl[ctx].skb = NULL;

                pci_unmap_single(mpt_dev->pcidev, priv->RcvCtl[ctx].dma,
                                 priv->RcvCtl[ctx].len, PCI_DMA_FROMDEVICE);
                priv->RcvCtl[ctx].dma = 0;

                priv->mpt_rxfidx[++priv->mpt_rxfidx_tail] = ctx;
                spin_unlock_irqrestore(&priv->rxfidx_lock, flags);

                skb_put(skb, len);
        }

        atomic_sub(count, &priv->buckets_out);
        priv->total_received += count;

        if (priv->mpt_rxfidx_tail >= MPT_LAN_MAX_BUCKETS_OUT) {
                printk (KERN_ERR MYNAM ": %s/%s: Yoohoo! mpt_rxfidx_tail = %d, "
                        "MPT_LAN_MAX_BUCKETS_OUT = %d\n",
                                IOC_AND_NETDEV_NAMES_s_s(dev),
                                priv->mpt_rxfidx_tail,
                                MPT_LAN_MAX_BUCKETS_OUT);

                panic("Damn it Jim! I'm a doctor, not a programmer! "
                        "Oh, wait a sec, I am a programmer. "
                        "And, who's Jim?!?!\n"
                        "Arrgghh! We've done it again!\n");
        }

        if (remaining == 0)
                printk (KERN_WARNING MYNAM ": %s/%s: WARNING - IOC out of buckets! "
                        "(priv->buckets_out = %d)\n",
                        IOC_AND_NETDEV_NAMES_s_s(dev),
                        atomic_read(&priv->buckets_out));
        else if (remaining < 10)
                printk (KERN_INFO MYNAM ": %s/%s: IOC says %d buckets left. "
                        "(priv->buckets_out = %d)\n",
                        IOC_AND_NETDEV_NAMES_s_s(dev),
                        remaining, atomic_read(&priv->buckets_out));

        if ((remaining < priv->bucketthresh) &&
            ((atomic_read(&priv->buckets_out) - remaining) >
             MPT_LAN_BUCKETS_REMAIN_MISMATCH_THRESH)) {

                printk (KERN_WARNING MYNAM " Mismatch between driver's "
                        "buckets_out count and fw's BucketsRemaining "
                        "count has crossed the threshold, issuing a "
                        "LanReset to clear the fw's hashtable. You may "
                        "want to check your /var/log/messages for \"CRC "
                        "error\" event notifications.\n");

                mpt_lan_reset(dev);
                mpt_lan_wake_post_buckets_task(dev, 0);
        }

        return mpt_lan_receive_skb(dev, skb);
}

/*=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=*/
/* Simple SGE's only at the moment */

static void
mpt_lan_post_receive_buckets(void *dev_id)
{
        struct net_device *dev = dev_id;
        struct mpt_lan_priv *priv = netdev_priv(dev);
        MPT_ADAPTER *mpt_dev = priv->mpt_dev;
        MPT_FRAME_HDR *mf;
        LANReceivePostRequest_t *pRecvReq;
        SGETransaction32_t *pTrans;
        SGESimple64_t *pSimple;
        struct sk_buff *skb;
        dma_addr_t dma;
        u32 curr, buckets, count, max;
        u32 len = (dev->mtu + dev->hard_header_len + 4);
        unsigned long flags;
        int i;

        curr = atomic_read(&priv->buckets_out);
        buckets = (priv->max_buckets_out - curr);

        dioprintk((KERN_INFO MYNAM ": %s/%s: @%s, Start_buckets = %u, buckets_out = %u\n",
                        IOC_AND_NETDEV_NAMES_s_s(dev),
                        __FUNCTION__, buckets, curr));

        max = (mpt_dev->req_sz - MPT_LAN_RECEIVE_POST_REQUEST_SIZE) /
              (MPT_LAN_TRANSACTION32_SIZE + sizeof(SGESimple64_t));

        while (buckets) {
                mf = mpt_get_msg_frame(LanCtx, mpt_dev);
                if (mf == NULL) {
                        printk (KERN_ERR "%s: Unable to alloc request frame\n",
                                __FUNCTION__);
                        dioprintk((KERN_ERR "%s: %u buckets remaining\n",
                                 __FUNCTION__, buckets));
                        goto out;
                }
                pRecvReq = (LANReceivePostRequest_t *) mf;

                count = buckets;
                if (count > max)
                        count = max;

                pRecvReq->Function    = MPI_FUNCTION_LAN_RECEIVE;
                pRecvReq->ChainOffset = 0;
                pRecvReq->MsgFlags    = 0;
                pRecvReq->PortNumber  = priv->pnum;

                pTrans = (SGETransaction32_t *) pRecvReq->SG_List;
                pSimple = NULL;

                for (i = 0; i < count; i++) {
                        int ctx;

                        spin_lock_irqsave(&priv->rxfidx_lock, flags);
                        if (priv->mpt_rxfidx_tail < 0) {
                                printk (KERN_ERR "%s: Can't alloc context\n",
                                        __FUNCTION__);
                                spin_unlock_irqrestore(&priv->rxfidx_lock,
                                                       flags);
                                break;
                        }

                        ctx = priv->mpt_rxfidx[priv->mpt_rxfidx_tail--];

                        skb = priv->RcvCtl[ctx].skb;
                        if (skb && (priv->RcvCtl[ctx].len != len)) {
                                pci_unmap_single(mpt_dev->pcidev,
                                                 priv->RcvCtl[ctx].dma,
                                                 priv->RcvCtl[ctx].len,
                                                 PCI_DMA_FROMDEVICE);
                                dev_kfree_skb(priv->RcvCtl[ctx].skb);
                                skb = priv->RcvCtl[ctx].skb = NULL;
                        }

                        if (skb == NULL) {
                                skb = dev_alloc_skb(len);
                                if (skb == NULL) {
                                        printk (KERN_WARNING
                                                MYNAM "/%s: Can't alloc skb\n",
                                                __FUNCTION__);
                                        priv->mpt_rxfidx[++priv->mpt_rxfidx_tail] = ctx;
                                        spin_unlock_irqrestore(&priv->rxfidx_lock, flags);
                                        break;
                                }

                                dma = pci_map_single(mpt_dev->pcidev, skb->data,
                                                     len, PCI_DMA_FROMDEVICE);

                                priv->RcvCtl[ctx].skb = skb;
                                priv->RcvCtl[ctx].dma = dma;
                                priv->RcvCtl[ctx].len = len;
                        }

                        spin_unlock_irqrestore(&priv->rxfidx_lock, flags);

                        pTrans->ContextSize   = sizeof(u32);
                        pTrans->DetailsLength = 0;
                        pTrans->Flags         = 0;
                        pTrans->TransactionContext[0] = cpu_to_le32(ctx);

                        pSimple = (SGESimple64_t *) pTrans->TransactionDetails;

                        pSimple->FlagsLength = cpu_to_le32(
                                ((MPI_SGE_FLAGS_END_OF_BUFFER |
                                  MPI_SGE_FLAGS_SIMPLE_ELEMENT |
                                  MPI_SGE_FLAGS_64_BIT_ADDRESSING) << MPI_SGE_FLAGS_SHIFT) | len);
                        pSimple->Address.Low = cpu_to_le32((u32) priv->RcvCtl[ctx].dma);
                        if (sizeof(dma_addr_t) > sizeof(u32))
                                pSimple->Address.High = cpu_to_le32((u32) ((u64) priv->RcvCtl[ctx].dma >> 32));
                        else
                                pSimple->Address.High = 0;

                        pTrans = (SGETransaction32_t *) (pSimple + 1);
                }

                if (pSimple == NULL) {
/**/                    printk (KERN_WARNING MYNAM "/%s: No buckets posted\n",
/**/                            __FUNCTION__);
                        mpt_free_msg_frame(mpt_dev, mf);
                        goto out;
                }

                pSimple->FlagsLength |= cpu_to_le32(MPI_SGE_FLAGS_END_OF_LIST << MPI_SGE_FLAGS_SHIFT);

                pRecvReq->BucketCount = cpu_to_le32(i);

/*      printk(KERN_INFO MYNAM ": posting buckets\n   ");
 *      for (i = 0; i < j + 2; i ++)
 *              printk (" %08x", le32_to_cpu(msg[i]));
 *      printk ("\n");
 */

                mpt_put_msg_frame(LanCtx, mpt_dev, mf);

                priv->total_posted += i;
                buckets -= i;
                atomic_add(i, &priv->buckets_out);
        }

out:
        dioprintk((KERN_INFO MYNAM "/%s: End_buckets = %u, priv->buckets_out = %u\n",
                  __FUNCTION__, buckets, atomic_read(&priv->buckets_out)));
        dioprintk((KERN_INFO MYNAM "/%s: Posted %u buckets and received %u back\n",
                  __FUNCTION__, priv->total_posted, priv->total_received));

        clear_bit(0, &priv->post_buckets_active);
}

/*=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=*/
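/*
 * Sizing note for mpt_lan_post_receive_buckets above: "max" is how many
 * bucket descriptors (one 32-bit transaction context plus one 64-bit
 * simple SGE each) fit into a single request frame after the fixed
 * LANReceivePost header, so each trip through the while loop posts at most
 * one full frame's worth of buckets.
 */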
static struct net_device *
mpt_register_lan_device (MPT_ADAPTER *mpt_dev, int pnum)
{
        struct net_device *dev = alloc_fcdev(sizeof(struct mpt_lan_priv));
        struct mpt_lan_priv *priv = NULL;
        u8 HWaddr[FC_ALEN], *a;

        if (!dev)
                return NULL;

        dev->mtu = MPT_LAN_MTU;

        priv = netdev_priv(dev);

        priv->mpt_dev = mpt_dev;
        priv->pnum = pnum;

        memset(&priv->post_buckets_task, 0, sizeof(struct work_struct));
        INIT_WORK(&priv->post_buckets_task, mpt_lan_post_receive_buckets, dev);
        priv->post_buckets_active = 0;

        dlprintk((KERN_INFO MYNAM "@%d: bucketlen = %d\n",
                        __LINE__, dev->mtu + dev->hard_header_len + 4));

        atomic_set(&priv->buckets_out, 0);
        priv->total_posted = 0;
        priv->total_received = 0;
        priv->max_buckets_out = max_buckets_out;
        if (mpt_dev->pfacts[0].MaxLanBuckets < max_buckets_out)
                priv->max_buckets_out = mpt_dev->pfacts[0].MaxLanBuckets;

        dlprintk((KERN_INFO MYNAM "@%d: MaxLanBuckets=%d, max_buckets_out/priv=%d/%d\n",
                        __LINE__,
                        mpt_dev->pfacts[0].MaxLanBuckets,
                        max_buckets_out,
                        priv->max_buckets_out));

        priv->bucketthresh = priv->max_buckets_out * 2 / 3;
        spin_lock_init(&priv->txfidx_lock);
        spin_lock_init(&priv->rxfidx_lock);

        memset(&priv->stats, 0, sizeof(priv->stats));

        /*  Grab pre-fetched LANPage1 stuff. :-)  */
        a = (u8 *) &mpt_dev->lan_cnfg_page1.HardwareAddressLow;

        HWaddr[0] = a[5];
        HWaddr[1] = a[4];
        HWaddr[2] = a[3];
        HWaddr[3] = a[2];
        HWaddr[4] = a[1];
        HWaddr[5] = a[0];

        dev->addr_len = FC_ALEN;
        memcpy(dev->dev_addr, HWaddr, FC_ALEN);
        memset(dev->broadcast, 0xff, FC_ALEN);

        /* The Tx queue is 127 deep on the 909.
         * Give ourselves some breathing room.
         */
        priv->tx_max_out = (tx_max_out_p <= MPT_TX_MAX_OUT_LIM) ?
                            tx_max_out_p : MPT_TX_MAX_OUT_LIM;

        dev->open = mpt_lan_open;
        dev->stop = mpt_lan_close;
        dev->get_stats = mpt_lan_get_stats;
        dev->set_multicast_list = NULL;
        dev->change_mtu = mpt_lan_change_mtu;
        dev->hard_start_xmit = mpt_lan_sdu_send;

        /* Not in 2.3.42. Need 2.3.45+ */
        dev->tx_timeout = mpt_lan_tx_timeout;
        dev->watchdog_timeo = MPT_LAN_TX_TIMEOUT;

        dlprintk((KERN_INFO MYNAM ": Finished registering dev "
                  "and setting initial values\n"));

        SET_MODULE_OWNER(dev);

        if (register_netdev(dev) != 0) {
                free_netdev(dev);
                dev = NULL;
        }
        return dev;
}
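/*
 * Address note for mpt_register_lan_device above (added commentary, an
 * inference from the byte reversal): the six bytes starting at
 * HardwareAddressLow appear to be stored least-significant-first, so
 * copying a[5]..a[0] into HWaddr[0]..HWaddr[5] converts the LANPage1
 * register dump into the wire-order FC_ALEN address used for dev_addr.
 */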
static int
mptlan_probe(struct pci_dev *pdev, const struct pci_device_id *id)
{
        MPT_ADAPTER             *ioc = pci_get_drvdata(pdev);
        struct net_device       *dev;
        int                     i;

        for (i = 0; i < ioc->facts.NumberOfPorts; i++) {
                printk(KERN_INFO MYNAM ": %s: PortNum=%x, "
                       "ProtocolFlags=%02Xh (%c%c%c%c)\n",
                       ioc->name, ioc->pfacts[i].PortNumber,
                       ioc->pfacts[i].ProtocolFlags,
                       MPT_PROTOCOL_FLAGS_c_c_c_c(
                               ioc->pfacts[i].ProtocolFlags));

                if (!(ioc->pfacts[i].ProtocolFlags &
                                        MPI_PORTFACTS_PROTOCOL_LAN)) {
                        printk(KERN_INFO MYNAM ": %s: Hmmm... LAN protocol "
                               "seems to be disabled on this adapter port!\n",
                               ioc->name);
                        continue;
                }

                dev = mpt_register_lan_device(ioc, i);
                if (!dev) {
                        printk(KERN_ERR MYNAM ": %s: Unable to register "
                               "port%d as a LAN device\n", ioc->name,
                               ioc->pfacts[i].PortNumber);
                        continue;
                }

                printk(KERN_INFO MYNAM ": %s: Fusion MPT LAN device "
                       "registered as '%s'\n", ioc->name, dev->name);
                printk(KERN_INFO MYNAM ": %s/%s: "
                       "LanAddr = %02X:%02X:%02X:%02X:%02X:%02X\n",
                       IOC_AND_NETDEV_NAMES_s_s(dev),
                       dev->dev_addr[0], dev->dev_addr[1],
                       dev->dev_addr[2], dev->dev_addr[3],
                       dev->dev_addr[4], dev->dev_addr[5]);

                ioc->netdev = dev;

                return 0;
        }

        return -ENODEV;
}

static void
mptlan_remove(struct pci_dev *pdev)
{
        MPT_ADAPTER *ioc = pci_get_drvdata(pdev);
        struct net_device *dev = ioc->netdev;

        if (dev != NULL) {
                unregister_netdev(dev);
                free_netdev(dev);
        }
}
static struct mpt_pci_driver mptlan_driver = {
        .probe          = mptlan_probe,
        .remove         = mptlan_remove,
};

static int __init mpt_lan_init (void)
{
        show_mptmod_ver(LANAME, LANVER);

        if ((LanCtx = mpt_register(lan_reply, MPTLAN_DRIVER)) <= 0) {
                printk (KERN_ERR MYNAM ": Failed to register with MPT base driver\n");
                return -EBUSY;
        }

        /* Set the callback index to be used by driver core for turbo replies */
        mpt_lan_index = LanCtx;

        dlprintk((KERN_INFO MYNAM ": assigned context of %d\n", LanCtx));

        if (mpt_reset_register(LanCtx, mpt_lan_ioc_reset)) {
                printk(KERN_ERR MYNAM ": Eieee! unable to register a reset "
                       "handler with mptbase! The world is at an end! "
                       "Everything is fading to black! Goodbye.\n");
                return -EBUSY;
        }

        dlprintk((KERN_INFO MYNAM ": Registered for IOC reset notifications\n"));

        if (mpt_device_driver_register(&mptlan_driver, MPTLAN_DRIVER))
                dprintk((KERN_INFO MYNAM ": failed to register dd callbacks\n"));

        return 0;
}

static void __exit mpt_lan_exit(void)
{
        mpt_device_driver_deregister(MPTLAN_DRIVER);
        mpt_reset_deregister(LanCtx);

        if (LanCtx >= 0) {
                mpt_deregister(LanCtx);
                LanCtx = -1;
                mpt_lan_index = 0;
        }
}

module_init(mpt_lan_init);
module_exit(mpt_lan_exit);

/*=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=*/
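/*
 * mpt_lan_type_trans below plays the role eth_type_trans plays for
 * Ethernet: it classifies the frame (host/broadcast/multicast/otherhost),
 * strips the FC network header, and returns the protocol value the stack
 * should see.
 */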
static unsigned short
mpt_lan_type_trans(struct sk_buff *skb, struct net_device *dev)
{
        struct mpt_lan_ohdr *fch = (struct mpt_lan_ohdr *)skb->data;
        struct fcllc *fcllc;

        skb->mac.raw = skb->data;
        skb_pull(skb, sizeof(struct mpt_lan_ohdr));

        if (fch->dtype == htons(0xffff)) {
                u32 *p = (u32 *) fch;

                swab32s(p + 0);
                swab32s(p + 1);
                swab32s(p + 2);
                swab32s(p + 3);

                printk (KERN_WARNING MYNAM ": %s: WARNING - Broadcast swap F/W bug detected!\n",
                                NETDEV_PTR_TO_IOC_NAME_s(dev));
                printk (KERN_WARNING MYNAM ": Please update sender @ MAC_addr = %02x:%02x:%02x:%02x:%02x:%02x\n",
                                fch->saddr[0], fch->saddr[1], fch->saddr[2],
                                fch->saddr[3], fch->saddr[4], fch->saddr[5]);
        }

        if (*fch->daddr & 1) {
                if (!memcmp(fch->daddr, dev->broadcast, FC_ALEN)) {
                        skb->pkt_type = PACKET_BROADCAST;
                } else {
                        skb->pkt_type = PACKET_MULTICAST;
                }
        } else {
                if (memcmp(fch->daddr, dev->dev_addr, FC_ALEN)) {
                        skb->pkt_type = PACKET_OTHERHOST;
                } else {
                        skb->pkt_type = PACKET_HOST;
                }
        }

        fcllc = (struct fcllc *)skb->data;

#ifdef QLOGIC_NAA_WORKAROUND
{
        u16 source_naa = fch->stype, found = 0;

        /* Workaround for QLogic not following RFC 2625 in regards to the NAA
           value. */

        if ((source_naa & 0xF000) == 0)
                source_naa = swab16(source_naa);

        if (fcllc->ethertype == htons(ETH_P_ARP))
                dlprintk ((KERN_INFO "mptlan/type_trans: got arp req/rep w/ naa of "
                          "%04x.\n", source_naa));

        if ((fcllc->ethertype == htons(ETH_P_ARP)) &&
            ((source_naa >> 12) != MPT_LAN_NAA_RFC2625)) {
                struct NAA_Hosed *nh, *prevnh;
                int i;

                dlprintk ((KERN_INFO "mptlan/type_trans: ARP Req/Rep from "
                          "system with non-RFC 2625 NAA value (%04x).\n",
                          source_naa));

                write_lock_irq(&bad_naa_lock);
                for (prevnh = nh = mpt_bad_naa; nh != NULL;
                     prevnh = nh, nh = nh->next) {
                        if ((nh->ieee[0] == fch->saddr[0]) &&
                            (nh->ieee[1] == fch->saddr[1]) &&
                            (nh->ieee[2] == fch->saddr[2]) &&
                            (nh->ieee[3] == fch->saddr[3]) &&
                            (nh->ieee[4] == fch->saddr[4]) &&
                            (nh->ieee[5] == fch->saddr[5])) {
                                found = 1;
                                dlprintk ((KERN_INFO "mptlan/type_trans: ARP Re"
                                          "q/Rep w/ bad NAA from system already"
                                          " in DB.\n"));
                                break;
                        }
                }

                if ((!found) && (nh == NULL)) {

                        nh = kmalloc(sizeof(struct NAA_Hosed), GFP_KERNEL);
                        dlprintk ((KERN_INFO "mptlan/type_trans: ARP Req/Rep w/"
                                  " bad NAA from system not yet in DB.\n"));

                        if (nh != NULL) {
                                nh->next = NULL;
                                if (!mpt_bad_naa)
                                        mpt_bad_naa = nh;
                                if (prevnh)
                                        prevnh->next = nh;

                                nh->NAA = source_naa; /* Set the S_NAA value. */
                                for (i = 0; i < FC_ALEN; i++)
                                        nh->ieee[i] = fch->saddr[i];
                                dlprintk ((KERN_INFO "Got ARP from %02x:%02x:%02x:%02x:"
                                          "%02x:%02x with non-compliant S_NAA value.\n",
                                          fch->saddr[0], fch->saddr[1], fch->saddr[2],
                                          fch->saddr[3], fch->saddr[4], fch->saddr[5]));
                        } else {
                                printk (KERN_ERR "mptlan/type_trans: Unable to"
                                        " kmalloc a NAA_Hosed struct.\n");
                        }
                } else if (!found) {
                        printk (KERN_ERR "mptlan/type_trans: found not"
                                " set, but nh isn't null. Evil "
                                "funkiness abounds.\n");
                }
                write_unlock_irq(&bad_naa_lock);
        }
}
#endif
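        /*
         * Workaround summary (added commentary): RFC 2625 says the NAA
         * nibble of S_NAA should be MPT_LAN_NAA_RFC2625; the code above
         * records senders whose ARP traffic violates that, so that
         * mpt_lan_sdu_send can reuse each offender's own (non-compliant)
         * NAA value when transmitting back to it.
         */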
        /* Strip the SNAP header from ARP packets since we don't
         * pass them through to the 802.2/SNAP layers.
         */
        if (fcllc->dsap == EXTENDED_SAP &&
                (fcllc->ethertype == htons(ETH_P_IP) ||
                 fcllc->ethertype == htons(ETH_P_ARP))) {
                skb_pull(skb, sizeof(struct fcllc));
                return fcllc->ethertype;
        }

        return htons(ETH_P_802_2);
}

/*=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=*/