/*
 * Copyright 2008 Cisco Systems, Inc. All rights reserved.
 * Copyright 2007 Nuova Systems, Inc. All rights reserved.
 *
 * This program is free software; you may redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; version 2 of the License.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 */
#include <linux/errno.h>
#include <linux/pci.h>
#include <linux/slab.h>
#include <linux/skbuff.h>
#include <linux/interrupt.h>
#include <linux/spinlock.h>
#include <linux/if_ether.h>
#include <linux/if_vlan.h>
#include <linux/workqueue.h>
#include <scsi/fc/fc_fip.h>
#include <scsi/fc/fc_els.h>
#include <scsi/fc/fc_fcoe.h>
#include <scsi/fc_frame.h>
#include <scsi/libfc.h>
#include "fnic_io.h"
#include "fnic.h"
#include "fnic_fip.h"
#include "cq_enet_desc.h"
#include "cq_exch_desc.h"

static u8 fcoe_all_fcfs[ETH_ALEN];
struct workqueue_struct *fnic_fip_queue;
struct workqueue_struct *fnic_event_queue;

static void fnic_set_eth_mode(struct fnic *);
static void fnic_fcoe_send_vlan_req(struct fnic *fnic);
static void fnic_fcoe_start_fcf_disc(struct fnic *fnic);
static void fnic_fcoe_process_vlan_resp(struct fnic *fnic, struct sk_buff *);
static int fnic_fcoe_vlan_check(struct fnic *fnic, u16 flag);
static int fnic_fcoe_handle_fip_frame(struct fnic *fnic, struct sk_buff *skb);

void fnic_handle_link(struct work_struct *work)
{
	struct fnic *fnic = container_of(work, struct fnic, link_work);
	unsigned long flags;
	int old_link_status;
	u32 old_link_down_cnt;

	spin_lock_irqsave(&fnic->fnic_lock, flags);

	if (fnic->stop_rx_link_events) {
		spin_unlock_irqrestore(&fnic->fnic_lock, flags);
		return;
	}

	old_link_down_cnt = fnic->link_down_cnt;
	old_link_status = fnic->link_status;
	fnic->link_status = vnic_dev_link_status(fnic->vdev);
	fnic->link_down_cnt = vnic_dev_link_down_cnt(fnic->vdev);

	if (old_link_status == fnic->link_status) {
		if (!fnic->link_status)
			/* DOWN -> DOWN */
			spin_unlock_irqrestore(&fnic->fnic_lock, flags);
		else {
			if (old_link_down_cnt != fnic->link_down_cnt) {
				/* UP -> DOWN -> UP */
				fnic->lport->host_stats.link_failure_count++;
				spin_unlock_irqrestore(&fnic->fnic_lock, flags);
				FNIC_FCS_DBG(KERN_DEBUG, fnic->lport->host,
					     "link down\n");
				fcoe_ctlr_link_down(&fnic->ctlr);
				if (fnic->config.flags & VFCF_FIP_CAPABLE) {
					/* start FCoE VLAN discovery */
					fnic_fcoe_send_vlan_req(fnic);
					return;
				}
				FNIC_FCS_DBG(KERN_DEBUG, fnic->lport->host,
					     "link up\n");
				fcoe_ctlr_link_up(&fnic->ctlr);
			} else
				/* UP -> UP */
				spin_unlock_irqrestore(&fnic->fnic_lock, flags);
		}
	} else if (fnic->link_status) {
		/* DOWN -> UP */
		spin_unlock_irqrestore(&fnic->fnic_lock, flags);
		if (fnic->config.flags & VFCF_FIP_CAPABLE) {
			/* start FCoE VLAN discovery */
			fnic_fcoe_send_vlan_req(fnic);
			return;
		}
		FNIC_FCS_DBG(KERN_DEBUG, fnic->lport->host, "link up\n");
		fcoe_ctlr_link_up(&fnic->ctlr);
	} else {
		/* UP -> DOWN */
		fnic->lport->host_stats.link_failure_count++;
		spin_unlock_irqrestore(&fnic->fnic_lock, flags);
		FNIC_FCS_DBG(KERN_DEBUG, fnic->lport->host, "link down\n");
		fcoe_ctlr_link_down(&fnic->ctlr);
	}

}

/*
 * This function passes incoming fabric frames to libFC
 */
void fnic_handle_frame(struct work_struct *work)
{
	struct fnic *fnic = container_of(work, struct fnic, frame_work);
	struct fc_lport *lp = fnic->lport;
	unsigned long flags;
	struct sk_buff *skb;
	struct fc_frame *fp;

	while ((skb = skb_dequeue(&fnic->frame_queue))) {

		spin_lock_irqsave(&fnic->fnic_lock, flags);
		if (fnic->stop_rx_link_events) {
			spin_unlock_irqrestore(&fnic->fnic_lock, flags);
			dev_kfree_skb(skb);
			return;
		}
		fp = (struct fc_frame *)skb;

		/*
		 * If we're in a transitional state, just re-queue and return.
		 * The queue will be serviced when we get to a stable state.
		 */
		if (fnic->state != FNIC_IN_FC_MODE &&
		    fnic->state != FNIC_IN_ETH_MODE) {
			skb_queue_head(&fnic->frame_queue, skb);
			spin_unlock_irqrestore(&fnic->fnic_lock, flags);
			return;
		}
		spin_unlock_irqrestore(&fnic->fnic_lock, flags);

		fc_exch_recv(lp, fp);
	}
}

void fnic_fcoe_evlist_free(struct fnic *fnic)
{
	struct fnic_event *fevt = NULL;
	struct fnic_event *next = NULL;
	unsigned long flags;

	spin_lock_irqsave(&fnic->fnic_lock, flags);
	if (list_empty(&fnic->evlist)) {
		spin_unlock_irqrestore(&fnic->fnic_lock, flags);
		return;
	}

	list_for_each_entry_safe(fevt, next, &fnic->evlist, list) {
		list_del(&fevt->list);
		kfree(fevt);
	}
	spin_unlock_irqrestore(&fnic->fnic_lock, flags);
}

void fnic_handle_event(struct work_struct *work)
{
	struct fnic *fnic = container_of(work, struct fnic, event_work);
	struct fnic_event *fevt = NULL;
	struct fnic_event *next = NULL;
	unsigned long flags;

	spin_lock_irqsave(&fnic->fnic_lock, flags);
	if (list_empty(&fnic->evlist)) {
		spin_unlock_irqrestore(&fnic->fnic_lock, flags);
		return;
	}

	list_for_each_entry_safe(fevt, next, &fnic->evlist, list) {
		if (fnic->stop_rx_link_events) {
			list_del(&fevt->list);
			kfree(fevt);
			spin_unlock_irqrestore(&fnic->fnic_lock, flags);
			return;
		}
		/*
		 * If we're in a transitional state, just re-queue and return.
		 * The queue will be serviced when we get to a stable state.
		 */
		if (fnic->state != FNIC_IN_FC_MODE &&
		    fnic->state != FNIC_IN_ETH_MODE) {
			spin_unlock_irqrestore(&fnic->fnic_lock, flags);
			return;
		}

		list_del(&fevt->list);
		switch (fevt->event) {
		case FNIC_EVT_START_VLAN_DISC:
			spin_unlock_irqrestore(&fnic->fnic_lock, flags);
			fnic_fcoe_send_vlan_req(fnic);
			spin_lock_irqsave(&fnic->fnic_lock, flags);
			break;
		case FNIC_EVT_START_FCF_DISC:
			FNIC_FCS_DBG(KERN_DEBUG, fnic->lport->host,
				     "Start FCF Discovery\n");
			fnic_fcoe_start_fcf_disc(fnic);
			break;
		default:
			FNIC_FCS_DBG(KERN_DEBUG, fnic->lport->host,
				     "Unknown event 0x%x\n", fevt->event);
			break;
		}
		kfree(fevt);
	}
	spin_unlock_irqrestore(&fnic->fnic_lock, flags);
}

/**
 * is_fnic_fip_flogi_reject() - check if the received FIP FLOGI frame is rejected
 * @fip: The FCoE controller that received the frame
 * @skb: The received FIP frame
 *
 * Returns non-zero if the frame is rejected with unsupported cmd and
 * insufficient resource ELS explanation.
 */
static inline int is_fnic_fip_flogi_reject(struct fcoe_ctlr *fip,
					   struct sk_buff *skb)
{
	struct fc_lport *lport = fip->lp;
	struct fip_header *fiph;
	struct fc_frame_header *fh = NULL;
	struct fip_desc *desc;
	struct fip_encaps *els;
	enum fip_desc_type els_dtype = 0;
	u16 op;
	u8 els_op;
	u8 sub;

	size_t els_len = 0;
	size_t rlen;
	size_t dlen = 0;

	if (skb_linearize(skb))
		return 0;

	if (skb->len < sizeof(*fiph))
		return 0;

	fiph = (struct fip_header *)skb->data;
	op = ntohs(fiph->fip_op);
	sub = fiph->fip_subcode;

	if (op != FIP_OP_LS)
		return 0;

	if (sub != FIP_SC_REP)
		return 0;

	rlen = ntohs(fiph->fip_dl_len) * 4;
	if (rlen + sizeof(*fiph) > skb->len)
		return 0;

	desc = (struct fip_desc *)(fiph + 1);
	dlen = desc->fip_dlen * FIP_BPW;

	if (desc->fip_dtype == FIP_DT_FLOGI) {

		shost_printk(KERN_DEBUG, lport->host,
			     " FIP TYPE FLOGI: fab name:%llx "
			     "vfid:%d map:%x\n",
			     fip->sel_fcf->fabric_name, fip->sel_fcf->vfid,
			     fip->sel_fcf->fc_map);
		if (dlen < sizeof(*els) + sizeof(*fh) + 1)
			return 0;

		els_len = dlen - sizeof(*els);
		els = (struct fip_encaps *)desc;
		fh = (struct fc_frame_header *)(els + 1);
		els_dtype = desc->fip_dtype;

		if (!fh)
			return 0;

		/*
		 * ELS command code, reason and explanation should be = Reject,
		 * unsupported command and insufficient resource
		 */
		els_op = *(u8 *)(fh + 1);
		if (els_op == ELS_LS_RJT) {
			shost_printk(KERN_INFO, lport->host,
				     "Flogi Request Rejected by Switch\n");
			return 1;
		}
		shost_printk(KERN_INFO, lport->host,
			     "Flogi Request Accepted by Switch\n");
	}
	return 0;
}

static void fnic_fcoe_send_vlan_req(struct fnic *fnic)
{
	struct fcoe_ctlr *fip = &fnic->ctlr;
	struct fnic_stats *fnic_stats = &fnic->fnic_stats;
	struct sk_buff *skb;
	char *eth_fr;
	int fr_len;
	struct fip_vlan *vlan;
	u64 vlan_tov;

	fnic_fcoe_reset_vlans(fnic);
	fnic->set_vlan(fnic, 0);
	FNIC_FCS_DBG(KERN_INFO, fnic->lport->host,
		     "Sending VLAN request...\n");
	skb = dev_alloc_skb(sizeof(struct fip_vlan));
	if (!skb)
		return;

	fr_len = sizeof(*vlan);
	eth_fr = (char *)skb->data;
	vlan = (struct fip_vlan *)eth_fr;

	memset(vlan, 0, sizeof(*vlan));
	memcpy(vlan->eth.h_source, fip->ctl_src_addr, ETH_ALEN);
	memcpy(vlan->eth.h_dest, fcoe_all_fcfs, ETH_ALEN);
	vlan->eth.h_proto = htons(ETH_P_FIP);

	vlan->fip.fip_ver = FIP_VER_ENCAPS(FIP_VER);
	vlan->fip.fip_op = htons(FIP_OP_VLAN);
	vlan->fip.fip_subcode = FIP_SC_VL_REQ;
	vlan->fip.fip_dl_len = htons(sizeof(vlan->desc) / FIP_BPW);

	vlan->desc.mac.fd_desc.fip_dtype = FIP_DT_MAC;
	vlan->desc.mac.fd_desc.fip_dlen = sizeof(vlan->desc.mac) / FIP_BPW;
	memcpy(&vlan->desc.mac.fd_mac, fip->ctl_src_addr, ETH_ALEN);

	vlan->desc.wwnn.fd_desc.fip_dtype = FIP_DT_NAME;
	vlan->desc.wwnn.fd_desc.fip_dlen = sizeof(vlan->desc.wwnn) / FIP_BPW;
	put_unaligned_be64(fip->lp->wwnn, &vlan->desc.wwnn.fd_wwn);
	atomic64_inc(&fnic_stats->vlan_stats.vlan_disc_reqs);

	skb_put(skb, sizeof(*vlan));
	skb->protocol = htons(ETH_P_FIP);
	skb_reset_mac_header(skb);
	skb_reset_network_header(skb);
	fip->send(fip, skb);

	/* set a timer so that we can retry if there is no response */
	vlan_tov = jiffies + msecs_to_jiffies(FCOE_CTLR_FIPVLAN_TOV);
	mod_timer(&fnic->fip_timer, round_jiffies(vlan_tov));
}

static void fnic_fcoe_process_vlan_resp(struct fnic *fnic, struct sk_buff *skb)
{
	struct fcoe_ctlr *fip = &fnic->ctlr;
	struct fip_header *fiph;
	struct fip_desc *desc;
	struct fnic_stats *fnic_stats = &fnic->fnic_stats;
	u16 vid;
	size_t rlen;
	size_t dlen;
	struct fcoe_vlan *vlan;
	u64 sol_time;
	unsigned long flags;

	FNIC_FCS_DBG(KERN_INFO, fnic->lport->host,
		     "Received VLAN response...\n");

	fiph = (struct fip_header *)skb->data;

	FNIC_FCS_DBG(KERN_INFO, fnic->lport->host,
		     "Received VLAN response... OP 0x%x SUB_OP 0x%x\n",
		     ntohs(fiph->fip_op), fiph->fip_subcode);

	rlen = ntohs(fiph->fip_dl_len) * 4;
	fnic_fcoe_reset_vlans(fnic);
	spin_lock_irqsave(&fnic->vlans_lock, flags);
	desc = (struct fip_desc *)(fiph + 1);
	while (rlen > 0) {
		dlen = desc->fip_dlen * FIP_BPW;
		switch (desc->fip_dtype) {
		case FIP_DT_VLAN:
			vid = ntohs(((struct fip_vlan_desc *)desc)->fd_vlan);
			shost_printk(KERN_INFO, fnic->lport->host,
				     "process_vlan_resp: FIP VLAN %d\n", vid);
			vlan = kmalloc(sizeof(*vlan), GFP_ATOMIC);
			if (!vlan) {
				/* retry from timer */
				spin_unlock_irqrestore(&fnic->vlans_lock,
						       flags);
				goto out;
			}
			memset(vlan, 0, sizeof(struct fcoe_vlan));
			vlan->vid = vid & 0x0fff;
			vlan->state = FIP_VLAN_AVAIL;
			list_add_tail(&vlan->list, &fnic->vlans);
			break;
		}
		desc = (struct fip_desc *)((char *)desc + dlen);
		rlen -= dlen;
	}

	/* any VLAN descriptors present ? */
	if (list_empty(&fnic->vlans)) {
		/* retry from timer */
		atomic64_inc(&fnic_stats->vlan_stats.resp_withno_vlanID);
		FNIC_FCS_DBG(KERN_INFO, fnic->lport->host,
			     "No VLAN descriptors in FIP VLAN response\n");
		spin_unlock_irqrestore(&fnic->vlans_lock, flags);
		goto out;
	}

	vlan = list_first_entry(&fnic->vlans, struct fcoe_vlan, list);
	fnic->set_vlan(fnic, vlan->vid);
	vlan->state = FIP_VLAN_SENT; /* sent now */
	vlan->sol_count++;
	spin_unlock_irqrestore(&fnic->vlans_lock, flags);

	/* start the solicitation */
	fcoe_ctlr_link_up(fip);

	sol_time = jiffies + msecs_to_jiffies(FCOE_CTLR_START_DELAY);
	mod_timer(&fnic->fip_timer, round_jiffies(sol_time));
out:
	return;
}

static void fnic_fcoe_start_fcf_disc(struct fnic *fnic)
{
	unsigned long flags;
	struct fcoe_vlan *vlan;
	u64 sol_time;

	spin_lock_irqsave(&fnic->vlans_lock, flags);
	vlan = list_first_entry(&fnic->vlans, struct fcoe_vlan, list);
	fnic->set_vlan(fnic, vlan->vid);
	vlan->state = FIP_VLAN_SENT; /* sent now */
	vlan->sol_count = 1;
	spin_unlock_irqrestore(&fnic->vlans_lock, flags);

	/* start the solicitation */
	fcoe_ctlr_link_up(&fnic->ctlr);

	sol_time = jiffies + msecs_to_jiffies(FCOE_CTLR_START_DELAY);
	mod_timer(&fnic->fip_timer, round_jiffies(sol_time));
}

static int fnic_fcoe_vlan_check(struct fnic *fnic, u16 flag)
{
	unsigned long flags;
	struct fcoe_vlan *fvlan;

	spin_lock_irqsave(&fnic->vlans_lock, flags);
	if (list_empty(&fnic->vlans)) {
		spin_unlock_irqrestore(&fnic->vlans_lock, flags);
		return -EINVAL;
	}

	fvlan = list_first_entry(&fnic->vlans, struct fcoe_vlan, list);
	if (fvlan->state == FIP_VLAN_USED) {
		spin_unlock_irqrestore(&fnic->vlans_lock, flags);
		return 0;
	}

	if (fvlan->state == FIP_VLAN_SENT) {
		fvlan->state = FIP_VLAN_USED;
		spin_unlock_irqrestore(&fnic->vlans_lock, flags);
		return 0;
	}
	spin_unlock_irqrestore(&fnic->vlans_lock, flags);
	return -EINVAL;
}

static void fnic_event_enq(struct fnic *fnic, enum fnic_evt ev)
{
	struct fnic_event *fevt;
	unsigned long flags;

	fevt = kmalloc(sizeof(*fevt), GFP_ATOMIC);
	if (!fevt)
		return;

	fevt->fnic = fnic;
	fevt->event = ev;

	spin_lock_irqsave(&fnic->fnic_lock, flags);
	list_add_tail(&fevt->list, &fnic->evlist);
	spin_unlock_irqrestore(&fnic->fnic_lock, flags);

	schedule_work(&fnic->event_work);
}

static int fnic_fcoe_handle_fip_frame(struct fnic *fnic, struct sk_buff *skb)
{
	struct fip_header *fiph;
	int ret = 1;
	u16 op;
	u8 sub;

	if (!skb || !(skb->data))
		return -1;

	if (skb_linearize(skb))
		goto drop;

	fiph = (struct fip_header *)skb->data;
	op = ntohs(fiph->fip_op);
	sub = fiph->fip_subcode;

	if (FIP_VER_DECAPS(fiph->fip_ver) != FIP_VER)
		goto drop;

	if (ntohs(fiph->fip_dl_len) * FIP_BPW + sizeof(*fiph) > skb->len)
		goto drop;

	if (op == FIP_OP_DISC && sub == FIP_SC_ADV) {
		if (fnic_fcoe_vlan_check(fnic, ntohs(fiph->fip_flags)))
			goto drop;
		/* pass it on to fcoe */
		ret = 1;
	} else if (op == FIP_OP_VLAN && sub == FIP_SC_VL_REP) {
		/* set the vlan as used */
		fnic_fcoe_process_vlan_resp(fnic, skb);
		ret = 0;
	} else if (op == FIP_OP_CTRL && sub == FIP_SC_CLR_VLINK) {
		/* received CVL request, restart vlan disc */
		fnic_event_enq(fnic, FNIC_EVT_START_VLAN_DISC);
		/* pass it on to fcoe */
		ret = 1;
	}
drop:
	return ret;
}

void fnic_handle_fip_frame(struct work_struct *work)
{
	struct fnic *fnic = container_of(work, struct fnic, fip_frame_work);
	struct fnic_stats *fnic_stats = &fnic->fnic_stats;
	unsigned long flags;
	struct sk_buff *skb;
	struct ethhdr *eh;

	while ((skb = skb_dequeue(&fnic->fip_frame_queue))) {
		spin_lock_irqsave(&fnic->fnic_lock, flags);
		if (fnic->stop_rx_link_events) {
			spin_unlock_irqrestore(&fnic->fnic_lock, flags);
			dev_kfree_skb(skb);
			return;
		}
		/*
		 * If we're in a transitional state, just re-queue and return.
		 * The queue will be serviced when we get to a stable state.
		 */
		if (fnic->state != FNIC_IN_FC_MODE &&
		    fnic->state != FNIC_IN_ETH_MODE) {
			skb_queue_head(&fnic->fip_frame_queue, skb);
			spin_unlock_irqrestore(&fnic->fnic_lock, flags);
			return;
		}
		spin_unlock_irqrestore(&fnic->fnic_lock, flags);
		eh = (struct ethhdr *)skb->data;
		if (eh->h_proto == htons(ETH_P_FIP)) {
			skb_pull(skb, sizeof(*eh));
			if (fnic_fcoe_handle_fip_frame(fnic, skb) <= 0) {
				dev_kfree_skb(skb);
				continue;
			}
			/*
			 * If there are FLOGI rejects - clear all
			 * fcf's & restart from scratch
			 */
			if (is_fnic_fip_flogi_reject(&fnic->ctlr, skb)) {
				atomic64_inc(
					&fnic_stats->vlan_stats.flogi_rejects);
				shost_printk(KERN_INFO, fnic->lport->host,
					     "Trigger a Link down - VLAN Disc\n");
				fcoe_ctlr_link_down(&fnic->ctlr);
				/* start FCoE VLAN discovery */
				fnic_fcoe_send_vlan_req(fnic);
				dev_kfree_skb(skb);
				continue;
			}
			fcoe_ctlr_recv(&fnic->ctlr, skb);
			continue;
		}
	}
}

/**
 * fnic_import_rq_eth_pkt() - handle received FCoE or FIP frame.
 * @fnic: fnic instance.
 * @skb: Ethernet Frame.
 */
static inline int fnic_import_rq_eth_pkt(struct fnic *fnic, struct sk_buff *skb)
{
	struct fc_frame *fp;
	struct ethhdr *eh;
	struct fcoe_hdr *fcoe_hdr;
	struct fcoe_crc_eof *ft;

	/*
	 * Undo VLAN encapsulation if present.
	 */
	eh = (struct ethhdr *)skb->data;
	if (eh->h_proto == htons(ETH_P_8021Q)) {
		memmove((u8 *)eh + VLAN_HLEN, eh, ETH_ALEN * 2);
		eh = (struct ethhdr *)skb_pull(skb, VLAN_HLEN);
		skb_reset_mac_header(skb);
	}
	if (eh->h_proto == htons(ETH_P_FIP)) {
		if (!(fnic->config.flags & VFCF_FIP_CAPABLE)) {
			printk(KERN_ERR "Dropped FIP frame, as firmware "
			       "uses non-FIP mode, Enable FIP "
			       "using UCSM\n");
			goto drop;
		}
		skb_queue_tail(&fnic->fip_frame_queue, skb);
		queue_work(fnic_fip_queue, &fnic->fip_frame_work);
		return 1;		/* let caller know packet was used */
	}
	if (eh->h_proto != htons(ETH_P_FCOE))
		goto drop;
	skb_set_network_header(skb, sizeof(*eh));
	skb_pull(skb, sizeof(*eh));

	fcoe_hdr = (struct fcoe_hdr *)skb->data;
	if (FC_FCOE_DECAPS_VER(fcoe_hdr) != FC_FCOE_VER)
		goto drop;

	fp = (struct fc_frame *)skb;
	fc_frame_init(fp);
	fr_sof(fp) = fcoe_hdr->fcoe_sof;
	skb_pull(skb, sizeof(struct fcoe_hdr));
	skb_reset_transport_header(skb);

	ft = (struct fcoe_crc_eof *)(skb->data + skb->len - sizeof(*ft));
	fr_eof(fp) = ft->fcoe_eof;
	skb_trim(skb, skb->len - sizeof(*ft));
	return 0;
drop:
	dev_kfree_skb_irq(skb);
	return -1;
}

/**
 * fnic_update_mac_locked() - set data MAC address and filters.
 * @fnic: fnic instance.
 * @new: newly-assigned FCoE MAC address.
 *
 * Called with the fnic lock held.
 */
void fnic_update_mac_locked(struct fnic *fnic, u8 *new)
{
	u8 *ctl = fnic->ctlr.ctl_src_addr;
	u8 *data = fnic->data_src_addr;

	if (is_zero_ether_addr(new))
		new = ctl;
	if (ether_addr_equal(data, new))
		return;
	FNIC_FCS_DBG(KERN_DEBUG, fnic->lport->host, "update_mac %pM\n", new);
	if (!is_zero_ether_addr(data) && !ether_addr_equal(data, ctl))
		vnic_dev_del_addr(fnic->vdev, data);
	memcpy(data, new, ETH_ALEN);
	if (!ether_addr_equal(new, ctl))
		vnic_dev_add_addr(fnic->vdev, new);
}

/**
 * fnic_update_mac() - set data MAC address and filters.
 * @lport: local port.
 * @new: newly-assigned FCoE MAC address.
 */
void fnic_update_mac(struct fc_lport *lport, u8 *new)
{
	struct fnic *fnic = lport_priv(lport);

	spin_lock_irq(&fnic->fnic_lock);
	fnic_update_mac_locked(fnic, new);
	spin_unlock_irq(&fnic->fnic_lock);
}

/**
 * fnic_set_port_id() - set the port_ID after successful FLOGI.
 * @lport: local port.
 * @port_id: assigned FC_ID.
 * @fp: received frame containing the FLOGI accept or NULL.
 *
 * This is called from libfc when a new FC_ID has been assigned.
 * This causes us to reset the firmware to FC_MODE and setup the new MAC
 * address and FC_ID.
 *
 * It is also called with FC_ID 0 when we're logged off.
 *
 * If the FC_ID is due to point-to-point, fp may be NULL.
 */
void fnic_set_port_id(struct fc_lport *lport, u32 port_id, struct fc_frame *fp)
{
	struct fnic *fnic = lport_priv(lport);
	u8 *mac;
	int ret;

	FNIC_FCS_DBG(KERN_DEBUG, lport->host, "set port_id %x fp %p\n",
		     port_id, fp);

	/*
	 * If we're clearing the FC_ID, change to use the ctl_src_addr.
	 * Set ethernet mode to send FLOGI.
	 */
	if (!port_id) {
		fnic_update_mac(lport, fnic->ctlr.ctl_src_addr);
		fnic_set_eth_mode(fnic);
		return;
	}

	if (fp) {
		mac = fr_cb(fp)->granted_mac;
		if (is_zero_ether_addr(mac)) {
			/* non-FIP - FLOGI already accepted - ignore return */
			fcoe_ctlr_recv_flogi(&fnic->ctlr, lport, fp);
		}
		fnic_update_mac(lport, mac);
	}

	/* Change state to reflect transition to FC mode */
	spin_lock_irq(&fnic->fnic_lock);
	if (fnic->state == FNIC_IN_ETH_MODE || fnic->state == FNIC_IN_FC_MODE)
		fnic->state = FNIC_IN_ETH_TRANS_FC_MODE;
	else {
		FNIC_FCS_DBG(KERN_DEBUG, fnic->lport->host,
			     "Unexpected fnic state %s while"
			     " processing flogi resp\n",
			     fnic_state_to_str(fnic->state));
		spin_unlock_irq(&fnic->fnic_lock);
		return;
	}
	spin_unlock_irq(&fnic->fnic_lock);

	/*
	 * Send FLOGI registration to firmware to set up FC mode.
	 * The new address will be set up when registration completes.
	 */
	ret = fnic_flogi_reg_handler(fnic, port_id);

	if (ret < 0) {
		spin_lock_irq(&fnic->fnic_lock);
		if (fnic->state == FNIC_IN_ETH_TRANS_FC_MODE)
			fnic->state = FNIC_IN_ETH_MODE;
		spin_unlock_irq(&fnic->fnic_lock);
	}
}

static void fnic_rq_cmpl_frame_recv(struct vnic_rq *rq, struct cq_desc
				    *cq_desc, struct vnic_rq_buf *buf,
				    int skipped __attribute__((unused)),
				    void *opaque)
{
	struct fnic *fnic = vnic_dev_priv(rq->vdev);
	struct sk_buff *skb;
	struct fc_frame *fp;
	struct fnic_stats *fnic_stats = &fnic->fnic_stats;
	unsigned int eth_hdrs_stripped;
	u8 type, color, eop, sop, ingress_port, vlan_stripped;
	u8 fcoe = 0, fcoe_sof, fcoe_eof;
	u8 fcoe_fc_crc_ok = 1, fcoe_enc_error = 0;
	u8 tcp_udp_csum_ok, udp, tcp, ipv4_csum_ok;
	u8 ipv6, ipv4, ipv4_fragment, rss_type, csum_not_calc;
	u8 fcs_ok = 1, packet_error = 0;
	u16 q_number, completed_index, bytes_written = 0, vlan, checksum;
	u32 rss_hash;
	u16 exchange_id, tmpl;
	u8 sof = 0;
	u8 eof = 0;
	u32 fcp_bytes_written = 0;
	unsigned long flags;

	pci_unmap_single(fnic->pdev, buf->dma_addr, buf->len,
			 PCI_DMA_FROMDEVICE);
	skb = buf->os_buf;
	fp = (struct fc_frame *)skb;
	buf->os_buf = NULL;

	cq_desc_dec(cq_desc, &type, &color, &q_number, &completed_index);
	if (type == CQ_DESC_TYPE_RQ_FCP) {
		cq_fcp_rq_desc_dec((struct cq_fcp_rq_desc *)cq_desc,
				   &type, &color, &q_number, &completed_index,
				   &eop, &sop, &fcoe_fc_crc_ok, &exchange_id,
				   &tmpl, &fcp_bytes_written, &sof, &eof,
				   &ingress_port, &packet_error,
				   &fcoe_enc_error, &fcs_ok, &vlan_stripped,
				   &vlan);
		eth_hdrs_stripped = 1;
		skb_trim(skb, fcp_bytes_written);
		fr_sof(fp) = sof;
		fr_eof(fp) = eof;

	} else if (type == CQ_DESC_TYPE_RQ_ENET) {
		cq_enet_rq_desc_dec((struct cq_enet_rq_desc *)cq_desc,
				    &type, &color, &q_number, &completed_index,
				    &ingress_port, &fcoe, &eop, &sop,
				    &rss_type, &csum_not_calc, &rss_hash,
				    &bytes_written, &packet_error,
				    &vlan_stripped, &vlan, &checksum,
				    &fcoe_sof, &fcoe_fc_crc_ok,
				    &fcoe_enc_error, &fcoe_eof,
				    &tcp_udp_csum_ok, &udp, &tcp,
				    &ipv4_csum_ok, &ipv6, &ipv4,
				    &ipv4_fragment, &fcs_ok);
		eth_hdrs_stripped = 0;
		skb_trim(skb, bytes_written);
		if (!fcs_ok) {
			atomic64_inc(&fnic_stats->misc_stats.frame_errors);
			FNIC_FCS_DBG(KERN_DEBUG, fnic->lport->host,
				     "fcs error. dropping packet.\n");
			goto drop;
		}
		if (fnic_import_rq_eth_pkt(fnic, skb))
			return;

	} else {
		/* wrong CQ type */
		shost_printk(KERN_ERR, fnic->lport->host,
			     "fnic rq_cmpl wrong cq type x%x\n", type);
		goto drop;
	}

	if (!fcs_ok || packet_error || !fcoe_fc_crc_ok || fcoe_enc_error) {
		atomic64_inc(&fnic_stats->misc_stats.frame_errors);
		FNIC_FCS_DBG(KERN_DEBUG, fnic->lport->host,
			     "fnic rq_cmpl fcoe x%x fcsok x%x"
			     " pkterr x%x fcoe_fc_crc_ok x%x, fcoe_enc_err"
			     " x%x\n",
			     fcoe, fcs_ok, packet_error,
			     fcoe_fc_crc_ok, fcoe_enc_error);
		goto drop;
	}

	spin_lock_irqsave(&fnic->fnic_lock, flags);
	if (fnic->stop_rx_link_events) {
		spin_unlock_irqrestore(&fnic->fnic_lock, flags);
		goto drop;
	}
	fr_dev(fp) = fnic->lport;
	spin_unlock_irqrestore(&fnic->fnic_lock, flags);

	skb_queue_tail(&fnic->frame_queue, skb);
	queue_work(fnic_event_queue, &fnic->frame_work);

	return;
drop:
	dev_kfree_skb_irq(skb);
}

static int fnic_rq_cmpl_handler_cont(struct vnic_dev *vdev,
				     struct cq_desc *cq_desc, u8 type,
				     u16 q_number, u16 completed_index,
				     void *opaque)
{
	struct fnic *fnic = vnic_dev_priv(vdev);

	vnic_rq_service(&fnic->rq[q_number], cq_desc, completed_index,
			VNIC_RQ_RETURN_DESC, fnic_rq_cmpl_frame_recv,
			NULL);
	return 0;
}

int fnic_rq_cmpl_handler(struct fnic *fnic, int rq_work_to_do)
{
	unsigned int tot_rq_work_done = 0, cur_work_done;
	unsigned int i;
	int err;

	for (i = 0; i < fnic->rq_count; i++) {
		cur_work_done = vnic_cq_service(&fnic->cq[i], rq_work_to_do,
						fnic_rq_cmpl_handler_cont,
						NULL);
		if (cur_work_done) {
			err = vnic_rq_fill(&fnic->rq[i], fnic_alloc_rq_frame);
			if (err)
				shost_printk(KERN_ERR, fnic->lport->host,
					     "fnic_alloc_rq_frame can't alloc"
					     " frame\n");
		}
		tot_rq_work_done += cur_work_done;
	}

	return tot_rq_work_done;
}

/*
 * This function is called once at init time to allocate and fill RQ
 * buffers. Subsequently, it is called in the interrupt context after RQ
 * buffer processing to replenish the buffers in the RQ.
 */
int fnic_alloc_rq_frame(struct vnic_rq *rq)
{
	struct fnic *fnic = vnic_dev_priv(rq->vdev);
	struct sk_buff *skb;
	u16 len;
	dma_addr_t pa;

	len = FC_FRAME_HEADROOM + FC_MAX_FRAME + FC_FRAME_TAILROOM;
	skb = dev_alloc_skb(len);
	if (!skb) {
		FNIC_FCS_DBG(KERN_DEBUG, fnic->lport->host,
			     "Unable to allocate RQ sk_buff\n");
		return -ENOMEM;
	}
	skb_reset_mac_header(skb);
	skb_reset_transport_header(skb);
	skb_reset_network_header(skb);
	skb_put(skb, len);
	pa = pci_map_single(fnic->pdev, skb->data, len, PCI_DMA_FROMDEVICE);
	fnic_queue_rq_desc(rq, skb, pa, len);
	return 0;
}

void fnic_free_rq_buf(struct vnic_rq *rq, struct vnic_rq_buf *buf)
{
	struct fc_frame *fp = buf->os_buf;
	struct fnic *fnic = vnic_dev_priv(rq->vdev);

	pci_unmap_single(fnic->pdev, buf->dma_addr, buf->len,
			 PCI_DMA_FROMDEVICE);

	dev_kfree_skb(fp_skb(fp));
	buf->os_buf = NULL;
}

/**
 * fnic_eth_send() - Send Ethernet frame.
 * @fip: fcoe_ctlr instance.
 * @skb: Ethernet Frame, FIP, without VLAN encapsulation.
 */
void fnic_eth_send(struct fcoe_ctlr *fip, struct sk_buff *skb)
{
	struct fnic *fnic = fnic_from_ctlr(fip);
	struct vnic_wq *wq = &fnic->wq[0];
	dma_addr_t pa;
	struct ethhdr *eth_hdr;
	struct vlan_ethhdr *vlan_hdr;
	unsigned long flags;

	if (!fnic->vlan_hw_insert) {
		eth_hdr = (struct ethhdr *)skb_mac_header(skb);
		vlan_hdr = (struct vlan_ethhdr *)skb_push(skb,
				sizeof(*vlan_hdr) - sizeof(*eth_hdr));
		memcpy(vlan_hdr, eth_hdr, 2 * ETH_ALEN);
		vlan_hdr->h_vlan_proto = htons(ETH_P_8021Q);
		vlan_hdr->h_vlan_encapsulated_proto = eth_hdr->h_proto;
		vlan_hdr->h_vlan_TCI = htons(fnic->vlan_id);
	}

	pa = pci_map_single(fnic->pdev, skb->data, skb->len, PCI_DMA_TODEVICE);

	spin_lock_irqsave(&fnic->wq_lock[0], flags);
	if (!vnic_wq_desc_avail(wq)) {
		pci_unmap_single(fnic->pdev, pa, skb->len, PCI_DMA_TODEVICE);
		spin_unlock_irqrestore(&fnic->wq_lock[0], flags);
		kfree_skb(skb);
		return;
	}

	fnic_queue_wq_eth_desc(wq, skb, pa, skb->len,
			       0 /* hw inserts cos value */,
			       fnic->vlan_id, 1);
	spin_unlock_irqrestore(&fnic->wq_lock[0], flags);
}

/*
 * Send FC frame.
 */
static int fnic_send_frame(struct fnic *fnic, struct fc_frame *fp)
{
	struct vnic_wq *wq = &fnic->wq[0];
	struct sk_buff *skb;
	dma_addr_t pa;
	struct ethhdr *eth_hdr;
	struct vlan_ethhdr *vlan_hdr;
	struct fcoe_hdr *fcoe_hdr;
	struct fc_frame_header *fh;
	u32 tot_len, eth_hdr_len;
	int ret = 0;
	unsigned long flags;

	fh = fc_frame_header_get(fp);
	skb = fp_skb(fp);

	if (unlikely(fh->fh_r_ctl == FC_RCTL_ELS_REQ) &&
	    fcoe_ctlr_els_send(&fnic->ctlr, fnic->lport, skb))
		return 0;

	if (!fnic->vlan_hw_insert) {
		eth_hdr_len = sizeof(*vlan_hdr) + sizeof(*fcoe_hdr);
		vlan_hdr = (struct vlan_ethhdr *)skb_push(skb, eth_hdr_len);
		eth_hdr = (struct ethhdr *)vlan_hdr;
		vlan_hdr->h_vlan_proto = htons(ETH_P_8021Q);
		vlan_hdr->h_vlan_encapsulated_proto = htons(ETH_P_FCOE);
		vlan_hdr->h_vlan_TCI = htons(fnic->vlan_id);
		fcoe_hdr = (struct fcoe_hdr *)(vlan_hdr + 1);
	} else {
		eth_hdr_len = sizeof(*eth_hdr) + sizeof(*fcoe_hdr);
		eth_hdr = (struct ethhdr *)skb_push(skb, eth_hdr_len);
		eth_hdr->h_proto = htons(ETH_P_FCOE);
		fcoe_hdr = (struct fcoe_hdr *)(eth_hdr + 1);
	}

	if (fnic->ctlr.map_dest)
		fc_fcoe_set_mac(eth_hdr->h_dest, fh->fh_d_id);
	else
		memcpy(eth_hdr->h_dest, fnic->ctlr.dest_addr, ETH_ALEN);
	memcpy(eth_hdr->h_source, fnic->data_src_addr, ETH_ALEN);

	tot_len = skb->len;
	BUG_ON(tot_len % 4);

	memset(fcoe_hdr, 0, sizeof(*fcoe_hdr));
	fcoe_hdr->fcoe_sof = fr_sof(fp);
	if (FC_FCOE_VER)
		FC_FCOE_ENCAPS_VER(fcoe_hdr, FC_FCOE_VER);

	pa = pci_map_single(fnic->pdev, eth_hdr, tot_len, PCI_DMA_TODEVICE);

	spin_lock_irqsave(&fnic->wq_lock[0], flags);

	if (!vnic_wq_desc_avail(wq)) {
		pci_unmap_single(fnic->pdev, pa,
				 tot_len, PCI_DMA_TODEVICE);
		ret = -1;
		goto fnic_send_frame_end;
	}

	fnic_queue_wq_desc(wq, skb, pa, tot_len, fr_eof(fp),
			   0 /* hw inserts cos value */,
			   fnic->vlan_id, 1, 1, 1);
fnic_send_frame_end:
	spin_unlock_irqrestore(&fnic->wq_lock[0], flags);

	if (ret)
		dev_kfree_skb_any(fp_skb(fp));

	return ret;
}

/*
 * fnic_send
 * Routine to send a raw frame
 */
int fnic_send(struct fc_lport *lp, struct fc_frame *fp)
{
	struct fnic *fnic = lport_priv(lp);
	unsigned long flags;

	if (fnic->in_remove) {
		dev_kfree_skb(fp_skb(fp));
		return -1;
	}

	/*
	 * Queue frame if in a transitional state.
	 * This occurs while registering the Port_ID / MAC address after FLOGI.
	 */
	spin_lock_irqsave(&fnic->fnic_lock, flags);
	if (fnic->state != FNIC_IN_FC_MODE && fnic->state != FNIC_IN_ETH_MODE) {
		skb_queue_tail(&fnic->tx_queue, fp_skb(fp));
		spin_unlock_irqrestore(&fnic->fnic_lock, flags);
		return 0;
	}
	spin_unlock_irqrestore(&fnic->fnic_lock, flags);

	return fnic_send_frame(fnic, fp);
}

/**
 * fnic_flush_tx() - send queued frames.
 * @fnic: fnic device
 *
 * Send frames that were waiting to go out in FC or Ethernet mode.
 * Whenever changing modes we purge queued frames, so these frames should
 * be queued for the stable mode that we're in, either FC or Ethernet.
 *
 * Called without fnic_lock held.
 */
void fnic_flush_tx(struct fnic *fnic)
{
	struct sk_buff *skb;
	struct fc_frame *fp;

	while ((skb = skb_dequeue(&fnic->tx_queue))) {
		fp = (struct fc_frame *)skb;
		fnic_send_frame(fnic, fp);
	}
}

/**
 * fnic_set_eth_mode() - put fnic into ethernet mode.
 * @fnic: fnic device
 *
 * Called without fnic lock held.
 */
static void fnic_set_eth_mode(struct fnic *fnic)
{
	unsigned long flags;
	enum fnic_state old_state;
	int ret;

	spin_lock_irqsave(&fnic->fnic_lock, flags);
again:
	old_state = fnic->state;
	switch (old_state) {
	case FNIC_IN_FC_MODE:
	case FNIC_IN_ETH_TRANS_FC_MODE:
	default:
		fnic->state = FNIC_IN_FC_TRANS_ETH_MODE;
		spin_unlock_irqrestore(&fnic->fnic_lock, flags);

		ret = fnic_fw_reset_handler(fnic);

		spin_lock_irqsave(&fnic->fnic_lock, flags);
		if (fnic->state != FNIC_IN_FC_TRANS_ETH_MODE)
			goto again;
		if (ret)
			fnic->state = old_state;
		break;

	case FNIC_IN_FC_TRANS_ETH_MODE:
	case FNIC_IN_ETH_MODE:
		break;
	}
	spin_unlock_irqrestore(&fnic->fnic_lock, flags);
}

static void fnic_wq_complete_frame_send(struct vnic_wq *wq,
					struct cq_desc *cq_desc,
					struct vnic_wq_buf *buf, void *opaque)
{
	struct sk_buff *skb = buf->os_buf;
	struct fc_frame *fp = (struct fc_frame *)skb;
	struct fnic *fnic = vnic_dev_priv(wq->vdev);

	pci_unmap_single(fnic->pdev, buf->dma_addr,
			 buf->len, PCI_DMA_TODEVICE);
	dev_kfree_skb_irq(fp_skb(fp));
	buf->os_buf = NULL;
}

static int fnic_wq_cmpl_handler_cont(struct vnic_dev *vdev,
				     struct cq_desc *cq_desc, u8 type,
				     u16 q_number, u16 completed_index,
				     void *opaque)
{
	struct fnic *fnic = vnic_dev_priv(vdev);
	unsigned long flags;

	spin_lock_irqsave(&fnic->wq_lock[q_number], flags);
	vnic_wq_service(&fnic->wq[q_number], cq_desc, completed_index,
			fnic_wq_complete_frame_send, NULL);
	spin_unlock_irqrestore(&fnic->wq_lock[q_number], flags);

	return 0;
}

int fnic_wq_cmpl_handler(struct fnic *fnic, int work_to_do)
{
	unsigned int wq_work_done = 0;
	unsigned int i;

	for (i = 0; i < fnic->raw_wq_count; i++) {
		wq_work_done += vnic_cq_service(&fnic->cq[fnic->rq_count+i],
						work_to_do,
						fnic_wq_cmpl_handler_cont,
						NULL);
	}

	return wq_work_done;
}

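/*
 * Descriptive comment added for clarity; it only restates what the code
 * below does (DMA unmap plus skb free for a buffer still owned by the WQ).
 */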
void fnic_free_wq_buf(struct vnic_wq *wq, struct vnic_wq_buf *buf)
{
	struct fc_frame *fp = buf->os_buf;
	struct fnic *fnic = vnic_dev_priv(wq->vdev);

	pci_unmap_single(fnic->pdev, buf->dma_addr,
			 buf->len, PCI_DMA_TODEVICE);

	dev_kfree_skb(fp_skb(fp));
	buf->os_buf = NULL;
}

void fnic_fcoe_reset_vlans(struct fnic *fnic)
{
	unsigned long flags;
	struct fcoe_vlan *vlan;
	struct fcoe_vlan *next;

	/*
	 * indicate a link down to fcoe so that all fcf's are free'd
	 * might not be required since we did this before sending vlan
	 * discovery request
	 */
	spin_lock_irqsave(&fnic->vlans_lock, flags);
	if (!list_empty(&fnic->vlans)) {
		list_for_each_entry_safe(vlan, next, &fnic->vlans, list) {
			list_del(&vlan->list);
			kfree(vlan);
		}
	}
	spin_unlock_irqrestore(&fnic->vlans_lock, flags);
}

void fnic_handle_fip_timer(struct fnic *fnic)
{
	unsigned long flags;
	struct fcoe_vlan *vlan;
	struct fnic_stats *fnic_stats = &fnic->fnic_stats;
	u64 sol_time;

	spin_lock_irqsave(&fnic->fnic_lock, flags);
	if (fnic->stop_rx_link_events) {
		spin_unlock_irqrestore(&fnic->fnic_lock, flags);
		return;
	}
	spin_unlock_irqrestore(&fnic->fnic_lock, flags);

	if (fnic->ctlr.mode == FIP_ST_NON_FIP)
		return;

	spin_lock_irqsave(&fnic->vlans_lock, flags);
	if (list_empty(&fnic->vlans)) {
		/* no vlans available, try again */
		FNIC_FCS_DBG(KERN_DEBUG, fnic->lport->host,
			     "Start VLAN Discovery\n");
		spin_unlock_irqrestore(&fnic->vlans_lock, flags);
		fnic_event_enq(fnic, FNIC_EVT_START_VLAN_DISC);
		return;
	}

	vlan = list_first_entry(&fnic->vlans, struct fcoe_vlan, list);
	shost_printk(KERN_DEBUG, fnic->lport->host,
		     "fip_timer: vlan %d state %d sol_count %d\n",
		     vlan->vid, vlan->state, vlan->sol_count);
	switch (vlan->state) {
	case FIP_VLAN_USED:
		FNIC_FCS_DBG(KERN_DEBUG, fnic->lport->host,
			     "FIP VLAN is selected for FC transaction\n");
		spin_unlock_irqrestore(&fnic->vlans_lock, flags);
		break;
	case FIP_VLAN_FAILED:
		/* if all vlans are in failed state, restart vlan disc */
		FNIC_FCS_DBG(KERN_DEBUG, fnic->lport->host,
			     "Start VLAN Discovery\n");
		spin_unlock_irqrestore(&fnic->vlans_lock, flags);
		fnic_event_enq(fnic, FNIC_EVT_START_VLAN_DISC);
		break;
	case FIP_VLAN_SENT:
		if (vlan->sol_count >= FCOE_CTLR_MAX_SOL) {
			/*
			 * no response on this vlan, remove from the list.
			 * Try the next vlan
			 */
			shost_printk(KERN_INFO, fnic->lport->host,
				     "Dequeue this VLAN ID %d from list\n",
				     vlan->vid);
			list_del(&vlan->list);
			kfree(vlan);
			vlan = NULL;
			if (list_empty(&fnic->vlans)) {
				/* we exhausted all vlans, restart vlan disc */
				spin_unlock_irqrestore(&fnic->vlans_lock,
						       flags);
				shost_printk(KERN_INFO, fnic->lport->host,
					     "fip_timer: vlan list empty, "
					     "trigger vlan disc\n");
				fnic_event_enq(fnic, FNIC_EVT_START_VLAN_DISC);
				return;
			}
			/* check the next vlan */
			vlan = list_first_entry(&fnic->vlans, struct fcoe_vlan,
						list);
			fnic->set_vlan(fnic, vlan->vid);
			vlan->state = FIP_VLAN_SENT; /* sent now */
		}
		spin_unlock_irqrestore(&fnic->vlans_lock, flags);
		atomic64_inc(&fnic_stats->vlan_stats.sol_expiry_count);
		vlan->sol_count++;
		sol_time = jiffies + msecs_to_jiffies(FCOE_CTLR_START_DELAY);
		mod_timer(&fnic->fip_timer, round_jiffies(sol_time));
		break;
	}
}