/*
 * Copyright 2008 Cisco Systems, Inc.  All rights reserved.
 * Copyright 2007 Nuova Systems, Inc.  All rights reserved.
 *
 * This program is free software; you may redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; version 2 of the License.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 */
#include <linux/errno.h>
#include <linux/pci.h>
#include <linux/slab.h>
#include <linux/skbuff.h>
#include <linux/interrupt.h>
#include <linux/spinlock.h>
#include <linux/if_ether.h>
#include <linux/if_vlan.h>
#include <linux/workqueue.h>
#include <scsi/fc/fc_fip.h>
#include <scsi/fc/fc_els.h>
#include <scsi/fc/fc_fcoe.h>
#include <scsi/fc_frame.h>
#include <scsi/libfc.h>
#include "fnic_io.h"
#include "fnic.h"
#include "fnic_fip.h"
#include "cq_enet_desc.h"
#include "cq_exch_desc.h"

static u8 fcoe_all_fcfs[ETH_ALEN] = FIP_ALL_FCF_MACS;
struct workqueue_struct *fnic_fip_queue;
struct workqueue_struct *fnic_event_queue;

static void fnic_set_eth_mode(struct fnic *);
static void fnic_fcoe_send_vlan_req(struct fnic *fnic);
static void fnic_fcoe_start_fcf_disc(struct fnic *fnic);
static void fnic_fcoe_process_vlan_resp(struct fnic *fnic, struct sk_buff *);
static int fnic_fcoe_vlan_check(struct fnic *fnic, u16 flag);
static int fnic_fcoe_handle_fip_frame(struct fnic *fnic, struct sk_buff *skb);

void fnic_handle_link(struct work_struct *work)
{
	struct fnic *fnic = container_of(work, struct fnic, link_work);
	unsigned long flags;
	int old_link_status;
	u32 old_link_down_cnt;

	spin_lock_irqsave(&fnic->fnic_lock, flags);

	if (fnic->stop_rx_link_events) {
		spin_unlock_irqrestore(&fnic->fnic_lock, flags);
		return;
	}

	old_link_down_cnt = fnic->link_down_cnt;
	old_link_status = fnic->link_status;
	fnic->link_status = vnic_dev_link_status(fnic->vdev);
	fnic->link_down_cnt = vnic_dev_link_down_cnt(fnic->vdev);

	switch (vnic_dev_port_speed(fnic->vdev)) {
	case DCEM_PORTSPEED_10G:
		fc_host_speed(fnic->lport->host) = FC_PORTSPEED_10GBIT;
		fnic->lport->link_supported_speeds = FC_PORTSPEED_10GBIT;
		break;
	case DCEM_PORTSPEED_25G:
		fc_host_speed(fnic->lport->host) = FC_PORTSPEED_25GBIT;
		fnic->lport->link_supported_speeds = FC_PORTSPEED_25GBIT;
		break;
	case DCEM_PORTSPEED_40G:
	case DCEM_PORTSPEED_4x10G:
		fc_host_speed(fnic->lport->host) = FC_PORTSPEED_40GBIT;
		fnic->lport->link_supported_speeds = FC_PORTSPEED_40GBIT;
		break;
	case DCEM_PORTSPEED_100G:
		fc_host_speed(fnic->lport->host) = FC_PORTSPEED_100GBIT;
		fnic->lport->link_supported_speeds = FC_PORTSPEED_100GBIT;
		break;
	default:
		fc_host_speed(fnic->lport->host) = FC_PORTSPEED_UNKNOWN;
		fnic->lport->link_supported_speeds = FC_PORTSPEED_UNKNOWN;
		break;
	}

	if (old_link_status == fnic->link_status) {
		if (!fnic->link_status) {
			/* DOWN -> DOWN */
			spin_unlock_irqrestore(&fnic->fnic_lock, flags);
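			/*
			 * Link was down and is still down: only record the
			 * event in the FC trace buffer, libfc does not need
			 * to be notified again.
			 */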
			fnic_fc_trace_set_data(fnic->lport->host->host_no,
				FNIC_FC_LE, "Link Status: DOWN->DOWN",
				strlen("Link Status: DOWN->DOWN"));
		} else {
			if (old_link_down_cnt != fnic->link_down_cnt) {
				/* UP -> DOWN -> UP */
				fnic->lport->host_stats.link_failure_count++;
				spin_unlock_irqrestore(&fnic->fnic_lock, flags);
				fnic_fc_trace_set_data(
					fnic->lport->host->host_no,
					FNIC_FC_LE,
					"Link Status: UP_DOWN_UP",
					strlen("Link Status: UP_DOWN_UP"));
				FNIC_FCS_DBG(KERN_DEBUG, fnic->lport->host,
					     "link down\n");
				fcoe_ctlr_link_down(&fnic->ctlr);
				if (fnic->config.flags & VFCF_FIP_CAPABLE) {
					/* start FCoE VLAN discovery */
					fnic_fc_trace_set_data(
						fnic->lport->host->host_no,
						FNIC_FC_LE,
						"Link Status: UP_DOWN_UP_VLAN",
						strlen(
						"Link Status: UP_DOWN_UP_VLAN"));
					fnic_fcoe_send_vlan_req(fnic);
					return;
				}
				FNIC_FCS_DBG(KERN_DEBUG, fnic->lport->host,
					     "link up\n");
				fcoe_ctlr_link_up(&fnic->ctlr);
			} else {
				/* UP -> UP */
				spin_unlock_irqrestore(&fnic->fnic_lock, flags);
				fnic_fc_trace_set_data(
					fnic->lport->host->host_no, FNIC_FC_LE,
					"Link Status: UP_UP",
					strlen("Link Status: UP_UP"));
			}
		}
	} else if (fnic->link_status) {
		/* DOWN -> UP */
		spin_unlock_irqrestore(&fnic->fnic_lock, flags);
		if (fnic->config.flags & VFCF_FIP_CAPABLE) {
			/* start FCoE VLAN discovery */
			fnic_fc_trace_set_data(
				fnic->lport->host->host_no,
				FNIC_FC_LE, "Link Status: DOWN_UP_VLAN",
				strlen("Link Status: DOWN_UP_VLAN"));
			fnic_fcoe_send_vlan_req(fnic);
			return;
		}
		FNIC_FCS_DBG(KERN_DEBUG, fnic->lport->host, "link up\n");
		fnic_fc_trace_set_data(fnic->lport->host->host_no, FNIC_FC_LE,
			"Link Status: DOWN_UP", strlen("Link Status: DOWN_UP"));
		fcoe_ctlr_link_up(&fnic->ctlr);
	} else {
		/* UP -> DOWN */
		fnic->lport->host_stats.link_failure_count++;
		spin_unlock_irqrestore(&fnic->fnic_lock, flags);
		FNIC_FCS_DBG(KERN_DEBUG, fnic->lport->host, "link down\n");
		fnic_fc_trace_set_data(
			fnic->lport->host->host_no, FNIC_FC_LE,
			"Link Status: UP_DOWN",
			strlen("Link Status: UP_DOWN"));
		if (fnic->config.flags & VFCF_FIP_CAPABLE) {
			FNIC_FCS_DBG(KERN_DEBUG, fnic->lport->host,
				     "deleting fip-timer during link-down\n");
			del_timer_sync(&fnic->fip_timer);
		}
		fcoe_ctlr_link_down(&fnic->ctlr);
	}

}

/*
 * This function passes incoming fabric frames to libFC
 */
void fnic_handle_frame(struct work_struct *work)
{
	struct fnic *fnic = container_of(work, struct fnic, frame_work);
	struct fc_lport *lp = fnic->lport;
	unsigned long flags;
	struct sk_buff *skb;
	struct fc_frame *fp;

	while ((skb = skb_dequeue(&fnic->frame_queue))) {

		spin_lock_irqsave(&fnic->fnic_lock, flags);
		if (fnic->stop_rx_link_events) {
			spin_unlock_irqrestore(&fnic->fnic_lock, flags);
			dev_kfree_skb(skb);
			return;
		}
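		/*
		 * struct fc_frame wraps the sk_buff, so the queued skb can
		 * be handed to libfc as an FC frame with a simple cast.
		 */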
		fp = (struct fc_frame *)skb;

		/*
		 * If we're in a transitional state, just re-queue and return.
		 * The queue will be serviced when we get to a stable state.
		 */
		if (fnic->state != FNIC_IN_FC_MODE &&
		    fnic->state != FNIC_IN_ETH_MODE) {
			skb_queue_head(&fnic->frame_queue, skb);
			spin_unlock_irqrestore(&fnic->fnic_lock, flags);
			return;
		}
		spin_unlock_irqrestore(&fnic->fnic_lock, flags);

		fc_exch_recv(lp, fp);
	}
}

void fnic_fcoe_evlist_free(struct fnic *fnic)
{
	struct fnic_event *fevt = NULL;
	struct fnic_event *next = NULL;
	unsigned long flags;

	spin_lock_irqsave(&fnic->fnic_lock, flags);
	if (list_empty(&fnic->evlist)) {
		spin_unlock_irqrestore(&fnic->fnic_lock, flags);
		return;
	}

	list_for_each_entry_safe(fevt, next, &fnic->evlist, list) {
		list_del(&fevt->list);
		kfree(fevt);
	}
	spin_unlock_irqrestore(&fnic->fnic_lock, flags);
}

void fnic_handle_event(struct work_struct *work)
{
	struct fnic *fnic = container_of(work, struct fnic, event_work);
	struct fnic_event *fevt = NULL;
	struct fnic_event *next = NULL;
	unsigned long flags;

	spin_lock_irqsave(&fnic->fnic_lock, flags);
	if (list_empty(&fnic->evlist)) {
		spin_unlock_irqrestore(&fnic->fnic_lock, flags);
		return;
	}

	list_for_each_entry_safe(fevt, next, &fnic->evlist, list) {
		if (fnic->stop_rx_link_events) {
			list_del(&fevt->list);
			kfree(fevt);
			spin_unlock_irqrestore(&fnic->fnic_lock, flags);
			return;
		}
		/*
		 * If we're in a transitional state, just re-queue and return.
		 * The queue will be serviced when we get to a stable state.
		 */
		if (fnic->state != FNIC_IN_FC_MODE &&
		    fnic->state != FNIC_IN_ETH_MODE) {
			spin_unlock_irqrestore(&fnic->fnic_lock, flags);
			return;
		}

		list_del(&fevt->list);
		switch (fevt->event) {
		case FNIC_EVT_START_VLAN_DISC:
			spin_unlock_irqrestore(&fnic->fnic_lock, flags);
			fnic_fcoe_send_vlan_req(fnic);
			spin_lock_irqsave(&fnic->fnic_lock, flags);
			break;
		case FNIC_EVT_START_FCF_DISC:
			FNIC_FCS_DBG(KERN_DEBUG, fnic->lport->host,
				     "Start FCF Discovery\n");
			fnic_fcoe_start_fcf_disc(fnic);
			break;
		default:
			FNIC_FCS_DBG(KERN_DEBUG, fnic->lport->host,
				     "Unknown event 0x%x\n", fevt->event);
			break;
		}
		kfree(fevt);
	}
	spin_unlock_irqrestore(&fnic->fnic_lock, flags);
}
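
/*
 * FIP (FCoE Initialization Protocol) helpers used by the FIP frame and
 * timer handling below: VLAN discovery, FCF discovery kick-off and
 * detection of FLOGI rejects.
 */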

/**
 * is_fnic_fip_flogi_reject() - check if the received FIP FLOGI frame is rejected
 * @fip: The FCoE controller that received the frame
 * @skb: The received FIP frame
 *
 * Return: non-zero if the encapsulated FLOGI reply is an LS_RJT (typically
 * rejected as an unsupported command or for insufficient resources),
 * zero otherwise.
 */
static inline int is_fnic_fip_flogi_reject(struct fcoe_ctlr *fip,
					   struct sk_buff *skb)
{
	struct fc_lport *lport = fip->lp;
	struct fip_header *fiph;
	struct fc_frame_header *fh = NULL;
	struct fip_desc *desc;
	struct fip_encaps *els;
	enum fip_desc_type els_dtype = 0;
	u16 op;
	u8 els_op;
	u8 sub;

	size_t els_len = 0;
	size_t rlen;
	size_t dlen = 0;

	if (skb_linearize(skb))
		return 0;

	if (skb->len < sizeof(*fiph))
		return 0;

	fiph = (struct fip_header *)skb->data;
	op = ntohs(fiph->fip_op);
	sub = fiph->fip_subcode;

	if (op != FIP_OP_LS)
		return 0;

	if (sub != FIP_SC_REP)
		return 0;

	rlen = ntohs(fiph->fip_dl_len) * 4;
	if (rlen + sizeof(*fiph) > skb->len)
		return 0;

	desc = (struct fip_desc *)(fiph + 1);
	dlen = desc->fip_dlen * FIP_BPW;

	if (desc->fip_dtype == FIP_DT_FLOGI) {

		if (dlen < sizeof(*els) + sizeof(*fh) + 1)
			return 0;

		els_len = dlen - sizeof(*els);
		els = (struct fip_encaps *)desc;
		fh = (struct fc_frame_header *)(els + 1);
		els_dtype = desc->fip_dtype;

		if (!fh)
			return 0;

		/*
		 * ELS command code, reason and explanation should be
		 * reject, unsupported command and insufficient resource.
		 */
		els_op = *(u8 *)(fh + 1);
		if (els_op == ELS_LS_RJT) {
			shost_printk(KERN_INFO, lport->host,
				     "Flogi Request Rejected by Switch\n");
			return 1;
		}
		shost_printk(KERN_INFO, lport->host,
			     "Flogi Request Accepted by Switch\n");
	}
	return 0;
}

static void fnic_fcoe_send_vlan_req(struct fnic *fnic)
{
	struct fcoe_ctlr *fip = &fnic->ctlr;
	struct fnic_stats *fnic_stats = &fnic->fnic_stats;
	struct sk_buff *skb;
	char *eth_fr;
	int fr_len;
	struct fip_vlan *vlan;
	u64 vlan_tov;

	fnic_fcoe_reset_vlans(fnic);
	fnic->set_vlan(fnic, 0);

	if (printk_ratelimit())
		FNIC_FCS_DBG(KERN_INFO, fnic->lport->host,
			     "Sending VLAN request...\n");

	skb = dev_alloc_skb(sizeof(struct fip_vlan));
	if (!skb)
		return;

	fr_len = sizeof(*vlan);
	eth_fr = (char *)skb->data;
	vlan = (struct fip_vlan *)eth_fr;

	memset(vlan, 0, sizeof(*vlan));
	memcpy(vlan->eth.h_source, fip->ctl_src_addr, ETH_ALEN);
	memcpy(vlan->eth.h_dest, fcoe_all_fcfs, ETH_ALEN);
	vlan->eth.h_proto = htons(ETH_P_FIP);

	vlan->fip.fip_ver = FIP_VER_ENCAPS(FIP_VER);
	vlan->fip.fip_op = htons(FIP_OP_VLAN);
	vlan->fip.fip_subcode = FIP_SC_VL_REQ;
	vlan->fip.fip_dl_len = htons(sizeof(vlan->desc) / FIP_BPW);

	vlan->desc.mac.fd_desc.fip_dtype = FIP_DT_MAC;
	vlan->desc.mac.fd_desc.fip_dlen = sizeof(vlan->desc.mac) / FIP_BPW;
	memcpy(&vlan->desc.mac.fd_mac, fip->ctl_src_addr, ETH_ALEN);

	vlan->desc.wwnn.fd_desc.fip_dtype = FIP_DT_NAME;
	vlan->desc.wwnn.fd_desc.fip_dlen = sizeof(vlan->desc.wwnn) / FIP_BPW;
	put_unaligned_be64(fip->lp->wwnn, &vlan->desc.wwnn.fd_wwn);
	atomic64_inc(&fnic_stats->vlan_stats.vlan_disc_reqs);

	skb_put(skb, sizeof(*vlan));
	skb->protocol = htons(ETH_P_FIP);
	skb_reset_mac_header(skb);
	skb_reset_network_header(skb);
	fip->send(fip, skb);

	/* set a timer so that we can retry if there is no response */
	vlan_tov = jiffies + msecs_to_jiffies(FCOE_CTLR_FIPVLAN_TOV);
	mod_timer(&fnic->fip_timer, round_jiffies(vlan_tov));
}

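/*
 * fnic_fcoe_process_vlan_resp() - parse a FIP VLAN discovery response.
 *
 * Collects the advertised VLAN IDs on fnic->vlans, programs the first one
 * into the adapter and brings the FIP controller link up so that FCF
 * discovery (solicitation) can start on that VLAN.
 */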
static void fnic_fcoe_process_vlan_resp(struct fnic *fnic, struct sk_buff *skb)
{
	struct fcoe_ctlr *fip = &fnic->ctlr;
	struct fip_header *fiph;
	struct fip_desc *desc;
	struct fnic_stats *fnic_stats = &fnic->fnic_stats;
	u16 vid;
	size_t rlen;
	size_t dlen;
	struct fcoe_vlan *vlan;
	u64 sol_time;
	unsigned long flags;

	FNIC_FCS_DBG(KERN_INFO, fnic->lport->host,
		     "Received VLAN response...\n");

	fiph = (struct fip_header *)skb->data;

	FNIC_FCS_DBG(KERN_INFO, fnic->lport->host,
		     "Received VLAN response... OP 0x%x SUB_OP 0x%x\n",
		     ntohs(fiph->fip_op), fiph->fip_subcode);

	rlen = ntohs(fiph->fip_dl_len) * 4;
	fnic_fcoe_reset_vlans(fnic);
	spin_lock_irqsave(&fnic->vlans_lock, flags);
	desc = (struct fip_desc *)(fiph + 1);
	while (rlen > 0) {
		dlen = desc->fip_dlen * FIP_BPW;
		switch (desc->fip_dtype) {
		case FIP_DT_VLAN:
			vid = ntohs(((struct fip_vlan_desc *)desc)->fd_vlan);
			shost_printk(KERN_INFO, fnic->lport->host,
				     "process_vlan_resp: FIP VLAN %d\n", vid);
			vlan = kmalloc(sizeof(*vlan), GFP_ATOMIC);
			if (!vlan) {
				/* retry from timer */
				spin_unlock_irqrestore(&fnic->vlans_lock,
						       flags);
				goto out;
			}
			memset(vlan, 0, sizeof(struct fcoe_vlan));
			vlan->vid = vid & 0x0fff;
			vlan->state = FIP_VLAN_AVAIL;
			list_add_tail(&vlan->list, &fnic->vlans);
			break;
		}
		desc = (struct fip_desc *)((char *)desc + dlen);
		rlen -= dlen;
	}

	/* any VLAN descriptors present? */
	if (list_empty(&fnic->vlans)) {
		/* retry from timer */
		atomic64_inc(&fnic_stats->vlan_stats.resp_withno_vlanID);
		FNIC_FCS_DBG(KERN_INFO, fnic->lport->host,
			     "No VLAN descriptors in FIP VLAN response\n");
		spin_unlock_irqrestore(&fnic->vlans_lock, flags);
		goto out;
	}

	vlan = list_first_entry(&fnic->vlans, struct fcoe_vlan, list);
	fnic->set_vlan(fnic, vlan->vid);
	vlan->state = FIP_VLAN_SENT; /* sent now */
	vlan->sol_count++;
	spin_unlock_irqrestore(&fnic->vlans_lock, flags);

	/* start the solicitation */
	fcoe_ctlr_link_up(fip);

	sol_time = jiffies + msecs_to_jiffies(FCOE_CTLR_START_DELAY);
	mod_timer(&fnic->fip_timer, round_jiffies(sol_time));
out:
	return;
}

static void fnic_fcoe_start_fcf_disc(struct fnic *fnic)
{
	unsigned long flags;
	struct fcoe_vlan *vlan;
	u64 sol_time;

	spin_lock_irqsave(&fnic->vlans_lock, flags);
	vlan = list_first_entry(&fnic->vlans, struct fcoe_vlan, list);
	fnic->set_vlan(fnic, vlan->vid);
	vlan->state = FIP_VLAN_SENT; /* sent now */
	vlan->sol_count = 1;
	spin_unlock_irqrestore(&fnic->vlans_lock, flags);

	/* start the solicitation */
	fcoe_ctlr_link_up(&fnic->ctlr);

	sol_time = jiffies + msecs_to_jiffies(FCOE_CTLR_START_DELAY);
	mod_timer(&fnic->fip_timer, round_jiffies(sol_time));
}

static int fnic_fcoe_vlan_check(struct fnic *fnic, u16 flag)
{
	unsigned long flags;
	struct fcoe_vlan *fvlan;

	spin_lock_irqsave(&fnic->vlans_lock, flags);
	if (list_empty(&fnic->vlans)) {
		spin_unlock_irqrestore(&fnic->vlans_lock, flags);
		return -EINVAL;
	}

	fvlan = list_first_entry(&fnic->vlans, struct fcoe_vlan, list);
	if (fvlan->state == FIP_VLAN_USED) {
		spin_unlock_irqrestore(&fnic->vlans_lock, flags);
		return 0;
	}

	if (fvlan->state == FIP_VLAN_SENT) {
		fvlan->state = FIP_VLAN_USED;
		spin_unlock_irqrestore(&fnic->vlans_lock, flags);
		return 0;
	}
	spin_unlock_irqrestore(&fnic->vlans_lock, flags);
	return -EINVAL;
}

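/*
 * Queue a deferred fnic event on fnic->evlist and schedule the event
 * worker.  The event is allocated with GFP_ATOMIC, so this helper is
 * safe to call from atomic context.
 */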
static void fnic_event_enq(struct fnic *fnic, enum fnic_evt ev)
{
	struct fnic_event *fevt;
	unsigned long flags;

	fevt = kmalloc(sizeof(*fevt), GFP_ATOMIC);
	if (!fevt)
		return;

	fevt->fnic = fnic;
	fevt->event = ev;

	spin_lock_irqsave(&fnic->fnic_lock, flags);
	list_add_tail(&fevt->list, &fnic->evlist);
	spin_unlock_irqrestore(&fnic->fnic_lock, flags);

	schedule_work(&fnic->event_work);
}

static int fnic_fcoe_handle_fip_frame(struct fnic *fnic, struct sk_buff *skb)
{
	struct fip_header *fiph;
	int ret = 1;
	u16 op;
	u8 sub;

	if (!skb || !(skb->data))
		return -1;

	if (skb_linearize(skb))
		goto drop;

	fiph = (struct fip_header *)skb->data;
	op = ntohs(fiph->fip_op);
	sub = fiph->fip_subcode;

	if (FIP_VER_DECAPS(fiph->fip_ver) != FIP_VER)
		goto drop;

	if (ntohs(fiph->fip_dl_len) * FIP_BPW + sizeof(*fiph) > skb->len)
		goto drop;

	if (op == FIP_OP_DISC && sub == FIP_SC_ADV) {
		if (fnic_fcoe_vlan_check(fnic, ntohs(fiph->fip_flags)))
			goto drop;
		/* pass it on to fcoe */
		ret = 1;
	} else if (op == FIP_OP_VLAN && sub == FIP_SC_VL_NOTE) {
		/* process the FIP VLAN response */
		fnic_fcoe_process_vlan_resp(fnic, skb);
		ret = 0;
	} else if (op == FIP_OP_CTRL && sub == FIP_SC_CLR_VLINK) {
		/* received CVL request, restart vlan disc */
		fnic_event_enq(fnic, FNIC_EVT_START_VLAN_DISC);
		/* pass it on to fcoe */
		ret = 1;
	}
drop:
	return ret;
}

void fnic_handle_fip_frame(struct work_struct *work)
{
	struct fnic *fnic = container_of(work, struct fnic, fip_frame_work);
	struct fnic_stats *fnic_stats = &fnic->fnic_stats;
	unsigned long flags;
	struct sk_buff *skb;
	struct ethhdr *eh;

	while ((skb = skb_dequeue(&fnic->fip_frame_queue))) {
		spin_lock_irqsave(&fnic->fnic_lock, flags);
		if (fnic->stop_rx_link_events) {
			spin_unlock_irqrestore(&fnic->fnic_lock, flags);
			dev_kfree_skb(skb);
			return;
		}
		/*
		 * If we're in a transitional state, just re-queue and return.
		 * The queue will be serviced when we get to a stable state.
		 */
		if (fnic->state != FNIC_IN_FC_MODE &&
		    fnic->state != FNIC_IN_ETH_MODE) {
			skb_queue_head(&fnic->fip_frame_queue, skb);
			spin_unlock_irqrestore(&fnic->fnic_lock, flags);
			return;
		}
		spin_unlock_irqrestore(&fnic->fnic_lock, flags);
		eh = (struct ethhdr *)skb->data;
		if (eh->h_proto == htons(ETH_P_FIP)) {
			skb_pull(skb, sizeof(*eh));
			if (fnic_fcoe_handle_fip_frame(fnic, skb) <= 0) {
				dev_kfree_skb(skb);
				continue;
			}
			/*
			 * If there is a FLOGI reject, clear all
			 * FCFs and restart from scratch.
			 */
			if (is_fnic_fip_flogi_reject(&fnic->ctlr, skb)) {
				atomic64_inc(
					&fnic_stats->vlan_stats.flogi_rejects);
				shost_printk(KERN_INFO, fnic->lport->host,
					     "Trigger a Link down - VLAN Disc\n");
				fcoe_ctlr_link_down(&fnic->ctlr);
				/* start FCoE VLAN discovery */
				fnic_fcoe_send_vlan_req(fnic);
				dev_kfree_skb(skb);
				continue;
			}
			fcoe_ctlr_recv(&fnic->ctlr, skb);
			continue;
		}
	}
}

/**
 * fnic_import_rq_eth_pkt() - handle received FCoE or FIP frame.
 * @fnic: fnic instance.
 * @skb: Ethernet Frame.
 *
 * Return: 1 if the frame was consumed (queued for FIP processing), 0 if it
 * was converted in place to an FC frame for the caller, or -1 if it was
 * dropped and freed.
 */
static inline int fnic_import_rq_eth_pkt(struct fnic *fnic, struct sk_buff *skb)
{
	struct fc_frame *fp;
	struct ethhdr *eh;
	struct fcoe_hdr *fcoe_hdr;
	struct fcoe_crc_eof *ft;

	/*
	 * Undo VLAN encapsulation if present.
	 */
	eh = (struct ethhdr *)skb->data;
	if (eh->h_proto == htons(ETH_P_8021Q)) {
		memmove((u8 *)eh + VLAN_HLEN, eh, ETH_ALEN * 2);
		eh = skb_pull(skb, VLAN_HLEN);
		skb_reset_mac_header(skb);
	}
	if (eh->h_proto == htons(ETH_P_FIP)) {
		if (!(fnic->config.flags & VFCF_FIP_CAPABLE)) {
			printk(KERN_ERR "Dropped FIP frame, as firmware "
					"uses non-FIP mode, enable FIP "
					"using UCSM\n");
			goto drop;
		}
		if ((fnic_fc_trace_set_data(fnic->lport->host->host_no,
			FNIC_FC_RECV|0x80, (char *)skb->data, skb->len)) != 0) {
			printk(KERN_ERR "fnic ctlr frame trace error!!!\n");
		}
		skb_queue_tail(&fnic->fip_frame_queue, skb);
		queue_work(fnic_fip_queue, &fnic->fip_frame_work);
		return 1;		/* let caller know packet was used */
	}
	if (eh->h_proto != htons(ETH_P_FCOE))
		goto drop;
	skb_set_network_header(skb, sizeof(*eh));
	skb_pull(skb, sizeof(*eh));

	fcoe_hdr = (struct fcoe_hdr *)skb->data;
	if (FC_FCOE_DECAPS_VER(fcoe_hdr) != FC_FCOE_VER)
		goto drop;

	fp = (struct fc_frame *)skb;
	fc_frame_init(fp);
	fr_sof(fp) = fcoe_hdr->fcoe_sof;
	skb_pull(skb, sizeof(struct fcoe_hdr));
	skb_reset_transport_header(skb);

	ft = (struct fcoe_crc_eof *)(skb->data + skb->len - sizeof(*ft));
	fr_eof(fp) = ft->fcoe_eof;
	skb_trim(skb, skb->len - sizeof(*ft));
	return 0;
drop:
	dev_kfree_skb_irq(skb);
	return -1;
}

/**
 * fnic_update_mac_locked() - set data MAC address and filters.
 * @fnic: fnic instance.
 * @new: newly-assigned FCoE MAC address.
 *
 * Called with the fnic lock held.
 */
void fnic_update_mac_locked(struct fnic *fnic, u8 *new)
{
	u8 *ctl = fnic->ctlr.ctl_src_addr;
	u8 *data = fnic->data_src_addr;

	if (is_zero_ether_addr(new))
		new = ctl;
	if (ether_addr_equal(data, new))
		return;
	FNIC_FCS_DBG(KERN_DEBUG, fnic->lport->host, "update_mac %pM\n", new);
	if (!is_zero_ether_addr(data) && !ether_addr_equal(data, ctl))
		vnic_dev_del_addr(fnic->vdev, data);
	memcpy(data, new, ETH_ALEN);
	if (!ether_addr_equal(new, ctl))
		vnic_dev_add_addr(fnic->vdev, new);
}

/**
 * fnic_update_mac() - set data MAC address and filters.
 * @lport: local port.
 * @new: newly-assigned FCoE MAC address.
 */
void fnic_update_mac(struct fc_lport *lport, u8 *new)
{
	struct fnic *fnic = lport_priv(lport);

	spin_lock_irq(&fnic->fnic_lock);
	fnic_update_mac_locked(fnic, new);
	spin_unlock_irq(&fnic->fnic_lock);
}

/**
 * fnic_set_port_id() - set the port_ID after successful FLOGI.
 * @lport: local port.
 * @port_id: assigned FC_ID.
 * @fp: received frame containing the FLOGI accept or NULL.
 *
 * This is called from libfc when a new FC_ID has been assigned.
 * This causes us to reset the firmware to FC_MODE and set up the new MAC
 * address and FC_ID.
 *
 * It is also called with FC_ID 0 when we're logged off.
 *
 * If the FC_ID is due to point-to-point, fp may be NULL.
 */
void fnic_set_port_id(struct fc_lport *lport, u32 port_id, struct fc_frame *fp)
{
	struct fnic *fnic = lport_priv(lport);
	u8 *mac;
	int ret;

	FNIC_FCS_DBG(KERN_DEBUG, lport->host, "set port_id %x fp %p\n",
		     port_id, fp);

	/*
	 * If we're clearing the FC_ID, change to use the ctl_src_addr.
	 * Set ethernet mode to send FLOGI.
	 */
	if (!port_id) {
		fnic_update_mac(lport, fnic->ctlr.ctl_src_addr);
		fnic_set_eth_mode(fnic);
		return;
	}

	if (fp) {
		mac = fr_cb(fp)->granted_mac;
		if (is_zero_ether_addr(mac)) {
			/* non-FIP - FLOGI already accepted - ignore return */
			fcoe_ctlr_recv_flogi(&fnic->ctlr, lport, fp);
		}
		fnic_update_mac(lport, mac);
	}

	/* Change state to reflect transition to FC mode */
	spin_lock_irq(&fnic->fnic_lock);
	if (fnic->state == FNIC_IN_ETH_MODE || fnic->state == FNIC_IN_FC_MODE)
		fnic->state = FNIC_IN_ETH_TRANS_FC_MODE;
	else {
		FNIC_FCS_DBG(KERN_DEBUG, fnic->lport->host,
			     "Unexpected fnic state %s while"
			     " processing flogi resp\n",
			     fnic_state_to_str(fnic->state));
		spin_unlock_irq(&fnic->fnic_lock);
		return;
	}
	spin_unlock_irq(&fnic->fnic_lock);

	/*
	 * Send FLOGI registration to firmware to set up FC mode.
	 * The new address will be set up when registration completes.
	 */
	ret = fnic_flogi_reg_handler(fnic, port_id);

	if (ret < 0) {
		spin_lock_irq(&fnic->fnic_lock);
		if (fnic->state == FNIC_IN_ETH_TRANS_FC_MODE)
			fnic->state = FNIC_IN_ETH_MODE;
		spin_unlock_irq(&fnic->fnic_lock);
	}
}

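/*
 * fnic_rq_cmpl_frame_recv() - per-descriptor receive completion handler.
 *
 * Decodes the completion (FCP or Ethernet descriptor type), drops frames
 * with FCS/CRC or encapsulation errors, and queues good FC frames on
 * fnic->frame_queue for fnic_handle_frame().  Ethernet completions are
 * first passed through fnic_import_rq_eth_pkt(), which diverts FIP
 * frames to the FIP work queue.
 */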
static void fnic_rq_cmpl_frame_recv(struct vnic_rq *rq, struct cq_desc *cq_desc,
				    struct vnic_rq_buf *buf,
				    int skipped __attribute__((unused)),
				    void *opaque)
{
	struct fnic *fnic = vnic_dev_priv(rq->vdev);
	struct sk_buff *skb;
	struct fc_frame *fp;
	struct fnic_stats *fnic_stats = &fnic->fnic_stats;
	unsigned int eth_hdrs_stripped;
	u8 type, color, eop, sop, ingress_port, vlan_stripped;
	u8 fcoe = 0, fcoe_sof, fcoe_eof;
	u8 fcoe_fc_crc_ok = 1, fcoe_enc_error = 0;
	u8 tcp_udp_csum_ok, udp, tcp, ipv4_csum_ok;
	u8 ipv6, ipv4, ipv4_fragment, rss_type, csum_not_calc;
	u8 fcs_ok = 1, packet_error = 0;
	u16 q_number, completed_index, bytes_written = 0, vlan, checksum;
	u32 rss_hash;
	u16 exchange_id, tmpl;
	u8 sof = 0;
	u8 eof = 0;
	u32 fcp_bytes_written = 0;
	unsigned long flags;

	pci_unmap_single(fnic->pdev, buf->dma_addr, buf->len,
			 PCI_DMA_FROMDEVICE);
	skb = buf->os_buf;
	fp = (struct fc_frame *)skb;
	buf->os_buf = NULL;

	cq_desc_dec(cq_desc, &type, &color, &q_number, &completed_index);
	if (type == CQ_DESC_TYPE_RQ_FCP) {
		cq_fcp_rq_desc_dec((struct cq_fcp_rq_desc *)cq_desc,
				   &type, &color, &q_number, &completed_index,
				   &eop, &sop, &fcoe_fc_crc_ok, &exchange_id,
				   &tmpl, &fcp_bytes_written, &sof, &eof,
				   &ingress_port, &packet_error,
				   &fcoe_enc_error, &fcs_ok, &vlan_stripped,
				   &vlan);
		eth_hdrs_stripped = 1;
		skb_trim(skb, fcp_bytes_written);
		fr_sof(fp) = sof;
		fr_eof(fp) = eof;

	} else if (type == CQ_DESC_TYPE_RQ_ENET) {
		cq_enet_rq_desc_dec((struct cq_enet_rq_desc *)cq_desc,
				    &type, &color, &q_number, &completed_index,
				    &ingress_port, &fcoe, &eop, &sop,
				    &rss_type, &csum_not_calc, &rss_hash,
				    &bytes_written, &packet_error,
				    &vlan_stripped, &vlan, &checksum,
				    &fcoe_sof, &fcoe_fc_crc_ok,
				    &fcoe_enc_error, &fcoe_eof,
				    &tcp_udp_csum_ok, &udp, &tcp,
				    &ipv4_csum_ok, &ipv6, &ipv4,
				    &ipv4_fragment, &fcs_ok);
		eth_hdrs_stripped = 0;
		skb_trim(skb, bytes_written);
		if (!fcs_ok) {
			atomic64_inc(&fnic_stats->misc_stats.frame_errors);
			FNIC_FCS_DBG(KERN_DEBUG, fnic->lport->host,
				     "fcs error. dropping packet.\n");
			goto drop;
		}
		if (fnic_import_rq_eth_pkt(fnic, skb))
			return;

	} else {
		/* wrong CQ type */
		shost_printk(KERN_ERR, fnic->lport->host,
			     "fnic rq_cmpl wrong cq type x%x\n", type);
		goto drop;
	}

	if (!fcs_ok || packet_error || !fcoe_fc_crc_ok || fcoe_enc_error) {
		atomic64_inc(&fnic_stats->misc_stats.frame_errors);
		FNIC_FCS_DBG(KERN_DEBUG, fnic->lport->host,
			     "fnic rq_cmpl fcoe x%x fcsok x%x"
			     " pkterr x%x fcoe_fc_crc_ok x%x, fcoe_enc_err"
			     " x%x\n",
			     fcoe, fcs_ok, packet_error,
			     fcoe_fc_crc_ok, fcoe_enc_error);
		goto drop;
	}

	spin_lock_irqsave(&fnic->fnic_lock, flags);
	if (fnic->stop_rx_link_events) {
		spin_unlock_irqrestore(&fnic->fnic_lock, flags);
		goto drop;
	}
	fr_dev(fp) = fnic->lport;
	spin_unlock_irqrestore(&fnic->fnic_lock, flags);
	if ((fnic_fc_trace_set_data(fnic->lport->host->host_no, FNIC_FC_RECV,
				    (char *)skb->data, skb->len)) != 0) {
		printk(KERN_ERR "fnic ctlr frame trace error!!!\n");
	}

	skb_queue_tail(&fnic->frame_queue, skb);
	queue_work(fnic_event_queue, &fnic->frame_work);

	return;
drop:
	dev_kfree_skb_irq(skb);
}

static int fnic_rq_cmpl_handler_cont(struct vnic_dev *vdev,
				     struct cq_desc *cq_desc, u8 type,
				     u16 q_number, u16 completed_index,
				     void *opaque)
{
	struct fnic *fnic = vnic_dev_priv(vdev);

	vnic_rq_service(&fnic->rq[q_number], cq_desc, completed_index,
			VNIC_RQ_RETURN_DESC, fnic_rq_cmpl_frame_recv,
			NULL);
	return 0;
}

int fnic_rq_cmpl_handler(struct fnic *fnic, int rq_work_to_do)
{
	unsigned int tot_rq_work_done = 0, cur_work_done;
	unsigned int i;
	int err;

	for (i = 0; i < fnic->rq_count; i++) {
		cur_work_done = vnic_cq_service(&fnic->cq[i], rq_work_to_do,
						fnic_rq_cmpl_handler_cont,
						NULL);
		if (cur_work_done) {
			err = vnic_rq_fill(&fnic->rq[i], fnic_alloc_rq_frame);
			if (err)
				shost_printk(KERN_ERR, fnic->lport->host,
					     "fnic_alloc_rq_frame can't alloc"
					     " frame\n");
		}
		tot_rq_work_done += cur_work_done;
	}

	return tot_rq_work_done;
}

/*
 * This function is called once at init time to allocate and fill RQ
 * buffers. Subsequently, it is called in the interrupt context after RQ
 * buffer processing to replenish the buffers in the RQ.
 */
int fnic_alloc_rq_frame(struct vnic_rq *rq)
{
	struct fnic *fnic = vnic_dev_priv(rq->vdev);
	struct sk_buff *skb;
	u16 len;
	dma_addr_t pa;
	int r;

	len = FC_FRAME_HEADROOM + FC_MAX_FRAME + FC_FRAME_TAILROOM;
	skb = dev_alloc_skb(len);
	if (!skb) {
		FNIC_FCS_DBG(KERN_DEBUG, fnic->lport->host,
			     "Unable to allocate RQ sk_buff\n");
		return -ENOMEM;
	}
	skb_reset_mac_header(skb);
	skb_reset_transport_header(skb);
	skb_reset_network_header(skb);
	skb_put(skb, len);
	pa = pci_map_single(fnic->pdev, skb->data, len, PCI_DMA_FROMDEVICE);

	if (pci_dma_mapping_error(fnic->pdev, pa)) {
		r = -ENOMEM;
		printk(KERN_ERR "PCI mapping failed with error %d\n", r);
		goto free_skb;
	}

	fnic_queue_rq_desc(rq, skb, pa, len);
	return 0;

free_skb:
	kfree_skb(skb);
	return r;
}

void fnic_free_rq_buf(struct vnic_rq *rq, struct vnic_rq_buf *buf)
{
	struct fc_frame *fp = buf->os_buf;
	struct fnic *fnic = vnic_dev_priv(rq->vdev);

	pci_unmap_single(fnic->pdev, buf->dma_addr, buf->len,
			 PCI_DMA_FROMDEVICE);

	dev_kfree_skb(fp_skb(fp));
	buf->os_buf = NULL;
}

/**
 * fnic_eth_send() - Send Ethernet frame.
 * @fip: fcoe_ctlr instance.
 * @skb: Ethernet frame (FIP), without VLAN encapsulation.
 */
void fnic_eth_send(struct fcoe_ctlr *fip, struct sk_buff *skb)
{
	struct fnic *fnic = fnic_from_ctlr(fip);
	struct vnic_wq *wq = &fnic->wq[0];
	dma_addr_t pa;
	struct ethhdr *eth_hdr;
	struct vlan_ethhdr *vlan_hdr;
	unsigned long flags;
	int r;

	if (!fnic->vlan_hw_insert) {
		eth_hdr = (struct ethhdr *)skb_mac_header(skb);
		vlan_hdr = skb_push(skb, sizeof(*vlan_hdr) - sizeof(*eth_hdr));
		memcpy(vlan_hdr, eth_hdr, 2 * ETH_ALEN);
		vlan_hdr->h_vlan_proto = htons(ETH_P_8021Q);
		vlan_hdr->h_vlan_encapsulated_proto = eth_hdr->h_proto;
		vlan_hdr->h_vlan_TCI = htons(fnic->vlan_id);
		if ((fnic_fc_trace_set_data(fnic->lport->host->host_no,
			FNIC_FC_SEND|0x80, (char *)eth_hdr, skb->len)) != 0) {
			printk(KERN_ERR "fnic ctlr frame trace error!!!\n");
		}
	} else {
		if ((fnic_fc_trace_set_data(fnic->lport->host->host_no,
			FNIC_FC_SEND|0x80, (char *)skb->data, skb->len)) != 0) {
			printk(KERN_ERR "fnic ctlr frame trace error!!!\n");
		}
	}

	pa = pci_map_single(fnic->pdev, skb->data, skb->len, PCI_DMA_TODEVICE);

	r = pci_dma_mapping_error(fnic->pdev, pa);
	if (r) {
		printk(KERN_ERR "PCI mapping failed with error %d\n", r);
		goto free_skb;
	}

	spin_lock_irqsave(&fnic->wq_lock[0], flags);
	if (!vnic_wq_desc_avail(wq))
		goto irq_restore;

	fnic_queue_wq_eth_desc(wq, skb, pa, skb->len,
			       0 /* hw inserts cos value */,
			       fnic->vlan_id, 1);
	spin_unlock_irqrestore(&fnic->wq_lock[0], flags);
	return;

irq_restore:
	spin_unlock_irqrestore(&fnic->wq_lock[0], flags);
	pci_unmap_single(fnic->pdev, pa, skb->len, PCI_DMA_TODEVICE);
free_skb:
	kfree_skb(skb);
}

/*
 * Send FC frame.
 */
static int fnic_send_frame(struct fnic *fnic, struct fc_frame *fp)
{
	struct vnic_wq *wq = &fnic->wq[0];
	struct sk_buff *skb;
	dma_addr_t pa;
	struct ethhdr *eth_hdr;
	struct vlan_ethhdr *vlan_hdr;
	struct fcoe_hdr *fcoe_hdr;
	struct fc_frame_header *fh;
	u32 tot_len, eth_hdr_len;
	int ret = 0;
	unsigned long flags;

	fh = fc_frame_header_get(fp);
	skb = fp_skb(fp);

	if (unlikely(fh->fh_r_ctl == FC_RCTL_ELS_REQ) &&
	    fcoe_ctlr_els_send(&fnic->ctlr, fnic->lport, skb))
		return 0;

	if (!fnic->vlan_hw_insert) {
		eth_hdr_len = sizeof(*vlan_hdr) + sizeof(*fcoe_hdr);
		vlan_hdr = skb_push(skb, eth_hdr_len);
		eth_hdr = (struct ethhdr *)vlan_hdr;
		vlan_hdr->h_vlan_proto = htons(ETH_P_8021Q);
		vlan_hdr->h_vlan_encapsulated_proto = htons(ETH_P_FCOE);
		vlan_hdr->h_vlan_TCI = htons(fnic->vlan_id);
		fcoe_hdr = (struct fcoe_hdr *)(vlan_hdr + 1);
	} else {
		eth_hdr_len = sizeof(*eth_hdr) + sizeof(*fcoe_hdr);
		eth_hdr = skb_push(skb, eth_hdr_len);
		eth_hdr->h_proto = htons(ETH_P_FCOE);
		fcoe_hdr = (struct fcoe_hdr *)(eth_hdr + 1);
	}

	if (fnic->ctlr.map_dest)
		fc_fcoe_set_mac(eth_hdr->h_dest, fh->fh_d_id);
	else
		memcpy(eth_hdr->h_dest, fnic->ctlr.dest_addr, ETH_ALEN);
	memcpy(eth_hdr->h_source, fnic->data_src_addr, ETH_ALEN);

	tot_len = skb->len;
	BUG_ON(tot_len % 4);

	memset(fcoe_hdr, 0, sizeof(*fcoe_hdr));
	fcoe_hdr->fcoe_sof = fr_sof(fp);
	if (FC_FCOE_VER)
		FC_FCOE_ENCAPS_VER(fcoe_hdr, FC_FCOE_VER);

	pa = pci_map_single(fnic->pdev, eth_hdr, tot_len, PCI_DMA_TODEVICE);

	if (pci_dma_mapping_error(fnic->pdev, pa)) {
		ret = -ENOMEM;
		printk(KERN_ERR "DMA map failed with error %d\n", ret);
		goto free_skb_on_err;
	}

	if ((fnic_fc_trace_set_data(fnic->lport->host->host_no, FNIC_FC_SEND,
				    (char *)eth_hdr, tot_len)) != 0) {
		printk(KERN_ERR "fnic ctlr frame trace error!!!\n");
	}

	spin_lock_irqsave(&fnic->wq_lock[0], flags);

	if (!vnic_wq_desc_avail(wq)) {
		pci_unmap_single(fnic->pdev, pa,
				 tot_len, PCI_DMA_TODEVICE);
		ret = -1;
		goto irq_restore;
	}

	fnic_queue_wq_desc(wq, skb, pa, tot_len, fr_eof(fp),
			   0 /* hw inserts cos value */,
			   fnic->vlan_id, 1, 1, 1);

irq_restore:
	spin_unlock_irqrestore(&fnic->wq_lock[0], flags);

free_skb_on_err:
	if (ret)
		dev_kfree_skb_any(fp_skb(fp));

	return ret;
}

/*
 * fnic_send
 * Routine to send a raw frame
 */
int fnic_send(struct fc_lport *lp, struct fc_frame *fp)
{
	struct fnic *fnic = lport_priv(lp);
	unsigned long flags;

	if (fnic->in_remove) {
		dev_kfree_skb(fp_skb(fp));
		return -1;
	}

	/*
	 * Queue frame if in a transitional state.
	 * This occurs while registering the Port_ID / MAC address after FLOGI.
	 */
	spin_lock_irqsave(&fnic->fnic_lock, flags);
	if (fnic->state != FNIC_IN_FC_MODE && fnic->state != FNIC_IN_ETH_MODE) {
		skb_queue_tail(&fnic->tx_queue, fp_skb(fp));
		spin_unlock_irqrestore(&fnic->fnic_lock, flags);
		return 0;
	}
	spin_unlock_irqrestore(&fnic->fnic_lock, flags);

	return fnic_send_frame(fnic, fp);
}

/**
 * fnic_flush_tx() - send queued frames.
 * @fnic: fnic device
 *
 * Send frames that were waiting to go out in FC or Ethernet mode.
 * Whenever changing modes we purge queued frames, so these frames should
 * be queued for the stable mode that we're in, either FC or Ethernet.
 *
 * Called without fnic_lock held.
 */
void fnic_flush_tx(struct fnic *fnic)
{
	struct sk_buff *skb;
	struct fc_frame *fp;

	while ((skb = skb_dequeue(&fnic->tx_queue))) {
		fp = (struct fc_frame *)skb;
		fnic_send_frame(fnic, fp);
	}
}

/**
 * fnic_set_eth_mode() - put fnic into ethernet mode.
 * @fnic: fnic device
 *
 * Called without fnic lock held.
 */
static void fnic_set_eth_mode(struct fnic *fnic)
{
	unsigned long flags;
	enum fnic_state old_state;
	int ret;

	spin_lock_irqsave(&fnic->fnic_lock, flags);
again:
	old_state = fnic->state;
	switch (old_state) {
	case FNIC_IN_FC_MODE:
	case FNIC_IN_ETH_TRANS_FC_MODE:
	default:
		fnic->state = FNIC_IN_FC_TRANS_ETH_MODE;
		spin_unlock_irqrestore(&fnic->fnic_lock, flags);

		ret = fnic_fw_reset_handler(fnic);

		spin_lock_irqsave(&fnic->fnic_lock, flags);
		if (fnic->state != FNIC_IN_FC_TRANS_ETH_MODE)
			goto again;
		if (ret)
			fnic->state = old_state;
		break;

	case FNIC_IN_FC_TRANS_ETH_MODE:
	case FNIC_IN_ETH_MODE:
		break;
	}
	spin_unlock_irqrestore(&fnic->fnic_lock, flags);
}

static void fnic_wq_complete_frame_send(struct vnic_wq *wq,
					struct cq_desc *cq_desc,
					struct vnic_wq_buf *buf, void *opaque)
{
	struct sk_buff *skb = buf->os_buf;
	struct fc_frame *fp = (struct fc_frame *)skb;
	struct fnic *fnic = vnic_dev_priv(wq->vdev);

	pci_unmap_single(fnic->pdev, buf->dma_addr,
			 buf->len, PCI_DMA_TODEVICE);
	dev_kfree_skb_irq(fp_skb(fp));
	buf->os_buf = NULL;
}

static int fnic_wq_cmpl_handler_cont(struct vnic_dev *vdev,
				     struct cq_desc *cq_desc, u8 type,
				     u16 q_number, u16 completed_index,
				     void *opaque)
{
	struct fnic *fnic = vnic_dev_priv(vdev);
	unsigned long flags;

	spin_lock_irqsave(&fnic->wq_lock[q_number], flags);
	vnic_wq_service(&fnic->wq[q_number], cq_desc, completed_index,
			fnic_wq_complete_frame_send, NULL);
	spin_unlock_irqrestore(&fnic->wq_lock[q_number], flags);

	return 0;
}

int fnic_wq_cmpl_handler(struct fnic *fnic, int work_to_do)
{
	unsigned int wq_work_done = 0;
	unsigned int i;

	for (i = 0; i < fnic->raw_wq_count; i++) {
		wq_work_done += vnic_cq_service(&fnic->cq[fnic->rq_count+i],
						work_to_do,
						fnic_wq_cmpl_handler_cont,
						NULL);
	}

	return wq_work_done;
}

void fnic_free_wq_buf(struct vnic_wq *wq, struct vnic_wq_buf *buf)
{
	struct fc_frame *fp = buf->os_buf;
	struct fnic *fnic = vnic_dev_priv(wq->vdev);

	pci_unmap_single(fnic->pdev, buf->dma_addr,
			 buf->len, PCI_DMA_TODEVICE);

	dev_kfree_skb(fp_skb(fp));
	buf->os_buf = NULL;
}

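/*
 * Drop every VLAN discovered so far; a fresh list is built from the next
 * FIP VLAN discovery response.
 */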
void fnic_fcoe_reset_vlans(struct fnic *fnic)
{
	unsigned long flags;
	struct fcoe_vlan *vlan;
	struct fcoe_vlan *next;

	/*
	 * Indicating a link down to fcoe (so that all FCFs are freed)
	 * might not be required here, since that is already done before
	 * sending the VLAN discovery request.
	 */
	spin_lock_irqsave(&fnic->vlans_lock, flags);
	if (!list_empty(&fnic->vlans)) {
		list_for_each_entry_safe(vlan, next, &fnic->vlans, list) {
			list_del(&vlan->list);
			kfree(vlan);
		}
	}
	spin_unlock_irqrestore(&fnic->vlans_lock, flags);
}

void fnic_handle_fip_timer(struct fnic *fnic)
{
	unsigned long flags;
	struct fcoe_vlan *vlan;
	struct fnic_stats *fnic_stats = &fnic->fnic_stats;
	u64 sol_time;

	spin_lock_irqsave(&fnic->fnic_lock, flags);
	if (fnic->stop_rx_link_events) {
		spin_unlock_irqrestore(&fnic->fnic_lock, flags);
		return;
	}
	spin_unlock_irqrestore(&fnic->fnic_lock, flags);

	if (fnic->ctlr.mode == FIP_MODE_NON_FIP)
		return;

	spin_lock_irqsave(&fnic->vlans_lock, flags);
	if (list_empty(&fnic->vlans)) {
		spin_unlock_irqrestore(&fnic->vlans_lock, flags);
		/* no vlans available, try again */
		if (printk_ratelimit())
			FNIC_FCS_DBG(KERN_DEBUG, fnic->lport->host,
				     "Start VLAN Discovery\n");
		fnic_event_enq(fnic, FNIC_EVT_START_VLAN_DISC);
		return;
	}

	vlan = list_first_entry(&fnic->vlans, struct fcoe_vlan, list);
	shost_printk(KERN_DEBUG, fnic->lport->host,
		     "fip_timer: vlan %d state %d sol_count %d\n",
		     vlan->vid, vlan->state, vlan->sol_count);
	switch (vlan->state) {
	case FIP_VLAN_USED:
		FNIC_FCS_DBG(KERN_DEBUG, fnic->lport->host,
			     "FIP VLAN is selected for FC transaction\n");
		spin_unlock_irqrestore(&fnic->vlans_lock, flags);
		break;
	case FIP_VLAN_FAILED:
		spin_unlock_irqrestore(&fnic->vlans_lock, flags);
		/* if all vlans are in failed state, restart vlan disc */
		if (printk_ratelimit())
			FNIC_FCS_DBG(KERN_DEBUG, fnic->lport->host,
				     "Start VLAN Discovery\n");
		fnic_event_enq(fnic, FNIC_EVT_START_VLAN_DISC);
		break;
	case FIP_VLAN_SENT:
		if (vlan->sol_count >= FCOE_CTLR_MAX_SOL) {
			/*
			 * no response on this vlan, remove from the list.
			 * Try the next vlan
			 */
			shost_printk(KERN_INFO, fnic->lport->host,
				     "Dequeue this VLAN ID %d from list\n",
				     vlan->vid);
			list_del(&vlan->list);
			kfree(vlan);
			vlan = NULL;
			if (list_empty(&fnic->vlans)) {
				/* we exhausted all vlans, restart vlan disc */
				spin_unlock_irqrestore(&fnic->vlans_lock,
						       flags);
				shost_printk(KERN_INFO, fnic->lport->host,
					     "fip_timer: vlan list empty, "
					     "trigger vlan disc\n");
				fnic_event_enq(fnic, FNIC_EVT_START_VLAN_DISC);
				return;
			}
			/* check the next vlan */
			vlan = list_first_entry(&fnic->vlans, struct fcoe_vlan,
						list);
			fnic->set_vlan(fnic, vlan->vid);
			vlan->state = FIP_VLAN_SENT; /* sent now */
		}
		spin_unlock_irqrestore(&fnic->vlans_lock, flags);
		atomic64_inc(&fnic_stats->vlan_stats.sol_expiry_count);
		vlan->sol_count++;
		sol_time = jiffies + msecs_to_jiffies(FCOE_CTLR_START_DELAY);
		mod_timer(&fnic->fip_timer, round_jiffies(sol_time));
		break;
	}
}