1 // SPDX-License-Identifier: GPL-2.0-only 2 /* 3 * QLogic FCoE Offload Driver 4 * Copyright (c) 2016-2018 Cavium Inc. 5 */ 6 #include <linux/init.h> 7 #include <linux/kernel.h> 8 #include <linux/module.h> 9 #include <linux/pci.h> 10 #include <linux/device.h> 11 #include <linux/highmem.h> 12 #include <linux/crc32.h> 13 #include <linux/interrupt.h> 14 #include <linux/list.h> 15 #include <linux/kthread.h> 16 #include <scsi/libfc.h> 17 #include <scsi/scsi_host.h> 18 #include <scsi/fc_frame.h> 19 #include <linux/if_ether.h> 20 #include <linux/if_vlan.h> 21 #include <linux/cpu.h> 22 #include "qedf.h" 23 #include "qedf_dbg.h" 24 #include <uapi/linux/pci_regs.h> 25 26 const struct qed_fcoe_ops *qed_ops; 27 28 static int qedf_probe(struct pci_dev *pdev, const struct pci_device_id *id); 29 static void qedf_remove(struct pci_dev *pdev); 30 static void qedf_shutdown(struct pci_dev *pdev); 31 static void qedf_schedule_recovery_handler(void *dev); 32 static void qedf_recovery_handler(struct work_struct *work); 33 34 /* 35 * Driver module parameters. 36 */ 37 static unsigned int qedf_dev_loss_tmo = 60; 38 module_param_named(dev_loss_tmo, qedf_dev_loss_tmo, int, S_IRUGO); 39 MODULE_PARM_DESC(dev_loss_tmo, " dev_loss_tmo setting for attached " 40 "remote ports (default 60)"); 41 42 uint qedf_debug = QEDF_LOG_INFO; 43 module_param_named(debug, qedf_debug, uint, S_IRUGO); 44 MODULE_PARM_DESC(debug, " Debug mask. Pass '1' to enable default debugging" 45 " mask"); 46 47 static uint qedf_fipvlan_retries = 60; 48 module_param_named(fipvlan_retries, qedf_fipvlan_retries, int, S_IRUGO); 49 MODULE_PARM_DESC(fipvlan_retries, " Number of FIP VLAN requests to attempt " 50 "before giving up (default 60)"); 51 52 static uint qedf_fallback_vlan = QEDF_FALLBACK_VLAN; 53 module_param_named(fallback_vlan, qedf_fallback_vlan, int, S_IRUGO); 54 MODULE_PARM_DESC(fallback_vlan, " VLAN ID to try if fip vlan request fails " 55 "(default 1002)."); 56 57 static int qedf_default_prio = -1; 58 module_param_named(default_prio, qedf_default_prio, int, S_IRUGO); 59 MODULE_PARM_DESC(default_prio, " Override 802.1q priority for FIP and FCoE" 60 " traffic (value between 0 and 7, default 3)."); 61 62 uint qedf_dump_frames; 63 module_param_named(dump_frames, qedf_dump_frames, int, S_IRUGO | S_IWUSR); 64 MODULE_PARM_DESC(dump_frames, " Print the skb data of FIP and FCoE frames " 65 "(default off)"); 66 67 static uint qedf_queue_depth; 68 module_param_named(queue_depth, qedf_queue_depth, int, S_IRUGO); 69 MODULE_PARM_DESC(queue_depth, " Sets the queue depth for all LUNs discovered " 70 "by the qedf driver. Default is 0 (use OS default)."); 71 72 uint qedf_io_tracing; 73 module_param_named(io_tracing, qedf_io_tracing, int, S_IRUGO | S_IWUSR); 74 MODULE_PARM_DESC(io_tracing, " Enable logging of SCSI requests/completions " 75 "into trace buffer. (default off)."); 76 77 static uint qedf_max_lun = MAX_FIBRE_LUNS; 78 module_param_named(max_lun, qedf_max_lun, int, S_IRUGO); 79 MODULE_PARM_DESC(max_lun, " Sets the maximum luns per target that the driver " 80 "supports. 
(default 0xffffffff)"); 81 82 uint qedf_link_down_tmo; 83 module_param_named(link_down_tmo, qedf_link_down_tmo, int, S_IRUGO); 84 MODULE_PARM_DESC(link_down_tmo, " Delays informing the fcoe transport that the " 85 "link is down by N seconds."); 86 87 bool qedf_retry_delay; 88 module_param_named(retry_delay, qedf_retry_delay, bool, S_IRUGO | S_IWUSR); 89 MODULE_PARM_DESC(retry_delay, " Enable/disable handling of FCP_RSP IU retry " 90 "delay handling (default off)."); 91 92 static bool qedf_dcbx_no_wait; 93 module_param_named(dcbx_no_wait, qedf_dcbx_no_wait, bool, S_IRUGO | S_IWUSR); 94 MODULE_PARM_DESC(dcbx_no_wait, " Do not wait for DCBX convergence to start " 95 "sending FIP VLAN requests on link up (Default: off)."); 96 97 static uint qedf_dp_module; 98 module_param_named(dp_module, qedf_dp_module, uint, S_IRUGO); 99 MODULE_PARM_DESC(dp_module, " bit flags control for verbose printk passed " 100 "qed module during probe."); 101 102 static uint qedf_dp_level = QED_LEVEL_NOTICE; 103 module_param_named(dp_level, qedf_dp_level, uint, S_IRUGO); 104 MODULE_PARM_DESC(dp_level, " printk verbosity control passed to qed module " 105 "during probe (0-3: 0 more verbose)."); 106 107 struct workqueue_struct *qedf_io_wq; 108 109 static struct fcoe_percpu_s qedf_global; 110 static DEFINE_SPINLOCK(qedf_global_lock); 111 112 static struct kmem_cache *qedf_io_work_cache; 113 114 void qedf_set_vlan_id(struct qedf_ctx *qedf, int vlan_id) 115 { 116 int vlan_id_tmp = 0; 117 118 vlan_id_tmp = vlan_id | (qedf->prio << VLAN_PRIO_SHIFT); 119 qedf->vlan_id = vlan_id_tmp; 120 QEDF_INFO(&qedf->dbg_ctx, QEDF_LOG_DISC, 121 "Setting vlan_id=0x%04x prio=%d.\n", 122 vlan_id_tmp, qedf->prio); 123 } 124 125 /* Returns true if we have a valid vlan, false otherwise */ 126 static bool qedf_initiate_fipvlan_req(struct qedf_ctx *qedf) 127 { 128 129 while (qedf->fipvlan_retries--) { 130 /* This is to catch if link goes down during fipvlan retries */ 131 if (atomic_read(&qedf->link_state) == QEDF_LINK_DOWN) { 132 QEDF_ERR(&qedf->dbg_ctx, "Link not up.\n"); 133 return false; 134 } 135 136 if (test_bit(QEDF_UNLOADING, &qedf->flags)) { 137 QEDF_ERR(&qedf->dbg_ctx, "Driver unloading.\n"); 138 return false; 139 } 140 141 if (qedf->vlan_id > 0) { 142 QEDF_INFO(&qedf->dbg_ctx, QEDF_LOG_DISC, 143 "vlan = 0x%x already set, calling ctlr_link_up.\n", 144 qedf->vlan_id); 145 if (atomic_read(&qedf->link_state) == QEDF_LINK_UP) 146 fcoe_ctlr_link_up(&qedf->ctlr); 147 return true; 148 } 149 150 QEDF_INFO(&(qedf->dbg_ctx), QEDF_LOG_DISC, 151 "Retry %d.\n", qedf->fipvlan_retries); 152 init_completion(&qedf->fipvlan_compl); 153 qedf_fcoe_send_vlan_req(qedf); 154 wait_for_completion_timeout(&qedf->fipvlan_compl, 1 * HZ); 155 } 156 157 return false; 158 } 159 160 static void qedf_handle_link_update(struct work_struct *work) 161 { 162 struct qedf_ctx *qedf = 163 container_of(work, struct qedf_ctx, link_update.work); 164 int rc; 165 166 QEDF_INFO(&qedf->dbg_ctx, QEDF_LOG_DISC, "Entered. 
link_state=%d.\n",
		  atomic_read(&qedf->link_state));

	if (atomic_read(&qedf->link_state) == QEDF_LINK_UP) {
		rc = qedf_initiate_fipvlan_req(qedf);
		if (rc)
			return;

		if (atomic_read(&qedf->link_state) != QEDF_LINK_UP) {
			QEDF_INFO(&qedf->dbg_ctx, QEDF_LOG_DISC,
				  "Link is down, resetting vlan_id.\n");
			qedf->vlan_id = 0;
			return;
		}

		/*
		 * If we get here then we never received a response to our
		 * FIP VLAN request, so set the vlan_id to the default and
		 * tell FCoE that the link is up.
		 */
		QEDF_WARN(&(qedf->dbg_ctx), "Did not receive FIP VLAN "
			  "response, falling back to default VLAN %d.\n",
			  qedf_fallback_vlan);
		qedf_set_vlan_id(qedf, qedf_fallback_vlan);

		/*
		 * Zero out data_src_addr so we'll update it with the new
		 * lport port_id.
		 */
		eth_zero_addr(qedf->data_src_addr);
		fcoe_ctlr_link_up(&qedf->ctlr);
	} else if (atomic_read(&qedf->link_state) == QEDF_LINK_DOWN) {
		/*
		 * If we hit here and link_down_tmo_valid is still 1 it means
		 * that link_down_tmo timed out so set it to 0 to make sure any
		 * other readers have accurate state.
		 */
		atomic_set(&qedf->link_down_tmo_valid, 0);
		QEDF_INFO(&(qedf->dbg_ctx), QEDF_LOG_DISC,
		    "Calling fcoe_ctlr_link_down().\n");
		fcoe_ctlr_link_down(&qedf->ctlr);
		if (qedf_wait_for_upload(qedf) == false)
			QEDF_ERR(&qedf->dbg_ctx,
				 "Could not upload all sessions.\n");
		/* Reset the number of FIP VLAN retries */
		qedf->fipvlan_retries = qedf_fipvlan_retries;
	}
}

#define QEDF_FCOE_MAC_METHOD_GRANGED_MAC	1
#define QEDF_FCOE_MAC_METHOD_FCF_MAP		2
#define QEDF_FCOE_MAC_METHOD_FCOE_SET_MAC	3
static void qedf_set_data_src_addr(struct qedf_ctx *qedf, struct fc_frame *fp)
{
	u8 *granted_mac;
	struct fc_frame_header *fh = fc_frame_header_get(fp);
	u8 fc_map[3];
	int method = 0;

	/* Get granted MAC address from FIP FLOGI payload */
	granted_mac = fr_cb(fp)->granted_mac;

	/*
	 * We set the source MAC for FCoE traffic based on the Granted MAC
	 * address from the switch.
	 *
	 * If granted_mac is non-zero, we use that.
	 * If granted_mac is zeroed out, create the FCoE MAC based on
	 * the sel_fcf->fc_map and the d_id of the FLOGI frame.
	 * If sel_fcf->fc_map is 0 then we use the default FCF-MAC plus the
	 * d_id of the FLOGI frame.
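	 *
	 * For example (illustrative values only): with sel_fcf->fc_map set
	 * to 0x0efc00 and a FLOGI D_ID of 0x011200, the constructed FCoE
	 * MAC would be 0e:fc:00:01:12:00, i.e. the FC-MAP in bytes 0-2 and
	 * the D_ID in bytes 3-5, per the assignments below.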
237 */ 238 if (!is_zero_ether_addr(granted_mac)) { 239 ether_addr_copy(qedf->data_src_addr, granted_mac); 240 method = QEDF_FCOE_MAC_METHOD_GRANGED_MAC; 241 } else if (qedf->ctlr.sel_fcf->fc_map != 0) { 242 hton24(fc_map, qedf->ctlr.sel_fcf->fc_map); 243 qedf->data_src_addr[0] = fc_map[0]; 244 qedf->data_src_addr[1] = fc_map[1]; 245 qedf->data_src_addr[2] = fc_map[2]; 246 qedf->data_src_addr[3] = fh->fh_d_id[0]; 247 qedf->data_src_addr[4] = fh->fh_d_id[1]; 248 qedf->data_src_addr[5] = fh->fh_d_id[2]; 249 method = QEDF_FCOE_MAC_METHOD_FCF_MAP; 250 } else { 251 fc_fcoe_set_mac(qedf->data_src_addr, fh->fh_d_id); 252 method = QEDF_FCOE_MAC_METHOD_FCOE_SET_MAC; 253 } 254 255 QEDF_INFO(&(qedf->dbg_ctx), QEDF_LOG_DISC, 256 "QEDF data_src_mac=%pM method=%d.\n", qedf->data_src_addr, method); 257 } 258 259 static void qedf_flogi_resp(struct fc_seq *seq, struct fc_frame *fp, 260 void *arg) 261 { 262 struct fc_exch *exch = fc_seq_exch(seq); 263 struct fc_lport *lport = exch->lp; 264 struct qedf_ctx *qedf = lport_priv(lport); 265 266 if (!qedf) { 267 QEDF_ERR(NULL, "qedf is NULL.\n"); 268 return; 269 } 270 271 /* 272 * If ERR_PTR is set then don't try to stat anything as it will cause 273 * a crash when we access fp. 274 */ 275 if (IS_ERR(fp)) { 276 QEDF_INFO(&(qedf->dbg_ctx), QEDF_LOG_ELS, 277 "fp has IS_ERR() set.\n"); 278 goto skip_stat; 279 } 280 281 /* Log stats for FLOGI reject */ 282 if (fc_frame_payload_op(fp) == ELS_LS_RJT) 283 qedf->flogi_failed++; 284 else if (fc_frame_payload_op(fp) == ELS_LS_ACC) { 285 /* Set the source MAC we will use for FCoE traffic */ 286 qedf_set_data_src_addr(qedf, fp); 287 qedf->flogi_pending = 0; 288 } 289 290 /* Complete flogi_compl so we can proceed to sending ADISCs */ 291 complete(&qedf->flogi_compl); 292 293 skip_stat: 294 /* Report response to libfc */ 295 fc_lport_flogi_resp(seq, fp, lport); 296 } 297 298 static struct fc_seq *qedf_elsct_send(struct fc_lport *lport, u32 did, 299 struct fc_frame *fp, unsigned int op, 300 void (*resp)(struct fc_seq *, 301 struct fc_frame *, 302 void *), 303 void *arg, u32 timeout) 304 { 305 struct qedf_ctx *qedf = lport_priv(lport); 306 307 /* 308 * Intercept FLOGI for statistic purposes. Note we use the resp 309 * callback to tell if this is really a flogi. 310 */ 311 if (resp == fc_lport_flogi_resp) { 312 qedf->flogi_cnt++; 313 if (qedf->flogi_pending >= QEDF_FLOGI_RETRY_CNT) { 314 schedule_delayed_work(&qedf->stag_work, 2); 315 return NULL; 316 } 317 qedf->flogi_pending++; 318 return fc_elsct_send(lport, did, fp, op, qedf_flogi_resp, 319 arg, timeout); 320 } 321 322 return fc_elsct_send(lport, did, fp, op, resp, arg, timeout); 323 } 324 325 int qedf_send_flogi(struct qedf_ctx *qedf) 326 { 327 struct fc_lport *lport; 328 struct fc_frame *fp; 329 330 lport = qedf->lport; 331 332 if (!lport->tt.elsct_send) { 333 QEDF_ERR(&qedf->dbg_ctx, "tt.elsct_send not set.\n"); 334 return -EINVAL; 335 } 336 337 fp = fc_frame_alloc(lport, sizeof(struct fc_els_flogi)); 338 if (!fp) { 339 QEDF_ERR(&(qedf->dbg_ctx), "fc_frame_alloc failed.\n"); 340 return -ENOMEM; 341 } 342 343 QEDF_INFO(&(qedf->dbg_ctx), QEDF_LOG_ELS, 344 "Sending FLOGI to reestablish session with switch.\n"); 345 lport->tt.elsct_send(lport, FC_FID_FLOGI, fp, 346 ELS_FLOGI, qedf_flogi_resp, lport, lport->r_a_tov); 347 348 init_completion(&qedf->flogi_compl); 349 350 return 0; 351 } 352 353 /* 354 * This function is called if link_down_tmo is in use. 
If we get a link up and
 * link_down_tmo has not expired then use just FLOGI/ADISC to recover our
 * sessions with targets. Otherwise, just call fcoe_ctlr_link_up().
 */
static void qedf_link_recovery(struct work_struct *work)
{
	struct qedf_ctx *qedf =
	    container_of(work, struct qedf_ctx, link_recovery.work);
	struct fc_lport *lport = qedf->lport;
	struct fc_rport_priv *rdata;
	bool rc;
	int retries = 30;
	int rval, i;
	struct list_head rdata_login_list;

	INIT_LIST_HEAD(&rdata_login_list);

	QEDF_INFO(&(qedf->dbg_ctx), QEDF_LOG_DISC,
	    "Link down tmo did not expire.\n");

	/*
	 * Essentially reset the fcoe_ctlr here without affecting the state
	 * of the libfc structs.
	 */
	qedf->ctlr.state = FIP_ST_LINK_WAIT;
	fcoe_ctlr_link_down(&qedf->ctlr);

	/*
	 * Bring the link up before we send the fipvlan request so libfcoe
	 * can select a new FCF in parallel.
	 */
	fcoe_ctlr_link_up(&qedf->ctlr);

	/* Since the link went down and came back up, verify which VLAN we're on */
	qedf->fipvlan_retries = qedf_fipvlan_retries;
	rc = qedf_initiate_fipvlan_req(qedf);
	/* If getting the VLAN fails, set the VLAN to the fallback one */
	if (!rc)
		qedf_set_vlan_id(qedf, qedf_fallback_vlan);

	/*
	 * We need to wait for an FCF to be selected due to the
	 * fcoe_ctlr_link_up, otherwise the FLOGI will be rejected.
	 */
	while (retries > 0) {
		if (qedf->ctlr.sel_fcf) {
			QEDF_INFO(&(qedf->dbg_ctx), QEDF_LOG_DISC,
			    "FCF reselected, proceeding with FLOGI.\n");
			break;
		}
		msleep(500);
		retries--;
	}

	if (retries < 1) {
		QEDF_ERR(&(qedf->dbg_ctx), "Exhausted retries waiting for "
		    "FCF selection.\n");
		return;
	}

	rval = qedf_send_flogi(qedf);
	if (rval)
		return;

	/* Wait for FLOGI completion before proceeding with sending ADISCs */
	i = wait_for_completion_timeout(&qedf->flogi_compl,
	    qedf->lport->r_a_tov);
	if (i == 0) {
		QEDF_ERR(&(qedf->dbg_ctx), "FLOGI timed out.\n");
		return;
	}

	/*
	 * Call fc_rport_login() which will cause libfc to send an
	 * ADISC since the rport is in state ready.
	 */
	mutex_lock(&lport->disc.disc_mutex);
	list_for_each_entry_rcu(rdata, &lport->disc.rports, peers) {
		if (kref_get_unless_zero(&rdata->kref)) {
			fc_rport_login(rdata);
			kref_put(&rdata->kref, fc_rport_destroy);
		}
	}
	mutex_unlock(&lport->disc.disc_mutex);
}

static void qedf_update_link_speed(struct qedf_ctx *qedf,
	struct qed_link_output *link)
{
	struct fc_lport *lport = qedf->lport;

	lport->link_speed = FC_PORTSPEED_UNKNOWN;
	lport->link_supported_speeds = FC_PORTSPEED_UNKNOWN;

	/* Set fc_host link speed */
	switch (link->speed) {
	case 10000:
		lport->link_speed = FC_PORTSPEED_10GBIT;
		break;
	case 25000:
		lport->link_speed = FC_PORTSPEED_25GBIT;
		break;
	case 40000:
		lport->link_speed = FC_PORTSPEED_40GBIT;
		break;
	case 50000:
		lport->link_speed = FC_PORTSPEED_50GBIT;
		break;
	case 100000:
		lport->link_speed = FC_PORTSPEED_100GBIT;
		break;
	case 20000:
		lport->link_speed = FC_PORTSPEED_20GBIT;
		break;
	default:
		lport->link_speed = FC_PORTSPEED_UNKNOWN;
		break;
	}

	/*
	 * Set supported link speed by querying the supported
	 * capabilities of the link.
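	 *
	 * Several qed capability bits can map onto one FC_PORTSPEED_* flag,
	 * so the checks below OR the flags together. For example
	 * (illustrative), a port advertising both
	 * QED_LM_25000baseCR_Full_BIT and QED_LM_10000baseSR_Full_BIT ends
	 * up reporting FC_PORTSPEED_25GBIT | FC_PORTSPEED_10GBIT through
	 * fc_host_supported_speeds.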
476 */ 477 if ((link->supported_caps & QED_LM_10000baseT_Full_BIT) || 478 (link->supported_caps & QED_LM_10000baseKX4_Full_BIT) || 479 (link->supported_caps & QED_LM_10000baseR_FEC_BIT) || 480 (link->supported_caps & QED_LM_10000baseCR_Full_BIT) || 481 (link->supported_caps & QED_LM_10000baseSR_Full_BIT) || 482 (link->supported_caps & QED_LM_10000baseLR_Full_BIT) || 483 (link->supported_caps & QED_LM_10000baseLRM_Full_BIT) || 484 (link->supported_caps & QED_LM_10000baseKR_Full_BIT)) { 485 lport->link_supported_speeds |= FC_PORTSPEED_10GBIT; 486 } 487 if ((link->supported_caps & QED_LM_25000baseKR_Full_BIT) || 488 (link->supported_caps & QED_LM_25000baseCR_Full_BIT) || 489 (link->supported_caps & QED_LM_25000baseSR_Full_BIT)) { 490 lport->link_supported_speeds |= FC_PORTSPEED_25GBIT; 491 } 492 if ((link->supported_caps & QED_LM_40000baseLR4_Full_BIT) || 493 (link->supported_caps & QED_LM_40000baseKR4_Full_BIT) || 494 (link->supported_caps & QED_LM_40000baseCR4_Full_BIT) || 495 (link->supported_caps & QED_LM_40000baseSR4_Full_BIT)) { 496 lport->link_supported_speeds |= FC_PORTSPEED_40GBIT; 497 } 498 if ((link->supported_caps & QED_LM_50000baseKR2_Full_BIT) || 499 (link->supported_caps & QED_LM_50000baseCR2_Full_BIT) || 500 (link->supported_caps & QED_LM_50000baseSR2_Full_BIT)) { 501 lport->link_supported_speeds |= FC_PORTSPEED_50GBIT; 502 } 503 if ((link->supported_caps & QED_LM_100000baseKR4_Full_BIT) || 504 (link->supported_caps & QED_LM_100000baseSR4_Full_BIT) || 505 (link->supported_caps & QED_LM_100000baseCR4_Full_BIT) || 506 (link->supported_caps & QED_LM_100000baseLR4_ER4_Full_BIT)) { 507 lport->link_supported_speeds |= FC_PORTSPEED_100GBIT; 508 } 509 if (link->supported_caps & QED_LM_20000baseKR2_Full_BIT) 510 lport->link_supported_speeds |= FC_PORTSPEED_20GBIT; 511 fc_host_supported_speeds(lport->host) = lport->link_supported_speeds; 512 } 513 514 static void qedf_bw_update(void *dev) 515 { 516 struct qedf_ctx *qedf = (struct qedf_ctx *)dev; 517 struct qed_link_output link; 518 519 /* Get the latest status of the link */ 520 qed_ops->common->get_link(qedf->cdev, &link); 521 522 if (test_bit(QEDF_UNLOADING, &qedf->flags)) { 523 QEDF_ERR(&qedf->dbg_ctx, 524 "Ignore link update, driver getting unload.\n"); 525 return; 526 } 527 528 if (link.link_up) { 529 if (atomic_read(&qedf->link_state) == QEDF_LINK_UP) 530 qedf_update_link_speed(qedf, &link); 531 else 532 QEDF_ERR(&qedf->dbg_ctx, 533 "Ignore bw update, link is down.\n"); 534 535 } else { 536 QEDF_ERR(&qedf->dbg_ctx, "link_up is not set.\n"); 537 } 538 } 539 540 static void qedf_link_update(void *dev, struct qed_link_output *link) 541 { 542 struct qedf_ctx *qedf = (struct qedf_ctx *)dev; 543 544 /* 545 * Prevent race where we're removing the module and we get link update 546 * for qed. 
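 * QEDF_UNLOADING is set during teardown (see qedf_vport_destroy() for the
 * vport case), so callbacks that observe it simply return rather than touch
 * state that is about to be freed.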
547 */ 548 if (test_bit(QEDF_UNLOADING, &qedf->flags)) { 549 QEDF_ERR(&qedf->dbg_ctx, 550 "Ignore link update, driver getting unload.\n"); 551 return; 552 } 553 554 if (link->link_up) { 555 if (atomic_read(&qedf->link_state) == QEDF_LINK_UP) { 556 QEDF_INFO((&qedf->dbg_ctx), QEDF_LOG_DISC, 557 "Ignoring link up event as link is already up.\n"); 558 return; 559 } 560 QEDF_ERR(&(qedf->dbg_ctx), "LINK UP (%d GB/s).\n", 561 link->speed / 1000); 562 563 /* Cancel any pending link down work */ 564 cancel_delayed_work(&qedf->link_update); 565 566 atomic_set(&qedf->link_state, QEDF_LINK_UP); 567 qedf_update_link_speed(qedf, link); 568 569 if (atomic_read(&qedf->dcbx) == QEDF_DCBX_DONE || 570 qedf_dcbx_no_wait) { 571 QEDF_INFO(&(qedf->dbg_ctx), QEDF_LOG_DISC, 572 "DCBx done.\n"); 573 if (atomic_read(&qedf->link_down_tmo_valid) > 0) 574 queue_delayed_work(qedf->link_update_wq, 575 &qedf->link_recovery, 0); 576 else 577 queue_delayed_work(qedf->link_update_wq, 578 &qedf->link_update, 0); 579 atomic_set(&qedf->link_down_tmo_valid, 0); 580 } 581 582 } else { 583 QEDF_ERR(&(qedf->dbg_ctx), "LINK DOWN.\n"); 584 585 atomic_set(&qedf->link_state, QEDF_LINK_DOWN); 586 atomic_set(&qedf->dcbx, QEDF_DCBX_PENDING); 587 /* 588 * Flag that we're waiting for the link to come back up before 589 * informing the fcoe layer of the event. 590 */ 591 if (qedf_link_down_tmo > 0) { 592 QEDF_INFO(&(qedf->dbg_ctx), QEDF_LOG_DISC, 593 "Starting link down tmo.\n"); 594 atomic_set(&qedf->link_down_tmo_valid, 1); 595 } 596 qedf->vlan_id = 0; 597 qedf_update_link_speed(qedf, link); 598 queue_delayed_work(qedf->link_update_wq, &qedf->link_update, 599 qedf_link_down_tmo * HZ); 600 } 601 } 602 603 604 static void qedf_dcbx_handler(void *dev, struct qed_dcbx_get *get, u32 mib_type) 605 { 606 struct qedf_ctx *qedf = (struct qedf_ctx *)dev; 607 u8 tmp_prio; 608 609 QEDF_ERR(&(qedf->dbg_ctx), "DCBx event valid=%d enabled=%d fcoe " 610 "prio=%d.\n", get->operational.valid, get->operational.enabled, 611 get->operational.app_prio.fcoe); 612 613 if (get->operational.enabled && get->operational.valid) { 614 /* If DCBX was already negotiated on link up then just exit */ 615 if (atomic_read(&qedf->dcbx) == QEDF_DCBX_DONE) { 616 QEDF_INFO(&(qedf->dbg_ctx), QEDF_LOG_DISC, 617 "DCBX already set on link up.\n"); 618 return; 619 } 620 621 atomic_set(&qedf->dcbx, QEDF_DCBX_DONE); 622 623 /* 624 * Set the 8021q priority in the following manner: 625 * 626 * 1. If a modparam is set use that 627 * 2. If the value is not between 0..7 use the default 628 * 3. 
Use the priority we get from the DCBX app tag 629 */ 630 tmp_prio = get->operational.app_prio.fcoe; 631 if (qedf_default_prio > -1) 632 qedf->prio = qedf_default_prio; 633 else if (tmp_prio > 7) { 634 QEDF_INFO(&(qedf->dbg_ctx), QEDF_LOG_DISC, 635 "FIP/FCoE prio %d out of range, setting to %d.\n", 636 tmp_prio, QEDF_DEFAULT_PRIO); 637 qedf->prio = QEDF_DEFAULT_PRIO; 638 } else 639 qedf->prio = tmp_prio; 640 641 if (atomic_read(&qedf->link_state) == QEDF_LINK_UP && 642 !qedf_dcbx_no_wait) { 643 if (atomic_read(&qedf->link_down_tmo_valid) > 0) 644 queue_delayed_work(qedf->link_update_wq, 645 &qedf->link_recovery, 0); 646 else 647 queue_delayed_work(qedf->link_update_wq, 648 &qedf->link_update, 0); 649 atomic_set(&qedf->link_down_tmo_valid, 0); 650 } 651 } 652 653 } 654 655 static u32 qedf_get_login_failures(void *cookie) 656 { 657 struct qedf_ctx *qedf; 658 659 qedf = (struct qedf_ctx *)cookie; 660 return qedf->flogi_failed; 661 } 662 663 static struct qed_fcoe_cb_ops qedf_cb_ops = { 664 { 665 .link_update = qedf_link_update, 666 .bw_update = qedf_bw_update, 667 .schedule_recovery_handler = qedf_schedule_recovery_handler, 668 .dcbx_aen = qedf_dcbx_handler, 669 .get_generic_tlv_data = qedf_get_generic_tlv_data, 670 .get_protocol_tlv_data = qedf_get_protocol_tlv_data, 671 } 672 }; 673 674 /* 675 * Various transport templates. 676 */ 677 678 static struct scsi_transport_template *qedf_fc_transport_template; 679 static struct scsi_transport_template *qedf_fc_vport_transport_template; 680 681 /* 682 * SCSI EH handlers 683 */ 684 static int qedf_eh_abort(struct scsi_cmnd *sc_cmd) 685 { 686 struct fc_rport *rport = starget_to_rport(scsi_target(sc_cmd->device)); 687 struct fc_lport *lport; 688 struct qedf_ctx *qedf; 689 struct qedf_ioreq *io_req; 690 struct fc_rport_libfc_priv *rp = rport->dd_data; 691 struct fc_rport_priv *rdata; 692 struct qedf_rport *fcport = NULL; 693 int rc = FAILED; 694 int wait_count = 100; 695 int refcount = 0; 696 int rval; 697 int got_ref = 0; 698 699 lport = shost_priv(sc_cmd->device->host); 700 qedf = (struct qedf_ctx *)lport_priv(lport); 701 702 /* rport and tgt are allocated together, so tgt should be non-NULL */ 703 fcport = (struct qedf_rport *)&rp[1]; 704 rdata = fcport->rdata; 705 if (!rdata || !kref_get_unless_zero(&rdata->kref)) { 706 QEDF_ERR(&qedf->dbg_ctx, "stale rport, sc_cmd=%p\n", sc_cmd); 707 rc = 1; 708 goto out; 709 } 710 711 712 io_req = (struct qedf_ioreq *)sc_cmd->SCp.ptr; 713 if (!io_req) { 714 QEDF_ERR(&qedf->dbg_ctx, 715 "sc_cmd not queued with lld, sc_cmd=%p op=0x%02x, port_id=%06x\n", 716 sc_cmd, sc_cmd->cmnd[0], 717 rdata->ids.port_id); 718 rc = SUCCESS; 719 goto drop_rdata_kref; 720 } 721 722 rval = kref_get_unless_zero(&io_req->refcount); /* ID: 005 */ 723 if (rval) 724 got_ref = 1; 725 726 /* If we got a valid io_req, confirm it belongs to this sc_cmd. 
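 * The reference may fail, or the io_req may already have been reused for a
 * different command, if the original I/O completed between the lookup above
 * and the kref_get_unless_zero() call, so both checks below are needed
 * before trusting io_req.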
*/ 727 if (!rval || io_req->sc_cmd != sc_cmd) { 728 QEDF_ERR(&qedf->dbg_ctx, 729 "Freed/Incorrect io_req, io_req->sc_cmd=%p, sc_cmd=%p, port_id=%06x, bailing out.\n", 730 io_req->sc_cmd, sc_cmd, rdata->ids.port_id); 731 732 goto drop_rdata_kref; 733 } 734 735 if (fc_remote_port_chkready(rport)) { 736 refcount = kref_read(&io_req->refcount); 737 QEDF_ERR(&qedf->dbg_ctx, 738 "rport not ready, io_req=%p, xid=0x%x sc_cmd=%p op=0x%02x, refcount=%d, port_id=%06x\n", 739 io_req, io_req->xid, sc_cmd, sc_cmd->cmnd[0], 740 refcount, rdata->ids.port_id); 741 742 goto drop_rdata_kref; 743 } 744 745 rc = fc_block_scsi_eh(sc_cmd); 746 if (rc) 747 goto drop_rdata_kref; 748 749 if (test_bit(QEDF_RPORT_UPLOADING_CONNECTION, &fcport->flags)) { 750 QEDF_ERR(&qedf->dbg_ctx, 751 "Connection uploading, xid=0x%x., port_id=%06x\n", 752 io_req->xid, rdata->ids.port_id); 753 while (io_req->sc_cmd && (wait_count != 0)) { 754 msleep(100); 755 wait_count--; 756 } 757 if (wait_count) { 758 QEDF_ERR(&qedf->dbg_ctx, "ABTS succeeded\n"); 759 rc = SUCCESS; 760 } else { 761 QEDF_ERR(&qedf->dbg_ctx, "ABTS failed\n"); 762 rc = FAILED; 763 } 764 goto drop_rdata_kref; 765 } 766 767 if (lport->state != LPORT_ST_READY || !(lport->link_up)) { 768 QEDF_ERR(&qedf->dbg_ctx, "link not ready.\n"); 769 goto drop_rdata_kref; 770 } 771 772 QEDF_ERR(&qedf->dbg_ctx, 773 "Aborting io_req=%p sc_cmd=%p xid=0x%x fp_idx=%d, port_id=%06x.\n", 774 io_req, sc_cmd, io_req->xid, io_req->fp_idx, 775 rdata->ids.port_id); 776 777 if (qedf->stop_io_on_error) { 778 qedf_stop_all_io(qedf); 779 rc = SUCCESS; 780 goto drop_rdata_kref; 781 } 782 783 init_completion(&io_req->abts_done); 784 rval = qedf_initiate_abts(io_req, true); 785 if (rval) { 786 QEDF_ERR(&(qedf->dbg_ctx), "Failed to queue ABTS.\n"); 787 /* 788 * If we fail to queue the ABTS then return this command to 789 * the SCSI layer as it will own and free the xid 790 */ 791 rc = SUCCESS; 792 qedf_scsi_done(qedf, io_req, DID_ERROR); 793 goto drop_rdata_kref; 794 } 795 796 wait_for_completion(&io_req->abts_done); 797 798 if (io_req->event == QEDF_IOREQ_EV_ABORT_SUCCESS || 799 io_req->event == QEDF_IOREQ_EV_ABORT_FAILED || 800 io_req->event == QEDF_IOREQ_EV_CLEANUP_SUCCESS) { 801 /* 802 * If we get a reponse to the abort this is success from 803 * the perspective that all references to the command have 804 * been removed from the driver and firmware 805 */ 806 rc = SUCCESS; 807 } else { 808 /* If the abort and cleanup failed then return a failure */ 809 rc = FAILED; 810 } 811 812 if (rc == SUCCESS) 813 QEDF_ERR(&(qedf->dbg_ctx), "ABTS succeeded, xid=0x%x.\n", 814 io_req->xid); 815 else 816 QEDF_ERR(&(qedf->dbg_ctx), "ABTS failed, xid=0x%x.\n", 817 io_req->xid); 818 819 drop_rdata_kref: 820 kref_put(&rdata->kref, fc_rport_destroy); 821 out: 822 if (got_ref) 823 kref_put(&io_req->refcount, qedf_release_cmd); 824 return rc; 825 } 826 827 static int qedf_eh_target_reset(struct scsi_cmnd *sc_cmd) 828 { 829 QEDF_ERR(NULL, "%d:0:%d:%lld: TARGET RESET Issued...", 830 sc_cmd->device->host->host_no, sc_cmd->device->id, 831 sc_cmd->device->lun); 832 return qedf_initiate_tmf(sc_cmd, FCP_TMF_TGT_RESET); 833 } 834 835 static int qedf_eh_device_reset(struct scsi_cmnd *sc_cmd) 836 { 837 QEDF_ERR(NULL, "%d:0:%d:%lld: LUN RESET Issued... 
", 838 sc_cmd->device->host->host_no, sc_cmd->device->id, 839 sc_cmd->device->lun); 840 return qedf_initiate_tmf(sc_cmd, FCP_TMF_LUN_RESET); 841 } 842 843 bool qedf_wait_for_upload(struct qedf_ctx *qedf) 844 { 845 struct qedf_rport *fcport = NULL; 846 int wait_cnt = 120; 847 848 while (wait_cnt--) { 849 if (atomic_read(&qedf->num_offloads)) 850 QEDF_INFO(&qedf->dbg_ctx, QEDF_LOG_DISC, 851 "Waiting for all uploads to complete num_offloads = 0x%x.\n", 852 atomic_read(&qedf->num_offloads)); 853 else 854 return true; 855 msleep(500); 856 } 857 858 rcu_read_lock(); 859 list_for_each_entry_rcu(fcport, &qedf->fcports, peers) { 860 if (fcport && test_bit(QEDF_RPORT_SESSION_READY, 861 &fcport->flags)) { 862 if (fcport->rdata) 863 QEDF_ERR(&qedf->dbg_ctx, 864 "Waiting for fcport %p portid=%06x.\n", 865 fcport, fcport->rdata->ids.port_id); 866 } else { 867 QEDF_ERR(&qedf->dbg_ctx, 868 "Waiting for fcport %p.\n", fcport); 869 } 870 } 871 rcu_read_unlock(); 872 return false; 873 874 } 875 876 /* Performs soft reset of qedf_ctx by simulating a link down/up */ 877 void qedf_ctx_soft_reset(struct fc_lport *lport) 878 { 879 struct qedf_ctx *qedf; 880 struct qed_link_output if_link; 881 882 if (lport->vport) { 883 QEDF_ERR(NULL, "Cannot issue host reset on NPIV port.\n"); 884 return; 885 } 886 887 qedf = lport_priv(lport); 888 889 qedf->flogi_pending = 0; 890 /* For host reset, essentially do a soft link up/down */ 891 atomic_set(&qedf->link_state, QEDF_LINK_DOWN); 892 QEDF_INFO(&qedf->dbg_ctx, QEDF_LOG_DISC, 893 "Queuing link down work.\n"); 894 queue_delayed_work(qedf->link_update_wq, &qedf->link_update, 895 0); 896 897 if (qedf_wait_for_upload(qedf) == false) { 898 QEDF_ERR(&qedf->dbg_ctx, "Could not upload all sessions.\n"); 899 WARN_ON(atomic_read(&qedf->num_offloads)); 900 } 901 902 /* Before setting link up query physical link state */ 903 qed_ops->common->get_link(qedf->cdev, &if_link); 904 /* Bail if the physical link is not up */ 905 if (!if_link.link_up) { 906 QEDF_INFO(&qedf->dbg_ctx, QEDF_LOG_DISC, 907 "Physical link is not up.\n"); 908 return; 909 } 910 /* Flush and wait to make sure link down is processed */ 911 flush_delayed_work(&qedf->link_update); 912 msleep(500); 913 914 atomic_set(&qedf->link_state, QEDF_LINK_UP); 915 qedf->vlan_id = 0; 916 QEDF_INFO(&qedf->dbg_ctx, QEDF_LOG_DISC, 917 "Queue link up work.\n"); 918 queue_delayed_work(qedf->link_update_wq, &qedf->link_update, 919 0); 920 } 921 922 /* Reset the host by gracefully logging out and then logging back in */ 923 static int qedf_eh_host_reset(struct scsi_cmnd *sc_cmd) 924 { 925 struct fc_lport *lport; 926 struct qedf_ctx *qedf; 927 928 lport = shost_priv(sc_cmd->device->host); 929 qedf = lport_priv(lport); 930 931 if (atomic_read(&qedf->link_state) == QEDF_LINK_DOWN || 932 test_bit(QEDF_UNLOADING, &qedf->flags)) 933 return FAILED; 934 935 QEDF_ERR(&(qedf->dbg_ctx), "HOST RESET Issued..."); 936 937 qedf_ctx_soft_reset(lport); 938 939 return SUCCESS; 940 } 941 942 static int qedf_slave_configure(struct scsi_device *sdev) 943 { 944 if (qedf_queue_depth) { 945 scsi_change_queue_depth(sdev, qedf_queue_depth); 946 } 947 948 return 0; 949 } 950 951 static struct scsi_host_template qedf_host_template = { 952 .module = THIS_MODULE, 953 .name = QEDF_MODULE_NAME, 954 .this_id = -1, 955 .cmd_per_lun = 32, 956 .max_sectors = 0xffff, 957 .queuecommand = qedf_queuecommand, 958 .shost_attrs = qedf_host_attrs, 959 .eh_abort_handler = qedf_eh_abort, 960 .eh_device_reset_handler = qedf_eh_device_reset, /* lun reset */ 961 .eh_target_reset_handler = 
qedf_eh_target_reset, /* target reset */ 962 .eh_host_reset_handler = qedf_eh_host_reset, 963 .slave_configure = qedf_slave_configure, 964 .dma_boundary = QED_HW_DMA_BOUNDARY, 965 .sg_tablesize = QEDF_MAX_BDS_PER_CMD, 966 .can_queue = FCOE_PARAMS_NUM_TASKS, 967 .change_queue_depth = scsi_change_queue_depth, 968 }; 969 970 static int qedf_get_paged_crc_eof(struct sk_buff *skb, int tlen) 971 { 972 int rc; 973 974 spin_lock(&qedf_global_lock); 975 rc = fcoe_get_paged_crc_eof(skb, tlen, &qedf_global); 976 spin_unlock(&qedf_global_lock); 977 978 return rc; 979 } 980 981 static struct qedf_rport *qedf_fcport_lookup(struct qedf_ctx *qedf, u32 port_id) 982 { 983 struct qedf_rport *fcport; 984 struct fc_rport_priv *rdata; 985 986 rcu_read_lock(); 987 list_for_each_entry_rcu(fcport, &qedf->fcports, peers) { 988 rdata = fcport->rdata; 989 if (rdata == NULL) 990 continue; 991 if (rdata->ids.port_id == port_id) { 992 rcu_read_unlock(); 993 return fcport; 994 } 995 } 996 rcu_read_unlock(); 997 998 /* Return NULL to caller to let them know fcport was not found */ 999 return NULL; 1000 } 1001 1002 /* Transmits an ELS frame over an offloaded session */ 1003 static int qedf_xmit_l2_frame(struct qedf_rport *fcport, struct fc_frame *fp) 1004 { 1005 struct fc_frame_header *fh; 1006 int rc = 0; 1007 1008 fh = fc_frame_header_get(fp); 1009 if ((fh->fh_type == FC_TYPE_ELS) && 1010 (fh->fh_r_ctl == FC_RCTL_ELS_REQ)) { 1011 switch (fc_frame_payload_op(fp)) { 1012 case ELS_ADISC: 1013 qedf_send_adisc(fcport, fp); 1014 rc = 1; 1015 break; 1016 } 1017 } 1018 1019 return rc; 1020 } 1021 1022 /** 1023 * qedf_xmit - qedf FCoE frame transmit function 1024 * 1025 */ 1026 static int qedf_xmit(struct fc_lport *lport, struct fc_frame *fp) 1027 { 1028 struct fc_lport *base_lport; 1029 struct qedf_ctx *qedf; 1030 struct ethhdr *eh; 1031 struct fcoe_crc_eof *cp; 1032 struct sk_buff *skb; 1033 struct fc_frame_header *fh; 1034 struct fcoe_hdr *hp; 1035 u8 sof, eof; 1036 u32 crc; 1037 unsigned int hlen, tlen, elen; 1038 int wlen; 1039 struct fc_stats *stats; 1040 struct fc_lport *tmp_lport; 1041 struct fc_lport *vn_port = NULL; 1042 struct qedf_rport *fcport; 1043 int rc; 1044 u16 vlan_tci = 0; 1045 1046 qedf = (struct qedf_ctx *)lport_priv(lport); 1047 1048 fh = fc_frame_header_get(fp); 1049 skb = fp_skb(fp); 1050 1051 /* Filter out traffic to other NPIV ports on the same host */ 1052 if (lport->vport) 1053 base_lport = shost_priv(vport_to_shost(lport->vport)); 1054 else 1055 base_lport = lport; 1056 1057 /* Flag if the destination is the base port */ 1058 if (base_lport->port_id == ntoh24(fh->fh_d_id)) { 1059 vn_port = base_lport; 1060 } else { 1061 /* Got through the list of vports attached to the base_lport 1062 * and see if we have a match with the destination address. 
1063 */ 1064 list_for_each_entry(tmp_lport, &base_lport->vports, list) { 1065 if (tmp_lport->port_id == ntoh24(fh->fh_d_id)) { 1066 vn_port = tmp_lport; 1067 break; 1068 } 1069 } 1070 } 1071 if (vn_port && ntoh24(fh->fh_d_id) != FC_FID_FLOGI) { 1072 struct fc_rport_priv *rdata = NULL; 1073 1074 QEDF_INFO(&(qedf->dbg_ctx), QEDF_LOG_LL2, 1075 "Dropping FCoE frame to %06x.\n", ntoh24(fh->fh_d_id)); 1076 kfree_skb(skb); 1077 rdata = fc_rport_lookup(lport, ntoh24(fh->fh_d_id)); 1078 if (rdata) { 1079 rdata->retries = lport->max_rport_retry_count; 1080 kref_put(&rdata->kref, fc_rport_destroy); 1081 } 1082 return -EINVAL; 1083 } 1084 /* End NPIV filtering */ 1085 1086 if (!qedf->ctlr.sel_fcf) { 1087 kfree_skb(skb); 1088 return 0; 1089 } 1090 1091 if (!test_bit(QEDF_LL2_STARTED, &qedf->flags)) { 1092 QEDF_WARN(&(qedf->dbg_ctx), "LL2 not started\n"); 1093 kfree_skb(skb); 1094 return 0; 1095 } 1096 1097 if (atomic_read(&qedf->link_state) != QEDF_LINK_UP) { 1098 QEDF_WARN(&(qedf->dbg_ctx), "qedf link down\n"); 1099 kfree_skb(skb); 1100 return 0; 1101 } 1102 1103 if (unlikely(fh->fh_r_ctl == FC_RCTL_ELS_REQ)) { 1104 if (fcoe_ctlr_els_send(&qedf->ctlr, lport, skb)) 1105 return 0; 1106 } 1107 1108 /* Check to see if this needs to be sent on an offloaded session */ 1109 fcport = qedf_fcport_lookup(qedf, ntoh24(fh->fh_d_id)); 1110 1111 if (fcport && test_bit(QEDF_RPORT_SESSION_READY, &fcport->flags)) { 1112 rc = qedf_xmit_l2_frame(fcport, fp); 1113 /* 1114 * If the frame was successfully sent over the middle path 1115 * then do not try to also send it over the LL2 path 1116 */ 1117 if (rc) 1118 return 0; 1119 } 1120 1121 sof = fr_sof(fp); 1122 eof = fr_eof(fp); 1123 1124 elen = sizeof(struct ethhdr); 1125 hlen = sizeof(struct fcoe_hdr); 1126 tlen = sizeof(struct fcoe_crc_eof); 1127 wlen = (skb->len - tlen + sizeof(crc)) / FCOE_WORD_TO_BYTE; 1128 1129 skb->ip_summed = CHECKSUM_NONE; 1130 crc = fcoe_fc_crc(fp); 1131 1132 /* copy port crc and eof to the skb buff */ 1133 if (skb_is_nonlinear(skb)) { 1134 skb_frag_t *frag; 1135 1136 if (qedf_get_paged_crc_eof(skb, tlen)) { 1137 kfree_skb(skb); 1138 return -ENOMEM; 1139 } 1140 frag = &skb_shinfo(skb)->frags[skb_shinfo(skb)->nr_frags - 1]; 1141 cp = kmap_atomic(skb_frag_page(frag)) + skb_frag_off(frag); 1142 } else { 1143 cp = skb_put(skb, tlen); 1144 } 1145 1146 memset(cp, 0, sizeof(*cp)); 1147 cp->fcoe_eof = eof; 1148 cp->fcoe_crc32 = cpu_to_le32(~crc); 1149 if (skb_is_nonlinear(skb)) { 1150 kunmap_atomic(cp); 1151 cp = NULL; 1152 } 1153 1154 1155 /* adjust skb network/transport offsets to match mac/fcoe/port */ 1156 skb_push(skb, elen + hlen); 1157 skb_reset_mac_header(skb); 1158 skb_reset_network_header(skb); 1159 skb->mac_len = elen; 1160 skb->protocol = htons(ETH_P_FCOE); 1161 1162 /* 1163 * Add VLAN tag to non-offload FCoE frame based on current stored VLAN 1164 * for FIP/FCoE traffic. 
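 *
 * Note that qedf->vlan_id already has the 802.1p priority folded into its
 * upper bits by qedf_set_vlan_id() (vlan_id | (qedf->prio << VLAN_PRIO_SHIFT)),
 * so the tag written below carries both the VLAN ID and the priority.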
1165 */ 1166 __vlan_hwaccel_put_tag(skb, htons(ETH_P_8021Q), qedf->vlan_id); 1167 1168 /* fill up mac and fcoe headers */ 1169 eh = eth_hdr(skb); 1170 eh->h_proto = htons(ETH_P_FCOE); 1171 if (qedf->ctlr.map_dest) 1172 fc_fcoe_set_mac(eh->h_dest, fh->fh_d_id); 1173 else 1174 /* insert GW address */ 1175 ether_addr_copy(eh->h_dest, qedf->ctlr.dest_addr); 1176 1177 /* Set the source MAC address */ 1178 ether_addr_copy(eh->h_source, qedf->data_src_addr); 1179 1180 hp = (struct fcoe_hdr *)(eh + 1); 1181 memset(hp, 0, sizeof(*hp)); 1182 if (FC_FCOE_VER) 1183 FC_FCOE_ENCAPS_VER(hp, FC_FCOE_VER); 1184 hp->fcoe_sof = sof; 1185 1186 /*update tx stats */ 1187 stats = per_cpu_ptr(lport->stats, get_cpu()); 1188 stats->TxFrames++; 1189 stats->TxWords += wlen; 1190 put_cpu(); 1191 1192 /* Get VLAN ID from skb for printing purposes */ 1193 __vlan_hwaccel_get_tag(skb, &vlan_tci); 1194 1195 /* send down to lld */ 1196 fr_dev(fp) = lport; 1197 QEDF_INFO(&(qedf->dbg_ctx), QEDF_LOG_LL2, "FCoE frame send: " 1198 "src=%06x dest=%06x r_ctl=%x type=%x vlan=%04x.\n", 1199 ntoh24(fh->fh_s_id), ntoh24(fh->fh_d_id), fh->fh_r_ctl, fh->fh_type, 1200 vlan_tci); 1201 if (qedf_dump_frames) 1202 print_hex_dump(KERN_WARNING, "fcoe: ", DUMP_PREFIX_OFFSET, 16, 1203 1, skb->data, skb->len, false); 1204 rc = qed_ops->ll2->start_xmit(qedf->cdev, skb, 0); 1205 if (rc) { 1206 QEDF_ERR(&qedf->dbg_ctx, "start_xmit failed rc = %d.\n", rc); 1207 kfree_skb(skb); 1208 return rc; 1209 } 1210 1211 return 0; 1212 } 1213 1214 static int qedf_alloc_sq(struct qedf_ctx *qedf, struct qedf_rport *fcport) 1215 { 1216 int rval = 0; 1217 u32 *pbl; 1218 dma_addr_t page; 1219 int num_pages; 1220 1221 /* Calculate appropriate queue and PBL sizes */ 1222 fcport->sq_mem_size = SQ_NUM_ENTRIES * sizeof(struct fcoe_wqe); 1223 fcport->sq_mem_size = ALIGN(fcport->sq_mem_size, QEDF_PAGE_SIZE); 1224 fcport->sq_pbl_size = (fcport->sq_mem_size / QEDF_PAGE_SIZE) * 1225 sizeof(void *); 1226 fcport->sq_pbl_size = fcport->sq_pbl_size + QEDF_PAGE_SIZE; 1227 1228 fcport->sq = dma_alloc_coherent(&qedf->pdev->dev, fcport->sq_mem_size, 1229 &fcport->sq_dma, GFP_KERNEL); 1230 if (!fcport->sq) { 1231 QEDF_WARN(&(qedf->dbg_ctx), "Could not allocate send queue.\n"); 1232 rval = 1; 1233 goto out; 1234 } 1235 1236 fcport->sq_pbl = dma_alloc_coherent(&qedf->pdev->dev, 1237 fcport->sq_pbl_size, 1238 &fcport->sq_pbl_dma, GFP_KERNEL); 1239 if (!fcport->sq_pbl) { 1240 QEDF_WARN(&(qedf->dbg_ctx), "Could not allocate send queue PBL.\n"); 1241 rval = 1; 1242 goto out_free_sq; 1243 } 1244 1245 /* Create PBL */ 1246 num_pages = fcport->sq_mem_size / QEDF_PAGE_SIZE; 1247 page = fcport->sq_dma; 1248 pbl = (u32 *)fcport->sq_pbl; 1249 1250 while (num_pages--) { 1251 *pbl = U64_LO(page); 1252 pbl++; 1253 *pbl = U64_HI(page); 1254 pbl++; 1255 page += QEDF_PAGE_SIZE; 1256 } 1257 1258 return rval; 1259 1260 out_free_sq: 1261 dma_free_coherent(&qedf->pdev->dev, fcport->sq_mem_size, fcport->sq, 1262 fcport->sq_dma); 1263 out: 1264 return rval; 1265 } 1266 1267 static void qedf_free_sq(struct qedf_ctx *qedf, struct qedf_rport *fcport) 1268 { 1269 if (fcport->sq_pbl) 1270 dma_free_coherent(&qedf->pdev->dev, fcport->sq_pbl_size, 1271 fcport->sq_pbl, fcport->sq_pbl_dma); 1272 if (fcport->sq) 1273 dma_free_coherent(&qedf->pdev->dev, fcport->sq_mem_size, 1274 fcport->sq, fcport->sq_dma); 1275 } 1276 1277 static int qedf_offload_connection(struct qedf_ctx *qedf, 1278 struct qedf_rport *fcport) 1279 { 1280 struct qed_fcoe_params_offload conn_info; 1281 u32 port_id; 1282 int rval; 1283 uint16_t total_sqe 
= (fcport->sq_mem_size / sizeof(struct fcoe_wqe)); 1284 1285 QEDF_INFO(&(qedf->dbg_ctx), QEDF_LOG_CONN, "Offloading connection " 1286 "portid=%06x.\n", fcport->rdata->ids.port_id); 1287 rval = qed_ops->acquire_conn(qedf->cdev, &fcport->handle, 1288 &fcport->fw_cid, &fcport->p_doorbell); 1289 if (rval) { 1290 QEDF_WARN(&(qedf->dbg_ctx), "Could not acquire connection " 1291 "for portid=%06x.\n", fcport->rdata->ids.port_id); 1292 rval = 1; /* For some reason qed returns 0 on failure here */ 1293 goto out; 1294 } 1295 1296 QEDF_INFO(&(qedf->dbg_ctx), QEDF_LOG_CONN, "portid=%06x " 1297 "fw_cid=%08x handle=%d.\n", fcport->rdata->ids.port_id, 1298 fcport->fw_cid, fcport->handle); 1299 1300 memset(&conn_info, 0, sizeof(struct qed_fcoe_params_offload)); 1301 1302 /* Fill in the offload connection info */ 1303 conn_info.sq_pbl_addr = fcport->sq_pbl_dma; 1304 1305 conn_info.sq_curr_page_addr = (dma_addr_t)(*(u64 *)fcport->sq_pbl); 1306 conn_info.sq_next_page_addr = 1307 (dma_addr_t)(*(u64 *)(fcport->sq_pbl + 8)); 1308 1309 /* Need to use our FCoE MAC for the offload session */ 1310 ether_addr_copy(conn_info.src_mac, qedf->data_src_addr); 1311 1312 ether_addr_copy(conn_info.dst_mac, qedf->ctlr.dest_addr); 1313 1314 conn_info.tx_max_fc_pay_len = fcport->rdata->maxframe_size; 1315 conn_info.e_d_tov_timer_val = qedf->lport->e_d_tov / 20; 1316 conn_info.rec_tov_timer_val = 3; /* I think this is what E3 was */ 1317 conn_info.rx_max_fc_pay_len = fcport->rdata->maxframe_size; 1318 1319 /* Set VLAN data */ 1320 conn_info.vlan_tag = qedf->vlan_id << 1321 FCOE_CONN_OFFLOAD_RAMROD_DATA_VLAN_ID_SHIFT; 1322 conn_info.vlan_tag |= 1323 qedf->prio << FCOE_CONN_OFFLOAD_RAMROD_DATA_PRIORITY_SHIFT; 1324 conn_info.flags |= (FCOE_CONN_OFFLOAD_RAMROD_DATA_B_VLAN_FLAG_MASK << 1325 FCOE_CONN_OFFLOAD_RAMROD_DATA_B_VLAN_FLAG_SHIFT); 1326 1327 /* Set host port source id */ 1328 port_id = fc_host_port_id(qedf->lport->host); 1329 fcport->sid = port_id; 1330 conn_info.s_id.addr_hi = (port_id & 0x000000FF); 1331 conn_info.s_id.addr_mid = (port_id & 0x0000FF00) >> 8; 1332 conn_info.s_id.addr_lo = (port_id & 0x00FF0000) >> 16; 1333 1334 conn_info.max_conc_seqs_c3 = fcport->rdata->max_seq; 1335 1336 /* Set remote port destination id */ 1337 port_id = fcport->rdata->rport->port_id; 1338 conn_info.d_id.addr_hi = (port_id & 0x000000FF); 1339 conn_info.d_id.addr_mid = (port_id & 0x0000FF00) >> 8; 1340 conn_info.d_id.addr_lo = (port_id & 0x00FF0000) >> 16; 1341 1342 conn_info.def_q_idx = 0; /* Default index for send queue? */ 1343 1344 /* Set FC-TAPE specific flags if needed */ 1345 if (fcport->dev_type == QEDF_RPORT_TYPE_TAPE) { 1346 QEDF_INFO(&(qedf->dbg_ctx), QEDF_LOG_CONN, 1347 "Enable CONF, REC for portid=%06x.\n", 1348 fcport->rdata->ids.port_id); 1349 conn_info.flags |= 1 << 1350 FCOE_CONN_OFFLOAD_RAMROD_DATA_B_CONF_REQ_SHIFT; 1351 conn_info.flags |= 1352 ((fcport->rdata->sp_features & FC_SP_FT_SEQC) ? 
1 : 0) << 1353 FCOE_CONN_OFFLOAD_RAMROD_DATA_B_REC_VALID_SHIFT; 1354 } 1355 1356 rval = qed_ops->offload_conn(qedf->cdev, fcport->handle, &conn_info); 1357 if (rval) { 1358 QEDF_WARN(&(qedf->dbg_ctx), "Could not offload connection " 1359 "for portid=%06x.\n", fcport->rdata->ids.port_id); 1360 goto out_free_conn; 1361 } else 1362 QEDF_INFO(&(qedf->dbg_ctx), QEDF_LOG_CONN, "Offload " 1363 "succeeded portid=%06x total_sqe=%d.\n", 1364 fcport->rdata->ids.port_id, total_sqe); 1365 1366 spin_lock_init(&fcport->rport_lock); 1367 atomic_set(&fcport->free_sqes, total_sqe); 1368 return 0; 1369 out_free_conn: 1370 qed_ops->release_conn(qedf->cdev, fcport->handle); 1371 out: 1372 return rval; 1373 } 1374 1375 #define QEDF_TERM_BUFF_SIZE 10 1376 static void qedf_upload_connection(struct qedf_ctx *qedf, 1377 struct qedf_rport *fcport) 1378 { 1379 void *term_params; 1380 dma_addr_t term_params_dma; 1381 1382 /* Term params needs to be a DMA coherent buffer as qed shared the 1383 * physical DMA address with the firmware. The buffer may be used in 1384 * the receive path so we may eventually have to move this. 1385 */ 1386 term_params = dma_alloc_coherent(&qedf->pdev->dev, QEDF_TERM_BUFF_SIZE, 1387 &term_params_dma, GFP_KERNEL); 1388 1389 QEDF_INFO(&(qedf->dbg_ctx), QEDF_LOG_CONN, "Uploading connection " 1390 "port_id=%06x.\n", fcport->rdata->ids.port_id); 1391 1392 qed_ops->destroy_conn(qedf->cdev, fcport->handle, term_params_dma); 1393 qed_ops->release_conn(qedf->cdev, fcport->handle); 1394 1395 dma_free_coherent(&qedf->pdev->dev, QEDF_TERM_BUFF_SIZE, term_params, 1396 term_params_dma); 1397 } 1398 1399 static void qedf_cleanup_fcport(struct qedf_ctx *qedf, 1400 struct qedf_rport *fcport) 1401 { 1402 struct fc_rport_priv *rdata = fcport->rdata; 1403 1404 QEDF_INFO(&(qedf->dbg_ctx), QEDF_LOG_CONN, "Cleaning up portid=%06x.\n", 1405 fcport->rdata->ids.port_id); 1406 1407 /* Flush any remaining i/o's before we upload the connection */ 1408 qedf_flush_active_ios(fcport, -1); 1409 1410 if (test_and_clear_bit(QEDF_RPORT_SESSION_READY, &fcport->flags)) 1411 qedf_upload_connection(qedf, fcport); 1412 qedf_free_sq(qedf, fcport); 1413 fcport->rdata = NULL; 1414 fcport->qedf = NULL; 1415 kref_put(&rdata->kref, fc_rport_destroy); 1416 } 1417 1418 /** 1419 * This event_callback is called after successful completion of libfc 1420 * initiated target login. qedf can proceed with initiating the session 1421 * establishment. 1422 */ 1423 static void qedf_rport_event_handler(struct fc_lport *lport, 1424 struct fc_rport_priv *rdata, 1425 enum fc_rport_event event) 1426 { 1427 struct qedf_ctx *qedf = lport_priv(lport); 1428 struct fc_rport *rport = rdata->rport; 1429 struct fc_rport_libfc_priv *rp; 1430 struct qedf_rport *fcport; 1431 u32 port_id; 1432 int rval; 1433 unsigned long flags; 1434 1435 QEDF_INFO(&(qedf->dbg_ctx), QEDF_LOG_DISC, "event = %d, " 1436 "port_id = 0x%x\n", event, rdata->ids.port_id); 1437 1438 switch (event) { 1439 case RPORT_EV_READY: 1440 if (!rport) { 1441 QEDF_WARN(&(qedf->dbg_ctx), "rport is NULL.\n"); 1442 break; 1443 } 1444 1445 rp = rport->dd_data; 1446 fcport = (struct qedf_rport *)&rp[1]; 1447 fcport->qedf = qedf; 1448 1449 if (atomic_read(&qedf->num_offloads) >= QEDF_MAX_SESSIONS) { 1450 QEDF_ERR(&(qedf->dbg_ctx), "Not offloading " 1451 "portid=0x%x as max number of offloaded sessions " 1452 "reached.\n", rdata->ids.port_id); 1453 return; 1454 } 1455 1456 /* 1457 * Don't try to offload the session again. 
Can happen when we 1458 * get an ADISC 1459 */ 1460 if (test_bit(QEDF_RPORT_SESSION_READY, &fcport->flags)) { 1461 QEDF_WARN(&(qedf->dbg_ctx), "Session already " 1462 "offloaded, portid=0x%x.\n", 1463 rdata->ids.port_id); 1464 return; 1465 } 1466 1467 if (rport->port_id == FC_FID_DIR_SERV) { 1468 /* 1469 * qedf_rport structure doesn't exist for 1470 * directory server. 1471 * We should not come here, as lport will 1472 * take care of fabric login 1473 */ 1474 QEDF_WARN(&(qedf->dbg_ctx), "rport struct does not " 1475 "exist for dir server port_id=%x\n", 1476 rdata->ids.port_id); 1477 break; 1478 } 1479 1480 if (rdata->spp_type != FC_TYPE_FCP) { 1481 QEDF_INFO(&(qedf->dbg_ctx), QEDF_LOG_DISC, 1482 "Not offloading since spp type isn't FCP\n"); 1483 break; 1484 } 1485 if (!(rdata->ids.roles & FC_RPORT_ROLE_FCP_TARGET)) { 1486 QEDF_INFO(&(qedf->dbg_ctx), QEDF_LOG_DISC, 1487 "Not FCP target so not offloading\n"); 1488 break; 1489 } 1490 1491 /* Initial reference held on entry, so this can't fail */ 1492 kref_get(&rdata->kref); 1493 fcport->rdata = rdata; 1494 fcport->rport = rport; 1495 1496 rval = qedf_alloc_sq(qedf, fcport); 1497 if (rval) { 1498 qedf_cleanup_fcport(qedf, fcport); 1499 break; 1500 } 1501 1502 /* Set device type */ 1503 if (rdata->flags & FC_RP_FLAGS_RETRY && 1504 rdata->ids.roles & FC_RPORT_ROLE_FCP_TARGET && 1505 !(rdata->ids.roles & FC_RPORT_ROLE_FCP_INITIATOR)) { 1506 fcport->dev_type = QEDF_RPORT_TYPE_TAPE; 1507 QEDF_INFO(&(qedf->dbg_ctx), QEDF_LOG_DISC, 1508 "portid=%06x is a TAPE device.\n", 1509 rdata->ids.port_id); 1510 } else { 1511 fcport->dev_type = QEDF_RPORT_TYPE_DISK; 1512 } 1513 1514 rval = qedf_offload_connection(qedf, fcport); 1515 if (rval) { 1516 qedf_cleanup_fcport(qedf, fcport); 1517 break; 1518 } 1519 1520 /* Add fcport to list of qedf_ctx list of offloaded ports */ 1521 spin_lock_irqsave(&qedf->hba_lock, flags); 1522 list_add_rcu(&fcport->peers, &qedf->fcports); 1523 spin_unlock_irqrestore(&qedf->hba_lock, flags); 1524 1525 /* 1526 * Set the session ready bit to let everyone know that this 1527 * connection is ready for I/O 1528 */ 1529 set_bit(QEDF_RPORT_SESSION_READY, &fcport->flags); 1530 atomic_inc(&qedf->num_offloads); 1531 1532 break; 1533 case RPORT_EV_LOGO: 1534 case RPORT_EV_FAILED: 1535 case RPORT_EV_STOP: 1536 port_id = rdata->ids.port_id; 1537 if (port_id == FC_FID_DIR_SERV) 1538 break; 1539 1540 if (!rport) { 1541 QEDF_INFO(&(qedf->dbg_ctx), QEDF_LOG_DISC, 1542 "port_id=%x - rport notcreated Yet!!\n", port_id); 1543 break; 1544 } 1545 rp = rport->dd_data; 1546 /* 1547 * Perform session upload. Note that rdata->peers is already 1548 * removed from disc->rports list before we get this event. 
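 * The QEDF_RPORT_UPLOADING_CONNECTION bit is set under rport_lock below so
 * that only one context tears the session down; qedf_eh_abort() tests the
 * same bit and waits for outstanding commands to be flushed rather than
 * issuing an ABTS while the upload is in progress.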
 */
		fcport = (struct qedf_rport *)&rp[1];

		spin_lock_irqsave(&fcport->rport_lock, flags);
		/* Only free this fcport if it is offloaded already */
		if (test_bit(QEDF_RPORT_SESSION_READY, &fcport->flags) &&
		    !test_bit(QEDF_RPORT_UPLOADING_CONNECTION,
		    &fcport->flags)) {
			set_bit(QEDF_RPORT_UPLOADING_CONNECTION,
				&fcport->flags);
			spin_unlock_irqrestore(&fcport->rport_lock, flags);
			qedf_cleanup_fcport(qedf, fcport);
			/*
			 * Remove fcport from the qedf_ctx list of offloaded
			 * ports.
			 */
			spin_lock_irqsave(&qedf->hba_lock, flags);
			list_del_rcu(&fcport->peers);
			spin_unlock_irqrestore(&qedf->hba_lock, flags);

			clear_bit(QEDF_RPORT_UPLOADING_CONNECTION,
			    &fcport->flags);
			atomic_dec(&qedf->num_offloads);
		} else {
			spin_unlock_irqrestore(&fcport->rport_lock, flags);
		}
		break;

	case RPORT_EV_NONE:
		break;
	}
}

static void qedf_abort_io(struct fc_lport *lport)
{
	/* NO-OP but need to fill in the template */
}

static void qedf_fcp_cleanup(struct fc_lport *lport)
{
	/*
	 * NO-OP but need to fill in template to prevent a NULL
	 * function pointer dereference during link down. I/Os
	 * will be flushed when port is uploaded.
	 */
}

static struct libfc_function_template qedf_lport_template = {
	.frame_send = qedf_xmit,
	.fcp_abort_io = qedf_abort_io,
	.fcp_cleanup = qedf_fcp_cleanup,
	.rport_event_callback = qedf_rport_event_handler,
	.elsct_send = qedf_elsct_send,
};

static void qedf_fcoe_ctlr_setup(struct qedf_ctx *qedf)
{
	fcoe_ctlr_init(&qedf->ctlr, FIP_MODE_AUTO);

	qedf->ctlr.send = qedf_fip_send;
	qedf->ctlr.get_src_addr = qedf_get_src_mac;
	ether_addr_copy(qedf->ctlr.ctl_src_addr, qedf->mac);
}

static void qedf_setup_fdmi(struct qedf_ctx *qedf)
{
	struct fc_lport *lport = qedf->lport;
	struct fc_host_attrs *fc_host = shost_to_fc_host(lport->host);
	u64 dsn;

	/*
	 * fdmi_enabled needs to be set for libfc to execute FDMI registration.
	 */
	lport->fdmi_enabled = 1;

	/*
	 * Set up the necessary fc_host attributes that will be used to fill
	 * in the FDMI information.
1627 */ 1628 1629 /* Get the PCI-e Device Serial Number Capability */ 1630 dsn = pci_get_dsn(qedf->pdev); 1631 if (dsn) 1632 snprintf(fc_host->serial_number, 1633 sizeof(fc_host->serial_number), "%016llX", dsn); 1634 else 1635 snprintf(fc_host->serial_number, 1636 sizeof(fc_host->serial_number), "Unknown"); 1637 1638 snprintf(fc_host->manufacturer, 1639 sizeof(fc_host->manufacturer), "%s", "Cavium Inc."); 1640 1641 snprintf(fc_host->model, sizeof(fc_host->model), "%s", "QL41000"); 1642 1643 snprintf(fc_host->model_description, sizeof(fc_host->model_description), 1644 "%s", "QLogic FastLinQ QL41000 Series 10/25/40/50GGbE Controller" 1645 "(FCoE)"); 1646 1647 snprintf(fc_host->hardware_version, sizeof(fc_host->hardware_version), 1648 "Rev %d", qedf->pdev->revision); 1649 1650 snprintf(fc_host->driver_version, sizeof(fc_host->driver_version), 1651 "%s", QEDF_VERSION); 1652 1653 snprintf(fc_host->firmware_version, sizeof(fc_host->firmware_version), 1654 "%d.%d.%d.%d", FW_MAJOR_VERSION, FW_MINOR_VERSION, 1655 FW_REVISION_VERSION, FW_ENGINEERING_VERSION); 1656 } 1657 1658 static int qedf_lport_setup(struct qedf_ctx *qedf) 1659 { 1660 struct fc_lport *lport = qedf->lport; 1661 1662 lport->link_up = 0; 1663 lport->max_retry_count = QEDF_FLOGI_RETRY_CNT; 1664 lport->max_rport_retry_count = QEDF_RPORT_RETRY_CNT; 1665 lport->service_params = (FCP_SPPF_INIT_FCN | FCP_SPPF_RD_XRDY_DIS | 1666 FCP_SPPF_RETRY | FCP_SPPF_CONF_COMPL); 1667 lport->boot_time = jiffies; 1668 lport->e_d_tov = 2 * 1000; 1669 lport->r_a_tov = 10 * 1000; 1670 1671 /* Set NPIV support */ 1672 lport->does_npiv = 1; 1673 fc_host_max_npiv_vports(lport->host) = QEDF_MAX_NPIV; 1674 1675 fc_set_wwnn(lport, qedf->wwnn); 1676 fc_set_wwpn(lport, qedf->wwpn); 1677 1678 if (fcoe_libfc_config(lport, &qedf->ctlr, &qedf_lport_template, 0)) { 1679 QEDF_ERR(&qedf->dbg_ctx, 1680 "fcoe_libfc_config failed.\n"); 1681 return -ENOMEM; 1682 } 1683 1684 /* Allocate the exchange manager */ 1685 fc_exch_mgr_alloc(lport, FC_CLASS_3, FCOE_PARAMS_NUM_TASKS, 1686 0xfffe, NULL); 1687 1688 if (fc_lport_init_stats(lport)) 1689 return -ENOMEM; 1690 1691 /* Finish lport config */ 1692 fc_lport_config(lport); 1693 1694 /* Set max frame size */ 1695 fc_set_mfs(lport, QEDF_MFS); 1696 fc_host_maxframe_size(lport->host) = lport->mfs; 1697 1698 /* Set default dev_loss_tmo based on module parameter */ 1699 fc_host_dev_loss_tmo(lport->host) = qedf_dev_loss_tmo; 1700 1701 /* Set symbolic node name */ 1702 snprintf(fc_host_symbolic_name(lport->host), 256, 1703 "QLogic %s v%s", QEDF_MODULE_NAME, QEDF_VERSION); 1704 1705 qedf_setup_fdmi(qedf); 1706 1707 return 0; 1708 } 1709 1710 /* 1711 * NPIV functions 1712 */ 1713 1714 static int qedf_vport_libfc_config(struct fc_vport *vport, 1715 struct fc_lport *lport) 1716 { 1717 lport->link_up = 0; 1718 lport->qfull = 0; 1719 lport->max_retry_count = QEDF_FLOGI_RETRY_CNT; 1720 lport->max_rport_retry_count = QEDF_RPORT_RETRY_CNT; 1721 lport->service_params = (FCP_SPPF_INIT_FCN | FCP_SPPF_RD_XRDY_DIS | 1722 FCP_SPPF_RETRY | FCP_SPPF_CONF_COMPL); 1723 lport->boot_time = jiffies; 1724 lport->e_d_tov = 2 * 1000; 1725 lport->r_a_tov = 10 * 1000; 1726 lport->does_npiv = 1; /* Temporary until we add NPIV support */ 1727 1728 /* Allocate stats for vport */ 1729 if (fc_lport_init_stats(lport)) 1730 return -ENOMEM; 1731 1732 /* Finish lport config */ 1733 fc_lport_config(lport); 1734 1735 /* offload related configuration */ 1736 lport->crc_offload = 0; 1737 lport->seq_offload = 0; 1738 lport->lro_enabled = 0; 1739 lport->lro_xid = 0; 1740 
lport->lso_max = 0; 1741 1742 return 0; 1743 } 1744 1745 static int qedf_vport_create(struct fc_vport *vport, bool disabled) 1746 { 1747 struct Scsi_Host *shost = vport_to_shost(vport); 1748 struct fc_lport *n_port = shost_priv(shost); 1749 struct fc_lport *vn_port; 1750 struct qedf_ctx *base_qedf = lport_priv(n_port); 1751 struct qedf_ctx *vport_qedf; 1752 1753 char buf[32]; 1754 int rc = 0; 1755 1756 rc = fcoe_validate_vport_create(vport); 1757 if (rc) { 1758 fcoe_wwn_to_str(vport->port_name, buf, sizeof(buf)); 1759 QEDF_WARN(&(base_qedf->dbg_ctx), "Failed to create vport, " 1760 "WWPN (0x%s) already exists.\n", buf); 1761 goto err1; 1762 } 1763 1764 if (atomic_read(&base_qedf->link_state) != QEDF_LINK_UP) { 1765 QEDF_WARN(&(base_qedf->dbg_ctx), "Cannot create vport " 1766 "because link is not up.\n"); 1767 rc = -EIO; 1768 goto err1; 1769 } 1770 1771 vn_port = libfc_vport_create(vport, sizeof(struct qedf_ctx)); 1772 if (!vn_port) { 1773 QEDF_WARN(&(base_qedf->dbg_ctx), "Could not create lport " 1774 "for vport.\n"); 1775 rc = -ENOMEM; 1776 goto err1; 1777 } 1778 1779 fcoe_wwn_to_str(vport->port_name, buf, sizeof(buf)); 1780 QEDF_ERR(&(base_qedf->dbg_ctx), "Creating NPIV port, WWPN=%s.\n", 1781 buf); 1782 1783 /* Copy some fields from base_qedf */ 1784 vport_qedf = lport_priv(vn_port); 1785 memcpy(vport_qedf, base_qedf, sizeof(struct qedf_ctx)); 1786 1787 /* Set qedf data specific to this vport */ 1788 vport_qedf->lport = vn_port; 1789 /* Use same hba_lock as base_qedf */ 1790 vport_qedf->hba_lock = base_qedf->hba_lock; 1791 vport_qedf->pdev = base_qedf->pdev; 1792 vport_qedf->cmd_mgr = base_qedf->cmd_mgr; 1793 init_completion(&vport_qedf->flogi_compl); 1794 INIT_LIST_HEAD(&vport_qedf->fcports); 1795 1796 rc = qedf_vport_libfc_config(vport, vn_port); 1797 if (rc) { 1798 QEDF_ERR(&(base_qedf->dbg_ctx), "Could not allocate memory " 1799 "for lport stats.\n"); 1800 goto err2; 1801 } 1802 1803 fc_set_wwnn(vn_port, vport->node_name); 1804 fc_set_wwpn(vn_port, vport->port_name); 1805 vport_qedf->wwnn = vn_port->wwnn; 1806 vport_qedf->wwpn = vn_port->wwpn; 1807 1808 vn_port->host->transportt = qedf_fc_vport_transport_template; 1809 vn_port->host->can_queue = FCOE_PARAMS_NUM_TASKS; 1810 vn_port->host->max_lun = qedf_max_lun; 1811 vn_port->host->sg_tablesize = QEDF_MAX_BDS_PER_CMD; 1812 vn_port->host->max_cmd_len = QEDF_MAX_CDB_LEN; 1813 1814 rc = scsi_add_host(vn_port->host, &vport->dev); 1815 if (rc) { 1816 QEDF_WARN(&base_qedf->dbg_ctx, 1817 "Error adding Scsi_Host rc=0x%x.\n", rc); 1818 goto err2; 1819 } 1820 1821 /* Set default dev_loss_tmo based on module parameter */ 1822 fc_host_dev_loss_tmo(vn_port->host) = qedf_dev_loss_tmo; 1823 1824 /* Init libfc stuffs */ 1825 memcpy(&vn_port->tt, &qedf_lport_template, 1826 sizeof(qedf_lport_template)); 1827 fc_exch_init(vn_port); 1828 fc_elsct_init(vn_port); 1829 fc_lport_init(vn_port); 1830 fc_disc_init(vn_port); 1831 fc_disc_config(vn_port, vn_port); 1832 1833 1834 /* Allocate the exchange manager */ 1835 shost = vport_to_shost(vport); 1836 n_port = shost_priv(shost); 1837 fc_exch_mgr_list_clone(n_port, vn_port); 1838 1839 /* Set max frame size */ 1840 fc_set_mfs(vn_port, QEDF_MFS); 1841 1842 fc_host_port_type(vn_port->host) = FC_PORTTYPE_UNKNOWN; 1843 1844 if (disabled) { 1845 fc_vport_set_state(vport, FC_VPORT_DISABLED); 1846 } else { 1847 vn_port->boot_time = jiffies; 1848 fc_fabric_login(vn_port); 1849 fc_vport_setlink(vn_port); 1850 } 1851 1852 QEDF_INFO(&(base_qedf->dbg_ctx), QEDF_LOG_NPIV, "vn_port=%p.\n", 1853 vn_port); 1854 1855 /* Set up 
debug context for vport */
1856 vport_qedf->dbg_ctx.host_no = vn_port->host->host_no;
1857 vport_qedf->dbg_ctx.pdev = base_qedf->pdev;
1858 return 0;
1859 err2:
1860 scsi_host_put(vn_port->host);
1861 err1:
1862 return rc;
1863 }
1864
1865 static int qedf_vport_destroy(struct fc_vport *vport)
1866 {
1867 struct Scsi_Host *shost = vport_to_shost(vport);
1868 struct fc_lport *n_port = shost_priv(shost);
1869 struct fc_lport *vn_port = vport->dd_data;
1870 struct qedf_ctx *qedf = lport_priv(vn_port);
1871
1872 if (!qedf) {
1873 QEDF_ERR(NULL, "qedf is NULL.\n");
1874 goto out;
1875 }
1876
1877 /* Set unloading bit on vport qedf_ctx to prevent more I/O */
1878 set_bit(QEDF_UNLOADING, &qedf->flags);
1879
1880 mutex_lock(&n_port->lp_mutex);
1881 list_del(&vn_port->list);
1882 mutex_unlock(&n_port->lp_mutex);
1883
1884 fc_fabric_logoff(vn_port);
1885 fc_lport_destroy(vn_port);
1886
1887 /* Detach from scsi-ml */
1888 fc_remove_host(vn_port->host);
1889 scsi_remove_host(vn_port->host);
1890
1891 /*
1892 * Only try to release the exchange manager if the vn_port
1893 * configuration is complete.
1894 */
1895 if (vn_port->state == LPORT_ST_READY)
1896 fc_exch_mgr_free(vn_port);
1897
1898 /* Free memory used by statistical counters */
1899 fc_lport_free_stats(vn_port);
1900
1901 /* Release Scsi_Host */
1902 if (vn_port->host)
1903 scsi_host_put(vn_port->host);
1904
1905 out:
1906 return 0;
1907 }
1908
1909 static int qedf_vport_disable(struct fc_vport *vport, bool disable)
1910 {
1911 struct fc_lport *lport = vport->dd_data;
1912
1913 if (disable) {
1914 fc_vport_set_state(vport, FC_VPORT_DISABLED);
1915 fc_fabric_logoff(lport);
1916 } else {
1917 lport->boot_time = jiffies;
1918 fc_fabric_login(lport);
1919 fc_vport_setlink(lport);
1920 }
1921 return 0;
1922 }
1923
1924 /*
1925 * During removal we need to wait for all the vports associated with a port
1926 * to be destroyed so we avoid a race condition where libfc is still trying
1927 * to reap vports while the driver remove function has already reaped the
1928 * driver contexts associated with the physical port.
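 * The wait loop below simply polls fc_host->npiv_vports_inuse once per
 * second until the FC transport has reaped every NPIV port.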
1929 */
1930 static void qedf_wait_for_vport_destroy(struct qedf_ctx *qedf)
1931 {
1932 struct fc_host_attrs *fc_host = shost_to_fc_host(qedf->lport->host);
1933
1934 QEDF_INFO(&(qedf->dbg_ctx), QEDF_LOG_NPIV,
1935 "Entered.\n");
1936 while (fc_host->npiv_vports_inuse > 0) {
1937 QEDF_INFO(&(qedf->dbg_ctx), QEDF_LOG_NPIV,
1938 "Waiting for all vports to be reaped.\n");
1939 msleep(1000);
1940 }
1941 }
1942
1943 /**
1944 * qedf_fcoe_reset - Resets the FCoE controller
1945 *
1946 * @shost: shost the reset is from
1947 *
1948 * Returns: always 0
1949 */
1950 static int qedf_fcoe_reset(struct Scsi_Host *shost)
1951 {
1952 struct fc_lport *lport = shost_priv(shost);
1953
1954 qedf_ctx_soft_reset(lport);
1955 return 0;
1956 }
1957
1958 static void qedf_get_host_port_id(struct Scsi_Host *shost)
1959 {
1960 struct fc_lport *lport = shost_priv(shost);
1961
1962 fc_host_port_id(shost) = lport->port_id;
1963 }
1964
1965 static struct fc_host_statistics *qedf_fc_get_host_stats(struct Scsi_Host
1966 *shost)
1967 {
1968 struct fc_host_statistics *qedf_stats;
1969 struct fc_lport *lport = shost_priv(shost);
1970 struct qedf_ctx *qedf = lport_priv(lport);
1971 struct qed_fcoe_stats *fw_fcoe_stats;
1972
1973 qedf_stats = fc_get_host_stats(shost);
1974
1975 /* We don't collect offload stats for specific NPIV ports */
1976 if (lport->vport)
1977 goto out;
1978
1979 fw_fcoe_stats = kmalloc(sizeof(struct qed_fcoe_stats), GFP_KERNEL);
1980 if (!fw_fcoe_stats) {
1981 QEDF_ERR(&(qedf->dbg_ctx), "Could not allocate memory for "
1982 "fw_fcoe_stats.\n");
1983 goto out;
1984 }
1985
1986 mutex_lock(&qedf->stats_mutex);
1987
1988 /* Query firmware for offload stats */
1989 qed_ops->get_stats(qedf->cdev, fw_fcoe_stats);
1990
1991 /*
1992 * The expectation is that we add our offload stats to the stats
1993 * being maintained by libfc each time the fc_get_host_stats callback
1994 * is invoked. The additions are not carried over for each call to
1995 * the fc_get_host_stats callback.
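 * Note that offload statistics are only gathered for the physical port;
 * NPIV ports return the libfc-maintained counters unchanged (see the
 * lport->vport check above).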
1996 */ 1997 qedf_stats->tx_frames += fw_fcoe_stats->fcoe_tx_data_pkt_cnt + 1998 fw_fcoe_stats->fcoe_tx_xfer_pkt_cnt + 1999 fw_fcoe_stats->fcoe_tx_other_pkt_cnt; 2000 qedf_stats->rx_frames += fw_fcoe_stats->fcoe_rx_data_pkt_cnt + 2001 fw_fcoe_stats->fcoe_rx_xfer_pkt_cnt + 2002 fw_fcoe_stats->fcoe_rx_other_pkt_cnt; 2003 qedf_stats->fcp_input_megabytes += 2004 do_div(fw_fcoe_stats->fcoe_rx_byte_cnt, 1000000); 2005 qedf_stats->fcp_output_megabytes += 2006 do_div(fw_fcoe_stats->fcoe_tx_byte_cnt, 1000000); 2007 qedf_stats->rx_words += fw_fcoe_stats->fcoe_rx_byte_cnt / 4; 2008 qedf_stats->tx_words += fw_fcoe_stats->fcoe_tx_byte_cnt / 4; 2009 qedf_stats->invalid_crc_count += 2010 fw_fcoe_stats->fcoe_silent_drop_pkt_crc_error_cnt; 2011 qedf_stats->dumped_frames = 2012 fw_fcoe_stats->fcoe_silent_drop_total_pkt_cnt; 2013 qedf_stats->error_frames += 2014 fw_fcoe_stats->fcoe_silent_drop_total_pkt_cnt; 2015 qedf_stats->fcp_input_requests += qedf->input_requests; 2016 qedf_stats->fcp_output_requests += qedf->output_requests; 2017 qedf_stats->fcp_control_requests += qedf->control_requests; 2018 qedf_stats->fcp_packet_aborts += qedf->packet_aborts; 2019 qedf_stats->fcp_frame_alloc_failures += qedf->alloc_failures; 2020 2021 mutex_unlock(&qedf->stats_mutex); 2022 kfree(fw_fcoe_stats); 2023 out: 2024 return qedf_stats; 2025 } 2026 2027 static struct fc_function_template qedf_fc_transport_fn = { 2028 .show_host_node_name = 1, 2029 .show_host_port_name = 1, 2030 .show_host_supported_classes = 1, 2031 .show_host_supported_fc4s = 1, 2032 .show_host_active_fc4s = 1, 2033 .show_host_maxframe_size = 1, 2034 2035 .get_host_port_id = qedf_get_host_port_id, 2036 .show_host_port_id = 1, 2037 .show_host_supported_speeds = 1, 2038 .get_host_speed = fc_get_host_speed, 2039 .show_host_speed = 1, 2040 .show_host_port_type = 1, 2041 .get_host_port_state = fc_get_host_port_state, 2042 .show_host_port_state = 1, 2043 .show_host_symbolic_name = 1, 2044 2045 /* 2046 * Tell FC transport to allocate enough space to store the backpointer 2047 * for the associate qedf_rport struct. 
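 * (libfc lays out the fc_rport_libfc_priv area first and the qedf_rport
 * data follows it.)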
2048 */ 2049 .dd_fcrport_size = (sizeof(struct fc_rport_libfc_priv) + 2050 sizeof(struct qedf_rport)), 2051 .show_rport_maxframe_size = 1, 2052 .show_rport_supported_classes = 1, 2053 .show_host_fabric_name = 1, 2054 .show_starget_node_name = 1, 2055 .show_starget_port_name = 1, 2056 .show_starget_port_id = 1, 2057 .set_rport_dev_loss_tmo = fc_set_rport_loss_tmo, 2058 .show_rport_dev_loss_tmo = 1, 2059 .get_fc_host_stats = qedf_fc_get_host_stats, 2060 .issue_fc_host_lip = qedf_fcoe_reset, 2061 .vport_create = qedf_vport_create, 2062 .vport_delete = qedf_vport_destroy, 2063 .vport_disable = qedf_vport_disable, 2064 .bsg_request = fc_lport_bsg_request, 2065 }; 2066 2067 static struct fc_function_template qedf_fc_vport_transport_fn = { 2068 .show_host_node_name = 1, 2069 .show_host_port_name = 1, 2070 .show_host_supported_classes = 1, 2071 .show_host_supported_fc4s = 1, 2072 .show_host_active_fc4s = 1, 2073 .show_host_maxframe_size = 1, 2074 .show_host_port_id = 1, 2075 .show_host_supported_speeds = 1, 2076 .get_host_speed = fc_get_host_speed, 2077 .show_host_speed = 1, 2078 .show_host_port_type = 1, 2079 .get_host_port_state = fc_get_host_port_state, 2080 .show_host_port_state = 1, 2081 .show_host_symbolic_name = 1, 2082 .dd_fcrport_size = (sizeof(struct fc_rport_libfc_priv) + 2083 sizeof(struct qedf_rport)), 2084 .show_rport_maxframe_size = 1, 2085 .show_rport_supported_classes = 1, 2086 .show_host_fabric_name = 1, 2087 .show_starget_node_name = 1, 2088 .show_starget_port_name = 1, 2089 .show_starget_port_id = 1, 2090 .set_rport_dev_loss_tmo = fc_set_rport_loss_tmo, 2091 .show_rport_dev_loss_tmo = 1, 2092 .get_fc_host_stats = fc_get_host_stats, 2093 .issue_fc_host_lip = qedf_fcoe_reset, 2094 .bsg_request = fc_lport_bsg_request, 2095 }; 2096 2097 static bool qedf_fp_has_work(struct qedf_fastpath *fp) 2098 { 2099 struct qedf_ctx *qedf = fp->qedf; 2100 struct global_queue *que; 2101 struct qed_sb_info *sb_info = fp->sb_info; 2102 struct status_block_e4 *sb = sb_info->sb_virt; 2103 u16 prod_idx; 2104 2105 /* Get the pointer to the global CQ this completion is on */ 2106 que = qedf->global_queues[fp->sb_id]; 2107 2108 /* Be sure all responses have been written to PI */ 2109 rmb(); 2110 2111 /* Get the current firmware producer index */ 2112 prod_idx = sb->pi_array[QEDF_FCOE_PARAMS_GL_RQ_PI]; 2113 2114 return (que->cq_prod_idx != prod_idx); 2115 } 2116 2117 /* 2118 * Interrupt handler code. 2119 */ 2120 2121 /* Process completion queue and copy CQE contents for deferred processesing 2122 * 2123 * Return true if we should wake the I/O thread, false if not. 2124 */ 2125 static bool qedf_process_completions(struct qedf_fastpath *fp) 2126 { 2127 struct qedf_ctx *qedf = fp->qedf; 2128 struct qed_sb_info *sb_info = fp->sb_info; 2129 struct status_block_e4 *sb = sb_info->sb_virt; 2130 struct global_queue *que; 2131 u16 prod_idx; 2132 struct fcoe_cqe *cqe; 2133 struct qedf_io_work *io_work; 2134 int num_handled = 0; 2135 unsigned int cpu; 2136 struct qedf_ioreq *io_req = NULL; 2137 u16 xid; 2138 u16 new_cqes; 2139 u32 comp_type; 2140 2141 /* Get the current firmware producer index */ 2142 prod_idx = sb->pi_array[QEDF_FCOE_PARAMS_GL_RQ_PI]; 2143 2144 /* Get the pointer to the global CQ this completion is on */ 2145 que = qedf->global_queues[fp->sb_id]; 2146 2147 /* Calculate the amount of new elements since last processing */ 2148 new_cqes = (prod_idx >= que->cq_prod_idx) ? 
2149 (prod_idx - que->cq_prod_idx) :
2150 0x10000 - que->cq_prod_idx + prod_idx;
2151
2152 /* Save producer index */
2153 que->cq_prod_idx = prod_idx;
2154
2155 while (new_cqes) {
2156 fp->completions++;
2157 num_handled++;
2158 cqe = &que->cq[que->cq_cons_idx];
2159
2160 comp_type = (cqe->cqe_data >> FCOE_CQE_CQE_TYPE_SHIFT) &
2161 FCOE_CQE_CQE_TYPE_MASK;
2162
2163 /*
2164 * Process unsolicited CQEs directly in the interrupt handler
2165 * since we need the fastpath ID
2166 */
2167 if (comp_type == FCOE_UNSOLIC_CQE_TYPE) {
2168 QEDF_INFO(&(qedf->dbg_ctx), QEDF_LOG_UNSOL,
2169 "Unsolicited CQE.\n");
2170 qedf_process_unsol_compl(qedf, fp->sb_id, cqe);
2171 /*
2172 * Don't add a work list item. Increment the consumer
2173 * index and move on.
2174 */
2175 goto inc_idx;
2176 }
2177
2178 xid = cqe->cqe_data & FCOE_CQE_TASK_ID_MASK;
2179 io_req = &qedf->cmd_mgr->cmds[xid];
2180
2181 /*
2182 * Figure out which percpu thread we should queue this I/O
2183 * on.
2184 */
2185 if (!io_req)
2186 /* If there is no io_req associated with this CQE,
2187 * just queue it on CPU 0
2188 */
2189 cpu = 0;
2190 else {
2191 cpu = io_req->cpu;
2192 io_req->int_cpu = smp_processor_id();
2193 }
2194
2195 io_work = mempool_alloc(qedf->io_mempool, GFP_ATOMIC);
2196 if (!io_work) {
2197 QEDF_WARN(&(qedf->dbg_ctx), "Could not allocate "
2198 "work for I/O completion.\n");
2199 continue;
2200 }
2201 memset(io_work, 0, sizeof(struct qedf_io_work));
2202
2203 INIT_WORK(&io_work->work, qedf_fp_io_handler);
2204
2205 /* Copy contents of CQE for deferred processing */
2206 memcpy(&io_work->cqe, cqe, sizeof(struct fcoe_cqe));
2207
2208 io_work->qedf = fp->qedf;
2209 io_work->fp = NULL; /* Only used for unsolicited frames */
2210
2211 queue_work_on(cpu, qedf_io_wq, &io_work->work);
2212
2213 inc_idx:
2214 que->cq_cons_idx++;
2215 if (que->cq_cons_idx == fp->cq_num_entries)
2216 que->cq_cons_idx = 0;
2217 new_cqes--;
2218 }
2219
2220 return true;
2221 }
2222
2223
2224 /* MSI-X fastpath handler code */
2225 static irqreturn_t qedf_msix_handler(int irq, void *dev_id)
2226 {
2227 struct qedf_fastpath *fp = dev_id;
2228
2229 if (!fp) {
2230 QEDF_ERR(NULL, "fp is null.\n");
2231 return IRQ_HANDLED;
2232 }
2233 if (!fp->sb_info) {
2234 QEDF_ERR(NULL, "fp->sb_info is null.\n");
2235 return IRQ_HANDLED;
2236 }
2237
2238 /*
2239 * Disable interrupts for this status block while we process new
2240 * completions
2241 */
2242 qed_sb_ack(fp->sb_info, IGU_INT_DISABLE, 0 /*do not update*/);
2243
2244 while (1) {
2245 qedf_process_completions(fp);
2246
2247 if (qedf_fp_has_work(fp) == 0) {
2248 /* Update the sb information */
2249 qed_sb_update_sb_idx(fp->sb_info);
2250
2251 /* Check for more work */
2252 rmb();
2253
2254 if (qedf_fp_has_work(fp) == 0) {
2255 /* Re-enable interrupts */
2256 qed_sb_ack(fp->sb_info, IGU_INT_ENABLE, 1);
2257 return IRQ_HANDLED;
2258 }
2259 }
2260 }
2261
2262 /* Do we ever want to break out of above loop?
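 * In practice the loop above exits once qedf_fp_has_work() reports no new
 * completions after the status block index has been updated, at which point
 * interrupts are re-enabled and IRQ_HANDLED is returned.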
*/ 2263 return IRQ_HANDLED; 2264 } 2265 2266 /* simd handler for MSI/INTa */ 2267 static void qedf_simd_int_handler(void *cookie) 2268 { 2269 /* Cookie is qedf_ctx struct */ 2270 struct qedf_ctx *qedf = (struct qedf_ctx *)cookie; 2271 2272 QEDF_WARN(&(qedf->dbg_ctx), "qedf=%p.\n", qedf); 2273 } 2274 2275 #define QEDF_SIMD_HANDLER_NUM 0 2276 static void qedf_sync_free_irqs(struct qedf_ctx *qedf) 2277 { 2278 int i; 2279 u16 vector_idx = 0; 2280 u32 vector; 2281 2282 if (qedf->int_info.msix_cnt) { 2283 for (i = 0; i < qedf->int_info.used_cnt; i++) { 2284 vector_idx = i * qedf->dev_info.common.num_hwfns + 2285 qed_ops->common->get_affin_hwfn_idx(qedf->cdev); 2286 QEDF_INFO(&qedf->dbg_ctx, QEDF_LOG_DISC, 2287 "Freeing IRQ #%d vector_idx=%d.\n", 2288 i, vector_idx); 2289 vector = qedf->int_info.msix[vector_idx].vector; 2290 synchronize_irq(vector); 2291 irq_set_affinity_hint(vector, NULL); 2292 irq_set_affinity_notifier(vector, NULL); 2293 free_irq(vector, &qedf->fp_array[i]); 2294 } 2295 } else 2296 qed_ops->common->simd_handler_clean(qedf->cdev, 2297 QEDF_SIMD_HANDLER_NUM); 2298 2299 qedf->int_info.used_cnt = 0; 2300 qed_ops->common->set_fp_int(qedf->cdev, 0); 2301 } 2302 2303 static int qedf_request_msix_irq(struct qedf_ctx *qedf) 2304 { 2305 int i, rc, cpu; 2306 u16 vector_idx = 0; 2307 u32 vector; 2308 2309 cpu = cpumask_first(cpu_online_mask); 2310 for (i = 0; i < qedf->num_queues; i++) { 2311 vector_idx = i * qedf->dev_info.common.num_hwfns + 2312 qed_ops->common->get_affin_hwfn_idx(qedf->cdev); 2313 QEDF_INFO(&qedf->dbg_ctx, QEDF_LOG_DISC, 2314 "Requesting IRQ #%d vector_idx=%d.\n", 2315 i, vector_idx); 2316 vector = qedf->int_info.msix[vector_idx].vector; 2317 rc = request_irq(vector, qedf_msix_handler, 0, "qedf", 2318 &qedf->fp_array[i]); 2319 2320 if (rc) { 2321 QEDF_WARN(&(qedf->dbg_ctx), "request_irq failed.\n"); 2322 qedf_sync_free_irqs(qedf); 2323 return rc; 2324 } 2325 2326 qedf->int_info.used_cnt++; 2327 rc = irq_set_affinity_hint(vector, get_cpu_mask(cpu)); 2328 cpu = cpumask_next(cpu, cpu_online_mask); 2329 } 2330 2331 return 0; 2332 } 2333 2334 static int qedf_setup_int(struct qedf_ctx *qedf) 2335 { 2336 int rc = 0; 2337 2338 /* 2339 * Learn interrupt configuration 2340 */ 2341 rc = qed_ops->common->set_fp_int(qedf->cdev, num_online_cpus()); 2342 if (rc <= 0) 2343 return 0; 2344 2345 rc = qed_ops->common->get_fp_int(qedf->cdev, &qedf->int_info); 2346 if (rc) 2347 return 0; 2348 2349 QEDF_INFO(&(qedf->dbg_ctx), QEDF_LOG_DISC, "Number of msix_cnt = " 2350 "0x%x num of cpus = 0x%x\n", qedf->int_info.msix_cnt, 2351 num_online_cpus()); 2352 2353 if (qedf->int_info.msix_cnt) 2354 return qedf_request_msix_irq(qedf); 2355 2356 qed_ops->common->simd_handler_config(qedf->cdev, &qedf, 2357 QEDF_SIMD_HANDLER_NUM, qedf_simd_int_handler); 2358 qedf->int_info.used_cnt = 1; 2359 2360 QEDF_ERR(&qedf->dbg_ctx, 2361 "Cannot load driver due to a lack of MSI-X vectors.\n"); 2362 return -EINVAL; 2363 } 2364 2365 /* Main function for libfc frame reception */ 2366 static void qedf_recv_frame(struct qedf_ctx *qedf, 2367 struct sk_buff *skb) 2368 { 2369 u32 fr_len; 2370 struct fc_lport *lport; 2371 struct fc_frame_header *fh; 2372 struct fcoe_crc_eof crc_eof; 2373 struct fc_frame *fp; 2374 u8 *mac = NULL; 2375 u8 *dest_mac = NULL; 2376 struct fcoe_hdr *hp; 2377 struct qedf_rport *fcport; 2378 struct fc_lport *vn_port; 2379 u32 f_ctl; 2380 2381 lport = qedf->lport; 2382 if (lport == NULL || lport->state == LPORT_ST_DISABLED) { 2383 QEDF_WARN(NULL, "Invalid lport struct or lport disabled.\n"); 2384 
kfree_skb(skb); 2385 return; 2386 } 2387 2388 if (skb_is_nonlinear(skb)) 2389 skb_linearize(skb); 2390 mac = eth_hdr(skb)->h_source; 2391 dest_mac = eth_hdr(skb)->h_dest; 2392 2393 /* Pull the header */ 2394 hp = (struct fcoe_hdr *)skb->data; 2395 fh = (struct fc_frame_header *) skb_transport_header(skb); 2396 skb_pull(skb, sizeof(struct fcoe_hdr)); 2397 fr_len = skb->len - sizeof(struct fcoe_crc_eof); 2398 2399 fp = (struct fc_frame *)skb; 2400 fc_frame_init(fp); 2401 fr_dev(fp) = lport; 2402 fr_sof(fp) = hp->fcoe_sof; 2403 if (skb_copy_bits(skb, fr_len, &crc_eof, sizeof(crc_eof))) { 2404 QEDF_INFO(NULL, QEDF_LOG_LL2, "skb_copy_bits failed.\n"); 2405 kfree_skb(skb); 2406 return; 2407 } 2408 fr_eof(fp) = crc_eof.fcoe_eof; 2409 fr_crc(fp) = crc_eof.fcoe_crc32; 2410 if (pskb_trim(skb, fr_len)) { 2411 QEDF_INFO(NULL, QEDF_LOG_LL2, "pskb_trim failed.\n"); 2412 kfree_skb(skb); 2413 return; 2414 } 2415 2416 fh = fc_frame_header_get(fp); 2417 2418 /* 2419 * Invalid frame filters. 2420 */ 2421 2422 if (fh->fh_r_ctl == FC_RCTL_DD_SOL_DATA && 2423 fh->fh_type == FC_TYPE_FCP) { 2424 /* Drop FCP data. We dont this in L2 path */ 2425 kfree_skb(skb); 2426 return; 2427 } 2428 if (fh->fh_r_ctl == FC_RCTL_ELS_REQ && 2429 fh->fh_type == FC_TYPE_ELS) { 2430 switch (fc_frame_payload_op(fp)) { 2431 case ELS_LOGO: 2432 if (ntoh24(fh->fh_s_id) == FC_FID_FLOGI) { 2433 /* drop non-FIP LOGO */ 2434 kfree_skb(skb); 2435 return; 2436 } 2437 break; 2438 } 2439 } 2440 2441 if (fh->fh_r_ctl == FC_RCTL_BA_ABTS) { 2442 /* Drop incoming ABTS */ 2443 kfree_skb(skb); 2444 return; 2445 } 2446 2447 if (ntoh24(&dest_mac[3]) != ntoh24(fh->fh_d_id)) { 2448 QEDF_INFO(&(qedf->dbg_ctx), QEDF_LOG_LL2, 2449 "FC frame d_id mismatch with MAC %pM.\n", dest_mac); 2450 kfree_skb(skb); 2451 return; 2452 } 2453 2454 if (qedf->ctlr.state) { 2455 if (!ether_addr_equal(mac, qedf->ctlr.dest_addr)) { 2456 QEDF_INFO(&(qedf->dbg_ctx), QEDF_LOG_LL2, 2457 "Wrong source address: mac:%pM dest_addr:%pM.\n", 2458 mac, qedf->ctlr.dest_addr); 2459 kfree_skb(skb); 2460 return; 2461 } 2462 } 2463 2464 vn_port = fc_vport_id_lookup(lport, ntoh24(fh->fh_d_id)); 2465 2466 /* 2467 * If the destination ID from the frame header does not match what we 2468 * have on record for lport and the search for a NPIV port came up 2469 * empty then this is not addressed to our port so simply drop it. 2470 */ 2471 if (lport->port_id != ntoh24(fh->fh_d_id) && !vn_port) { 2472 QEDF_INFO(&qedf->dbg_ctx, QEDF_LOG_LL2, 2473 "Dropping frame due to destination mismatch: lport->port_id=0x%x fh->d_id=0x%x.\n", 2474 lport->port_id, ntoh24(fh->fh_d_id)); 2475 kfree_skb(skb); 2476 return; 2477 } 2478 2479 f_ctl = ntoh24(fh->fh_f_ctl); 2480 if ((fh->fh_type == FC_TYPE_BLS) && (f_ctl & FC_FC_SEQ_CTX) && 2481 (f_ctl & FC_FC_EX_CTX)) { 2482 /* Drop incoming ABTS response that has both SEQ/EX CTX set */ 2483 QEDF_INFO(&qedf->dbg_ctx, QEDF_LOG_LL2, 2484 "Dropping ABTS response as both SEQ/EX CTX set.\n"); 2485 kfree_skb(skb); 2486 return; 2487 } 2488 2489 /* 2490 * If a connection is uploading, drop incoming FCoE frames as there 2491 * is a small window where we could try to return a frame while libfc 2492 * is trying to clean things up. 
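 * The QEDF_RPORT_UPLOADING_CONNECTION check below covers that window:
 * frames for a session that is being torn down are dropped rather than
 * handed to fc_exch_recv().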
2493 */ 2494 2495 /* Get fcport associated with d_id if it exists */ 2496 fcport = qedf_fcport_lookup(qedf, ntoh24(fh->fh_d_id)); 2497 2498 if (fcport && test_bit(QEDF_RPORT_UPLOADING_CONNECTION, 2499 &fcport->flags)) { 2500 QEDF_INFO(&(qedf->dbg_ctx), QEDF_LOG_LL2, 2501 "Connection uploading, dropping fp=%p.\n", fp); 2502 kfree_skb(skb); 2503 return; 2504 } 2505 2506 QEDF_INFO(&(qedf->dbg_ctx), QEDF_LOG_LL2, "FCoE frame receive: " 2507 "skb=%p fp=%p src=%06x dest=%06x r_ctl=%x fh_type=%x.\n", skb, fp, 2508 ntoh24(fh->fh_s_id), ntoh24(fh->fh_d_id), fh->fh_r_ctl, 2509 fh->fh_type); 2510 if (qedf_dump_frames) 2511 print_hex_dump(KERN_WARNING, "fcoe: ", DUMP_PREFIX_OFFSET, 16, 2512 1, skb->data, skb->len, false); 2513 fc_exch_recv(lport, fp); 2514 } 2515 2516 static void qedf_ll2_process_skb(struct work_struct *work) 2517 { 2518 struct qedf_skb_work *skb_work = 2519 container_of(work, struct qedf_skb_work, work); 2520 struct qedf_ctx *qedf = skb_work->qedf; 2521 struct sk_buff *skb = skb_work->skb; 2522 struct ethhdr *eh; 2523 2524 if (!qedf) { 2525 QEDF_ERR(NULL, "qedf is NULL\n"); 2526 goto err_out; 2527 } 2528 2529 eh = (struct ethhdr *)skb->data; 2530 2531 /* Undo VLAN encapsulation */ 2532 if (eh->h_proto == htons(ETH_P_8021Q)) { 2533 memmove((u8 *)eh + VLAN_HLEN, eh, ETH_ALEN * 2); 2534 eh = skb_pull(skb, VLAN_HLEN); 2535 skb_reset_mac_header(skb); 2536 } 2537 2538 /* 2539 * Process either a FIP frame or FCoE frame based on the 2540 * protocol value. If it's not either just drop the 2541 * frame. 2542 */ 2543 if (eh->h_proto == htons(ETH_P_FIP)) { 2544 qedf_fip_recv(qedf, skb); 2545 goto out; 2546 } else if (eh->h_proto == htons(ETH_P_FCOE)) { 2547 __skb_pull(skb, ETH_HLEN); 2548 qedf_recv_frame(qedf, skb); 2549 goto out; 2550 } else 2551 goto err_out; 2552 2553 err_out: 2554 kfree_skb(skb); 2555 out: 2556 kfree(skb_work); 2557 return; 2558 } 2559 2560 static int qedf_ll2_rx(void *cookie, struct sk_buff *skb, 2561 u32 arg1, u32 arg2) 2562 { 2563 struct qedf_ctx *qedf = (struct qedf_ctx *)cookie; 2564 struct qedf_skb_work *skb_work; 2565 2566 if (atomic_read(&qedf->link_state) == QEDF_LINK_DOWN) { 2567 QEDF_INFO(&qedf->dbg_ctx, QEDF_LOG_LL2, 2568 "Dropping frame as link state is down.\n"); 2569 kfree_skb(skb); 2570 return 0; 2571 } 2572 2573 skb_work = kzalloc(sizeof(struct qedf_skb_work), GFP_ATOMIC); 2574 if (!skb_work) { 2575 QEDF_WARN(&(qedf->dbg_ctx), "Could not allocate skb_work so " 2576 "dropping frame.\n"); 2577 kfree_skb(skb); 2578 return 0; 2579 } 2580 2581 INIT_WORK(&skb_work->work, qedf_ll2_process_skb); 2582 skb_work->skb = skb; 2583 skb_work->qedf = qedf; 2584 queue_work(qedf->ll2_recv_wq, &skb_work->work); 2585 2586 return 0; 2587 } 2588 2589 static struct qed_ll2_cb_ops qedf_ll2_cb_ops = { 2590 .rx_cb = qedf_ll2_rx, 2591 .tx_cb = NULL, 2592 }; 2593 2594 /* Main thread to process I/O completions */ 2595 void qedf_fp_io_handler(struct work_struct *work) 2596 { 2597 struct qedf_io_work *io_work = 2598 container_of(work, struct qedf_io_work, work); 2599 u32 comp_type; 2600 2601 /* 2602 * Deferred part of unsolicited CQE sends 2603 * frame to libfc. 
2604 */ 2605 comp_type = (io_work->cqe.cqe_data >> 2606 FCOE_CQE_CQE_TYPE_SHIFT) & 2607 FCOE_CQE_CQE_TYPE_MASK; 2608 if (comp_type == FCOE_UNSOLIC_CQE_TYPE && 2609 io_work->fp) 2610 fc_exch_recv(io_work->qedf->lport, io_work->fp); 2611 else 2612 qedf_process_cqe(io_work->qedf, &io_work->cqe); 2613 2614 kfree(io_work); 2615 } 2616 2617 static int qedf_alloc_and_init_sb(struct qedf_ctx *qedf, 2618 struct qed_sb_info *sb_info, u16 sb_id) 2619 { 2620 struct status_block_e4 *sb_virt; 2621 dma_addr_t sb_phys; 2622 int ret; 2623 2624 sb_virt = dma_alloc_coherent(&qedf->pdev->dev, 2625 sizeof(struct status_block_e4), &sb_phys, GFP_KERNEL); 2626 2627 if (!sb_virt) { 2628 QEDF_ERR(&qedf->dbg_ctx, 2629 "Status block allocation failed for id = %d.\n", 2630 sb_id); 2631 return -ENOMEM; 2632 } 2633 2634 ret = qed_ops->common->sb_init(qedf->cdev, sb_info, sb_virt, sb_phys, 2635 sb_id, QED_SB_TYPE_STORAGE); 2636 2637 if (ret) { 2638 QEDF_ERR(&qedf->dbg_ctx, 2639 "Status block initialization failed (0x%x) for id = %d.\n", 2640 ret, sb_id); 2641 return ret; 2642 } 2643 2644 return 0; 2645 } 2646 2647 static void qedf_free_sb(struct qedf_ctx *qedf, struct qed_sb_info *sb_info) 2648 { 2649 if (sb_info->sb_virt) 2650 dma_free_coherent(&qedf->pdev->dev, sizeof(*sb_info->sb_virt), 2651 (void *)sb_info->sb_virt, sb_info->sb_phys); 2652 } 2653 2654 static void qedf_destroy_sb(struct qedf_ctx *qedf) 2655 { 2656 int id; 2657 struct qedf_fastpath *fp = NULL; 2658 2659 for (id = 0; id < qedf->num_queues; id++) { 2660 fp = &(qedf->fp_array[id]); 2661 if (fp->sb_id == QEDF_SB_ID_NULL) 2662 break; 2663 qedf_free_sb(qedf, fp->sb_info); 2664 kfree(fp->sb_info); 2665 } 2666 kfree(qedf->fp_array); 2667 } 2668 2669 static int qedf_prepare_sb(struct qedf_ctx *qedf) 2670 { 2671 int id; 2672 struct qedf_fastpath *fp; 2673 int ret; 2674 2675 qedf->fp_array = 2676 kcalloc(qedf->num_queues, sizeof(struct qedf_fastpath), 2677 GFP_KERNEL); 2678 2679 if (!qedf->fp_array) { 2680 QEDF_ERR(&(qedf->dbg_ctx), "fastpath array allocation " 2681 "failed.\n"); 2682 return -ENOMEM; 2683 } 2684 2685 for (id = 0; id < qedf->num_queues; id++) { 2686 fp = &(qedf->fp_array[id]); 2687 fp->sb_id = QEDF_SB_ID_NULL; 2688 fp->sb_info = kcalloc(1, sizeof(*fp->sb_info), GFP_KERNEL); 2689 if (!fp->sb_info) { 2690 QEDF_ERR(&(qedf->dbg_ctx), "SB info struct " 2691 "allocation failed.\n"); 2692 goto err; 2693 } 2694 ret = qedf_alloc_and_init_sb(qedf, fp->sb_info, id); 2695 if (ret) { 2696 QEDF_ERR(&(qedf->dbg_ctx), "SB allocation and " 2697 "initialization failed.\n"); 2698 goto err; 2699 } 2700 fp->sb_id = id; 2701 fp->qedf = qedf; 2702 fp->cq_num_entries = 2703 qedf->global_queues[id]->cq_mem_size / 2704 sizeof(struct fcoe_cqe); 2705 } 2706 err: 2707 return 0; 2708 } 2709 2710 void qedf_process_cqe(struct qedf_ctx *qedf, struct fcoe_cqe *cqe) 2711 { 2712 u16 xid; 2713 struct qedf_ioreq *io_req; 2714 struct qedf_rport *fcport; 2715 u32 comp_type; 2716 2717 comp_type = (cqe->cqe_data >> FCOE_CQE_CQE_TYPE_SHIFT) & 2718 FCOE_CQE_CQE_TYPE_MASK; 2719 2720 xid = cqe->cqe_data & FCOE_CQE_TASK_ID_MASK; 2721 io_req = &qedf->cmd_mgr->cmds[xid]; 2722 2723 /* Completion not for a valid I/O anymore so just return */ 2724 if (!io_req) { 2725 QEDF_ERR(&qedf->dbg_ctx, 2726 "io_req is NULL for xid=0x%x.\n", xid); 2727 return; 2728 } 2729 2730 fcport = io_req->fcport; 2731 2732 if (fcport == NULL) { 2733 QEDF_ERR(&qedf->dbg_ctx, 2734 "fcport is NULL for xid=0x%x io_req=%p.\n", 2735 xid, io_req); 2736 return; 2737 } 2738 2739 /* 2740 * Check that fcport is offloaded. 
If it isn't then the spinlock 2741 * isn't valid and shouldn't be taken. We should just return. 2742 */ 2743 if (!test_bit(QEDF_RPORT_SESSION_READY, &fcport->flags)) { 2744 QEDF_ERR(&qedf->dbg_ctx, 2745 "Session not offloaded yet, fcport = %p.\n", fcport); 2746 return; 2747 } 2748 2749 2750 switch (comp_type) { 2751 case FCOE_GOOD_COMPLETION_CQE_TYPE: 2752 atomic_inc(&fcport->free_sqes); 2753 switch (io_req->cmd_type) { 2754 case QEDF_SCSI_CMD: 2755 qedf_scsi_completion(qedf, cqe, io_req); 2756 break; 2757 case QEDF_ELS: 2758 qedf_process_els_compl(qedf, cqe, io_req); 2759 break; 2760 case QEDF_TASK_MGMT_CMD: 2761 qedf_process_tmf_compl(qedf, cqe, io_req); 2762 break; 2763 case QEDF_SEQ_CLEANUP: 2764 qedf_process_seq_cleanup_compl(qedf, cqe, io_req); 2765 break; 2766 } 2767 break; 2768 case FCOE_ERROR_DETECTION_CQE_TYPE: 2769 atomic_inc(&fcport->free_sqes); 2770 QEDF_INFO(&(qedf->dbg_ctx), QEDF_LOG_IO, 2771 "Error detect CQE.\n"); 2772 qedf_process_error_detect(qedf, cqe, io_req); 2773 break; 2774 case FCOE_EXCH_CLEANUP_CQE_TYPE: 2775 atomic_inc(&fcport->free_sqes); 2776 QEDF_INFO(&(qedf->dbg_ctx), QEDF_LOG_IO, 2777 "Cleanup CQE.\n"); 2778 qedf_process_cleanup_compl(qedf, cqe, io_req); 2779 break; 2780 case FCOE_ABTS_CQE_TYPE: 2781 atomic_inc(&fcport->free_sqes); 2782 QEDF_INFO(&(qedf->dbg_ctx), QEDF_LOG_IO, 2783 "Abort CQE.\n"); 2784 qedf_process_abts_compl(qedf, cqe, io_req); 2785 break; 2786 case FCOE_DUMMY_CQE_TYPE: 2787 atomic_inc(&fcport->free_sqes); 2788 QEDF_INFO(&(qedf->dbg_ctx), QEDF_LOG_IO, 2789 "Dummy CQE.\n"); 2790 break; 2791 case FCOE_LOCAL_COMP_CQE_TYPE: 2792 atomic_inc(&fcport->free_sqes); 2793 QEDF_INFO(&(qedf->dbg_ctx), QEDF_LOG_IO, 2794 "Local completion CQE.\n"); 2795 break; 2796 case FCOE_WARNING_CQE_TYPE: 2797 atomic_inc(&fcport->free_sqes); 2798 QEDF_INFO(&(qedf->dbg_ctx), QEDF_LOG_IO, 2799 "Warning CQE.\n"); 2800 qedf_process_warning_compl(qedf, cqe, io_req); 2801 break; 2802 case MAX_FCOE_CQE_TYPE: 2803 atomic_inc(&fcport->free_sqes); 2804 QEDF_INFO(&(qedf->dbg_ctx), QEDF_LOG_IO, 2805 "Max FCoE CQE.\n"); 2806 break; 2807 default: 2808 QEDF_INFO(&(qedf->dbg_ctx), QEDF_LOG_IO, 2809 "Default CQE.\n"); 2810 break; 2811 } 2812 } 2813 2814 static void qedf_free_bdq(struct qedf_ctx *qedf) 2815 { 2816 int i; 2817 2818 if (qedf->bdq_pbl_list) 2819 dma_free_coherent(&qedf->pdev->dev, QEDF_PAGE_SIZE, 2820 qedf->bdq_pbl_list, qedf->bdq_pbl_list_dma); 2821 2822 if (qedf->bdq_pbl) 2823 dma_free_coherent(&qedf->pdev->dev, qedf->bdq_pbl_mem_size, 2824 qedf->bdq_pbl, qedf->bdq_pbl_dma); 2825 2826 for (i = 0; i < QEDF_BDQ_SIZE; i++) { 2827 if (qedf->bdq[i].buf_addr) { 2828 dma_free_coherent(&qedf->pdev->dev, QEDF_BDQ_BUF_SIZE, 2829 qedf->bdq[i].buf_addr, qedf->bdq[i].buf_dma); 2830 } 2831 } 2832 } 2833 2834 static void qedf_free_global_queues(struct qedf_ctx *qedf) 2835 { 2836 int i; 2837 struct global_queue **gl = qedf->global_queues; 2838 2839 for (i = 0; i < qedf->num_queues; i++) { 2840 if (!gl[i]) 2841 continue; 2842 2843 if (gl[i]->cq) 2844 dma_free_coherent(&qedf->pdev->dev, 2845 gl[i]->cq_mem_size, gl[i]->cq, gl[i]->cq_dma); 2846 if (gl[i]->cq_pbl) 2847 dma_free_coherent(&qedf->pdev->dev, gl[i]->cq_pbl_size, 2848 gl[i]->cq_pbl, gl[i]->cq_pbl_dma); 2849 2850 kfree(gl[i]); 2851 } 2852 2853 qedf_free_bdq(qedf); 2854 } 2855 2856 static int qedf_alloc_bdq(struct qedf_ctx *qedf) 2857 { 2858 int i; 2859 struct scsi_bd *pbl; 2860 u64 *list; 2861 dma_addr_t page; 2862 2863 /* Alloc dma memory for BDQ buffers */ 2864 for (i = 0; i < QEDF_BDQ_SIZE; i++) { 2865 qedf->bdq[i].buf_addr = 
dma_alloc_coherent(&qedf->pdev->dev, 2866 QEDF_BDQ_BUF_SIZE, &qedf->bdq[i].buf_dma, GFP_KERNEL); 2867 if (!qedf->bdq[i].buf_addr) { 2868 QEDF_ERR(&(qedf->dbg_ctx), "Could not allocate BDQ " 2869 "buffer %d.\n", i); 2870 return -ENOMEM; 2871 } 2872 } 2873 2874 /* Alloc dma memory for BDQ page buffer list */ 2875 qedf->bdq_pbl_mem_size = 2876 QEDF_BDQ_SIZE * sizeof(struct scsi_bd); 2877 qedf->bdq_pbl_mem_size = 2878 ALIGN(qedf->bdq_pbl_mem_size, QEDF_PAGE_SIZE); 2879 2880 qedf->bdq_pbl = dma_alloc_coherent(&qedf->pdev->dev, 2881 qedf->bdq_pbl_mem_size, &qedf->bdq_pbl_dma, GFP_KERNEL); 2882 if (!qedf->bdq_pbl) { 2883 QEDF_ERR(&(qedf->dbg_ctx), "Could not allocate BDQ PBL.\n"); 2884 return -ENOMEM; 2885 } 2886 2887 QEDF_INFO(&(qedf->dbg_ctx), QEDF_LOG_DISC, 2888 "BDQ PBL addr=0x%p dma=%pad\n", 2889 qedf->bdq_pbl, &qedf->bdq_pbl_dma); 2890 2891 /* 2892 * Populate BDQ PBL with physical and virtual address of individual 2893 * BDQ buffers 2894 */ 2895 pbl = (struct scsi_bd *)qedf->bdq_pbl; 2896 for (i = 0; i < QEDF_BDQ_SIZE; i++) { 2897 pbl->address.hi = cpu_to_le32(U64_HI(qedf->bdq[i].buf_dma)); 2898 pbl->address.lo = cpu_to_le32(U64_LO(qedf->bdq[i].buf_dma)); 2899 pbl->opaque.fcoe_opaque.hi = 0; 2900 /* Opaque lo data is an index into the BDQ array */ 2901 pbl->opaque.fcoe_opaque.lo = cpu_to_le32(i); 2902 pbl++; 2903 } 2904 2905 /* Allocate list of PBL pages */ 2906 qedf->bdq_pbl_list = dma_alloc_coherent(&qedf->pdev->dev, 2907 QEDF_PAGE_SIZE, 2908 &qedf->bdq_pbl_list_dma, 2909 GFP_KERNEL); 2910 if (!qedf->bdq_pbl_list) { 2911 QEDF_ERR(&(qedf->dbg_ctx), "Could not allocate list of PBL pages.\n"); 2912 return -ENOMEM; 2913 } 2914 2915 /* 2916 * Now populate PBL list with pages that contain pointers to the 2917 * individual buffers. 2918 */ 2919 qedf->bdq_pbl_list_num_entries = qedf->bdq_pbl_mem_size / 2920 QEDF_PAGE_SIZE; 2921 list = (u64 *)qedf->bdq_pbl_list; 2922 page = qedf->bdq_pbl_list_dma; 2923 for (i = 0; i < qedf->bdq_pbl_list_num_entries; i++) { 2924 *list = qedf->bdq_pbl_dma; 2925 list++; 2926 page += QEDF_PAGE_SIZE; 2927 } 2928 2929 return 0; 2930 } 2931 2932 static int qedf_alloc_global_queues(struct qedf_ctx *qedf) 2933 { 2934 u32 *list; 2935 int i; 2936 int status = 0, rc; 2937 u32 *pbl; 2938 dma_addr_t page; 2939 int num_pages; 2940 2941 /* Allocate and map CQs, RQs */ 2942 /* 2943 * Number of global queues (CQ / RQ). 
This should 2944 * be <= number of available MSIX vectors for the PF 2945 */ 2946 if (!qedf->num_queues) { 2947 QEDF_ERR(&(qedf->dbg_ctx), "No MSI-X vectors available!\n"); 2948 return 1; 2949 } 2950 2951 /* 2952 * Make sure we allocated the PBL that will contain the physical 2953 * addresses of our queues 2954 */ 2955 if (!qedf->p_cpuq) { 2956 status = 1; 2957 QEDF_ERR(&qedf->dbg_ctx, "p_cpuq is NULL.\n"); 2958 goto mem_alloc_failure; 2959 } 2960 2961 qedf->global_queues = kzalloc((sizeof(struct global_queue *) 2962 * qedf->num_queues), GFP_KERNEL); 2963 if (!qedf->global_queues) { 2964 QEDF_ERR(&(qedf->dbg_ctx), "Unable to allocate global " 2965 "queues array ptr memory\n"); 2966 return -ENOMEM; 2967 } 2968 QEDF_INFO(&(qedf->dbg_ctx), QEDF_LOG_DISC, 2969 "qedf->global_queues=%p.\n", qedf->global_queues); 2970 2971 /* Allocate DMA coherent buffers for BDQ */ 2972 rc = qedf_alloc_bdq(qedf); 2973 if (rc) { 2974 QEDF_ERR(&qedf->dbg_ctx, "Unable to allocate bdq.\n"); 2975 goto mem_alloc_failure; 2976 } 2977 2978 /* Allocate a CQ and an associated PBL for each MSI-X vector */ 2979 for (i = 0; i < qedf->num_queues; i++) { 2980 qedf->global_queues[i] = kzalloc(sizeof(struct global_queue), 2981 GFP_KERNEL); 2982 if (!qedf->global_queues[i]) { 2983 QEDF_WARN(&(qedf->dbg_ctx), "Unable to allocate " 2984 "global queue %d.\n", i); 2985 status = -ENOMEM; 2986 goto mem_alloc_failure; 2987 } 2988 2989 qedf->global_queues[i]->cq_mem_size = 2990 FCOE_PARAMS_CQ_NUM_ENTRIES * sizeof(struct fcoe_cqe); 2991 qedf->global_queues[i]->cq_mem_size = 2992 ALIGN(qedf->global_queues[i]->cq_mem_size, QEDF_PAGE_SIZE); 2993 2994 qedf->global_queues[i]->cq_pbl_size = 2995 (qedf->global_queues[i]->cq_mem_size / 2996 PAGE_SIZE) * sizeof(void *); 2997 qedf->global_queues[i]->cq_pbl_size = 2998 ALIGN(qedf->global_queues[i]->cq_pbl_size, QEDF_PAGE_SIZE); 2999 3000 qedf->global_queues[i]->cq = 3001 dma_alloc_coherent(&qedf->pdev->dev, 3002 qedf->global_queues[i]->cq_mem_size, 3003 &qedf->global_queues[i]->cq_dma, 3004 GFP_KERNEL); 3005 3006 if (!qedf->global_queues[i]->cq) { 3007 QEDF_WARN(&(qedf->dbg_ctx), "Could not allocate cq.\n"); 3008 status = -ENOMEM; 3009 goto mem_alloc_failure; 3010 } 3011 3012 qedf->global_queues[i]->cq_pbl = 3013 dma_alloc_coherent(&qedf->pdev->dev, 3014 qedf->global_queues[i]->cq_pbl_size, 3015 &qedf->global_queues[i]->cq_pbl_dma, 3016 GFP_KERNEL); 3017 3018 if (!qedf->global_queues[i]->cq_pbl) { 3019 QEDF_WARN(&(qedf->dbg_ctx), "Could not allocate cq PBL.\n"); 3020 status = -ENOMEM; 3021 goto mem_alloc_failure; 3022 } 3023 3024 /* Create PBL */ 3025 num_pages = qedf->global_queues[i]->cq_mem_size / 3026 QEDF_PAGE_SIZE; 3027 page = qedf->global_queues[i]->cq_dma; 3028 pbl = (u32 *)qedf->global_queues[i]->cq_pbl; 3029 3030 while (num_pages--) { 3031 *pbl = U64_LO(page); 3032 pbl++; 3033 *pbl = U64_HI(page); 3034 pbl++; 3035 page += QEDF_PAGE_SIZE; 3036 } 3037 /* Set the initial consumer index for cq */ 3038 qedf->global_queues[i]->cq_cons_idx = 0; 3039 } 3040 3041 list = (u32 *)qedf->p_cpuq; 3042 3043 /* 3044 * The list is built as follows: CQ#0 PBL pointer, RQ#0 PBL pointer, 3045 * CQ#1 PBL pointer, RQ#1 PBL pointer, etc. Each PBL pointer points 3046 * to the physical address which contains an array of pointers to 3047 * the physical addresses of the specific queue pages. 
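 * qedf only uses CQs, so the RQ PBL pointer slots are simply written as
 * zero in the loop below.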
3048 */ 3049 for (i = 0; i < qedf->num_queues; i++) { 3050 *list = U64_LO(qedf->global_queues[i]->cq_pbl_dma); 3051 list++; 3052 *list = U64_HI(qedf->global_queues[i]->cq_pbl_dma); 3053 list++; 3054 *list = U64_LO(0); 3055 list++; 3056 *list = U64_HI(0); 3057 list++; 3058 } 3059 3060 return 0; 3061 3062 mem_alloc_failure: 3063 qedf_free_global_queues(qedf); 3064 return status; 3065 } 3066 3067 static int qedf_set_fcoe_pf_param(struct qedf_ctx *qedf) 3068 { 3069 u8 sq_num_pbl_pages; 3070 u32 sq_mem_size; 3071 u32 cq_mem_size; 3072 u32 cq_num_entries; 3073 int rval; 3074 3075 /* 3076 * The number of completion queues/fastpath interrupts/status blocks 3077 * we allocation is the minimum off: 3078 * 3079 * Number of CPUs 3080 * Number allocated by qed for our PCI function 3081 */ 3082 qedf->num_queues = MIN_NUM_CPUS_MSIX(qedf); 3083 3084 QEDF_INFO(&(qedf->dbg_ctx), QEDF_LOG_DISC, "Number of CQs is %d.\n", 3085 qedf->num_queues); 3086 3087 qedf->p_cpuq = dma_alloc_coherent(&qedf->pdev->dev, 3088 qedf->num_queues * sizeof(struct qedf_glbl_q_params), 3089 &qedf->hw_p_cpuq, GFP_KERNEL); 3090 3091 if (!qedf->p_cpuq) { 3092 QEDF_ERR(&(qedf->dbg_ctx), "dma_alloc_coherent failed.\n"); 3093 return 1; 3094 } 3095 3096 rval = qedf_alloc_global_queues(qedf); 3097 if (rval) { 3098 QEDF_ERR(&(qedf->dbg_ctx), "Global queue allocation " 3099 "failed.\n"); 3100 return 1; 3101 } 3102 3103 /* Calculate SQ PBL size in the same manner as in qedf_sq_alloc() */ 3104 sq_mem_size = SQ_NUM_ENTRIES * sizeof(struct fcoe_wqe); 3105 sq_mem_size = ALIGN(sq_mem_size, QEDF_PAGE_SIZE); 3106 sq_num_pbl_pages = (sq_mem_size / QEDF_PAGE_SIZE); 3107 3108 /* Calculate CQ num entries */ 3109 cq_mem_size = FCOE_PARAMS_CQ_NUM_ENTRIES * sizeof(struct fcoe_cqe); 3110 cq_mem_size = ALIGN(cq_mem_size, QEDF_PAGE_SIZE); 3111 cq_num_entries = cq_mem_size / sizeof(struct fcoe_cqe); 3112 3113 memset(&(qedf->pf_params), 0, sizeof(qedf->pf_params)); 3114 3115 /* Setup the value for fcoe PF */ 3116 qedf->pf_params.fcoe_pf_params.num_cons = QEDF_MAX_SESSIONS; 3117 qedf->pf_params.fcoe_pf_params.num_tasks = FCOE_PARAMS_NUM_TASKS; 3118 qedf->pf_params.fcoe_pf_params.glbl_q_params_addr = 3119 (u64)qedf->hw_p_cpuq; 3120 qedf->pf_params.fcoe_pf_params.sq_num_pbl_pages = sq_num_pbl_pages; 3121 3122 qedf->pf_params.fcoe_pf_params.rq_buffer_log_size = 0; 3123 3124 qedf->pf_params.fcoe_pf_params.cq_num_entries = cq_num_entries; 3125 qedf->pf_params.fcoe_pf_params.num_cqs = qedf->num_queues; 3126 3127 /* log_page_size: 12 for 4KB pages */ 3128 qedf->pf_params.fcoe_pf_params.log_page_size = ilog2(QEDF_PAGE_SIZE); 3129 3130 qedf->pf_params.fcoe_pf_params.mtu = 9000; 3131 qedf->pf_params.fcoe_pf_params.gl_rq_pi = QEDF_FCOE_PARAMS_GL_RQ_PI; 3132 qedf->pf_params.fcoe_pf_params.gl_cmd_pi = QEDF_FCOE_PARAMS_GL_CMD_PI; 3133 3134 /* BDQ address and size */ 3135 qedf->pf_params.fcoe_pf_params.bdq_pbl_base_addr[0] = 3136 qedf->bdq_pbl_list_dma; 3137 qedf->pf_params.fcoe_pf_params.bdq_pbl_num_entries[0] = 3138 qedf->bdq_pbl_list_num_entries; 3139 qedf->pf_params.fcoe_pf_params.rq_buffer_size = QEDF_BDQ_BUF_SIZE; 3140 3141 QEDF_INFO(&(qedf->dbg_ctx), QEDF_LOG_DISC, 3142 "bdq_list=%p bdq_pbl_list_dma=%llx bdq_pbl_list_entries=%d.\n", 3143 qedf->bdq_pbl_list, 3144 qedf->pf_params.fcoe_pf_params.bdq_pbl_base_addr[0], 3145 qedf->pf_params.fcoe_pf_params.bdq_pbl_num_entries[0]); 3146 3147 QEDF_INFO(&(qedf->dbg_ctx), QEDF_LOG_DISC, 3148 "cq_num_entries=%d.\n", 3149 qedf->pf_params.fcoe_pf_params.cq_num_entries); 3150 3151 return 0; 3152 } 3153 3154 /* Free DMA coherent memory 
for array of queue pointers we pass to qed */ 3155 static void qedf_free_fcoe_pf_param(struct qedf_ctx *qedf) 3156 { 3157 size_t size = 0; 3158 3159 if (qedf->p_cpuq) { 3160 size = qedf->num_queues * sizeof(struct qedf_glbl_q_params); 3161 dma_free_coherent(&qedf->pdev->dev, size, qedf->p_cpuq, 3162 qedf->hw_p_cpuq); 3163 } 3164 3165 qedf_free_global_queues(qedf); 3166 3167 kfree(qedf->global_queues); 3168 } 3169 3170 /* 3171 * PCI driver functions 3172 */ 3173 3174 static const struct pci_device_id qedf_pci_tbl[] = { 3175 { PCI_DEVICE(PCI_VENDOR_ID_QLOGIC, 0x165c) }, 3176 { PCI_DEVICE(PCI_VENDOR_ID_QLOGIC, 0x8080) }, 3177 {0} 3178 }; 3179 MODULE_DEVICE_TABLE(pci, qedf_pci_tbl); 3180 3181 static struct pci_driver qedf_pci_driver = { 3182 .name = QEDF_MODULE_NAME, 3183 .id_table = qedf_pci_tbl, 3184 .probe = qedf_probe, 3185 .remove = qedf_remove, 3186 .shutdown = qedf_shutdown, 3187 }; 3188 3189 static int __qedf_probe(struct pci_dev *pdev, int mode) 3190 { 3191 int rc = -EINVAL; 3192 struct fc_lport *lport; 3193 struct qedf_ctx *qedf = NULL; 3194 struct Scsi_Host *host; 3195 bool is_vf = false; 3196 struct qed_ll2_params params; 3197 char host_buf[20]; 3198 struct qed_link_params link_params; 3199 int status; 3200 void *task_start, *task_end; 3201 struct qed_slowpath_params slowpath_params; 3202 struct qed_probe_params qed_params; 3203 u16 tmp; 3204 3205 /* 3206 * When doing error recovery we didn't reap the lport so don't try 3207 * to reallocate it. 3208 */ 3209 if (mode != QEDF_MODE_RECOVERY) { 3210 lport = libfc_host_alloc(&qedf_host_template, 3211 sizeof(struct qedf_ctx)); 3212 3213 if (!lport) { 3214 QEDF_ERR(NULL, "Could not allocate lport.\n"); 3215 rc = -ENOMEM; 3216 goto err0; 3217 } 3218 3219 fc_disc_init(lport); 3220 3221 /* Initialize qedf_ctx */ 3222 qedf = lport_priv(lport); 3223 set_bit(QEDF_PROBING, &qedf->flags); 3224 qedf->lport = lport; 3225 qedf->ctlr.lp = lport; 3226 qedf->pdev = pdev; 3227 qedf->dbg_ctx.pdev = pdev; 3228 qedf->dbg_ctx.host_no = lport->host->host_no; 3229 spin_lock_init(&qedf->hba_lock); 3230 INIT_LIST_HEAD(&qedf->fcports); 3231 qedf->curr_conn_id = QEDF_MAX_SESSIONS - 1; 3232 atomic_set(&qedf->num_offloads, 0); 3233 qedf->stop_io_on_error = false; 3234 pci_set_drvdata(pdev, qedf); 3235 init_completion(&qedf->fipvlan_compl); 3236 mutex_init(&qedf->stats_mutex); 3237 mutex_init(&qedf->flush_mutex); 3238 qedf->flogi_pending = 0; 3239 3240 QEDF_INFO(&(qedf->dbg_ctx), QEDF_LOG_INFO, 3241 "QLogic FastLinQ FCoE Module qedf %s, " 3242 "FW %d.%d.%d.%d\n", QEDF_VERSION, 3243 FW_MAJOR_VERSION, FW_MINOR_VERSION, FW_REVISION_VERSION, 3244 FW_ENGINEERING_VERSION); 3245 } else { 3246 /* Init pointers during recovery */ 3247 qedf = pci_get_drvdata(pdev); 3248 set_bit(QEDF_PROBING, &qedf->flags); 3249 lport = qedf->lport; 3250 } 3251 3252 QEDF_INFO(&qedf->dbg_ctx, QEDF_LOG_DISC, "Probe started.\n"); 3253 3254 host = lport->host; 3255 3256 /* Allocate mempool for qedf_io_work structs */ 3257 qedf->io_mempool = mempool_create_slab_pool(QEDF_IO_WORK_MIN, 3258 qedf_io_work_cache); 3259 if (qedf->io_mempool == NULL) { 3260 QEDF_ERR(&(qedf->dbg_ctx), "qedf->io_mempool is NULL.\n"); 3261 goto err1; 3262 } 3263 QEDF_INFO(&(qedf->dbg_ctx), QEDF_LOG_INFO, "qedf->io_mempool=%p.\n", 3264 qedf->io_mempool); 3265 3266 sprintf(host_buf, "qedf_%u_link", 3267 qedf->lport->host->host_no); 3268 qedf->link_update_wq = create_workqueue(host_buf); 3269 INIT_DELAYED_WORK(&qedf->link_update, qedf_handle_link_update); 3270 INIT_DELAYED_WORK(&qedf->link_recovery, qedf_link_recovery); 3271 
INIT_DELAYED_WORK(&qedf->grcdump_work, qedf_wq_grcdump); 3272 INIT_DELAYED_WORK(&qedf->stag_work, qedf_stag_change_work); 3273 qedf->fipvlan_retries = qedf_fipvlan_retries; 3274 /* Set a default prio in case DCBX doesn't converge */ 3275 if (qedf_default_prio > -1) { 3276 /* 3277 * This is the case where we pass a modparam in so we want to 3278 * honor it even if dcbx doesn't converge. 3279 */ 3280 qedf->prio = qedf_default_prio; 3281 } else 3282 qedf->prio = QEDF_DEFAULT_PRIO; 3283 3284 /* 3285 * Common probe. Takes care of basic hardware init and pci_* 3286 * functions. 3287 */ 3288 memset(&qed_params, 0, sizeof(qed_params)); 3289 qed_params.protocol = QED_PROTOCOL_FCOE; 3290 qed_params.dp_module = qedf_dp_module; 3291 qed_params.dp_level = qedf_dp_level; 3292 qed_params.is_vf = is_vf; 3293 qedf->cdev = qed_ops->common->probe(pdev, &qed_params); 3294 if (!qedf->cdev) { 3295 QEDF_ERR(&qedf->dbg_ctx, "common probe failed.\n"); 3296 rc = -ENODEV; 3297 goto err1; 3298 } 3299 3300 /* Learn information crucial for qedf to progress */ 3301 rc = qed_ops->fill_dev_info(qedf->cdev, &qedf->dev_info); 3302 if (rc) { 3303 QEDF_ERR(&(qedf->dbg_ctx), "Failed to dev info.\n"); 3304 goto err1; 3305 } 3306 3307 QEDF_INFO(&qedf->dbg_ctx, QEDF_LOG_DISC, 3308 "dev_info: num_hwfns=%d affin_hwfn_idx=%d.\n", 3309 qedf->dev_info.common.num_hwfns, 3310 qed_ops->common->get_affin_hwfn_idx(qedf->cdev)); 3311 3312 /* queue allocation code should come here 3313 * order should be 3314 * slowpath_start 3315 * status block allocation 3316 * interrupt registration (to get min number of queues) 3317 * set_fcoe_pf_param 3318 * qed_sp_fcoe_func_start 3319 */ 3320 rc = qedf_set_fcoe_pf_param(qedf); 3321 if (rc) { 3322 QEDF_ERR(&(qedf->dbg_ctx), "Cannot set fcoe pf param.\n"); 3323 goto err2; 3324 } 3325 qed_ops->common->update_pf_params(qedf->cdev, &qedf->pf_params); 3326 3327 /* Learn information crucial for qedf to progress */ 3328 rc = qed_ops->fill_dev_info(qedf->cdev, &qedf->dev_info); 3329 if (rc) { 3330 QEDF_ERR(&qedf->dbg_ctx, "Failed to fill dev info.\n"); 3331 goto err2; 3332 } 3333 3334 /* Record BDQ producer doorbell addresses */ 3335 qedf->bdq_primary_prod = qedf->dev_info.primary_dbq_rq_addr; 3336 qedf->bdq_secondary_prod = qedf->dev_info.secondary_bdq_rq_addr; 3337 QEDF_INFO(&(qedf->dbg_ctx), QEDF_LOG_DISC, 3338 "BDQ primary_prod=%p secondary_prod=%p.\n", qedf->bdq_primary_prod, 3339 qedf->bdq_secondary_prod); 3340 3341 qed_ops->register_ops(qedf->cdev, &qedf_cb_ops, qedf); 3342 3343 rc = qedf_prepare_sb(qedf); 3344 if (rc) { 3345 3346 QEDF_ERR(&(qedf->dbg_ctx), "Cannot start slowpath.\n"); 3347 goto err2; 3348 } 3349 3350 /* Start the Slowpath-process */ 3351 slowpath_params.int_mode = QED_INT_MODE_MSIX; 3352 slowpath_params.drv_major = QEDF_DRIVER_MAJOR_VER; 3353 slowpath_params.drv_minor = QEDF_DRIVER_MINOR_VER; 3354 slowpath_params.drv_rev = QEDF_DRIVER_REV_VER; 3355 slowpath_params.drv_eng = QEDF_DRIVER_ENG_VER; 3356 strncpy(slowpath_params.name, "qedf", QED_DRV_VER_STR_SIZE); 3357 rc = qed_ops->common->slowpath_start(qedf->cdev, &slowpath_params); 3358 if (rc) { 3359 QEDF_ERR(&(qedf->dbg_ctx), "Cannot start slowpath.\n"); 3360 goto err2; 3361 } 3362 3363 /* 3364 * update_pf_params needs to be called before and after slowpath 3365 * start 3366 */ 3367 qed_ops->common->update_pf_params(qedf->cdev, &qedf->pf_params); 3368 3369 /* Setup interrupts */ 3370 rc = qedf_setup_int(qedf); 3371 if (rc) { 3372 QEDF_ERR(&qedf->dbg_ctx, "Setup interrupts failed.\n"); 3373 goto err3; 3374 } 3375 3376 rc = 
qed_ops->start(qedf->cdev, &qedf->tasks); 3377 if (rc) { 3378 QEDF_ERR(&(qedf->dbg_ctx), "Cannot start FCoE function.\n"); 3379 goto err4; 3380 } 3381 task_start = qedf_get_task_mem(&qedf->tasks, 0); 3382 task_end = qedf_get_task_mem(&qedf->tasks, MAX_TID_BLOCKS_FCOE - 1); 3383 QEDF_INFO(&(qedf->dbg_ctx), QEDF_LOG_DISC, "Task context start=%p, " 3384 "end=%p block_size=%u.\n", task_start, task_end, 3385 qedf->tasks.size); 3386 3387 /* 3388 * We need to write the number of BDs in the BDQ we've preallocated so 3389 * the f/w will do a prefetch and we'll get an unsolicited CQE when a 3390 * packet arrives. 3391 */ 3392 qedf->bdq_prod_idx = QEDF_BDQ_SIZE; 3393 QEDF_INFO(&(qedf->dbg_ctx), QEDF_LOG_DISC, 3394 "Writing %d to primary and secondary BDQ doorbell registers.\n", 3395 qedf->bdq_prod_idx); 3396 writew(qedf->bdq_prod_idx, qedf->bdq_primary_prod); 3397 tmp = readw(qedf->bdq_primary_prod); 3398 writew(qedf->bdq_prod_idx, qedf->bdq_secondary_prod); 3399 tmp = readw(qedf->bdq_secondary_prod); 3400 3401 qed_ops->common->set_power_state(qedf->cdev, PCI_D0); 3402 3403 /* Now that the dev_info struct has been filled in set the MAC 3404 * address 3405 */ 3406 ether_addr_copy(qedf->mac, qedf->dev_info.common.hw_mac); 3407 QEDF_INFO(&(qedf->dbg_ctx), QEDF_LOG_DISC, "MAC address is %pM.\n", 3408 qedf->mac); 3409 3410 /* 3411 * Set the WWNN and WWPN in the following way: 3412 * 3413 * If the info we get from qed is non-zero then use that to set the 3414 * WWPN and WWNN. Otherwise fall back to use fcoe_wwn_from_mac() based 3415 * on the MAC address. 3416 */ 3417 if (qedf->dev_info.wwnn != 0 && qedf->dev_info.wwpn != 0) { 3418 QEDF_INFO(&(qedf->dbg_ctx), QEDF_LOG_DISC, 3419 "Setting WWPN and WWNN from qed dev_info.\n"); 3420 qedf->wwnn = qedf->dev_info.wwnn; 3421 qedf->wwpn = qedf->dev_info.wwpn; 3422 } else { 3423 QEDF_INFO(&(qedf->dbg_ctx), QEDF_LOG_DISC, 3424 "Setting WWPN and WWNN using fcoe_wwn_from_mac().\n"); 3425 qedf->wwnn = fcoe_wwn_from_mac(qedf->mac, 1, 0); 3426 qedf->wwpn = fcoe_wwn_from_mac(qedf->mac, 2, 0); 3427 } 3428 QEDF_INFO(&(qedf->dbg_ctx), QEDF_LOG_DISC, "WWNN=%016llx " 3429 "WWPN=%016llx.\n", qedf->wwnn, qedf->wwpn); 3430 3431 sprintf(host_buf, "host_%d", host->host_no); 3432 qed_ops->common->set_name(qedf->cdev, host_buf); 3433 3434 /* Allocate cmd mgr */ 3435 qedf->cmd_mgr = qedf_cmd_mgr_alloc(qedf); 3436 if (!qedf->cmd_mgr) { 3437 QEDF_ERR(&(qedf->dbg_ctx), "Failed to allocate cmd mgr.\n"); 3438 rc = -ENOMEM; 3439 goto err5; 3440 } 3441 3442 if (mode != QEDF_MODE_RECOVERY) { 3443 host->transportt = qedf_fc_transport_template; 3444 host->max_lun = qedf_max_lun; 3445 host->max_cmd_len = QEDF_MAX_CDB_LEN; 3446 host->can_queue = FCOE_PARAMS_NUM_TASKS; 3447 rc = scsi_add_host(host, &pdev->dev); 3448 if (rc) { 3449 QEDF_WARN(&qedf->dbg_ctx, 3450 "Error adding Scsi_Host rc=0x%x.\n", rc); 3451 goto err6; 3452 } 3453 } 3454 3455 memset(¶ms, 0, sizeof(params)); 3456 params.mtu = QEDF_LL2_BUF_SIZE; 3457 ether_addr_copy(params.ll2_mac_address, qedf->mac); 3458 3459 /* Start LL2 processing thread */ 3460 snprintf(host_buf, 20, "qedf_%d_ll2", host->host_no); 3461 qedf->ll2_recv_wq = 3462 create_workqueue(host_buf); 3463 if (!qedf->ll2_recv_wq) { 3464 QEDF_ERR(&(qedf->dbg_ctx), "Failed to LL2 workqueue.\n"); 3465 rc = -ENOMEM; 3466 goto err7; 3467 } 3468 3469 #ifdef CONFIG_DEBUG_FS 3470 qedf_dbg_host_init(&(qedf->dbg_ctx), qedf_debugfs_ops, 3471 qedf_dbg_fops); 3472 #endif 3473 3474 /* Start LL2 */ 3475 qed_ops->ll2->register_cb_ops(qedf->cdev, &qedf_ll2_cb_ops, qedf); 3476 rc = 
qed_ops->ll2->start(qedf->cdev, ¶ms); 3477 if (rc) { 3478 QEDF_ERR(&(qedf->dbg_ctx), "Could not start Light L2.\n"); 3479 goto err7; 3480 } 3481 set_bit(QEDF_LL2_STARTED, &qedf->flags); 3482 3483 /* Set initial FIP/FCoE VLAN to NULL */ 3484 qedf->vlan_id = 0; 3485 3486 /* 3487 * No need to setup fcoe_ctlr or fc_lport objects during recovery since 3488 * they were not reaped during the unload process. 3489 */ 3490 if (mode != QEDF_MODE_RECOVERY) { 3491 /* Setup imbedded fcoe controller */ 3492 qedf_fcoe_ctlr_setup(qedf); 3493 3494 /* Setup lport */ 3495 rc = qedf_lport_setup(qedf); 3496 if (rc) { 3497 QEDF_ERR(&(qedf->dbg_ctx), 3498 "qedf_lport_setup failed.\n"); 3499 goto err7; 3500 } 3501 } 3502 3503 sprintf(host_buf, "qedf_%u_timer", qedf->lport->host->host_no); 3504 qedf->timer_work_queue = 3505 create_workqueue(host_buf); 3506 if (!qedf->timer_work_queue) { 3507 QEDF_ERR(&(qedf->dbg_ctx), "Failed to start timer " 3508 "workqueue.\n"); 3509 rc = -ENOMEM; 3510 goto err7; 3511 } 3512 3513 /* DPC workqueue is not reaped during recovery unload */ 3514 if (mode != QEDF_MODE_RECOVERY) { 3515 sprintf(host_buf, "qedf_%u_dpc", 3516 qedf->lport->host->host_no); 3517 qedf->dpc_wq = create_workqueue(host_buf); 3518 } 3519 INIT_DELAYED_WORK(&qedf->recovery_work, qedf_recovery_handler); 3520 3521 /* 3522 * GRC dump and sysfs parameters are not reaped during the recovery 3523 * unload process. 3524 */ 3525 if (mode != QEDF_MODE_RECOVERY) { 3526 qedf->grcdump_size = 3527 qed_ops->common->dbg_all_data_size(qedf->cdev); 3528 if (qedf->grcdump_size) { 3529 rc = qedf_alloc_grc_dump_buf(&qedf->grcdump, 3530 qedf->grcdump_size); 3531 if (rc) { 3532 QEDF_ERR(&(qedf->dbg_ctx), 3533 "GRC Dump buffer alloc failed.\n"); 3534 qedf->grcdump = NULL; 3535 } 3536 3537 QEDF_INFO(&(qedf->dbg_ctx), QEDF_LOG_DISC, 3538 "grcdump: addr=%p, size=%u.\n", 3539 qedf->grcdump, qedf->grcdump_size); 3540 } 3541 qedf_create_sysfs_ctx_attr(qedf); 3542 3543 /* Initialize I/O tracing for this adapter */ 3544 spin_lock_init(&qedf->io_trace_lock); 3545 qedf->io_trace_idx = 0; 3546 } 3547 3548 init_completion(&qedf->flogi_compl); 3549 3550 status = qed_ops->common->update_drv_state(qedf->cdev, true); 3551 if (status) 3552 QEDF_ERR(&(qedf->dbg_ctx), 3553 "Failed to send drv state to MFW.\n"); 3554 3555 memset(&link_params, 0, sizeof(struct qed_link_params)); 3556 link_params.link_up = true; 3557 status = qed_ops->common->set_link(qedf->cdev, &link_params); 3558 if (status) 3559 QEDF_WARN(&(qedf->dbg_ctx), "set_link failed.\n"); 3560 3561 /* Start/restart discovery */ 3562 if (mode == QEDF_MODE_RECOVERY) 3563 fcoe_ctlr_link_up(&qedf->ctlr); 3564 else 3565 fc_fabric_login(lport); 3566 3567 QEDF_INFO(&qedf->dbg_ctx, QEDF_LOG_DISC, "Probe done.\n"); 3568 3569 clear_bit(QEDF_PROBING, &qedf->flags); 3570 3571 /* All good */ 3572 return 0; 3573 3574 err7: 3575 if (qedf->ll2_recv_wq) 3576 destroy_workqueue(qedf->ll2_recv_wq); 3577 fc_remove_host(qedf->lport->host); 3578 scsi_remove_host(qedf->lport->host); 3579 #ifdef CONFIG_DEBUG_FS 3580 qedf_dbg_host_exit(&(qedf->dbg_ctx)); 3581 #endif 3582 err6: 3583 qedf_cmd_mgr_free(qedf->cmd_mgr); 3584 err5: 3585 qed_ops->stop(qedf->cdev); 3586 err4: 3587 qedf_free_fcoe_pf_param(qedf); 3588 qedf_sync_free_irqs(qedf); 3589 err3: 3590 qed_ops->common->slowpath_stop(qedf->cdev); 3591 err2: 3592 qed_ops->common->remove(qedf->cdev); 3593 err1: 3594 scsi_host_put(lport->host); 3595 err0: 3596 if (qedf) { 3597 QEDF_INFO(&qedf->dbg_ctx, QEDF_LOG_DISC, "Probe done.\n"); 3598 3599 clear_bit(QEDF_PROBING, 
&qedf->flags); 3600 } 3601 return rc; 3602 } 3603 3604 static int qedf_probe(struct pci_dev *pdev, const struct pci_device_id *id) 3605 { 3606 return __qedf_probe(pdev, QEDF_MODE_NORMAL); 3607 } 3608 3609 static void __qedf_remove(struct pci_dev *pdev, int mode) 3610 { 3611 struct qedf_ctx *qedf; 3612 int rc; 3613 3614 if (!pdev) { 3615 QEDF_ERR(NULL, "pdev is NULL.\n"); 3616 return; 3617 } 3618 3619 qedf = pci_get_drvdata(pdev); 3620 3621 /* 3622 * Prevent race where we're in board disable work and then try to 3623 * rmmod the module. 3624 */ 3625 if (test_bit(QEDF_UNLOADING, &qedf->flags)) { 3626 QEDF_ERR(&qedf->dbg_ctx, "Already removing PCI function.\n"); 3627 return; 3628 } 3629 3630 if (mode != QEDF_MODE_RECOVERY) 3631 set_bit(QEDF_UNLOADING, &qedf->flags); 3632 3633 /* Logoff the fabric to upload all connections */ 3634 if (mode == QEDF_MODE_RECOVERY) 3635 fcoe_ctlr_link_down(&qedf->ctlr); 3636 else 3637 fc_fabric_logoff(qedf->lport); 3638 3639 if (qedf_wait_for_upload(qedf) == false) 3640 QEDF_ERR(&qedf->dbg_ctx, "Could not upload all sessions.\n"); 3641 3642 #ifdef CONFIG_DEBUG_FS 3643 qedf_dbg_host_exit(&(qedf->dbg_ctx)); 3644 #endif 3645 3646 /* Stop any link update handling */ 3647 cancel_delayed_work_sync(&qedf->link_update); 3648 destroy_workqueue(qedf->link_update_wq); 3649 qedf->link_update_wq = NULL; 3650 3651 if (qedf->timer_work_queue) 3652 destroy_workqueue(qedf->timer_work_queue); 3653 3654 /* Stop Light L2 */ 3655 clear_bit(QEDF_LL2_STARTED, &qedf->flags); 3656 qed_ops->ll2->stop(qedf->cdev); 3657 if (qedf->ll2_recv_wq) 3658 destroy_workqueue(qedf->ll2_recv_wq); 3659 3660 /* Stop fastpath */ 3661 qedf_sync_free_irqs(qedf); 3662 qedf_destroy_sb(qedf); 3663 3664 /* 3665 * During recovery don't destroy OS constructs that represent the 3666 * physical port. 
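 * (The fcoe_ctlr, fc_lport and Scsi_Host are reused by the subsequent
 * __qedf_probe(QEDF_MODE_RECOVERY) call.)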
3667 */ 3668 if (mode != QEDF_MODE_RECOVERY) { 3669 qedf_free_grc_dump_buf(&qedf->grcdump); 3670 qedf_remove_sysfs_ctx_attr(qedf); 3671 3672 /* Remove all SCSI/libfc/libfcoe structures */ 3673 fcoe_ctlr_destroy(&qedf->ctlr); 3674 fc_lport_destroy(qedf->lport); 3675 fc_remove_host(qedf->lport->host); 3676 scsi_remove_host(qedf->lport->host); 3677 } 3678 3679 qedf_cmd_mgr_free(qedf->cmd_mgr); 3680 3681 if (mode != QEDF_MODE_RECOVERY) { 3682 fc_exch_mgr_free(qedf->lport); 3683 fc_lport_free_stats(qedf->lport); 3684 3685 /* Wait for all vports to be reaped */ 3686 qedf_wait_for_vport_destroy(qedf); 3687 } 3688 3689 /* 3690 * Now that all connections have been uploaded we can stop the 3691 * rest of the qed operations 3692 */ 3693 qed_ops->stop(qedf->cdev); 3694 3695 if (mode != QEDF_MODE_RECOVERY) { 3696 if (qedf->dpc_wq) { 3697 /* Stop general DPC handling */ 3698 destroy_workqueue(qedf->dpc_wq); 3699 qedf->dpc_wq = NULL; 3700 } 3701 } 3702 3703 /* Final shutdown for the board */ 3704 qedf_free_fcoe_pf_param(qedf); 3705 if (mode != QEDF_MODE_RECOVERY) { 3706 qed_ops->common->set_power_state(qedf->cdev, PCI_D0); 3707 pci_set_drvdata(pdev, NULL); 3708 } 3709 3710 rc = qed_ops->common->update_drv_state(qedf->cdev, false); 3711 if (rc) 3712 QEDF_ERR(&(qedf->dbg_ctx), 3713 "Failed to send drv state to MFW.\n"); 3714 3715 qed_ops->common->slowpath_stop(qedf->cdev); 3716 qed_ops->common->remove(qedf->cdev); 3717 3718 mempool_destroy(qedf->io_mempool); 3719 3720 /* Only reap the Scsi_host on a real removal */ 3721 if (mode != QEDF_MODE_RECOVERY) 3722 scsi_host_put(qedf->lport->host); 3723 } 3724 3725 static void qedf_remove(struct pci_dev *pdev) 3726 { 3727 /* Check to make sure this function wasn't already disabled */ 3728 if (!atomic_read(&pdev->enable_cnt)) 3729 return; 3730 3731 __qedf_remove(pdev, QEDF_MODE_NORMAL); 3732 } 3733 3734 void qedf_wq_grcdump(struct work_struct *work) 3735 { 3736 struct qedf_ctx *qedf = 3737 container_of(work, struct qedf_ctx, grcdump_work.work); 3738 3739 QEDF_ERR(&(qedf->dbg_ctx), "Collecting GRC dump.\n"); 3740 qedf_capture_grc_dump(qedf); 3741 } 3742 3743 /* 3744 * Protocol TLV handler 3745 */ 3746 void qedf_get_protocol_tlv_data(void *dev, void *data) 3747 { 3748 struct qedf_ctx *qedf = dev; 3749 struct qed_mfw_tlv_fcoe *fcoe = data; 3750 struct fc_lport *lport; 3751 struct Scsi_Host *host; 3752 struct fc_host_attrs *fc_host; 3753 struct fc_host_statistics *hst; 3754 3755 if (!qedf) { 3756 QEDF_ERR(NULL, "qedf is null.\n"); 3757 return; 3758 } 3759 3760 if (test_bit(QEDF_PROBING, &qedf->flags)) { 3761 QEDF_ERR(&qedf->dbg_ctx, "Function is still probing.\n"); 3762 return; 3763 } 3764 3765 lport = qedf->lport; 3766 host = lport->host; 3767 fc_host = shost_to_fc_host(host); 3768 3769 /* Force a refresh of the fc_host stats including offload stats */ 3770 hst = qedf_fc_get_host_stats(host); 3771 3772 fcoe->qos_pri_set = true; 3773 fcoe->qos_pri = 3; /* Hard coded to 3 in driver */ 3774 3775 fcoe->ra_tov_set = true; 3776 fcoe->ra_tov = lport->r_a_tov; 3777 3778 fcoe->ed_tov_set = true; 3779 fcoe->ed_tov = lport->e_d_tov; 3780 3781 fcoe->npiv_state_set = true; 3782 fcoe->npiv_state = 1; /* NPIV always enabled */ 3783 3784 fcoe->num_npiv_ids_set = true; 3785 fcoe->num_npiv_ids = fc_host->npiv_vports_inuse; 3786 3787 /* Certain attributes we only want to set if we've selected an FCF */ 3788 if (qedf->ctlr.sel_fcf) { 3789 fcoe->switch_name_set = true; 3790 u64_to_wwn(qedf->ctlr.sel_fcf->switch_name, fcoe->switch_name); 3791 } 3792 3793 fcoe->port_state_set = true; 3794 /* 
void qedf_get_protocol_tlv_data(void *dev, void *data)
{
	struct qedf_ctx *qedf = dev;
	struct qed_mfw_tlv_fcoe *fcoe = data;
	struct fc_lport *lport;
	struct Scsi_Host *host;
	struct fc_host_attrs *fc_host;
	struct fc_host_statistics *hst;

	if (!qedf) {
		QEDF_ERR(NULL, "qedf is null.\n");
		return;
	}

	if (test_bit(QEDF_PROBING, &qedf->flags)) {
		QEDF_ERR(&qedf->dbg_ctx, "Function is still probing.\n");
		return;
	}

	lport = qedf->lport;
	host = lport->host;
	fc_host = shost_to_fc_host(host);

	/* Force a refresh of the fc_host stats including offload stats */
	hst = qedf_fc_get_host_stats(host);

	fcoe->qos_pri_set = true;
	fcoe->qos_pri = 3; /* Hard coded to 3 in driver */

	fcoe->ra_tov_set = true;
	fcoe->ra_tov = lport->r_a_tov;

	fcoe->ed_tov_set = true;
	fcoe->ed_tov = lport->e_d_tov;

	fcoe->npiv_state_set = true;
	fcoe->npiv_state = 1; /* NPIV always enabled */

	fcoe->num_npiv_ids_set = true;
	fcoe->num_npiv_ids = fc_host->npiv_vports_inuse;

	/* Certain attributes we only want to set if we've selected an FCF */
	if (qedf->ctlr.sel_fcf) {
		fcoe->switch_name_set = true;
		u64_to_wwn(qedf->ctlr.sel_fcf->switch_name, fcoe->switch_name);
	}

	fcoe->port_state_set = true;
	/* For qedf we're either link down or fabric attach */
	if (lport->link_up)
		fcoe->port_state = QED_MFW_TLV_PORT_STATE_FABRIC;
	else
		fcoe->port_state = QED_MFW_TLV_PORT_STATE_OFFLINE;

	fcoe->link_failures_set = true;
	fcoe->link_failures = (u16)hst->link_failure_count;

	fcoe->fcoe_txq_depth_set = true;
	fcoe->fcoe_rxq_depth_set = true;
	fcoe->fcoe_rxq_depth = FCOE_PARAMS_NUM_TASKS;
	fcoe->fcoe_txq_depth = FCOE_PARAMS_NUM_TASKS;

	fcoe->fcoe_rx_frames_set = true;
	fcoe->fcoe_rx_frames = hst->rx_frames;

	fcoe->fcoe_tx_frames_set = true;
	fcoe->fcoe_tx_frames = hst->tx_frames;

	fcoe->fcoe_rx_bytes_set = true;
	fcoe->fcoe_rx_bytes = hst->fcp_input_megabytes * 1000000;

	fcoe->fcoe_tx_bytes_set = true;
	fcoe->fcoe_tx_bytes = hst->fcp_output_megabytes * 1000000;

	fcoe->crc_count_set = true;
	fcoe->crc_count = hst->invalid_crc_count;

	fcoe->tx_abts_set = true;
	fcoe->tx_abts = hst->fcp_packet_aborts;

	fcoe->tx_lun_rst_set = true;
	fcoe->tx_lun_rst = qedf->lun_resets;

	fcoe->abort_task_sets_set = true;
	fcoe->abort_task_sets = qedf->packet_aborts;

	fcoe->scsi_busy_set = true;
	fcoe->scsi_busy = qedf->busy;

	fcoe->scsi_tsk_full_set = true;
	fcoe->scsi_tsk_full = qedf->task_set_fulls;
}

/* Deferred work function to perform soft context reset on STAG change */
void qedf_stag_change_work(struct work_struct *work)
{
	struct qedf_ctx *qedf =
	    container_of(work, struct qedf_ctx, stag_work.work);

	if (!qedf) {
		/* Don't dereference qedf in the error message; it is NULL here */
		QEDF_ERR(NULL, "qedf is NULL");
		return;
	}
	QEDF_ERR(&qedf->dbg_ctx, "Performing software context reset.\n");
	qedf_ctx_soft_reset(qedf->lport);
}

static void qedf_shutdown(struct pci_dev *pdev)
{
	__qedf_remove(pdev, QEDF_MODE_NORMAL);
}

/*
 * Recovery handler code
 */
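/*
 * qedf_schedule_recovery_handler() is called by the qed core when the
 * device needs to be recovered (e.g. after a firmware/hardware error).
 * The deferred handler removes and re-probes the function in
 * QEDF_MODE_RECOVERY, which keeps the host-visible objects (Scsi_Host,
 * lport, vports) intact across the reset.
 */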
static void qedf_schedule_recovery_handler(void *dev)
{
	struct qedf_ctx *qedf = dev;

	QEDF_ERR(&qedf->dbg_ctx, "Recovery handler scheduled.\n");
	schedule_delayed_work(&qedf->recovery_work, 0);
}

static void qedf_recovery_handler(struct work_struct *work)
{
	struct qedf_ctx *qedf =
	    container_of(work, struct qedf_ctx, recovery_work.work);

	if (test_and_set_bit(QEDF_IN_RECOVERY, &qedf->flags))
		return;

	/*
	 * Call common_ops->recovery_prolog to allow the MFW to quiesce
	 * any PCI transactions.
	 */
	qed_ops->common->recovery_prolog(qedf->cdev);

	QEDF_ERR(&qedf->dbg_ctx, "Recovery work start.\n");
	__qedf_remove(qedf->pdev, QEDF_MODE_RECOVERY);

	/*
	 * Reset link and dcbx to down state since we will not get a link down
	 * event from the MFW but calling __qedf_remove will essentially be a
	 * link down event.
	 */
	atomic_set(&qedf->link_state, QEDF_LINK_DOWN);
	atomic_set(&qedf->dcbx, QEDF_DCBX_PENDING);
	__qedf_probe(qedf->pdev, QEDF_MODE_RECOVERY);
	clear_bit(QEDF_IN_RECOVERY, &qedf->flags);
	QEDF_ERR(&qedf->dbg_ctx, "Recovery work complete.\n");
}

/* Generic TLV data callback */
void qedf_get_generic_tlv_data(void *dev, struct qed_generic_tlvs *data)
{
	struct qedf_ctx *qedf;

	if (!dev) {
		QEDF_INFO(NULL, QEDF_LOG_EVT,
			  "dev is NULL so ignoring get_generic_tlv_data request.\n");
		return;
	}
	qedf = (struct qedf_ctx *)dev;

	memset(data, 0, sizeof(struct qed_generic_tlvs));
	ether_addr_copy(data->mac[0], qedf->mac);
}

/*
 * Module Init/Remove
 */

static int __init qedf_init(void)
{
	int ret;

	/* If debug=1 passed, set the default log mask */
	if (qedf_debug == QEDF_LOG_DEFAULT)
		qedf_debug = QEDF_DEFAULT_LOG_MASK;

	/*
	 * Check that default prio for FIP/FCoE traffic is between 0..7 if a
	 * value has been set
	 */
	if (qedf_default_prio > -1)
		if (qedf_default_prio > 7) {
			qedf_default_prio = QEDF_DEFAULT_PRIO;
			QEDF_ERR(NULL, "FCoE/FIP priority out of range, resetting to %d.\n",
				 QEDF_DEFAULT_PRIO);
		}

	/* Print driver banner */
	QEDF_INFO(NULL, QEDF_LOG_INFO, "%s v%s.\n", QEDF_DESCR,
		  QEDF_VERSION);

	/* Create kmem_cache for qedf_io_work structs */
	qedf_io_work_cache = kmem_cache_create("qedf_io_work_cache",
	    sizeof(struct qedf_io_work), 0, SLAB_HWCACHE_ALIGN, NULL);
	if (qedf_io_work_cache == NULL) {
		QEDF_ERR(NULL, "qedf_io_work_cache is NULL.\n");
		goto err1;
	}
	QEDF_INFO(NULL, QEDF_LOG_DISC, "qedf_io_work_cache=%p.\n",
	    qedf_io_work_cache);

	qed_ops = qed_get_fcoe_ops();
	if (!qed_ops) {
		QEDF_ERR(NULL, "Failed to get qed fcoe operations\n");
		goto err1;
	}

#ifdef CONFIG_DEBUG_FS
	qedf_dbg_init("qedf");
#endif

	qedf_fc_transport_template =
	    fc_attach_transport(&qedf_fc_transport_fn);
	if (!qedf_fc_transport_template) {
		QEDF_ERR(NULL, "Could not register with FC transport\n");
		goto err2;
	}

	qedf_fc_vport_transport_template =
	    fc_attach_transport(&qedf_fc_vport_transport_fn);
	if (!qedf_fc_vport_transport_template) {
		QEDF_ERR(NULL, "Could not register vport template with FC "
			 "transport\n");
		goto err3;
	}

	qedf_io_wq = create_workqueue("qedf_io_wq");
	if (!qedf_io_wq) {
		QEDF_ERR(NULL, "Could not create qedf_io_wq.\n");
		goto err4;
	}

	qedf_cb_ops.get_login_failures = qedf_get_login_failures;

	ret = pci_register_driver(&qedf_pci_driver);
	if (ret) {
		QEDF_ERR(NULL, "Failed to register driver\n");
		goto err5;
	}

	return 0;

err5:
	destroy_workqueue(qedf_io_wq);
err4:
	fc_release_transport(qedf_fc_vport_transport_template);
err3:
	fc_release_transport(qedf_fc_transport_template);
err2:
#ifdef CONFIG_DEBUG_FS
	qedf_dbg_exit();
#endif
	qed_put_fcoe_ops();
err1:
	/* kmem_cache_destroy() is a no-op if the cache was never created */
	kmem_cache_destroy(qedf_io_work_cache);
	return -EINVAL;
}
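/*
 * Module unload: undo qedf_init() in reverse order.  Unregistering the
 * PCI driver first runs qedf_remove() for any bound functions before the
 * shared workqueue, the FC transport templates and the io_work cache are
 * torn down.
 */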
static void __exit qedf_cleanup(void)
{
	pci_unregister_driver(&qedf_pci_driver);

	destroy_workqueue(qedf_io_wq);

	fc_release_transport(qedf_fc_vport_transport_template);
	fc_release_transport(qedf_fc_transport_template);
#ifdef CONFIG_DEBUG_FS
	qedf_dbg_exit();
#endif
	qed_put_fcoe_ops();

	kmem_cache_destroy(qedf_io_work_cache);
}

MODULE_LICENSE("GPL");
MODULE_DESCRIPTION("QLogic FastLinQ 4xxxx FCoE Module");
MODULE_AUTHOR("QLogic Corporation");
MODULE_VERSION(QEDF_VERSION);
module_init(qedf_init);
module_exit(qedf_cleanup);