Lines Matching +full:supports +full:- +full:cqe

1 // SPDX-License-Identifier: GPL-2.0-only
4 * Copyright (c) 2016-2018 Cavium Inc.
59 static int qedf_default_prio = -1;
82 "supports. (default 0xffffffff)");
107 "during probe (0-3: 0 more verbose).");
126 vlan_id_tmp = vlan_id | (qedf->prio << VLAN_PRIO_SHIFT); in qedf_set_vlan_id()
127 qedf->vlan_id = vlan_id_tmp; in qedf_set_vlan_id()
128 QEDF_INFO(&qedf->dbg_ctx, QEDF_LOG_DISC, in qedf_set_vlan_id()
130 vlan_id_tmp, qedf->prio); in qedf_set_vlan_id()
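
A minimal standalone sketch of the tag composition above, assuming VLAN_PRIO_SHIFT is 13 (its value in <linux/if_vlan.h>) and using hypothetical example values:

#include <stdint.h>
#include <stdio.h>

#define EXAMPLE_VLAN_PRIO_SHIFT 13	/* matches VLAN_PRIO_SHIFT */

int main(void)
{
	uint16_t vlan_id = 1002;	/* e.g. a VID learned via FIP VLAN discovery */
	uint16_t prio = 3;		/* e.g. a default FCoE priority */
	uint16_t tag = vlan_id | (prio << EXAMPLE_VLAN_PRIO_SHIFT);

	/* prints tag=0x63ea vid=1002 pcp=3 */
	printf("tag=0x%04x vid=%u pcp=%u\n", tag, tag & 0x0fff, tag >> 13);
	return 0;
}
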
137 while (qedf->fipvlan_retries--) { in qedf_initiate_fipvlan_req()
139 if (atomic_read(&qedf->link_state) == QEDF_LINK_DOWN) { in qedf_initiate_fipvlan_req()
140 QEDF_ERR(&qedf->dbg_ctx, "Link not up.\n"); in qedf_initiate_fipvlan_req()
144 if (test_bit(QEDF_UNLOADING, &qedf->flags)) { in qedf_initiate_fipvlan_req()
145 QEDF_ERR(&qedf->dbg_ctx, "Driver unloading.\n"); in qedf_initiate_fipvlan_req()
149 if (qedf->vlan_id > 0) { in qedf_initiate_fipvlan_req()
150 QEDF_INFO(&qedf->dbg_ctx, QEDF_LOG_DISC, in qedf_initiate_fipvlan_req()
152 qedf->vlan_id); in qedf_initiate_fipvlan_req()
153 if (atomic_read(&qedf->link_state) == QEDF_LINK_UP) in qedf_initiate_fipvlan_req()
154 fcoe_ctlr_link_up(&qedf->ctlr); in qedf_initiate_fipvlan_req()
158 QEDF_INFO(&(qedf->dbg_ctx), QEDF_LOG_DISC, in qedf_initiate_fipvlan_req()
159 "Retry %d.\n", qedf->fipvlan_retries); in qedf_initiate_fipvlan_req()
160 init_completion(&qedf->fipvlan_compl); in qedf_initiate_fipvlan_req()
162 wait_for_completion_timeout(&qedf->fipvlan_compl, 1 * HZ); in qedf_initiate_fipvlan_req()
174 QEDF_INFO(&qedf->dbg_ctx, QEDF_LOG_DISC, "Entered. link_state=%d.\n", in qedf_handle_link_update()
175 atomic_read(&qedf->link_state)); in qedf_handle_link_update()
177 if (atomic_read(&qedf->link_state) == QEDF_LINK_UP) { in qedf_handle_link_update()
182 if (atomic_read(&qedf->link_state) != QEDF_LINK_UP) { in qedf_handle_link_update()
183 QEDF_INFO(&qedf->dbg_ctx, QEDF_LOG_DISC, in qedf_handle_link_update()
185 qedf->vlan_id = 0; in qedf_handle_link_update()
194 QEDF_WARN(&(qedf->dbg_ctx), "Did not receive FIP VLAN " in qedf_handle_link_update()
203 eth_zero_addr(qedf->data_src_addr); in qedf_handle_link_update()
204 fcoe_ctlr_link_up(&qedf->ctlr); in qedf_handle_link_update()
205 } else if (atomic_read(&qedf->link_state) == QEDF_LINK_DOWN) { in qedf_handle_link_update()
211 atomic_set(&qedf->link_down_tmo_valid, 0); in qedf_handle_link_update()
212 QEDF_INFO(&(qedf->dbg_ctx), QEDF_LOG_DISC, in qedf_handle_link_update()
214 fcoe_ctlr_link_down(&qedf->ctlr); in qedf_handle_link_update()
216 QEDF_ERR(&qedf->dbg_ctx, in qedf_handle_link_update()
219 qedf->fipvlan_retries = qedf_fipvlan_retries; in qedf_handle_link_update()
234 granted_mac = fr_cb(fp)->granted_mac; in qedf_set_data_src_addr()
240 * If granted_mac is non-zero, we use that. in qedf_set_data_src_addr()
242 * the sel_fcf->fc_map and the d_id of the FLOGI frame. in qedf_set_data_src_addr()
243 * If sel_fcf->fc_map is 0 then we use the default FCF-MAC plus the in qedf_set_data_src_addr()
247 ether_addr_copy(qedf->data_src_addr, granted_mac); in qedf_set_data_src_addr()
249 } else if (qedf->ctlr.sel_fcf->fc_map != 0) { in qedf_set_data_src_addr()
250 hton24(fc_map, qedf->ctlr.sel_fcf->fc_map); in qedf_set_data_src_addr()
251 qedf->data_src_addr[0] = fc_map[0]; in qedf_set_data_src_addr()
252 qedf->data_src_addr[1] = fc_map[1]; in qedf_set_data_src_addr()
253 qedf->data_src_addr[2] = fc_map[2]; in qedf_set_data_src_addr()
254 qedf->data_src_addr[3] = fh->fh_d_id[0]; in qedf_set_data_src_addr()
255 qedf->data_src_addr[4] = fh->fh_d_id[1]; in qedf_set_data_src_addr()
256 qedf->data_src_addr[5] = fh->fh_d_id[2]; in qedf_set_data_src_addr()
259 fc_fcoe_set_mac(qedf->data_src_addr, fh->fh_d_id); in qedf_set_data_src_addr()
263 QEDF_INFO(&(qedf->dbg_ctx), QEDF_LOG_DISC, in qedf_set_data_src_addr()
264 "QEDF data_src_mac=%pM method=%d.\n", qedf->data_src_addr, method); in qedf_set_data_src_addr()
271 struct fc_lport *lport = exch->lp; in qedf_flogi_resp()
284 QEDF_INFO(&(qedf->dbg_ctx), QEDF_LOG_ELS, in qedf_flogi_resp()
291 qedf->flogi_failed++; in qedf_flogi_resp()
295 qedf->flogi_pending = 0; in qedf_flogi_resp()
299 complete(&qedf->flogi_compl); in qedf_flogi_resp()
320 qedf->flogi_cnt++; in qedf_elsct_send()
321 qedf->flogi_pending++; in qedf_elsct_send()
323 if (test_bit(QEDF_UNLOADING, &qedf->flags)) { in qedf_elsct_send()
324 QEDF_ERR(&qedf->dbg_ctx, "Driver unloading\n"); in qedf_elsct_send()
325 qedf->flogi_pending = 0; in qedf_elsct_send()
328 if (qedf->flogi_pending >= QEDF_FLOGI_RETRY_CNT) { in qedf_elsct_send()
329 schedule_delayed_work(&qedf->stag_work, 2); in qedf_elsct_send()
345 lport = qedf->lport; in qedf_send_flogi()
347 if (!lport->tt.elsct_send) { in qedf_send_flogi()
348 QEDF_ERR(&qedf->dbg_ctx, "tt.elsct_send not set.\n"); in qedf_send_flogi()
349 return -EINVAL; in qedf_send_flogi()
354 QEDF_ERR(&(qedf->dbg_ctx), "fc_frame_alloc failed.\n"); in qedf_send_flogi()
355 return -ENOMEM; in qedf_send_flogi()
358 QEDF_INFO(&(qedf->dbg_ctx), QEDF_LOG_ELS, in qedf_send_flogi()
360 lport->tt.elsct_send(lport, FC_FID_FLOGI, fp, in qedf_send_flogi()
361 ELS_FLOGI, qedf_flogi_resp, lport, lport->r_a_tov); in qedf_send_flogi()
363 init_completion(&qedf->flogi_compl); in qedf_send_flogi()
377 struct fc_lport *lport = qedf->lport; in qedf_link_recovery()
386 QEDF_INFO(&(qedf->dbg_ctx), QEDF_LOG_DISC, in qedf_link_recovery()
393 qedf->ctlr.state = FIP_ST_LINK_WAIT; in qedf_link_recovery()
394 fcoe_ctlr_link_down(&qedf->ctlr); in qedf_link_recovery()
400 fcoe_ctlr_link_up(&qedf->ctlr); in qedf_link_recovery()
403 qedf->fipvlan_retries = qedf_fipvlan_retries; in qedf_link_recovery()
414 if (qedf->ctlr.sel_fcf) { in qedf_link_recovery()
415 QEDF_INFO(&(qedf->dbg_ctx), QEDF_LOG_DISC, in qedf_link_recovery()
420 retries--; in qedf_link_recovery()
424 QEDF_ERR(&(qedf->dbg_ctx), "Exhausted retries waiting for " in qedf_link_recovery()
434 i = wait_for_completion_timeout(&qedf->flogi_compl, in qedf_link_recovery()
435 qedf->lport->r_a_tov); in qedf_link_recovery()
437 QEDF_ERR(&(qedf->dbg_ctx), "FLOGI timed out.\n"); in qedf_link_recovery()
442 * Call lport->tt.rport_login which will cause libfc to send an in qedf_link_recovery()
445 mutex_lock(&lport->disc.disc_mutex); in qedf_link_recovery()
446 list_for_each_entry_rcu(rdata, &lport->disc.rports, peers) { in qedf_link_recovery()
447 if (kref_get_unless_zero(&rdata->kref)) { in qedf_link_recovery()
449 kref_put(&rdata->kref, fc_rport_destroy); in qedf_link_recovery()
452 mutex_unlock(&lport->disc.disc_mutex); in qedf_link_recovery()
459 struct fc_lport *lport = qedf->lport; in qedf_update_link_speed()
461 lport->link_speed = FC_PORTSPEED_UNKNOWN; in qedf_update_link_speed()
462 lport->link_supported_speeds = FC_PORTSPEED_UNKNOWN; in qedf_update_link_speed()
465 switch (link->speed) { in qedf_update_link_speed()
467 lport->link_speed = FC_PORTSPEED_10GBIT; in qedf_update_link_speed()
470 lport->link_speed = FC_PORTSPEED_25GBIT; in qedf_update_link_speed()
473 lport->link_speed = FC_PORTSPEED_40GBIT; in qedf_update_link_speed()
476 lport->link_speed = FC_PORTSPEED_50GBIT; in qedf_update_link_speed()
479 lport->link_speed = FC_PORTSPEED_100GBIT; in qedf_update_link_speed()
482 lport->link_speed = FC_PORTSPEED_20GBIT; in qedf_update_link_speed()
485 lport->link_speed = FC_PORTSPEED_UNKNOWN; in qedf_update_link_speed()
504 if (linkmode_intersects(link->supported_caps, sup_caps)) in qedf_update_link_speed()
505 lport->link_supported_speeds |= FC_PORTSPEED_10GBIT; in qedf_update_link_speed()
512 if (linkmode_intersects(link->supported_caps, sup_caps)) in qedf_update_link_speed()
513 lport->link_supported_speeds |= FC_PORTSPEED_25GBIT; in qedf_update_link_speed()
521 if (linkmode_intersects(link->supported_caps, sup_caps)) in qedf_update_link_speed()
522 lport->link_supported_speeds |= FC_PORTSPEED_40GBIT; in qedf_update_link_speed()
529 if (linkmode_intersects(link->supported_caps, sup_caps)) in qedf_update_link_speed()
530 lport->link_supported_speeds |= FC_PORTSPEED_50GBIT; in qedf_update_link_speed()
538 if (linkmode_intersects(link->supported_caps, sup_caps)) in qedf_update_link_speed()
539 lport->link_supported_speeds |= FC_PORTSPEED_100GBIT; in qedf_update_link_speed()
544 if (linkmode_intersects(link->supported_caps, sup_caps)) in qedf_update_link_speed()
545 lport->link_supported_speeds |= FC_PORTSPEED_20GBIT; in qedf_update_link_speed()
547 if (lport->host && lport->host->shost_data) in qedf_update_link_speed()
548 fc_host_supported_speeds(lport->host) = in qedf_update_link_speed()
549 lport->link_supported_speeds; in qedf_update_link_speed()
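
The per-speed capability checks above follow the kernel linkmode pattern; the linkmode_set_bit() calls that build each mask are elided from this listing. A hedged sketch of how one such mask might be built and tested (the specific mode bits chosen are illustrative, not the driver's exact set):

/* Sketch only; needs <linux/ethtool.h> and <linux/linkmode.h>. */
static bool example_supports_10g(const unsigned long *supported_caps)
{
	__ETHTOOL_DECLARE_LINK_MODE_MASK(sup_caps);

	linkmode_zero(sup_caps);
	linkmode_set_bit(ETHTOOL_LINK_MODE_10000baseT_Full_BIT, sup_caps);
	linkmode_set_bit(ETHTOOL_LINK_MODE_10000baseKR_Full_BIT, sup_caps);

	/* true if the port advertises any of the 10G modes in the mask */
	return linkmode_intersects(supported_caps, sup_caps);
}
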
558 qed_ops->common->get_link(qedf->cdev, &link); in qedf_bw_update()
560 if (test_bit(QEDF_UNLOADING, &qedf->flags)) { in qedf_bw_update()
561 QEDF_ERR(&qedf->dbg_ctx, in qedf_bw_update()
567 if (atomic_read(&qedf->link_state) == QEDF_LINK_UP) in qedf_bw_update()
570 QEDF_ERR(&qedf->dbg_ctx, in qedf_bw_update()
574 QEDF_ERR(&qedf->dbg_ctx, "link_up is not set.\n"); in qedf_bw_update()
586 if (test_bit(QEDF_UNLOADING, &qedf->flags)) { in qedf_link_update()
587 QEDF_ERR(&qedf->dbg_ctx, in qedf_link_update()
592 if (link->link_up) { in qedf_link_update()
593 if (atomic_read(&qedf->link_state) == QEDF_LINK_UP) { in qedf_link_update()
594 QEDF_INFO((&qedf->dbg_ctx), QEDF_LOG_DISC, in qedf_link_update()
598 QEDF_ERR(&(qedf->dbg_ctx), "LINK UP (%d GB/s).\n", in qedf_link_update()
599 link->speed / 1000); in qedf_link_update()
602 cancel_delayed_work(&qedf->link_update); in qedf_link_update()
604 atomic_set(&qedf->link_state, QEDF_LINK_UP); in qedf_link_update()
607 if (atomic_read(&qedf->dcbx) == QEDF_DCBX_DONE || in qedf_link_update()
609 QEDF_INFO(&(qedf->dbg_ctx), QEDF_LOG_DISC, in qedf_link_update()
611 if (atomic_read(&qedf->link_down_tmo_valid) > 0) in qedf_link_update()
612 queue_delayed_work(qedf->link_update_wq, in qedf_link_update()
613 &qedf->link_recovery, 0); in qedf_link_update()
615 queue_delayed_work(qedf->link_update_wq, in qedf_link_update()
616 &qedf->link_update, 0); in qedf_link_update()
617 atomic_set(&qedf->link_down_tmo_valid, 0); in qedf_link_update()
621 QEDF_ERR(&(qedf->dbg_ctx), "LINK DOWN.\n"); in qedf_link_update()
623 atomic_set(&qedf->link_state, QEDF_LINK_DOWN); in qedf_link_update()
624 atomic_set(&qedf->dcbx, QEDF_DCBX_PENDING); in qedf_link_update()
630 QEDF_INFO(&(qedf->dbg_ctx), QEDF_LOG_DISC, in qedf_link_update()
632 atomic_set(&qedf->link_down_tmo_valid, 1); in qedf_link_update()
634 qedf->vlan_id = 0; in qedf_link_update()
636 queue_delayed_work(qedf->link_update_wq, &qedf->link_update, in qedf_link_update()
647 QEDF_ERR(&(qedf->dbg_ctx), "DCBx event valid=%d enabled=%d fcoe " in qedf_dcbx_handler()
648 "prio=%d.\n", get->operational.valid, get->operational.enabled, in qedf_dcbx_handler()
649 get->operational.app_prio.fcoe); in qedf_dcbx_handler()
651 if (get->operational.enabled && get->operational.valid) { in qedf_dcbx_handler()
653 if (atomic_read(&qedf->dcbx) == QEDF_DCBX_DONE) { in qedf_dcbx_handler()
654 QEDF_INFO(&(qedf->dbg_ctx), QEDF_LOG_DISC, in qedf_dcbx_handler()
659 atomic_set(&qedf->dcbx, QEDF_DCBX_DONE); in qedf_dcbx_handler()
668 tmp_prio = get->operational.app_prio.fcoe; in qedf_dcbx_handler()
669 if (qedf_default_prio > -1) in qedf_dcbx_handler()
670 qedf->prio = qedf_default_prio; in qedf_dcbx_handler()
672 QEDF_INFO(&(qedf->dbg_ctx), QEDF_LOG_DISC, in qedf_dcbx_handler()
675 qedf->prio = QEDF_DEFAULT_PRIO; in qedf_dcbx_handler()
677 qedf->prio = tmp_prio; in qedf_dcbx_handler()
679 if (atomic_read(&qedf->link_state) == QEDF_LINK_UP && in qedf_dcbx_handler()
681 if (atomic_read(&qedf->link_down_tmo_valid) > 0) in qedf_dcbx_handler()
682 queue_delayed_work(qedf->link_update_wq, in qedf_dcbx_handler()
683 &qedf->link_recovery, 0); in qedf_dcbx_handler()
685 queue_delayed_work(qedf->link_update_wq, in qedf_dcbx_handler()
686 &qedf->link_update, 0); in qedf_dcbx_handler()
687 atomic_set(&qedf->link_down_tmo_valid, 0); in qedf_dcbx_handler()
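
A standalone sketch of the priority selection visible above: the qedf_default_prio module parameter overrides DCBX; otherwise the DCBX FCoE application priority is used, with an assumed fall-back to the driver default when the advertised value is outside the 802.1p range 0-7. Names and the default value are illustrative:

#include <stdio.h>

#define EXAMPLE_DEFAULT_PRIO 3

static int pick_fcoe_prio(int module_param_prio, int dcbx_app_prio)
{
	if (module_param_prio > -1)
		return module_param_prio;	/* user override wins */
	if (dcbx_app_prio < 0 || dcbx_app_prio > 7)
		return EXAMPLE_DEFAULT_PRIO;	/* out of range: use default */
	return dcbx_app_prio;			/* use DCBX FCoE app priority */
}

int main(void)
{
	printf("%d\n", pick_fcoe_prio(-1, 3));	/* 3: DCBX value */
	printf("%d\n", pick_fcoe_prio(5, 3));	/* 5: module parameter */
	printf("%d\n", pick_fcoe_prio(-1, 42));	/* 3: out of range -> default */
	return 0;
}
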
698 return qedf->flogi_failed; in qedf_get_login_failures()
725 struct fc_rport *rport = starget_to_rport(scsi_target(sc_cmd->device)); in qedf_eh_abort()
729 struct fc_rport_libfc_priv *rp = rport->dd_data; in qedf_eh_abort()
738 lport = shost_priv(sc_cmd->device->host); in qedf_eh_abort()
741 /* rport and tgt are allocated together, so tgt should be non-NULL */ in qedf_eh_abort()
743 rdata = fcport->rdata; in qedf_eh_abort()
744 if (!rdata || !kref_get_unless_zero(&rdata->kref)) { in qedf_eh_abort()
745 QEDF_ERR(&qedf->dbg_ctx, "stale rport, sc_cmd=%p\n", sc_cmd); in qedf_eh_abort()
751 io_req = qedf_priv(sc_cmd)->io_req; in qedf_eh_abort()
753 QEDF_ERR(&qedf->dbg_ctx, in qedf_eh_abort()
755 sc_cmd, sc_cmd->cmnd[0], in qedf_eh_abort()
756 rdata->ids.port_id); in qedf_eh_abort()
761 rval = kref_get_unless_zero(&io_req->refcount); /* ID: 005 */ in qedf_eh_abort()
766 if (!rval || io_req->sc_cmd != sc_cmd) { in qedf_eh_abort()
767 QEDF_ERR(&qedf->dbg_ctx, in qedf_eh_abort()
768 "Freed/Incorrect io_req, io_req->sc_cmd=%p, sc_cmd=%p, port_id=%06x, bailing out.\n", in qedf_eh_abort()
769 io_req->sc_cmd, sc_cmd, rdata->ids.port_id); in qedf_eh_abort()
775 refcount = kref_read(&io_req->refcount); in qedf_eh_abort()
776 QEDF_ERR(&qedf->dbg_ctx, in qedf_eh_abort()
778 io_req, io_req->xid, sc_cmd, sc_cmd->cmnd[0], in qedf_eh_abort()
779 refcount, rdata->ids.port_id); in qedf_eh_abort()
788 if (test_bit(QEDF_RPORT_UPLOADING_CONNECTION, &fcport->flags)) { in qedf_eh_abort()
789 QEDF_ERR(&qedf->dbg_ctx, in qedf_eh_abort()
791 io_req->xid, rdata->ids.port_id); in qedf_eh_abort()
792 while (io_req->sc_cmd && (wait_count != 0)) { in qedf_eh_abort()
794 wait_count--; in qedf_eh_abort()
797 QEDF_ERR(&qedf->dbg_ctx, "ABTS succeeded\n"); in qedf_eh_abort()
800 QEDF_ERR(&qedf->dbg_ctx, "ABTS failed\n"); in qedf_eh_abort()
806 if (lport->state != LPORT_ST_READY || !(lport->link_up)) { in qedf_eh_abort()
807 QEDF_ERR(&qedf->dbg_ctx, "link not ready.\n"); in qedf_eh_abort()
811 QEDF_ERR(&qedf->dbg_ctx, in qedf_eh_abort()
813 io_req, sc_cmd, io_req->xid, io_req->fp_idx, in qedf_eh_abort()
814 rdata->ids.port_id); in qedf_eh_abort()
816 if (qedf->stop_io_on_error) { in qedf_eh_abort()
822 init_completion(&io_req->abts_done); in qedf_eh_abort()
825 QEDF_ERR(&(qedf->dbg_ctx), "Failed to queue ABTS.\n"); in qedf_eh_abort()
835 wait_for_completion(&io_req->abts_done); in qedf_eh_abort()
837 if (io_req->event == QEDF_IOREQ_EV_ABORT_SUCCESS || in qedf_eh_abort()
838 io_req->event == QEDF_IOREQ_EV_ABORT_FAILED || in qedf_eh_abort()
839 io_req->event == QEDF_IOREQ_EV_CLEANUP_SUCCESS) { in qedf_eh_abort()
852 QEDF_ERR(&(qedf->dbg_ctx), "ABTS succeeded, xid=0x%x.\n", in qedf_eh_abort()
853 io_req->xid); in qedf_eh_abort()
855 QEDF_ERR(&(qedf->dbg_ctx), "ABTS failed, xid=0x%x.\n", in qedf_eh_abort()
856 io_req->xid); in qedf_eh_abort()
859 kref_put(&rdata->kref, fc_rport_destroy); in qedf_eh_abort()
862 kref_put(&io_req->refcount, qedf_release_cmd); in qedf_eh_abort()
869 sc_cmd->device->host->host_no, sc_cmd->device->id, in qedf_eh_target_reset()
870 sc_cmd->device->lun); in qedf_eh_target_reset()
877 sc_cmd->device->host->host_no, sc_cmd->device->id, in qedf_eh_device_reset()
878 sc_cmd->device->lun); in qedf_eh_device_reset()
887 while (wait_cnt--) { in qedf_wait_for_upload()
888 if (atomic_read(&qedf->num_offloads)) in qedf_wait_for_upload()
889 QEDF_INFO(&qedf->dbg_ctx, QEDF_LOG_DISC, in qedf_wait_for_upload()
891 atomic_read(&qedf->num_offloads)); in qedf_wait_for_upload()
898 list_for_each_entry_rcu(fcport, &qedf->fcports, peers) { in qedf_wait_for_upload()
900 &fcport->flags)) { in qedf_wait_for_upload()
901 if (fcport->rdata) in qedf_wait_for_upload()
902 QEDF_ERR(&qedf->dbg_ctx, in qedf_wait_for_upload()
904 fcport, fcport->rdata->ids.port_id); in qedf_wait_for_upload()
906 QEDF_ERR(&qedf->dbg_ctx, in qedf_wait_for_upload()
923 if (lport->vport) { in qedf_ctx_soft_reset()
924 clear_bit(QEDF_STAG_IN_PROGRESS, &qedf->flags); in qedf_ctx_soft_reset()
929 qedf->flogi_pending = 0; in qedf_ctx_soft_reset()
931 atomic_set(&qedf->link_state, QEDF_LINK_DOWN); in qedf_ctx_soft_reset()
932 QEDF_INFO(&qedf->dbg_ctx, QEDF_LOG_DISC, in qedf_ctx_soft_reset()
934 queue_delayed_work(qedf->link_update_wq, &qedf->link_update, in qedf_ctx_soft_reset()
938 QEDF_ERR(&qedf->dbg_ctx, "Could not upload all sessions.\n"); in qedf_ctx_soft_reset()
939 WARN_ON(atomic_read(&qedf->num_offloads)); in qedf_ctx_soft_reset()
943 qed_ops->common->get_link(qedf->cdev, &if_link); in qedf_ctx_soft_reset()
946 QEDF_INFO(&qedf->dbg_ctx, QEDF_LOG_DISC, in qedf_ctx_soft_reset()
948 clear_bit(QEDF_STAG_IN_PROGRESS, &qedf->flags); in qedf_ctx_soft_reset()
952 flush_delayed_work(&qedf->link_update); in qedf_ctx_soft_reset()
955 atomic_set(&qedf->link_state, QEDF_LINK_UP); in qedf_ctx_soft_reset()
956 qedf->vlan_id = 0; in qedf_ctx_soft_reset()
957 QEDF_INFO(&qedf->dbg_ctx, QEDF_LOG_DISC, in qedf_ctx_soft_reset()
959 queue_delayed_work(qedf->link_update_wq, &qedf->link_update, in qedf_ctx_soft_reset()
961 clear_bit(QEDF_STAG_IN_PROGRESS, &qedf->flags); in qedf_ctx_soft_reset()
970 lport = shost_priv(sc_cmd->device->host); in qedf_eh_host_reset()
973 if (atomic_read(&qedf->link_state) == QEDF_LINK_DOWN || in qedf_eh_host_reset()
974 test_bit(QEDF_UNLOADING, &qedf->flags)) in qedf_eh_host_reset()
977 QEDF_ERR(&(qedf->dbg_ctx), "HOST RESET Issued..."); in qedf_eh_host_reset()
996 .this_id = -1,
1030 list_for_each_entry_rcu(fcport, &qedf->fcports, peers) { in qedf_fcport_lookup()
1031 rdata = fcport->rdata; in qedf_fcport_lookup()
1034 if (rdata->ids.port_id == port_id) { in qedf_fcport_lookup()
1052 if ((fh->fh_type == FC_TYPE_ELS) && in qedf_xmit_l2_frame()
1053 (fh->fh_r_ctl == FC_RCTL_ELS_REQ)) { in qedf_xmit_l2_frame()
1066 * qedf_xmit - qedf FCoE frame transmit function
1093 if (lport->vport) in qedf_xmit()
1094 base_lport = shost_priv(vport_to_shost(lport->vport)); in qedf_xmit()
1099 if (base_lport->port_id == ntoh24(fh->fh_d_id)) { in qedf_xmit()
1105 list_for_each_entry(tmp_lport, &base_lport->vports, list) { in qedf_xmit()
1106 if (tmp_lport->port_id == ntoh24(fh->fh_d_id)) { in qedf_xmit()
1112 if (vn_port && ntoh24(fh->fh_d_id) != FC_FID_FLOGI) { in qedf_xmit()
1115 QEDF_INFO(&(qedf->dbg_ctx), QEDF_LOG_LL2, in qedf_xmit()
1116 "Dropping FCoE frame to %06x.\n", ntoh24(fh->fh_d_id)); in qedf_xmit()
1118 rdata = fc_rport_lookup(lport, ntoh24(fh->fh_d_id)); in qedf_xmit()
1120 rdata->retries = lport->max_rport_retry_count; in qedf_xmit()
1121 kref_put(&rdata->kref, fc_rport_destroy); in qedf_xmit()
1123 return -EINVAL; in qedf_xmit()
1127 if (!qedf->ctlr.sel_fcf) { in qedf_xmit()
1132 if (!test_bit(QEDF_LL2_STARTED, &qedf->flags)) { in qedf_xmit()
1133 QEDF_WARN(&(qedf->dbg_ctx), "LL2 not started\n"); in qedf_xmit()
1138 if (atomic_read(&qedf->link_state) != QEDF_LINK_UP) { in qedf_xmit()
1139 QEDF_WARN(&(qedf->dbg_ctx), "qedf link down\n"); in qedf_xmit()
1144 if (unlikely(fh->fh_r_ctl == FC_RCTL_ELS_REQ)) { in qedf_xmit()
1145 if (fcoe_ctlr_els_send(&qedf->ctlr, lport, skb)) in qedf_xmit()
1150 fcport = qedf_fcport_lookup(qedf, ntoh24(fh->fh_d_id)); in qedf_xmit()
1152 if (fcport && test_bit(QEDF_RPORT_SESSION_READY, &fcport->flags)) { in qedf_xmit()
1168 wlen = (skb->len - tlen + sizeof(crc)) / FCOE_WORD_TO_BYTE; in qedf_xmit()
1170 skb->ip_summed = CHECKSUM_NONE; in qedf_xmit()
1179 return -ENOMEM; in qedf_xmit()
1181 frag = &skb_shinfo(skb)->frags[skb_shinfo(skb)->nr_frags - 1]; in qedf_xmit()
1188 cp->fcoe_eof = eof; in qedf_xmit()
1189 cp->fcoe_crc32 = cpu_to_le32(~crc); in qedf_xmit()
1200 skb->mac_len = elen; in qedf_xmit()
1201 skb->protocol = htons(ETH_P_FCOE); in qedf_xmit()
1204 * Add VLAN tag to non-offload FCoE frame based on current stored VLAN in qedf_xmit()
1207 __vlan_hwaccel_put_tag(skb, htons(ETH_P_8021Q), qedf->vlan_id); in qedf_xmit()
1211 eh->h_proto = htons(ETH_P_FCOE); in qedf_xmit()
1212 if (qedf->ctlr.map_dest) in qedf_xmit()
1213 fc_fcoe_set_mac(eh->h_dest, fh->fh_d_id); in qedf_xmit()
1216 ether_addr_copy(eh->h_dest, qedf->ctlr.dest_addr); in qedf_xmit()
1219 ether_addr_copy(eh->h_source, qedf->data_src_addr); in qedf_xmit()
1225 hp->fcoe_sof = sof; in qedf_xmit()
1228 this_cpu_inc(lport->stats->TxFrames); in qedf_xmit()
1229 this_cpu_add(lport->stats->TxWords, wlen); in qedf_xmit()
1236 QEDF_INFO(&(qedf->dbg_ctx), QEDF_LOG_LL2, "FCoE frame send: " in qedf_xmit()
1238 ntoh24(fh->fh_s_id), ntoh24(fh->fh_d_id), fh->fh_r_ctl, fh->fh_type, in qedf_xmit()
1242 1, skb->data, skb->len, false); in qedf_xmit()
1243 rc = qed_ops->ll2->start_xmit(qedf->cdev, skb, 0); in qedf_xmit()
1245 QEDF_ERR(&qedf->dbg_ctx, "start_xmit failed rc = %d.\n", rc); in qedf_xmit()
1261 fcport->sq_mem_size = SQ_NUM_ENTRIES * sizeof(struct fcoe_wqe); in qedf_alloc_sq()
1262 fcport->sq_mem_size = ALIGN(fcport->sq_mem_size, QEDF_PAGE_SIZE); in qedf_alloc_sq()
1263 fcport->sq_pbl_size = (fcport->sq_mem_size / QEDF_PAGE_SIZE) * in qedf_alloc_sq()
1265 fcport->sq_pbl_size = fcport->sq_pbl_size + QEDF_PAGE_SIZE; in qedf_alloc_sq()
1267 fcport->sq = dma_alloc_coherent(&qedf->pdev->dev, fcport->sq_mem_size, in qedf_alloc_sq()
1268 &fcport->sq_dma, GFP_KERNEL); in qedf_alloc_sq()
1269 if (!fcport->sq) { in qedf_alloc_sq()
1270 QEDF_WARN(&(qedf->dbg_ctx), "Could not allocate send queue.\n"); in qedf_alloc_sq()
1275 fcport->sq_pbl = dma_alloc_coherent(&qedf->pdev->dev, in qedf_alloc_sq()
1276 fcport->sq_pbl_size, in qedf_alloc_sq()
1277 &fcport->sq_pbl_dma, GFP_KERNEL); in qedf_alloc_sq()
1278 if (!fcport->sq_pbl) { in qedf_alloc_sq()
1279 QEDF_WARN(&(qedf->dbg_ctx), "Could not allocate send queue PBL.\n"); in qedf_alloc_sq()
1285 num_pages = fcport->sq_mem_size / QEDF_PAGE_SIZE; in qedf_alloc_sq()
1286 page = fcport->sq_dma; in qedf_alloc_sq()
1287 pbl = (u32 *)fcport->sq_pbl; in qedf_alloc_sq()
1289 while (num_pages--) { in qedf_alloc_sq()
1300 dma_free_coherent(&qedf->pdev->dev, fcport->sq_mem_size, fcport->sq, in qedf_alloc_sq()
1301 fcport->sq_dma); in qedf_alloc_sq()
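
The elided loop body above presumably writes one page base list (PBL) entry per page of send-queue memory. A hedged, generic sketch of that layout (each 64-bit DMA page address stored as two 32-bit words, low half first); this is not the driver's exact code:

/* Sketch only; u32/dma_addr_t from <linux/types.h>, lower_32_bits()/
 * upper_32_bits() are standard kernel helpers. */
static void example_fill_pbl(u32 *pbl, dma_addr_t first_page,
			     size_t mem_size, size_t page_size)
{
	size_t num_pages = mem_size / page_size;
	dma_addr_t page = first_page;

	while (num_pages--) {
		*pbl++ = lower_32_bits(page);	/* low 32 bits of page address */
		*pbl++ = upper_32_bits(page);	/* high 32 bits */
		page += page_size;
	}
}
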
1308 if (fcport->sq_pbl) in qedf_free_sq()
1309 dma_free_coherent(&qedf->pdev->dev, fcport->sq_pbl_size, in qedf_free_sq()
1310 fcport->sq_pbl, fcport->sq_pbl_dma); in qedf_free_sq()
1311 if (fcport->sq) in qedf_free_sq()
1312 dma_free_coherent(&qedf->pdev->dev, fcport->sq_mem_size, in qedf_free_sq()
1313 fcport->sq, fcport->sq_dma); in qedf_free_sq()
1322 uint16_t total_sqe = (fcport->sq_mem_size / sizeof(struct fcoe_wqe)); in qedf_offload_connection()
1324 QEDF_INFO(&(qedf->dbg_ctx), QEDF_LOG_CONN, "Offloading connection " in qedf_offload_connection()
1325 "portid=%06x.\n", fcport->rdata->ids.port_id); in qedf_offload_connection()
1326 rval = qed_ops->acquire_conn(qedf->cdev, &fcport->handle, in qedf_offload_connection()
1327 &fcport->fw_cid, &fcport->p_doorbell); in qedf_offload_connection()
1329 QEDF_WARN(&(qedf->dbg_ctx), "Could not acquire connection " in qedf_offload_connection()
1330 "for portid=%06x.\n", fcport->rdata->ids.port_id); in qedf_offload_connection()
1335 QEDF_INFO(&(qedf->dbg_ctx), QEDF_LOG_CONN, "portid=%06x " in qedf_offload_connection()
1336 "fw_cid=%08x handle=%d.\n", fcport->rdata->ids.port_id, in qedf_offload_connection()
1337 fcport->fw_cid, fcport->handle); in qedf_offload_connection()
1342 conn_info.sq_pbl_addr = fcport->sq_pbl_dma; in qedf_offload_connection()
1344 conn_info.sq_curr_page_addr = (dma_addr_t)(*(u64 *)fcport->sq_pbl); in qedf_offload_connection()
1346 (dma_addr_t)(*(u64 *)(fcport->sq_pbl + 8)); in qedf_offload_connection()
1349 ether_addr_copy(conn_info.src_mac, qedf->data_src_addr); in qedf_offload_connection()
1351 ether_addr_copy(conn_info.dst_mac, qedf->ctlr.dest_addr); in qedf_offload_connection()
1353 conn_info.tx_max_fc_pay_len = fcport->rdata->maxframe_size; in qedf_offload_connection()
1354 conn_info.e_d_tov_timer_val = qedf->lport->e_d_tov; in qedf_offload_connection()
1356 conn_info.rx_max_fc_pay_len = fcport->rdata->maxframe_size; in qedf_offload_connection()
1359 conn_info.vlan_tag = qedf->vlan_id << in qedf_offload_connection()
1362 qedf->prio << FCOE_CONN_OFFLOAD_RAMROD_DATA_PRIORITY_SHIFT; in qedf_offload_connection()
1367 port_id = fc_host_port_id(qedf->lport->host); in qedf_offload_connection()
1368 fcport->sid = port_id; in qedf_offload_connection()
1373 conn_info.max_conc_seqs_c3 = fcport->rdata->max_seq; in qedf_offload_connection()
1376 port_id = fcport->rdata->rport->port_id; in qedf_offload_connection()
1383 /* Set FC-TAPE specific flags if needed */ in qedf_offload_connection()
1384 if (fcport->dev_type == QEDF_RPORT_TYPE_TAPE) { in qedf_offload_connection()
1385 QEDF_INFO(&(qedf->dbg_ctx), QEDF_LOG_CONN, in qedf_offload_connection()
1387 fcport->rdata->ids.port_id); in qedf_offload_connection()
1391 ((fcport->rdata->sp_features & FC_SP_FT_SEQC) ? 1 : 0) << in qedf_offload_connection()
1395 rval = qed_ops->offload_conn(qedf->cdev, fcport->handle, &conn_info); in qedf_offload_connection()
1397 QEDF_WARN(&(qedf->dbg_ctx), "Could not offload connection " in qedf_offload_connection()
1398 "for portid=%06x.\n", fcport->rdata->ids.port_id); in qedf_offload_connection()
1401 QEDF_INFO(&(qedf->dbg_ctx), QEDF_LOG_CONN, "Offload " in qedf_offload_connection()
1403 fcport->rdata->ids.port_id, total_sqe); in qedf_offload_connection()
1405 spin_lock_init(&fcport->rport_lock); in qedf_offload_connection()
1406 atomic_set(&fcport->free_sqes, total_sqe); in qedf_offload_connection()
1409 qed_ops->release_conn(qedf->cdev, fcport->handle); in qedf_offload_connection()
1425 term_params = dma_alloc_coherent(&qedf->pdev->dev, QEDF_TERM_BUFF_SIZE, in qedf_upload_connection()
1430 QEDF_INFO(&(qedf->dbg_ctx), QEDF_LOG_CONN, "Uploading connection " in qedf_upload_connection()
1431 "port_id=%06x.\n", fcport->rdata->ids.port_id); in qedf_upload_connection()
1433 qed_ops->destroy_conn(qedf->cdev, fcport->handle, term_params_dma); in qedf_upload_connection()
1434 qed_ops->release_conn(qedf->cdev, fcport->handle); in qedf_upload_connection()
1436 dma_free_coherent(&qedf->pdev->dev, QEDF_TERM_BUFF_SIZE, term_params, in qedf_upload_connection()
1443 struct fc_rport_priv *rdata = fcport->rdata; in qedf_cleanup_fcport()
1445 QEDF_INFO(&(qedf->dbg_ctx), QEDF_LOG_CONN, "Cleaning up portid=%06x.\n", in qedf_cleanup_fcport()
1446 fcport->rdata->ids.port_id); in qedf_cleanup_fcport()
1449 qedf_flush_active_ios(fcport, -1); in qedf_cleanup_fcport()
1451 if (test_and_clear_bit(QEDF_RPORT_SESSION_READY, &fcport->flags)) in qedf_cleanup_fcport()
1454 fcport->rdata = NULL; in qedf_cleanup_fcport()
1455 fcport->qedf = NULL; in qedf_cleanup_fcport()
1456 kref_put(&rdata->kref, fc_rport_destroy); in qedf_cleanup_fcport()
1469 struct fc_rport *rport = rdata->rport; in qedf_rport_event_handler()
1476 QEDF_INFO(&(qedf->dbg_ctx), QEDF_LOG_DISC, "event = %d, " in qedf_rport_event_handler()
1477 "port_id = 0x%x\n", event, rdata->ids.port_id); in qedf_rport_event_handler()
1482 QEDF_WARN(&(qedf->dbg_ctx), "rport is NULL.\n"); in qedf_rport_event_handler()
1486 rp = rport->dd_data; in qedf_rport_event_handler()
1488 fcport->qedf = qedf; in qedf_rport_event_handler()
1490 if (atomic_read(&qedf->num_offloads) >= QEDF_MAX_SESSIONS) { in qedf_rport_event_handler()
1491 QEDF_ERR(&(qedf->dbg_ctx), "Not offloading " in qedf_rport_event_handler()
1493 "reached.\n", rdata->ids.port_id); in qedf_rport_event_handler()
1501 if (test_bit(QEDF_RPORT_SESSION_READY, &fcport->flags)) { in qedf_rport_event_handler()
1502 QEDF_WARN(&(qedf->dbg_ctx), "Session already " in qedf_rport_event_handler()
1504 rdata->ids.port_id); in qedf_rport_event_handler()
1508 if (rport->port_id == FC_FID_DIR_SERV) { in qedf_rport_event_handler()
1515 QEDF_WARN(&(qedf->dbg_ctx), "rport struct does not " in qedf_rport_event_handler()
1517 rdata->ids.port_id); in qedf_rport_event_handler()
1521 if (rdata->spp_type != FC_TYPE_FCP) { in qedf_rport_event_handler()
1522 QEDF_INFO(&(qedf->dbg_ctx), QEDF_LOG_DISC, in qedf_rport_event_handler()
1526 if (!(rdata->ids.roles & FC_RPORT_ROLE_FCP_TARGET)) { in qedf_rport_event_handler()
1527 QEDF_INFO(&(qedf->dbg_ctx), QEDF_LOG_DISC, in qedf_rport_event_handler()
1533 kref_get(&rdata->kref); in qedf_rport_event_handler()
1534 fcport->rdata = rdata; in qedf_rport_event_handler()
1535 fcport->rport = rport; in qedf_rport_event_handler()
1544 if (rdata->flags & FC_RP_FLAGS_RETRY && in qedf_rport_event_handler()
1545 rdata->ids.roles & FC_RPORT_ROLE_FCP_TARGET && in qedf_rport_event_handler()
1546 !(rdata->ids.roles & FC_RPORT_ROLE_FCP_INITIATOR)) { in qedf_rport_event_handler()
1547 fcport->dev_type = QEDF_RPORT_TYPE_TAPE; in qedf_rport_event_handler()
1548 QEDF_INFO(&(qedf->dbg_ctx), QEDF_LOG_DISC, in qedf_rport_event_handler()
1550 rdata->ids.port_id); in qedf_rport_event_handler()
1552 fcport->dev_type = QEDF_RPORT_TYPE_DISK; in qedf_rport_event_handler()
1562 spin_lock_irqsave(&qedf->hba_lock, flags); in qedf_rport_event_handler()
1563 list_add_rcu(&fcport->peers, &qedf->fcports); in qedf_rport_event_handler()
1564 spin_unlock_irqrestore(&qedf->hba_lock, flags); in qedf_rport_event_handler()
1570 set_bit(QEDF_RPORT_SESSION_READY, &fcport->flags); in qedf_rport_event_handler()
1571 atomic_inc(&qedf->num_offloads); in qedf_rport_event_handler()
1577 port_id = rdata->ids.port_id; in qedf_rport_event_handler()
1581 if (rdata->spp_type != FC_TYPE_FCP) { in qedf_rport_event_handler()
1582 QEDF_INFO(&(qedf->dbg_ctx), QEDF_LOG_DISC, in qedf_rport_event_handler()
1586 if (!(rdata->ids.roles & FC_RPORT_ROLE_FCP_TARGET)) { in qedf_rport_event_handler()
1587 QEDF_INFO(&(qedf->dbg_ctx), QEDF_LOG_DISC, in qedf_rport_event_handler()
1593 QEDF_INFO(&(qedf->dbg_ctx), QEDF_LOG_DISC, in qedf_rport_event_handler()
1594 "port_id=%x - rport notcreated Yet!!\n", port_id); in qedf_rport_event_handler()
1597 rp = rport->dd_data; in qedf_rport_event_handler()
1599 * Perform session upload. Note that rdata->peers is already in qedf_rport_event_handler()
1600 * removed from disc->rports list before we get this event. in qedf_rport_event_handler()
1604 spin_lock_irqsave(&fcport->rport_lock, flags); in qedf_rport_event_handler()
1606 if (test_bit(QEDF_RPORT_SESSION_READY, &fcport->flags) && in qedf_rport_event_handler()
1608 &fcport->flags)) { in qedf_rport_event_handler()
1610 &fcport->flags); in qedf_rport_event_handler()
1611 spin_unlock_irqrestore(&fcport->rport_lock, flags); in qedf_rport_event_handler()
1617 spin_lock_irqsave(&qedf->hba_lock, flags); in qedf_rport_event_handler()
1618 list_del_rcu(&fcport->peers); in qedf_rport_event_handler()
1619 spin_unlock_irqrestore(&qedf->hba_lock, flags); in qedf_rport_event_handler()
1622 &fcport->flags); in qedf_rport_event_handler()
1623 atomic_dec(&qedf->num_offloads); in qedf_rport_event_handler()
1625 spin_unlock_irqrestore(&fcport->rport_lock, flags); in qedf_rport_event_handler()
1636 /* NO-OP but need to fill in the template */ in qedf_abort_io()
1642 * NO-OP but need to fill in template to prevent a NULL in qedf_fcp_cleanup()
1658 fcoe_ctlr_init(&qedf->ctlr, FIP_MODE_AUTO); in qedf_fcoe_ctlr_setup()
1660 qedf->ctlr.send = qedf_fip_send; in qedf_fcoe_ctlr_setup()
1661 qedf->ctlr.get_src_addr = qedf_get_src_mac; in qedf_fcoe_ctlr_setup()
1662 ether_addr_copy(qedf->ctlr.ctl_src_addr, qedf->mac); in qedf_fcoe_ctlr_setup()
1667 struct fc_lport *lport = qedf->lport; in qedf_setup_fdmi()
1676 lport->fdmi_enabled = 1; in qedf_setup_fdmi()
1683 /* Get the PCI-e Device Serial Number Capability */ in qedf_setup_fdmi()
1684 pos = pci_find_ext_capability(qedf->pdev, PCI_EXT_CAP_ID_DSN); in qedf_setup_fdmi()
1688 pci_read_config_byte(qedf->pdev, pos + i, &buf[i]); in qedf_setup_fdmi()
1690 snprintf(fc_host_serial_number(lport->host), in qedf_setup_fdmi()
1696 snprintf(fc_host_serial_number(lport->host), in qedf_setup_fdmi()
1699 snprintf(fc_host_manufacturer(lport->host), in qedf_setup_fdmi()
1702 if (qedf->pdev->device == QL45xxx) { in qedf_setup_fdmi()
1703 snprintf(fc_host_model(lport->host), in qedf_setup_fdmi()
1706 snprintf(fc_host_model_description(lport->host), in qedf_setup_fdmi()
1711 if (qedf->pdev->device == QL41xxx) { in qedf_setup_fdmi()
1712 snprintf(fc_host_model(lport->host), in qedf_setup_fdmi()
1715 snprintf(fc_host_model_description(lport->host), in qedf_setup_fdmi()
1720 snprintf(fc_host_hardware_version(lport->host), in qedf_setup_fdmi()
1721 FC_VERSION_STRING_SIZE, "Rev %d", qedf->pdev->revision); in qedf_setup_fdmi()
1723 snprintf(fc_host_driver_version(lport->host), in qedf_setup_fdmi()
1726 snprintf(fc_host_firmware_version(lport->host), in qedf_setup_fdmi()
1731 snprintf(fc_host_vendor_identifier(lport->host), in qedf_setup_fdmi()
1738 struct fc_lport *lport = qedf->lport; in qedf_lport_setup()
1740 lport->link_up = 0; in qedf_lport_setup()
1741 lport->max_retry_count = QEDF_FLOGI_RETRY_CNT; in qedf_lport_setup()
1742 lport->max_rport_retry_count = QEDF_RPORT_RETRY_CNT; in qedf_lport_setup()
1743 lport->service_params = (FCP_SPPF_INIT_FCN | FCP_SPPF_RD_XRDY_DIS | in qedf_lport_setup()
1745 lport->boot_time = jiffies; in qedf_lport_setup()
1746 lport->e_d_tov = 2 * 1000; in qedf_lport_setup()
1747 lport->r_a_tov = 10 * 1000; in qedf_lport_setup()
1750 lport->does_npiv = 1; in qedf_lport_setup()
1751 fc_host_max_npiv_vports(lport->host) = QEDF_MAX_NPIV; in qedf_lport_setup()
1753 fc_set_wwnn(lport, qedf->wwnn); in qedf_lport_setup()
1754 fc_set_wwpn(lport, qedf->wwpn); in qedf_lport_setup()
1756 if (fcoe_libfc_config(lport, &qedf->ctlr, &qedf_lport_template, 0)) { in qedf_lport_setup()
1757 QEDF_ERR(&qedf->dbg_ctx, in qedf_lport_setup()
1759 return -ENOMEM; in qedf_lport_setup()
1767 return -ENOMEM; in qedf_lport_setup()
1774 fc_host_maxframe_size(lport->host) = lport->mfs; in qedf_lport_setup()
1777 fc_host_dev_loss_tmo(lport->host) = qedf_dev_loss_tmo; in qedf_lport_setup()
1780 if (qedf->pdev->device == QL45xxx) in qedf_lport_setup()
1781 snprintf(fc_host_symbolic_name(lport->host), 256, in qedf_lport_setup()
1784 if (qedf->pdev->device == QL41xxx) in qedf_lport_setup()
1785 snprintf(fc_host_symbolic_name(lport->host), 256, in qedf_lport_setup()
1800 lport->link_up = 0; in qedf_vport_libfc_config()
1801 lport->qfull = 0; in qedf_vport_libfc_config()
1802 lport->max_retry_count = QEDF_FLOGI_RETRY_CNT; in qedf_vport_libfc_config()
1803 lport->max_rport_retry_count = QEDF_RPORT_RETRY_CNT; in qedf_vport_libfc_config()
1804 lport->service_params = (FCP_SPPF_INIT_FCN | FCP_SPPF_RD_XRDY_DIS | in qedf_vport_libfc_config()
1806 lport->boot_time = jiffies; in qedf_vport_libfc_config()
1807 lport->e_d_tov = 2 * 1000; in qedf_vport_libfc_config()
1808 lport->r_a_tov = 10 * 1000; in qedf_vport_libfc_config()
1809 lport->does_npiv = 1; /* Temporary until we add NPIV support */ in qedf_vport_libfc_config()
1813 return -ENOMEM; in qedf_vport_libfc_config()
1819 lport->crc_offload = 0; in qedf_vport_libfc_config()
1820 lport->seq_offload = 0; in qedf_vport_libfc_config()
1821 lport->lro_enabled = 0; in qedf_vport_libfc_config()
1822 lport->lro_xid = 0; in qedf_vport_libfc_config()
1823 lport->lso_max = 0; in qedf_vport_libfc_config()
1841 fcoe_wwn_to_str(vport->port_name, buf, sizeof(buf)); in qedf_vport_create()
1842 QEDF_WARN(&(base_qedf->dbg_ctx), "Failed to create vport, " in qedf_vport_create()
1847 if (atomic_read(&base_qedf->link_state) != QEDF_LINK_UP) { in qedf_vport_create()
1848 QEDF_WARN(&(base_qedf->dbg_ctx), "Cannot create vport " in qedf_vport_create()
1850 return -EIO; in qedf_vport_create()
1855 QEDF_WARN(&(base_qedf->dbg_ctx), "Could not create lport " in qedf_vport_create()
1857 return -ENOMEM; in qedf_vport_create()
1860 fcoe_wwn_to_str(vport->port_name, buf, sizeof(buf)); in qedf_vport_create()
1861 QEDF_ERR(&(base_qedf->dbg_ctx), "Creating NPIV port, WWPN=%s.\n", in qedf_vport_create()
1869 vport_qedf->lport = vn_port; in qedf_vport_create()
1871 vport_qedf->hba_lock = base_qedf->hba_lock; in qedf_vport_create()
1872 vport_qedf->pdev = base_qedf->pdev; in qedf_vport_create()
1873 vport_qedf->cmd_mgr = base_qedf->cmd_mgr; in qedf_vport_create()
1874 init_completion(&vport_qedf->flogi_compl); in qedf_vport_create()
1875 INIT_LIST_HEAD(&vport_qedf->fcports); in qedf_vport_create()
1876 INIT_DELAYED_WORK(&vport_qedf->stag_work, qedf_stag_change_work); in qedf_vport_create()
1880 QEDF_ERR(&(base_qedf->dbg_ctx), "Could not allocate memory " in qedf_vport_create()
1885 fc_set_wwnn(vn_port, vport->node_name); in qedf_vport_create()
1886 fc_set_wwpn(vn_port, vport->port_name); in qedf_vport_create()
1887 vport_qedf->wwnn = vn_port->wwnn; in qedf_vport_create()
1888 vport_qedf->wwpn = vn_port->wwpn; in qedf_vport_create()
1890 vn_port->host->transportt = qedf_fc_vport_transport_template; in qedf_vport_create()
1891 vn_port->host->can_queue = FCOE_PARAMS_NUM_TASKS; in qedf_vport_create()
1892 vn_port->host->max_lun = qedf_max_lun; in qedf_vport_create()
1893 vn_port->host->sg_tablesize = QEDF_MAX_BDS_PER_CMD; in qedf_vport_create()
1894 vn_port->host->max_cmd_len = QEDF_MAX_CDB_LEN; in qedf_vport_create()
1895 vn_port->host->max_id = QEDF_MAX_SESSIONS; in qedf_vport_create()
1897 rc = scsi_add_host(vn_port->host, &vport->dev); in qedf_vport_create()
1899 QEDF_WARN(&base_qedf->dbg_ctx, in qedf_vport_create()
1905 fc_host_dev_loss_tmo(vn_port->host) = qedf_dev_loss_tmo; in qedf_vport_create()
1908 memcpy(&vn_port->tt, &qedf_lport_template, in qedf_vport_create()
1925 fc_host_port_type(vn_port->host) = FC_PORTTYPE_UNKNOWN; in qedf_vport_create()
1930 vn_port->boot_time = jiffies; in qedf_vport_create()
1936 if (base_qedf->pdev->device == QL45xxx) in qedf_vport_create()
1937 snprintf(fc_host_symbolic_name(vn_port->host), 256, in qedf_vport_create()
1940 if (base_qedf->pdev->device == QL41xxx) in qedf_vport_create()
1941 snprintf(fc_host_symbolic_name(vn_port->host), 256, in qedf_vport_create()
1945 fc_host_supported_speeds(vn_port->host) = n_port->link_supported_speeds; in qedf_vport_create()
1948 vn_port->link_speed = n_port->link_speed; in qedf_vport_create()
1951 fc_host_port_type(vn_port->host) = FC_PORTTYPE_NPIV; in qedf_vport_create()
1954 fc_host_maxframe_size(vn_port->host) = n_port->mfs; in qedf_vport_create()
1956 QEDF_INFO(&(base_qedf->dbg_ctx), QEDF_LOG_NPIV, "vn_port=%p.\n", in qedf_vport_create()
1960 vport_qedf->dbg_ctx.host_no = vn_port->host->host_no; in qedf_vport_create()
1961 vport_qedf->dbg_ctx.pdev = base_qedf->pdev; in qedf_vport_create()
1966 scsi_host_put(vn_port->host); in qedf_vport_create()
1974 struct fc_lport *vn_port = vport->dd_data; in qedf_vport_destroy()
1983 set_bit(QEDF_UNLOADING, &qedf->flags); in qedf_vport_destroy()
1985 mutex_lock(&n_port->lp_mutex); in qedf_vport_destroy()
1986 list_del(&vn_port->list); in qedf_vport_destroy()
1987 mutex_unlock(&n_port->lp_mutex); in qedf_vport_destroy()
1992 /* Detach from scsi-ml */ in qedf_vport_destroy()
1993 fc_remove_host(vn_port->host); in qedf_vport_destroy()
1994 scsi_remove_host(vn_port->host); in qedf_vport_destroy()
2000 if (vn_port->state == LPORT_ST_READY) in qedf_vport_destroy()
2007 scsi_host_put(vn_port->host); in qedf_vport_destroy()
2015 struct fc_lport *lport = vport->dd_data; in qedf_vport_disable()
2021 lport->boot_time = jiffies; in qedf_vport_disable()
2036 struct fc_host_attrs *fc_host = shost_to_fc_host(qedf->lport->host); in qedf_wait_for_vport_destroy()
2038 QEDF_INFO(&(qedf->dbg_ctx), QEDF_LOG_NPIV, in qedf_wait_for_vport_destroy()
2040 while (fc_host->npiv_vports_inuse > 0) { in qedf_wait_for_vport_destroy()
2041 QEDF_INFO(&(qedf->dbg_ctx), QEDF_LOG_NPIV, in qedf_wait_for_vport_destroy()
2048 * qedf_fcoe_reset - Resets the fcoe
2066 fc_host_port_id(shost) = lport->port_id; in qedf_get_host_port_id()
2080 if (lport->vport) in qedf_fc_get_host_stats()
2085 QEDF_ERR(&(qedf->dbg_ctx), "Could not allocate memory for " in qedf_fc_get_host_stats()
2090 mutex_lock(&qedf->stats_mutex); in qedf_fc_get_host_stats()
2093 qed_ops->get_stats(qedf->cdev, fw_fcoe_stats); in qedf_fc_get_host_stats()
2101 qedf_stats->tx_frames += fw_fcoe_stats->fcoe_tx_data_pkt_cnt + in qedf_fc_get_host_stats()
2102 fw_fcoe_stats->fcoe_tx_xfer_pkt_cnt + in qedf_fc_get_host_stats()
2103 fw_fcoe_stats->fcoe_tx_other_pkt_cnt; in qedf_fc_get_host_stats()
2104 qedf_stats->rx_frames += fw_fcoe_stats->fcoe_rx_data_pkt_cnt + in qedf_fc_get_host_stats()
2105 fw_fcoe_stats->fcoe_rx_xfer_pkt_cnt + in qedf_fc_get_host_stats()
2106 fw_fcoe_stats->fcoe_rx_other_pkt_cnt; in qedf_fc_get_host_stats()
2107 qedf_stats->fcp_input_megabytes += in qedf_fc_get_host_stats()
2108 do_div(fw_fcoe_stats->fcoe_rx_byte_cnt, 1000000); in qedf_fc_get_host_stats()
2109 qedf_stats->fcp_output_megabytes += in qedf_fc_get_host_stats()
2110 do_div(fw_fcoe_stats->fcoe_tx_byte_cnt, 1000000); in qedf_fc_get_host_stats()
2111 qedf_stats->rx_words += fw_fcoe_stats->fcoe_rx_byte_cnt / 4; in qedf_fc_get_host_stats()
2112 qedf_stats->tx_words += fw_fcoe_stats->fcoe_tx_byte_cnt / 4; in qedf_fc_get_host_stats()
2113 qedf_stats->invalid_crc_count += in qedf_fc_get_host_stats()
2114 fw_fcoe_stats->fcoe_silent_drop_pkt_crc_error_cnt; in qedf_fc_get_host_stats()
2115 qedf_stats->dumped_frames = in qedf_fc_get_host_stats()
2116 fw_fcoe_stats->fcoe_silent_drop_total_pkt_cnt; in qedf_fc_get_host_stats()
2117 qedf_stats->error_frames += in qedf_fc_get_host_stats()
2118 fw_fcoe_stats->fcoe_silent_drop_total_pkt_cnt; in qedf_fc_get_host_stats()
2119 qedf_stats->fcp_input_requests += qedf->input_requests; in qedf_fc_get_host_stats()
2120 qedf_stats->fcp_output_requests += qedf->output_requests; in qedf_fc_get_host_stats()
2121 qedf_stats->fcp_control_requests += qedf->control_requests; in qedf_fc_get_host_stats()
2122 qedf_stats->fcp_packet_aborts += qedf->packet_aborts; in qedf_fc_get_host_stats()
2123 qedf_stats->fcp_frame_alloc_failures += qedf->alloc_failures; in qedf_fc_get_host_stats()
2125 mutex_unlock(&qedf->stats_mutex); in qedf_fc_get_host_stats()
2203 struct qedf_ctx *qedf = fp->qedf; in qedf_fp_has_work()
2205 struct qed_sb_info *sb_info = fp->sb_info; in qedf_fp_has_work()
2206 struct status_block *sb = sb_info->sb_virt; in qedf_fp_has_work()
2210 que = qedf->global_queues[fp->sb_id]; in qedf_fp_has_work()
2216 prod_idx = sb->pi_array[QEDF_FCOE_PARAMS_GL_RQ_PI]; in qedf_fp_has_work()
2218 return (que->cq_prod_idx != prod_idx); in qedf_fp_has_work()
2225 /* Process completion queue and copy CQE contents for deferred processing
2231 struct qedf_ctx *qedf = fp->qedf; in qedf_process_completions()
2232 struct qed_sb_info *sb_info = fp->sb_info; in qedf_process_completions()
2233 struct status_block *sb = sb_info->sb_virt; in qedf_process_completions()
2236 struct fcoe_cqe *cqe; in qedf_process_completions() local
2245 prod_idx = sb->pi_array[QEDF_FCOE_PARAMS_GL_RQ_PI]; in qedf_process_completions()
2248 que = qedf->global_queues[fp->sb_id]; in qedf_process_completions()
2251 new_cqes = (prod_idx >= que->cq_prod_idx) ? in qedf_process_completions()
2252 (prod_idx - que->cq_prod_idx) : in qedf_process_completions()
2253 0x10000 - que->cq_prod_idx + prod_idx; in qedf_process_completions()
2256 que->cq_prod_idx = prod_idx; in qedf_process_completions()
2259 fp->completions++; in qedf_process_completions()
2260 cqe = &que->cq[que->cq_cons_idx]; in qedf_process_completions()
2262 comp_type = (cqe->cqe_data >> FCOE_CQE_CQE_TYPE_SHIFT) & in qedf_process_completions()
2270 QEDF_INFO(&(qedf->dbg_ctx), QEDF_LOG_UNSOL, in qedf_process_completions()
2271 "Unsolicated CQE.\n"); in qedf_process_completions()
2272 qedf_process_unsol_compl(qedf, fp->sb_id, cqe); in qedf_process_completions()
2280 xid = cqe->cqe_data & FCOE_CQE_TASK_ID_MASK; in qedf_process_completions()
2281 io_req = &qedf->cmd_mgr->cmds[xid]; in qedf_process_completions()
2288 /* If there is no io_req associated with this CQE in qedf_process_completions()
2293 cpu = io_req->cpu; in qedf_process_completions()
2294 io_req->int_cpu = smp_processor_id(); in qedf_process_completions()
2297 io_work = mempool_alloc(qedf->io_mempool, GFP_ATOMIC); in qedf_process_completions()
2299 QEDF_WARN(&(qedf->dbg_ctx), "Could not allocate " in qedf_process_completions()
2305 INIT_WORK(&io_work->work, qedf_fp_io_handler); in qedf_process_completions()
2307 /* Copy contents of CQE for deferred processing */ in qedf_process_completions()
2308 memcpy(&io_work->cqe, cqe, sizeof(struct fcoe_cqe)); in qedf_process_completions()
2310 io_work->qedf = fp->qedf; in qedf_process_completions()
2311 io_work->fp = NULL; /* Only used for unsolicited frames */ in qedf_process_completions()
2313 queue_work_on(cpu, qedf_io_wq, &io_work->work); in qedf_process_completions()
2316 que->cq_cons_idx++; in qedf_process_completions()
2317 if (que->cq_cons_idx == fp->cq_num_entries) in qedf_process_completions()
2318 que->cq_cons_idx = 0; in qedf_process_completions()
2319 new_cqes--; in qedf_process_completions()
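
A standalone illustration of the producer-index arithmetic above: the firmware's CQ producer index (read from the status block PI array, compare qedf_fp_has_work()) is a 16-bit counter that wraps at 0x10000, so the number of new CQEs is its modular distance from the driver's cached copy:

#include <stdint.h>
#include <stdio.h>

static uint32_t new_cqes(uint16_t fw_prod, uint16_t drv_prod)
{
	return (fw_prod >= drv_prod) ? (uint32_t)(fw_prod - drv_prod)
				     : 0x10000u - drv_prod + fw_prod;
}

int main(void)
{
	printf("%u\n", new_cqes(0x0005, 0xfffe));	/* wrapped: 7 new CQEs */
	printf("%u\n", new_cqes(0x0120, 0x0100));	/* no wrap: 32 new CQEs */
	return 0;
}
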
2326 /* MSI-X fastpath handler code */
2335 if (!fp->sb_info) { in qedf_msix_handler()
2336 QEDF_ERR(NULL, "fp->sb_info is null."); in qedf_msix_handler()
2344 qed_sb_ack(fp->sb_info, IGU_INT_DISABLE, 0 /*do not update*/); in qedf_msix_handler()
2351 qed_sb_update_sb_idx(fp->sb_info); in qedf_msix_handler()
2357 /* Re-enable interrupts */ in qedf_msix_handler()
2358 qed_sb_ack(fp->sb_info, IGU_INT_ENABLE, 1); in qedf_msix_handler()
2374 QEDF_WARN(&(qedf->dbg_ctx), "qedf=%p.\n", qedf); in qedf_simd_int_handler()
2384 if (qedf->int_info.msix_cnt) { in qedf_sync_free_irqs()
2385 for (i = 0; i < qedf->int_info.used_cnt; i++) { in qedf_sync_free_irqs()
2386 vector_idx = i * qedf->dev_info.common.num_hwfns + in qedf_sync_free_irqs()
2387 qed_ops->common->get_affin_hwfn_idx(qedf->cdev); in qedf_sync_free_irqs()
2388 QEDF_INFO(&qedf->dbg_ctx, QEDF_LOG_DISC, in qedf_sync_free_irqs()
2391 vector = qedf->int_info.msix[vector_idx].vector; in qedf_sync_free_irqs()
2395 free_irq(vector, &qedf->fp_array[i]); in qedf_sync_free_irqs()
2398 qed_ops->common->simd_handler_clean(qedf->cdev, in qedf_sync_free_irqs()
2401 qedf->int_info.used_cnt = 0; in qedf_sync_free_irqs()
2402 qed_ops->common->set_fp_int(qedf->cdev, 0); in qedf_sync_free_irqs()
2412 for (i = 0; i < qedf->num_queues; i++) { in qedf_request_msix_irq()
2413 vector_idx = i * qedf->dev_info.common.num_hwfns + in qedf_request_msix_irq()
2414 qed_ops->common->get_affin_hwfn_idx(qedf->cdev); in qedf_request_msix_irq()
2415 QEDF_INFO(&qedf->dbg_ctx, QEDF_LOG_DISC, in qedf_request_msix_irq()
2418 vector = qedf->int_info.msix[vector_idx].vector; in qedf_request_msix_irq()
2420 &qedf->fp_array[i]); in qedf_request_msix_irq()
2423 QEDF_WARN(&(qedf->dbg_ctx), "request_irq failed.\n"); in qedf_request_msix_irq()
2428 qedf->int_info.used_cnt++; in qedf_request_msix_irq()
2443 rc = qed_ops->common->set_fp_int(qedf->cdev, num_online_cpus()); in qedf_setup_int()
2447 rc = qed_ops->common->get_fp_int(qedf->cdev, &qedf->int_info); in qedf_setup_int()
2451 QEDF_INFO(&(qedf->dbg_ctx), QEDF_LOG_DISC, "Number of msix_cnt = " in qedf_setup_int()
2452 "0x%x num of cpus = 0x%x\n", qedf->int_info.msix_cnt, in qedf_setup_int()
2455 if (qedf->int_info.msix_cnt) in qedf_setup_int()
2458 qed_ops->common->simd_handler_config(qedf->cdev, &qedf, in qedf_setup_int()
2460 qedf->int_info.used_cnt = 1; in qedf_setup_int()
2462 QEDF_ERR(&qedf->dbg_ctx, in qedf_setup_int()
2463 "Cannot load driver due to a lack of MSI-X vectors.\n"); in qedf_setup_int()
2464 return -EINVAL; in qedf_setup_int()
2483 lport = qedf->lport; in qedf_recv_frame()
2484 if (lport == NULL || lport->state == LPORT_ST_DISABLED) { in qedf_recv_frame()
2492 mac = eth_hdr(skb)->h_source; in qedf_recv_frame()
2493 dest_mac = eth_hdr(skb)->h_dest; in qedf_recv_frame()
2496 hp = (struct fcoe_hdr *)skb->data; in qedf_recv_frame()
2499 fr_len = skb->len - sizeof(struct fcoe_crc_eof); in qedf_recv_frame()
2504 fr_sof(fp) = hp->fcoe_sof; in qedf_recv_frame()
2524 if (fh->fh_r_ctl == FC_RCTL_DD_SOL_DATA && in qedf_recv_frame()
2525 fh->fh_type == FC_TYPE_FCP) { in qedf_recv_frame()
2530 if (fh->fh_r_ctl == FC_RCTL_ELS_REQ && in qedf_recv_frame()
2531 fh->fh_type == FC_TYPE_ELS) { in qedf_recv_frame()
2534 if (ntoh24(fh->fh_s_id) == FC_FID_FLOGI) { in qedf_recv_frame()
2535 /* drop non-FIP LOGO */ in qedf_recv_frame()
2543 if (fh->fh_r_ctl == FC_RCTL_BA_ABTS) { in qedf_recv_frame()
2549 if (ntoh24(&dest_mac[3]) != ntoh24(fh->fh_d_id)) { in qedf_recv_frame()
2550 QEDF_INFO(&(qedf->dbg_ctx), QEDF_LOG_LL2, in qedf_recv_frame()
2556 if (qedf->ctlr.state) { in qedf_recv_frame()
2557 if (!ether_addr_equal(mac, qedf->ctlr.dest_addr)) { in qedf_recv_frame()
2558 QEDF_INFO(&(qedf->dbg_ctx), QEDF_LOG_LL2, in qedf_recv_frame()
2560 mac, qedf->ctlr.dest_addr); in qedf_recv_frame()
2566 vn_port = fc_vport_id_lookup(lport, ntoh24(fh->fh_d_id)); in qedf_recv_frame()
2573 if (lport->port_id != ntoh24(fh->fh_d_id) && !vn_port) { in qedf_recv_frame()
2574 QEDF_INFO(&qedf->dbg_ctx, QEDF_LOG_LL2, in qedf_recv_frame()
2575 "Dropping frame due to destination mismatch: lport->port_id=0x%x fh->d_id=0x%x.\n", in qedf_recv_frame()
2576 lport->port_id, ntoh24(fh->fh_d_id)); in qedf_recv_frame()
2581 f_ctl = ntoh24(fh->fh_f_ctl); in qedf_recv_frame()
2582 if ((fh->fh_type == FC_TYPE_BLS) && (f_ctl & FC_FC_SEQ_CTX) && in qedf_recv_frame()
2585 QEDF_INFO(&qedf->dbg_ctx, QEDF_LOG_LL2, in qedf_recv_frame()
2598 fcport = qedf_fcport_lookup(qedf, ntoh24(fh->fh_d_id)); in qedf_recv_frame()
2601 &fcport->flags)) { in qedf_recv_frame()
2602 QEDF_INFO(&(qedf->dbg_ctx), QEDF_LOG_LL2, in qedf_recv_frame()
2608 QEDF_INFO(&(qedf->dbg_ctx), QEDF_LOG_LL2, "FCoE frame receive: " in qedf_recv_frame()
2610 ntoh24(fh->fh_s_id), ntoh24(fh->fh_d_id), fh->fh_r_ctl, in qedf_recv_frame()
2611 fh->fh_type); in qedf_recv_frame()
2614 1, skb->data, skb->len, false); in qedf_recv_frame()
2622 struct qedf_ctx *qedf = skb_work->qedf; in qedf_ll2_process_skb()
2623 struct sk_buff *skb = skb_work->skb; in qedf_ll2_process_skb()
2631 eh = (struct ethhdr *)skb->data; in qedf_ll2_process_skb()
2634 if (eh->h_proto == htons(ETH_P_8021Q)) { in qedf_ll2_process_skb()
2645 if (eh->h_proto == htons(ETH_P_FIP)) { in qedf_ll2_process_skb()
2648 } else if (eh->h_proto == htons(ETH_P_FCOE)) { in qedf_ll2_process_skb()
2668 if (atomic_read(&qedf->link_state) == QEDF_LINK_DOWN) { in qedf_ll2_rx()
2669 QEDF_INFO(&qedf->dbg_ctx, QEDF_LOG_LL2, in qedf_ll2_rx()
2677 QEDF_WARN(&(qedf->dbg_ctx), "Could not allocate skb_work so " in qedf_ll2_rx()
2683 INIT_WORK(&skb_work->work, qedf_ll2_process_skb); in qedf_ll2_rx()
2684 skb_work->skb = skb; in qedf_ll2_rx()
2685 skb_work->qedf = qedf; in qedf_ll2_rx()
2686 queue_work(qedf->ll2_recv_wq, &skb_work->work); in qedf_ll2_rx()
2704 * Deferred part of unsolicited CQE sends in qedf_fp_io_handler()
2707 comp_type = (io_work->cqe.cqe_data >> in qedf_fp_io_handler()
2711 io_work->fp) in qedf_fp_io_handler()
2712 fc_exch_recv(io_work->qedf->lport, io_work->fp); in qedf_fp_io_handler()
2714 qedf_process_cqe(io_work->qedf, &io_work->cqe); in qedf_fp_io_handler()
2726 sb_virt = dma_alloc_coherent(&qedf->pdev->dev, in qedf_alloc_and_init_sb()
2730 QEDF_ERR(&qedf->dbg_ctx, in qedf_alloc_and_init_sb()
2733 return -ENOMEM; in qedf_alloc_and_init_sb()
2736 ret = qed_ops->common->sb_init(qedf->cdev, sb_info, sb_virt, sb_phys, in qedf_alloc_and_init_sb()
2740 dma_free_coherent(&qedf->pdev->dev, sizeof(*sb_virt), sb_virt, sb_phys); in qedf_alloc_and_init_sb()
2741 QEDF_ERR(&qedf->dbg_ctx, in qedf_alloc_and_init_sb()
2752 if (sb_info->sb_virt) in qedf_free_sb()
2753 dma_free_coherent(&qedf->pdev->dev, sizeof(*sb_info->sb_virt), in qedf_free_sb()
2754 (void *)sb_info->sb_virt, sb_info->sb_phys); in qedf_free_sb()
2762 for (id = 0; id < qedf->num_queues; id++) { in qedf_destroy_sb()
2763 fp = &(qedf->fp_array[id]); in qedf_destroy_sb()
2764 if (fp->sb_id == QEDF_SB_ID_NULL) in qedf_destroy_sb()
2766 qedf_free_sb(qedf, fp->sb_info); in qedf_destroy_sb()
2767 kfree(fp->sb_info); in qedf_destroy_sb()
2769 kfree(qedf->fp_array); in qedf_destroy_sb()
2778 qedf->fp_array = in qedf_prepare_sb()
2779 kcalloc(qedf->num_queues, sizeof(struct qedf_fastpath), in qedf_prepare_sb()
2782 if (!qedf->fp_array) { in qedf_prepare_sb()
2783 QEDF_ERR(&(qedf->dbg_ctx), "fastpath array allocation " in qedf_prepare_sb()
2785 return -ENOMEM; in qedf_prepare_sb()
2788 for (id = 0; id < qedf->num_queues; id++) { in qedf_prepare_sb()
2789 fp = &(qedf->fp_array[id]); in qedf_prepare_sb()
2790 fp->sb_id = QEDF_SB_ID_NULL; in qedf_prepare_sb()
2791 fp->sb_info = kcalloc(1, sizeof(*fp->sb_info), GFP_KERNEL); in qedf_prepare_sb()
2792 if (!fp->sb_info) { in qedf_prepare_sb()
2793 QEDF_ERR(&(qedf->dbg_ctx), "SB info struct " in qedf_prepare_sb()
2797 ret = qedf_alloc_and_init_sb(qedf, fp->sb_info, id); in qedf_prepare_sb()
2799 QEDF_ERR(&(qedf->dbg_ctx), "SB allocation and " in qedf_prepare_sb()
2803 fp->sb_id = id; in qedf_prepare_sb()
2804 fp->qedf = qedf; in qedf_prepare_sb()
2805 fp->cq_num_entries = in qedf_prepare_sb()
2806 qedf->global_queues[id]->cq_mem_size / in qedf_prepare_sb()
2813 void qedf_process_cqe(struct qedf_ctx *qedf, struct fcoe_cqe *cqe) in qedf_process_cqe() argument
2822 comp_type = (cqe->cqe_data >> FCOE_CQE_CQE_TYPE_SHIFT) & in qedf_process_cqe()
2825 xid = cqe->cqe_data & FCOE_CQE_TASK_ID_MASK; in qedf_process_cqe()
2826 io_req = &qedf->cmd_mgr->cmds[xid]; in qedf_process_cqe()
2830 QEDF_ERR(&qedf->dbg_ctx, in qedf_process_cqe()
2835 fcport = io_req->fcport; in qedf_process_cqe()
2838 QEDF_ERR(&qedf->dbg_ctx, in qedf_process_cqe()
2848 if (!test_bit(QEDF_RPORT_SESSION_READY, &fcport->flags)) { in qedf_process_cqe()
2849 QEDF_ERR(&qedf->dbg_ctx, in qedf_process_cqe()
2854 spin_lock_irqsave(&fcport->rport_lock, flags); in qedf_process_cqe()
2855 io_comp_type = io_req->cmd_type; in qedf_process_cqe()
2856 spin_unlock_irqrestore(&fcport->rport_lock, flags); in qedf_process_cqe()
2860 atomic_inc(&fcport->free_sqes); in qedf_process_cqe()
2863 qedf_scsi_completion(qedf, cqe, io_req); in qedf_process_cqe()
2866 qedf_process_els_compl(qedf, cqe, io_req); in qedf_process_cqe()
2869 qedf_process_tmf_compl(qedf, cqe, io_req); in qedf_process_cqe()
2872 qedf_process_seq_cleanup_compl(qedf, cqe, io_req); in qedf_process_cqe()
2877 atomic_inc(&fcport->free_sqes); in qedf_process_cqe()
2878 QEDF_INFO(&(qedf->dbg_ctx), QEDF_LOG_IO, in qedf_process_cqe()
2879 "Error detect CQE.\n"); in qedf_process_cqe()
2880 qedf_process_error_detect(qedf, cqe, io_req); in qedf_process_cqe()
2883 atomic_inc(&fcport->free_sqes); in qedf_process_cqe()
2884 QEDF_INFO(&(qedf->dbg_ctx), QEDF_LOG_IO, in qedf_process_cqe()
2885 "Cleanup CQE.\n"); in qedf_process_cqe()
2886 qedf_process_cleanup_compl(qedf, cqe, io_req); in qedf_process_cqe()
2889 atomic_inc(&fcport->free_sqes); in qedf_process_cqe()
2890 QEDF_INFO(&(qedf->dbg_ctx), QEDF_LOG_IO, in qedf_process_cqe()
2891 "Abort CQE.\n"); in qedf_process_cqe()
2892 qedf_process_abts_compl(qedf, cqe, io_req); in qedf_process_cqe()
2895 atomic_inc(&fcport->free_sqes); in qedf_process_cqe()
2896 QEDF_INFO(&(qedf->dbg_ctx), QEDF_LOG_IO, in qedf_process_cqe()
2897 "Dummy CQE.\n"); in qedf_process_cqe()
2900 atomic_inc(&fcport->free_sqes); in qedf_process_cqe()
2901 QEDF_INFO(&(qedf->dbg_ctx), QEDF_LOG_IO, in qedf_process_cqe()
2902 "Local completion CQE.\n"); in qedf_process_cqe()
2905 atomic_inc(&fcport->free_sqes); in qedf_process_cqe()
2906 QEDF_INFO(&(qedf->dbg_ctx), QEDF_LOG_IO, in qedf_process_cqe()
2907 "Warning CQE.\n"); in qedf_process_cqe()
2908 qedf_process_warning_compl(qedf, cqe, io_req); in qedf_process_cqe()
2911 atomic_inc(&fcport->free_sqes); in qedf_process_cqe()
2912 QEDF_INFO(&(qedf->dbg_ctx), QEDF_LOG_IO, in qedf_process_cqe()
2913 "Max FCoE CQE.\n"); in qedf_process_cqe()
2916 QEDF_INFO(&(qedf->dbg_ctx), QEDF_LOG_IO, in qedf_process_cqe()
2917 "Default CQE.\n"); in qedf_process_cqe()
2926 if (qedf->bdq_pbl_list) in qedf_free_bdq()
2927 dma_free_coherent(&qedf->pdev->dev, QEDF_PAGE_SIZE, in qedf_free_bdq()
2928 qedf->bdq_pbl_list, qedf->bdq_pbl_list_dma); in qedf_free_bdq()
2930 if (qedf->bdq_pbl) in qedf_free_bdq()
2931 dma_free_coherent(&qedf->pdev->dev, qedf->bdq_pbl_mem_size, in qedf_free_bdq()
2932 qedf->bdq_pbl, qedf->bdq_pbl_dma); in qedf_free_bdq()
2935 if (qedf->bdq[i].buf_addr) { in qedf_free_bdq()
2936 dma_free_coherent(&qedf->pdev->dev, QEDF_BDQ_BUF_SIZE, in qedf_free_bdq()
2937 qedf->bdq[i].buf_addr, qedf->bdq[i].buf_dma); in qedf_free_bdq()
2945 struct global_queue **gl = qedf->global_queues; in qedf_free_global_queues()
2947 for (i = 0; i < qedf->num_queues; i++) { in qedf_free_global_queues()
2951 if (gl[i]->cq) in qedf_free_global_queues()
2952 dma_free_coherent(&qedf->pdev->dev, in qedf_free_global_queues()
2953 gl[i]->cq_mem_size, gl[i]->cq, gl[i]->cq_dma); in qedf_free_global_queues()
2954 if (gl[i]->cq_pbl) in qedf_free_global_queues()
2955 dma_free_coherent(&qedf->pdev->dev, gl[i]->cq_pbl_size, in qedf_free_global_queues()
2956 gl[i]->cq_pbl, gl[i]->cq_pbl_dma); in qedf_free_global_queues()
2972 qedf->bdq[i].buf_addr = dma_alloc_coherent(&qedf->pdev->dev, in qedf_alloc_bdq()
2973 QEDF_BDQ_BUF_SIZE, &qedf->bdq[i].buf_dma, GFP_KERNEL); in qedf_alloc_bdq()
2974 if (!qedf->bdq[i].buf_addr) { in qedf_alloc_bdq()
2975 QEDF_ERR(&(qedf->dbg_ctx), "Could not allocate BDQ " in qedf_alloc_bdq()
2977 return -ENOMEM; in qedf_alloc_bdq()
2982 qedf->bdq_pbl_mem_size = in qedf_alloc_bdq()
2984 qedf->bdq_pbl_mem_size = in qedf_alloc_bdq()
2985 ALIGN(qedf->bdq_pbl_mem_size, QEDF_PAGE_SIZE); in qedf_alloc_bdq()
2987 qedf->bdq_pbl = dma_alloc_coherent(&qedf->pdev->dev, in qedf_alloc_bdq()
2988 qedf->bdq_pbl_mem_size, &qedf->bdq_pbl_dma, GFP_KERNEL); in qedf_alloc_bdq()
2989 if (!qedf->bdq_pbl) { in qedf_alloc_bdq()
2990 QEDF_ERR(&(qedf->dbg_ctx), "Could not allocate BDQ PBL.\n"); in qedf_alloc_bdq()
2991 return -ENOMEM; in qedf_alloc_bdq()
2994 QEDF_INFO(&(qedf->dbg_ctx), QEDF_LOG_DISC, in qedf_alloc_bdq()
2996 qedf->bdq_pbl, &qedf->bdq_pbl_dma); in qedf_alloc_bdq()
3002 pbl = (struct scsi_bd *)qedf->bdq_pbl; in qedf_alloc_bdq()
3004 pbl->address.hi = cpu_to_le32(U64_HI(qedf->bdq[i].buf_dma)); in qedf_alloc_bdq()
3005 pbl->address.lo = cpu_to_le32(U64_LO(qedf->bdq[i].buf_dma)); in qedf_alloc_bdq()
3006 pbl->opaque.fcoe_opaque.hi = 0; in qedf_alloc_bdq()
3008 pbl->opaque.fcoe_opaque.lo = cpu_to_le32(i); in qedf_alloc_bdq()
3013 qedf->bdq_pbl_list = dma_alloc_coherent(&qedf->pdev->dev, in qedf_alloc_bdq()
3015 &qedf->bdq_pbl_list_dma, in qedf_alloc_bdq()
3017 if (!qedf->bdq_pbl_list) { in qedf_alloc_bdq()
3018 QEDF_ERR(&(qedf->dbg_ctx), "Could not allocate list of PBL pages.\n"); in qedf_alloc_bdq()
3019 return -ENOMEM; in qedf_alloc_bdq()
3026 qedf->bdq_pbl_list_num_entries = qedf->bdq_pbl_mem_size / in qedf_alloc_bdq()
3028 list = (u64 *)qedf->bdq_pbl_list; in qedf_alloc_bdq()
3029 for (i = 0; i < qedf->bdq_pbl_list_num_entries; i++) { in qedf_alloc_bdq()
3030 *list = qedf->bdq_pbl_dma; in qedf_alloc_bdq()
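In qedf_alloc_bdq() each PBL entry carries the 64-bit DMA address of one BDQ buffer split into 32-bit high/low words (stored little-endian via cpu_to_le32 in the driver) plus the buffer index in the opaque field, and a second page-list then points at the PBL page itself. A minimal user-space sketch of that packing follows; the struct layout, macro names and addresses are simplified stand-ins, not the qed definitions, and the cpu_to_le32 byte-swapping step is omitted here.

/* Illustrative sketch: splitting 64-bit DMA addresses into hi/lo PBL words,
 * as qedf_alloc_bdq() does with struct scsi_bd.  Simplified stand-ins only. */
#include <inttypes.h>
#include <stdint.h>
#include <stdio.h>

#define U64_HI(v) ((uint32_t)(((uint64_t)(v)) >> 32))
#define U64_LO(v) ((uint32_t)((uint64_t)(v)))

struct pbl_entry {
	uint32_t addr_hi;  /* upper 32 bits of the buffer DMA address */
	uint32_t addr_lo;  /* lower 32 bits of the buffer DMA address */
	uint32_t opaque;   /* buffer index, as in the listing's fcoe_opaque.lo = i */
};

int main(void)
{
	uint64_t buf_dma[3] = { 0x1fff00000ULL, 0x1fff01000ULL, 0x1fff02000ULL };
	struct pbl_entry pbl[3];

	for (unsigned int i = 0; i < 3; i++) {
		pbl[i].addr_hi = U64_HI(buf_dma[i]);
		pbl[i].addr_lo = U64_LO(buf_dma[i]);
		pbl[i].opaque  = i;
		printf("entry %u: hi=0x%" PRIx32 " lo=0x%" PRIx32 "\n",
		       i, pbl[i].addr_hi, pbl[i].addr_lo);
	}
	return 0;
}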
3051 if (!qedf->num_queues) { in qedf_alloc_global_queues()
3052 QEDF_ERR(&(qedf->dbg_ctx), "No MSI-X vectors available!\n"); in qedf_alloc_global_queues()
3053 return -ENOMEM; in qedf_alloc_global_queues()
3060 if (!qedf->p_cpuq) { in qedf_alloc_global_queues()
3061 QEDF_ERR(&qedf->dbg_ctx, "p_cpuq is NULL.\n"); in qedf_alloc_global_queues()
3062 return -EINVAL; in qedf_alloc_global_queues()
3065 qedf->global_queues = kzalloc((sizeof(struct global_queue *) in qedf_alloc_global_queues()
3066 * qedf->num_queues), GFP_KERNEL); in qedf_alloc_global_queues()
3067 if (!qedf->global_queues) { in qedf_alloc_global_queues()
3068 QEDF_ERR(&(qedf->dbg_ctx), "Unable to allocate global " in qedf_alloc_global_queues()
3070 return -ENOMEM; in qedf_alloc_global_queues()
3072 QEDF_INFO(&(qedf->dbg_ctx), QEDF_LOG_DISC, in qedf_alloc_global_queues()
3073 "qedf->global_queues=%p.\n", qedf->global_queues); in qedf_alloc_global_queues()
3078 QEDF_ERR(&qedf->dbg_ctx, "Unable to allocate bdq.\n"); in qedf_alloc_global_queues()
3082 /* Allocate a CQ and an associated PBL for each MSI-X vector */ in qedf_alloc_global_queues()
3083 for (i = 0; i < qedf->num_queues; i++) { in qedf_alloc_global_queues()
3084 qedf->global_queues[i] = kzalloc(sizeof(struct global_queue), in qedf_alloc_global_queues()
3086 if (!qedf->global_queues[i]) { in qedf_alloc_global_queues()
3087 QEDF_WARN(&(qedf->dbg_ctx), "Unable to allocate " in qedf_alloc_global_queues()
3089 status = -ENOMEM; in qedf_alloc_global_queues()
3093 qedf->global_queues[i]->cq_mem_size = in qedf_alloc_global_queues()
3095 qedf->global_queues[i]->cq_mem_size = in qedf_alloc_global_queues()
3096 ALIGN(qedf->global_queues[i]->cq_mem_size, QEDF_PAGE_SIZE); in qedf_alloc_global_queues()
3098 qedf->global_queues[i]->cq_pbl_size = in qedf_alloc_global_queues()
3099 (qedf->global_queues[i]->cq_mem_size / in qedf_alloc_global_queues()
3101 qedf->global_queues[i]->cq_pbl_size = in qedf_alloc_global_queues()
3102 ALIGN(qedf->global_queues[i]->cq_pbl_size, QEDF_PAGE_SIZE); in qedf_alloc_global_queues()
3104 qedf->global_queues[i]->cq = in qedf_alloc_global_queues()
3105 dma_alloc_coherent(&qedf->pdev->dev, in qedf_alloc_global_queues()
3106 qedf->global_queues[i]->cq_mem_size, in qedf_alloc_global_queues()
3107 &qedf->global_queues[i]->cq_dma, in qedf_alloc_global_queues()
3110 if (!qedf->global_queues[i]->cq) { in qedf_alloc_global_queues()
3111 QEDF_WARN(&(qedf->dbg_ctx), "Could not allocate cq.\n"); in qedf_alloc_global_queues()
3112 status = -ENOMEM; in qedf_alloc_global_queues()
3116 qedf->global_queues[i]->cq_pbl = in qedf_alloc_global_queues()
3117 dma_alloc_coherent(&qedf->pdev->dev, in qedf_alloc_global_queues()
3118 qedf->global_queues[i]->cq_pbl_size, in qedf_alloc_global_queues()
3119 &qedf->global_queues[i]->cq_pbl_dma, in qedf_alloc_global_queues()
3122 if (!qedf->global_queues[i]->cq_pbl) { in qedf_alloc_global_queues()
3123 QEDF_WARN(&(qedf->dbg_ctx), "Could not allocate cq PBL.\n"); in qedf_alloc_global_queues()
3124 status = -ENOMEM; in qedf_alloc_global_queues()
3129 num_pages = qedf->global_queues[i]->cq_mem_size / in qedf_alloc_global_queues()
3131 page = qedf->global_queues[i]->cq_dma; in qedf_alloc_global_queues()
3132 pbl = (u32 *)qedf->global_queues[i]->cq_pbl; in qedf_alloc_global_queues()
3134 while (num_pages--) { in qedf_alloc_global_queues()
3142 qedf->global_queues[i]->cq_cons_idx = 0; in qedf_alloc_global_queues()
3145 list = (u32 *)qedf->p_cpuq; in qedf_alloc_global_queues()
3153 for (i = 0; i < qedf->num_queues; i++) { in qedf_alloc_global_queues()
3154 *list = U64_LO(qedf->global_queues[i]->cq_pbl_dma); in qedf_alloc_global_queues()
3156 *list = U64_HI(qedf->global_queues[i]->cq_pbl_dma); in qedf_alloc_global_queues()
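The sizing logic in qedf_alloc_global_queues() is: round the CQ memory (entry count times entry size) up to a page, derive the page count, then size the PBL as one 64-bit pointer per page, again page-aligned, before writing each page's DMA address into the PBL as lo/hi halves. A small sketch of just that arithmetic is below; the page size, entry count and entry size are assumed example values, not the driver's constants.

/* Illustrative sketch: CQ/PBL sizing arithmetic in the style of
 * qedf_alloc_global_queues().  ALIGN() mirrors the kernel macro for
 * power-of-two alignment; all numeric values are assumed examples. */
#include <stdio.h>

#define QEDF_PAGE_SIZE 4096UL
#define ALIGN(x, a)    (((x) + (a) - 1) & ~((a) - 1))

int main(void)
{
	unsigned long cq_num_entries = 1024;  /* assumed example value      */
	unsigned long cqe_size       = 32;    /* assumed example entry size */

	unsigned long cq_mem_size = ALIGN(cq_num_entries * cqe_size, QEDF_PAGE_SIZE);
	unsigned long num_pages   = cq_mem_size / QEDF_PAGE_SIZE;
	/* one 64-bit PBL pointer per CQ page; the PBL itself is page-aligned */
	unsigned long cq_pbl_size = ALIGN(num_pages * 8, QEDF_PAGE_SIZE);

	printf("cq_mem_size=%lu num_pages=%lu cq_pbl_size=%lu\n",
	       cq_mem_size, num_pages, cq_pbl_size);
	return 0;
}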
3186 qedf->num_queues = MIN_NUM_CPUS_MSIX(qedf); in qedf_set_fcoe_pf_param()
3188 QEDF_INFO(&(qedf->dbg_ctx), QEDF_LOG_DISC, "Number of CQs is %d.\n", in qedf_set_fcoe_pf_param()
3189 qedf->num_queues); in qedf_set_fcoe_pf_param()
3191 qedf->p_cpuq = dma_alloc_coherent(&qedf->pdev->dev, in qedf_set_fcoe_pf_param()
3192 qedf->num_queues * sizeof(struct qedf_glbl_q_params), in qedf_set_fcoe_pf_param()
3193 &qedf->hw_p_cpuq, GFP_KERNEL); in qedf_set_fcoe_pf_param()
3195 if (!qedf->p_cpuq) { in qedf_set_fcoe_pf_param()
3196 QEDF_ERR(&(qedf->dbg_ctx), "dma_alloc_coherent failed.\n"); in qedf_set_fcoe_pf_param()
3202 QEDF_ERR(&(qedf->dbg_ctx), "Global queue allocation " in qedf_set_fcoe_pf_param()
3217 memset(&(qedf->pf_params), 0, sizeof(qedf->pf_params)); in qedf_set_fcoe_pf_param()
3220 qedf->pf_params.fcoe_pf_params.num_cons = QEDF_MAX_SESSIONS; in qedf_set_fcoe_pf_param()
3221 qedf->pf_params.fcoe_pf_params.num_tasks = FCOE_PARAMS_NUM_TASKS; in qedf_set_fcoe_pf_param()
3222 qedf->pf_params.fcoe_pf_params.glbl_q_params_addr = in qedf_set_fcoe_pf_param()
3223 (u64)qedf->hw_p_cpuq; in qedf_set_fcoe_pf_param()
3224 qedf->pf_params.fcoe_pf_params.sq_num_pbl_pages = sq_num_pbl_pages; in qedf_set_fcoe_pf_param()
3226 qedf->pf_params.fcoe_pf_params.rq_buffer_log_size = 0; in qedf_set_fcoe_pf_param()
3228 qedf->pf_params.fcoe_pf_params.cq_num_entries = cq_num_entries; in qedf_set_fcoe_pf_param()
3229 qedf->pf_params.fcoe_pf_params.num_cqs = qedf->num_queues; in qedf_set_fcoe_pf_param()
3232 qedf->pf_params.fcoe_pf_params.log_page_size = ilog2(QEDF_PAGE_SIZE); in qedf_set_fcoe_pf_param()
3234 qedf->pf_params.fcoe_pf_params.mtu = 9000; in qedf_set_fcoe_pf_param()
3235 qedf->pf_params.fcoe_pf_params.gl_rq_pi = QEDF_FCOE_PARAMS_GL_RQ_PI; in qedf_set_fcoe_pf_param()
3236 qedf->pf_params.fcoe_pf_params.gl_cmd_pi = QEDF_FCOE_PARAMS_GL_CMD_PI; in qedf_set_fcoe_pf_param()
3239 qedf->pf_params.fcoe_pf_params.bdq_pbl_base_addr[0] = in qedf_set_fcoe_pf_param()
3240 qedf->bdq_pbl_list_dma; in qedf_set_fcoe_pf_param()
3241 qedf->pf_params.fcoe_pf_params.bdq_pbl_num_entries[0] = in qedf_set_fcoe_pf_param()
3242 qedf->bdq_pbl_list_num_entries; in qedf_set_fcoe_pf_param()
3243 qedf->pf_params.fcoe_pf_params.rq_buffer_size = QEDF_BDQ_BUF_SIZE; in qedf_set_fcoe_pf_param()
3245 QEDF_INFO(&(qedf->dbg_ctx), QEDF_LOG_DISC, in qedf_set_fcoe_pf_param()
3247 qedf->bdq_pbl_list, in qedf_set_fcoe_pf_param()
3248 qedf->pf_params.fcoe_pf_params.bdq_pbl_base_addr[0], in qedf_set_fcoe_pf_param()
3249 qedf->pf_params.fcoe_pf_params.bdq_pbl_num_entries[0]); in qedf_set_fcoe_pf_param()
3251 QEDF_INFO(&(qedf->dbg_ctx), QEDF_LOG_DISC, in qedf_set_fcoe_pf_param()
3253 qedf->pf_params.fcoe_pf_params.cq_num_entries); in qedf_set_fcoe_pf_param()
3263 if (qedf->p_cpuq) { in qedf_free_fcoe_pf_param()
3264 size = qedf->num_queues * sizeof(struct qedf_glbl_q_params); in qedf_free_fcoe_pf_param()
3265 dma_free_coherent(&qedf->pdev->dev, size, qedf->p_cpuq, in qedf_free_fcoe_pf_param()
3266 qedf->hw_p_cpuq); in qedf_free_fcoe_pf_param()
3271 kfree(qedf->global_queues); in qedf_free_fcoe_pf_param()
3296 int rc = -EINVAL; in __qedf_probe()
3324 rc = -ENOMEM; in __qedf_probe()
3332 set_bit(QEDF_PROBING, &qedf->flags); in __qedf_probe()
3333 qedf->lport = lport; in __qedf_probe()
3334 qedf->ctlr.lp = lport; in __qedf_probe()
3335 qedf->pdev = pdev; in __qedf_probe()
3336 qedf->dbg_ctx.pdev = pdev; in __qedf_probe()
3337 qedf->dbg_ctx.host_no = lport->host->host_no; in __qedf_probe()
3338 spin_lock_init(&qedf->hba_lock); in __qedf_probe()
3339 INIT_LIST_HEAD(&qedf->fcports); in __qedf_probe()
3340 qedf->curr_conn_id = QEDF_MAX_SESSIONS - 1; in __qedf_probe()
3341 atomic_set(&qedf->num_offloads, 0); in __qedf_probe()
3342 qedf->stop_io_on_error = false; in __qedf_probe()
3344 init_completion(&qedf->fipvlan_compl); in __qedf_probe()
3345 mutex_init(&qedf->stats_mutex); in __qedf_probe()
3346 mutex_init(&qedf->flush_mutex); in __qedf_probe()
3347 qedf->flogi_pending = 0; in __qedf_probe()
3349 QEDF_INFO(&(qedf->dbg_ctx), QEDF_LOG_INFO, in __qedf_probe()
3357 set_bit(QEDF_PROBING, &qedf->flags); in __qedf_probe()
3358 lport = qedf->lport; in __qedf_probe()
3361 QEDF_INFO(&qedf->dbg_ctx, QEDF_LOG_DISC, "Probe started.\n"); in __qedf_probe()
3363 host = lport->host; in __qedf_probe()
3366 qedf->io_mempool = mempool_create_slab_pool(QEDF_IO_WORK_MIN, in __qedf_probe()
3368 if (qedf->io_mempool == NULL) { in __qedf_probe()
3369 QEDF_ERR(&(qedf->dbg_ctx), "qedf->io_mempool is NULL.\n"); in __qedf_probe()
3372 QEDF_INFO(&(qedf->dbg_ctx), QEDF_LOG_INFO, "qedf->io_mempool=%p.\n", in __qedf_probe()
3373 qedf->io_mempool); in __qedf_probe()
3376 qedf->lport->host->host_no); in __qedf_probe()
3377 qedf->link_update_wq = create_workqueue(host_buf); in __qedf_probe()
3378 INIT_DELAYED_WORK(&qedf->link_update, qedf_handle_link_update); in __qedf_probe()
3379 INIT_DELAYED_WORK(&qedf->link_recovery, qedf_link_recovery); in __qedf_probe()
3380 INIT_DELAYED_WORK(&qedf->grcdump_work, qedf_wq_grcdump); in __qedf_probe()
3381 INIT_DELAYED_WORK(&qedf->stag_work, qedf_stag_change_work); in __qedf_probe()
3382 qedf->fipvlan_retries = qedf_fipvlan_retries; in __qedf_probe()
3384 if (qedf_default_prio > -1) { in __qedf_probe()
3389 qedf->prio = qedf_default_prio; in __qedf_probe()
3391 qedf->prio = QEDF_DEFAULT_PRIO; in __qedf_probe()
3402 qedf->cdev = qed_ops->common->probe(pdev, &qed_params); in __qedf_probe()
3403 if (!qedf->cdev) { in __qedf_probe()
3405 QEDF_ERR(&qedf->dbg_ctx, in __qedf_probe()
3407 retry_cnt--; in __qedf_probe()
3410 QEDF_ERR(&qedf->dbg_ctx, "common probe failed.\n"); in __qedf_probe()
3411 rc = -ENODEV; in __qedf_probe()
3416 rc = qed_ops->fill_dev_info(qedf->cdev, &qedf->dev_info); in __qedf_probe()
3418 QEDF_ERR(&(qedf->dbg_ctx), "Failed to fill dev info.\n"); in __qedf_probe()
3422 QEDF_INFO(&qedf->dbg_ctx, QEDF_LOG_DISC, in __qedf_probe()
3424 qedf->dev_info.common.num_hwfns, in __qedf_probe()
3425 qed_ops->common->get_affin_hwfn_idx(qedf->cdev)); in __qedf_probe()
3437 QEDF_ERR(&(qedf->dbg_ctx), "Cannot set fcoe pf param.\n"); in __qedf_probe()
3440 qed_ops->common->update_pf_params(qedf->cdev, &qedf->pf_params); in __qedf_probe()
3443 rc = qed_ops->fill_dev_info(qedf->cdev, &qedf->dev_info); in __qedf_probe()
3445 QEDF_ERR(&qedf->dbg_ctx, "Failed to fill dev info.\n"); in __qedf_probe()
3450 qedf->devlink = qed_ops->common->devlink_register(qedf->cdev); in __qedf_probe()
3451 if (IS_ERR(qedf->devlink)) { in __qedf_probe()
3452 QEDF_ERR(&qedf->dbg_ctx, "Cannot register devlink\n"); in __qedf_probe()
3453 rc = PTR_ERR(qedf->devlink); in __qedf_probe()
3454 qedf->devlink = NULL; in __qedf_probe()
3460 qedf->bdq_primary_prod = qedf->dev_info.primary_dbq_rq_addr; in __qedf_probe()
3461 qedf->bdq_secondary_prod = qedf->dev_info.secondary_bdq_rq_addr; in __qedf_probe()
3462 QEDF_INFO(&(qedf->dbg_ctx), QEDF_LOG_DISC, in __qedf_probe()
3463 "BDQ primary_prod=%p secondary_prod=%p.\n", qedf->bdq_primary_prod, in __qedf_probe()
3464 qedf->bdq_secondary_prod); in __qedf_probe()
3466 qed_ops->register_ops(qedf->cdev, &qedf_cb_ops, qedf); in __qedf_probe()
3471 QEDF_ERR(&(qedf->dbg_ctx), "Cannot start slowpath.\n"); in __qedf_probe()
3475 /* Start the Slowpath-process */ in __qedf_probe()
3483 rc = qed_ops->common->slowpath_start(qedf->cdev, &slowpath_params); in __qedf_probe()
3485 QEDF_ERR(&(qedf->dbg_ctx), "Cannot start slowpath.\n"); in __qedf_probe()
3493 qed_ops->common->update_pf_params(qedf->cdev, &qedf->pf_params); in __qedf_probe()
3498 QEDF_ERR(&qedf->dbg_ctx, "Setup interrupts failed.\n"); in __qedf_probe()
3502 rc = qed_ops->start(qedf->cdev, &qedf->tasks); in __qedf_probe()
3504 QEDF_ERR(&(qedf->dbg_ctx), "Cannot start FCoE function.\n"); in __qedf_probe()
3507 task_start = qedf_get_task_mem(&qedf->tasks, 0); in __qedf_probe()
3508 task_end = qedf_get_task_mem(&qedf->tasks, MAX_TID_BLOCKS_FCOE - 1); in __qedf_probe()
3509 QEDF_INFO(&(qedf->dbg_ctx), QEDF_LOG_DISC, "Task context start=%p, " in __qedf_probe()
3511 qedf->tasks.size); in __qedf_probe()
3515 * the f/w will do a prefetch and we'll get an unsolicited CQE when a in __qedf_probe()
3518 qedf->bdq_prod_idx = QEDF_BDQ_SIZE; in __qedf_probe()
3519 QEDF_INFO(&(qedf->dbg_ctx), QEDF_LOG_DISC, in __qedf_probe()
3521 qedf->bdq_prod_idx); in __qedf_probe()
3522 writew(qedf->bdq_prod_idx, qedf->bdq_primary_prod); in __qedf_probe()
3523 readw(qedf->bdq_primary_prod); in __qedf_probe()
3524 writew(qedf->bdq_prod_idx, qedf->bdq_secondary_prod); in __qedf_probe()
3525 readw(qedf->bdq_secondary_prod); in __qedf_probe()
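The probe path primes the BDQ by writing the initial producer index to both producer registers and immediately reading each one back, which forces the posted MMIO write out to the device before execution continues. A hedged user-space sketch of that write-then-read-back idiom follows; in the driver these are __iomem registers accessed with writew()/readw(), whereas the volatile globals below are only stand-ins for illustration.

/* Illustrative sketch: write-then-read-back doorbell idiom used when
 * priming the BDQ producer indices; not the driver's actual MMIO code. */
#include <stdint.h>
#include <stdio.h>

static volatile uint16_t primary_prod;    /* stand-in for qedf->bdq_primary_prod   */
static volatile uint16_t secondary_prod;  /* stand-in for qedf->bdq_secondary_prod */

static void ring_bdq_doorbells(uint16_t prod_idx)
{
	uint16_t flush;

	primary_prod = prod_idx;    /* post the new producer index (writew)           */
	flush = primary_prod;       /* read it back to flush the posted write (readw) */
	secondary_prod = prod_idx;
	flush = secondary_prod;
	(void)flush;
}

int main(void)
{
	ring_bdq_doorbells(128);    /* e.g. all BDQ buffers initially posted */
	printf("primary=%u secondary=%u\n",
	       (unsigned int)primary_prod, (unsigned int)secondary_prod);
	return 0;
}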
3527 qed_ops->common->set_power_state(qedf->cdev, PCI_D0); in __qedf_probe()
3532 ether_addr_copy(qedf->mac, qedf->dev_info.common.hw_mac); in __qedf_probe()
3533 QEDF_INFO(&(qedf->dbg_ctx), QEDF_LOG_DISC, "MAC address is %pM.\n", in __qedf_probe()
3534 qedf->mac); in __qedf_probe()
3539 * If the info we get from qed is non-zero then use that to set the in __qedf_probe()
3543 if (qedf->dev_info.wwnn != 0 && qedf->dev_info.wwpn != 0) { in __qedf_probe()
3544 QEDF_INFO(&(qedf->dbg_ctx), QEDF_LOG_DISC, in __qedf_probe()
3546 qedf->wwnn = qedf->dev_info.wwnn; in __qedf_probe()
3547 qedf->wwpn = qedf->dev_info.wwpn; in __qedf_probe()
3549 QEDF_INFO(&(qedf->dbg_ctx), QEDF_LOG_DISC, in __qedf_probe()
3551 qedf->wwnn = fcoe_wwn_from_mac(qedf->mac, 1, 0); in __qedf_probe()
3552 qedf->wwpn = fcoe_wwn_from_mac(qedf->mac, 2, 0); in __qedf_probe()
3554 QEDF_INFO(&(qedf->dbg_ctx), QEDF_LOG_DISC, "WWNN=%016llx " in __qedf_probe()
3555 "WWPN=%016llx.\n", qedf->wwnn, qedf->wwpn); in __qedf_probe()
3557 sprintf(host_buf, "host_%d", host->host_no); in __qedf_probe()
3558 qed_ops->common->set_name(qedf->cdev, host_buf); in __qedf_probe()
3561 qedf->cmd_mgr = qedf_cmd_mgr_alloc(qedf); in __qedf_probe()
3562 if (!qedf->cmd_mgr) { in __qedf_probe()
3563 QEDF_ERR(&(qedf->dbg_ctx), "Failed to allocate cmd mgr.\n"); in __qedf_probe()
3564 rc = -ENOMEM; in __qedf_probe()
3569 host->transportt = qedf_fc_transport_template; in __qedf_probe()
3570 host->max_lun = qedf_max_lun; in __qedf_probe()
3571 host->max_cmd_len = QEDF_MAX_CDB_LEN; in __qedf_probe()
3572 host->max_id = QEDF_MAX_SESSIONS; in __qedf_probe()
3573 host->can_queue = FCOE_PARAMS_NUM_TASKS; in __qedf_probe()
3574 rc = scsi_add_host(host, &pdev->dev); in __qedf_probe()
3576 QEDF_WARN(&qedf->dbg_ctx, in __qedf_probe()
3584 ether_addr_copy(params.ll2_mac_address, qedf->mac); in __qedf_probe()
3587 snprintf(host_buf, 20, "qedf_%d_ll2", host->host_no); in __qedf_probe()
3588 qedf->ll2_recv_wq = in __qedf_probe()
3590 if (!qedf->ll2_recv_wq) { in __qedf_probe()
3591 QEDF_ERR(&(qedf->dbg_ctx), "Failed to create LL2 workqueue.\n"); in __qedf_probe()
3592 rc = -ENOMEM; in __qedf_probe()
3597 qedf_dbg_host_init(&(qedf->dbg_ctx), qedf_debugfs_ops, in __qedf_probe()
3602 qed_ops->ll2->register_cb_ops(qedf->cdev, &qedf_ll2_cb_ops, qedf); in __qedf_probe()
3603 rc = qed_ops->ll2->start(qedf->cdev, &params); in __qedf_probe()
3605 QEDF_ERR(&(qedf->dbg_ctx), "Could not start Light L2.\n"); in __qedf_probe()
3608 set_bit(QEDF_LL2_STARTED, &qedf->flags); in __qedf_probe()
3611 qedf->vlan_id = 0; in __qedf_probe()
3624 QEDF_ERR(&(qedf->dbg_ctx), in __qedf_probe()
3630 sprintf(host_buf, "qedf_%u_timer", qedf->lport->host->host_no); in __qedf_probe()
3631 qedf->timer_work_queue = in __qedf_probe()
3633 if (!qedf->timer_work_queue) { in __qedf_probe()
3634 QEDF_ERR(&(qedf->dbg_ctx), "Failed to start timer " in __qedf_probe()
3636 rc = -ENOMEM; in __qedf_probe()
3643 qedf->lport->host->host_no); in __qedf_probe()
3644 qedf->dpc_wq = create_workqueue(host_buf); in __qedf_probe()
3646 INIT_DELAYED_WORK(&qedf->recovery_work, qedf_recovery_handler); in __qedf_probe()
3653 qedf->grcdump_size = in __qedf_probe()
3654 qed_ops->common->dbg_all_data_size(qedf->cdev); in __qedf_probe()
3655 if (qedf->grcdump_size) { in __qedf_probe()
3656 rc = qedf_alloc_grc_dump_buf(&qedf->grcdump, in __qedf_probe()
3657 qedf->grcdump_size); in __qedf_probe()
3659 QEDF_ERR(&(qedf->dbg_ctx), in __qedf_probe()
3661 qedf->grcdump = NULL; in __qedf_probe()
3664 QEDF_INFO(&(qedf->dbg_ctx), QEDF_LOG_DISC, in __qedf_probe()
3666 qedf->grcdump, qedf->grcdump_size); in __qedf_probe()
3671 spin_lock_init(&qedf->io_trace_lock); in __qedf_probe()
3672 qedf->io_trace_idx = 0; in __qedf_probe()
3675 init_completion(&qedf->flogi_compl); in __qedf_probe()
3677 status = qed_ops->common->update_drv_state(qedf->cdev, true); in __qedf_probe()
3679 QEDF_ERR(&(qedf->dbg_ctx), in __qedf_probe()
3684 status = qed_ops->common->set_link(qedf->cdev, &link_params); in __qedf_probe()
3686 QEDF_WARN(&(qedf->dbg_ctx), "set_link failed.\n"); in __qedf_probe()
3690 fcoe_ctlr_link_up(&qedf->ctlr); in __qedf_probe()
3694 QEDF_INFO(&qedf->dbg_ctx, QEDF_LOG_DISC, "Probe done.\n"); in __qedf_probe()
3696 clear_bit(QEDF_PROBING, &qedf->flags); in __qedf_probe()
3702 if (qedf->ll2_recv_wq) in __qedf_probe()
3703 destroy_workqueue(qedf->ll2_recv_wq); in __qedf_probe()
3704 fc_remove_host(qedf->lport->host); in __qedf_probe()
3705 scsi_remove_host(qedf->lport->host); in __qedf_probe()
3707 qedf_dbg_host_exit(&(qedf->dbg_ctx)); in __qedf_probe()
3710 qedf_cmd_mgr_free(qedf->cmd_mgr); in __qedf_probe()
3712 qed_ops->stop(qedf->cdev); in __qedf_probe()
3717 qed_ops->common->slowpath_stop(qedf->cdev); in __qedf_probe()
3719 qed_ops->common->remove(qedf->cdev); in __qedf_probe()
3721 scsi_host_put(lport->host); in __qedf_probe()
3748 if (test_bit(QEDF_UNLOADING, &qedf->flags)) { in __qedf_remove()
3749 QEDF_ERR(&qedf->dbg_ctx, "Already removing PCI function.\n"); in __qedf_remove()
3754 if (test_bit(QEDF_STAG_IN_PROGRESS, &qedf->flags)) { in __qedf_remove()
3755 QEDF_ERR(&qedf->dbg_ctx, "Stag in progress, cnt=%d.\n", cnt); in __qedf_remove()
3765 set_bit(QEDF_UNLOADING, &qedf->flags); in __qedf_remove()
3769 fcoe_ctlr_link_down(&qedf->ctlr); in __qedf_remove()
3771 fc_fabric_logoff(qedf->lport); in __qedf_remove()
3774 QEDF_ERR(&qedf->dbg_ctx, "Could not upload all sessions.\n"); in __qedf_remove()
3777 qedf_dbg_host_exit(&(qedf->dbg_ctx)); in __qedf_remove()
3781 cancel_delayed_work_sync(&qedf->link_update); in __qedf_remove()
3782 destroy_workqueue(qedf->link_update_wq); in __qedf_remove()
3783 qedf->link_update_wq = NULL; in __qedf_remove()
3785 if (qedf->timer_work_queue) in __qedf_remove()
3786 destroy_workqueue(qedf->timer_work_queue); in __qedf_remove()
3789 clear_bit(QEDF_LL2_STARTED, &qedf->flags); in __qedf_remove()
3790 qed_ops->ll2->stop(qedf->cdev); in __qedf_remove()
3791 if (qedf->ll2_recv_wq) in __qedf_remove()
3792 destroy_workqueue(qedf->ll2_recv_wq); in __qedf_remove()
3803 qedf_free_grc_dump_buf(&qedf->grcdump); in __qedf_remove()
3807 fcoe_ctlr_destroy(&qedf->ctlr); in __qedf_remove()
3808 fc_lport_destroy(qedf->lport); in __qedf_remove()
3809 fc_remove_host(qedf->lport->host); in __qedf_remove()
3810 scsi_remove_host(qedf->lport->host); in __qedf_remove()
3813 qedf_cmd_mgr_free(qedf->cmd_mgr); in __qedf_remove()
3816 fc_exch_mgr_free(qedf->lport); in __qedf_remove()
3817 fc_lport_free_stats(qedf->lport); in __qedf_remove()
3827 qed_ops->stop(qedf->cdev); in __qedf_remove()
3830 if (qedf->dpc_wq) { in __qedf_remove()
3832 destroy_workqueue(qedf->dpc_wq); in __qedf_remove()
3833 qedf->dpc_wq = NULL; in __qedf_remove()
3840 qed_ops->common->set_power_state(qedf->cdev, PCI_D0); in __qedf_remove()
3844 rc = qed_ops->common->update_drv_state(qedf->cdev, false); in __qedf_remove()
3846 QEDF_ERR(&(qedf->dbg_ctx), in __qedf_remove()
3849 if (mode != QEDF_MODE_RECOVERY && qedf->devlink) { in __qedf_remove()
3850 qed_ops->common->devlink_unregister(qedf->devlink); in __qedf_remove()
3851 qedf->devlink = NULL; in __qedf_remove()
3854 qed_ops->common->slowpath_stop(qedf->cdev); in __qedf_remove()
3855 qed_ops->common->remove(qedf->cdev); in __qedf_remove()
3857 mempool_destroy(qedf->io_mempool); in __qedf_remove()
3861 scsi_host_put(qedf->lport->host); in __qedf_remove()
3867 if (!atomic_read(&pdev->enable_cnt)) in qedf_remove()
3878 QEDF_ERR(&(qedf->dbg_ctx), "Collecting GRC dump.\n"); in qedf_wq_grcdump()
3886 QEDF_ERR(&(qedf->dbg_ctx), in qedf_schedule_hw_err_handler()
3890 if (test_bit(QEDF_IN_RECOVERY, &qedf->flags)) { in qedf_schedule_hw_err_handler()
3891 QEDF_ERR(&(qedf->dbg_ctx), in qedf_schedule_hw_err_handler()
3898 schedule_delayed_work(&qedf->board_disable_work, 0); in qedf_schedule_hw_err_handler()
3905 qed_ops->common->attn_clr_enable(qedf->cdev, true); in qedf_schedule_hw_err_handler()
3909 qed_ops->common->attn_clr_enable(qedf->cdev, true); in qedf_schedule_hw_err_handler()
3911 if (qedf_enable_recovery && qedf->devlink) in qedf_schedule_hw_err_handler()
3912 qed_ops->common->report_fatal_error(qedf->devlink, in qedf_schedule_hw_err_handler()
3938 if (test_bit(QEDF_PROBING, &qedf->flags)) { in qedf_get_protocol_tlv_data()
3939 QEDF_ERR(&qedf->dbg_ctx, "Function is still probing.\n"); in qedf_get_protocol_tlv_data()
3943 lport = qedf->lport; in qedf_get_protocol_tlv_data()
3944 host = lport->host; in qedf_get_protocol_tlv_data()
3950 fcoe->qos_pri_set = true; in qedf_get_protocol_tlv_data()
3951 fcoe->qos_pri = 3; /* Hard coded to 3 in driver */ in qedf_get_protocol_tlv_data()
3953 fcoe->ra_tov_set = true; in qedf_get_protocol_tlv_data()
3954 fcoe->ra_tov = lport->r_a_tov; in qedf_get_protocol_tlv_data()
3956 fcoe->ed_tov_set = true; in qedf_get_protocol_tlv_data()
3957 fcoe->ed_tov = lport->e_d_tov; in qedf_get_protocol_tlv_data()
3959 fcoe->npiv_state_set = true; in qedf_get_protocol_tlv_data()
3960 fcoe->npiv_state = 1; /* NPIV always enabled */ in qedf_get_protocol_tlv_data()
3962 fcoe->num_npiv_ids_set = true; in qedf_get_protocol_tlv_data()
3963 fcoe->num_npiv_ids = fc_host->npiv_vports_inuse; in qedf_get_protocol_tlv_data()
3966 if (qedf->ctlr.sel_fcf) { in qedf_get_protocol_tlv_data()
3967 fcoe->switch_name_set = true; in qedf_get_protocol_tlv_data()
3968 u64_to_wwn(qedf->ctlr.sel_fcf->switch_name, fcoe->switch_name); in qedf_get_protocol_tlv_data()
3971 fcoe->port_state_set = true; in qedf_get_protocol_tlv_data()
3973 if (lport->link_up) in qedf_get_protocol_tlv_data()
3974 fcoe->port_state = QED_MFW_TLV_PORT_STATE_FABRIC; in qedf_get_protocol_tlv_data()
3976 fcoe->port_state = QED_MFW_TLV_PORT_STATE_OFFLINE; in qedf_get_protocol_tlv_data()
3978 fcoe->link_failures_set = true; in qedf_get_protocol_tlv_data()
3979 fcoe->link_failures = (u16)hst->link_failure_count; in qedf_get_protocol_tlv_data()
3981 fcoe->fcoe_txq_depth_set = true; in qedf_get_protocol_tlv_data()
3982 fcoe->fcoe_rxq_depth_set = true; in qedf_get_protocol_tlv_data()
3983 fcoe->fcoe_rxq_depth = FCOE_PARAMS_NUM_TASKS; in qedf_get_protocol_tlv_data()
3984 fcoe->fcoe_txq_depth = FCOE_PARAMS_NUM_TASKS; in qedf_get_protocol_tlv_data()
3986 fcoe->fcoe_rx_frames_set = true; in qedf_get_protocol_tlv_data()
3987 fcoe->fcoe_rx_frames = hst->rx_frames; in qedf_get_protocol_tlv_data()
3989 fcoe->fcoe_tx_frames_set = true; in qedf_get_protocol_tlv_data()
3990 fcoe->fcoe_tx_frames = hst->tx_frames; in qedf_get_protocol_tlv_data()
3992 fcoe->fcoe_rx_bytes_set = true; in qedf_get_protocol_tlv_data()
3993 fcoe->fcoe_rx_bytes = hst->fcp_input_megabytes * 1000000; in qedf_get_protocol_tlv_data()
3995 fcoe->fcoe_tx_bytes_set = true; in qedf_get_protocol_tlv_data()
3996 fcoe->fcoe_tx_bytes = hst->fcp_output_megabytes * 1000000; in qedf_get_protocol_tlv_data()
3998 fcoe->crc_count_set = true; in qedf_get_protocol_tlv_data()
3999 fcoe->crc_count = hst->invalid_crc_count; in qedf_get_protocol_tlv_data()
4001 fcoe->tx_abts_set = true; in qedf_get_protocol_tlv_data()
4002 fcoe->tx_abts = hst->fcp_packet_aborts; in qedf_get_protocol_tlv_data()
4004 fcoe->tx_lun_rst_set = true; in qedf_get_protocol_tlv_data()
4005 fcoe->tx_lun_rst = qedf->lun_resets; in qedf_get_protocol_tlv_data()
4007 fcoe->abort_task_sets_set = true; in qedf_get_protocol_tlv_data()
4008 fcoe->abort_task_sets = qedf->packet_aborts; in qedf_get_protocol_tlv_data()
4010 fcoe->scsi_busy_set = true; in qedf_get_protocol_tlv_data()
4011 fcoe->scsi_busy = qedf->busy; in qedf_get_protocol_tlv_data()
4013 fcoe->scsi_tsk_full_set = true; in qedf_get_protocol_tlv_data()
4014 fcoe->scsi_tsk_full = qedf->task_set_fulls; in qedf_get_protocol_tlv_data()
4024 QEDF_ERR(&qedf->dbg_ctx, "qedf is NULL"); in qedf_stag_change_work()
4028 if (test_bit(QEDF_IN_RECOVERY, &qedf->flags)) { in qedf_stag_change_work()
4029 QEDF_ERR(&qedf->dbg_ctx, in qedf_stag_change_work()
4034 if (test_bit(QEDF_UNLOADING, &qedf->flags)) { in qedf_stag_change_work()
4035 QEDF_ERR(&qedf->dbg_ctx, "Driver unloading\n"); in qedf_stag_change_work()
4039 set_bit(QEDF_STAG_IN_PROGRESS, &qedf->flags); in qedf_stag_change_work()
4042 dev_name(&qedf->pdev->dev), __func__, __LINE__, in qedf_stag_change_work()
4043 qedf->dbg_ctx.host_no); in qedf_stag_change_work()
4044 qedf_ctx_soft_reset(qedf->lport); in qedf_stag_change_work()
4058 return -ENODEV; in qedf_suspend()
4063 QEDF_ERR(&qedf->dbg_ctx, "%s: Device does not support suspend operation\n", __func__); in qedf_suspend()
4065 return -EPERM; in qedf_suspend()
4075 QEDF_ERR(&qedf->dbg_ctx, "Recovery handler scheduled.\n"); in qedf_schedule_recovery_handler()
4076 schedule_delayed_work(&qedf->recovery_work, 0); in qedf_schedule_recovery_handler()
4084 if (test_and_set_bit(QEDF_IN_RECOVERY, &qedf->flags)) in qedf_recovery_handler()
4088 * Call common_ops->recovery_prolog to allow the MFW to quiesce in qedf_recovery_handler()
4091 qed_ops->common->recovery_prolog(qedf->cdev); in qedf_recovery_handler()
4093 QEDF_ERR(&qedf->dbg_ctx, "Recovery work start.\n"); in qedf_recovery_handler()
4094 __qedf_remove(qedf->pdev, QEDF_MODE_RECOVERY); in qedf_recovery_handler()
4100 atomic_set(&qedf->link_state, QEDF_LINK_DOWN); in qedf_recovery_handler()
4101 atomic_set(&qedf->dcbx, QEDF_DCBX_PENDING); in qedf_recovery_handler()
4102 __qedf_probe(qedf->pdev, QEDF_MODE_RECOVERY); in qedf_recovery_handler()
4103 clear_bit(QEDF_IN_RECOVERY, &qedf->flags); in qedf_recovery_handler()
4104 QEDF_ERR(&qedf->dbg_ctx, "Recovery work complete.\n"); in qedf_recovery_handler()
4120 ether_addr_copy(data->mac[0], qedf->mac); in qedf_get_generic_tlv_data()
4139 if (qedf_default_prio > -1) in qedf_init()
4213 return -EINVAL; in qedf_init()