/* QLogic qed NIC Driver
 * Copyright (c) 2015-2017 QLogic Corporation
 *
 * This software is available to you under a choice of one of two
 * licenses. You may choose to be licensed under the terms of the GNU
 * General Public License (GPL) Version 2, available from the file
 * COPYING in the main directory of this source tree, or the
 * OpenIB.org BSD license below:
 *
 * Redistribution and use in source and binary forms, with or
 * without modification, are permitted provided that the following
 * conditions are met:
 *
 * - Redistributions of source code must retain the above
 *   copyright notice, this list of conditions and the following
 *   disclaimer.
 *
 * - Redistributions in binary form must reproduce the above
 *   copyright notice, this list of conditions and the following
 *   disclaimer in the documentation and /or other materials
 *   provided with the distribution.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 */

#include <linux/types.h>
#include <asm/byteorder.h>
#include <asm/param.h>
#include <linux/delay.h>
#include <linux/dma-mapping.h>
#include <linux/interrupt.h>
#include <linux/kernel.h>
#include <linux/log2.h>
#include <linux/module.h>
#include <linux/pci.h>
#include <linux/slab.h>
#include <linux/stddef.h>
#include <linux/string.h>
#include <linux/version.h>
#include <linux/workqueue.h>
#include <linux/errno.h>
#include <linux/list.h>
#include <linux/spinlock.h>
#define __PREVENT_DUMP_MEM_ARR__
#define __PREVENT_PXP_GLOBAL_WIN__
#include "qed.h"
#include "qed_cxt.h"
#include "qed_dev_api.h"
#include "qed_fcoe.h"
#include "qed_hsi.h"
#include "qed_hw.h"
#include "qed_int.h"
#include "qed_ll2.h"
#include "qed_mcp.h"
#include "qed_reg_addr.h"
#include "qed_sp.h"
#include "qed_sriov.h"
#include <linux/qed/qed_fcoe_if.h>

struct qed_fcoe_conn {
	struct list_head list_entry;
	bool free_on_delete;

	u16 conn_id;
	u32 icid;
	u32 fw_cid;
	u8 layer_code;

	dma_addr_t sq_pbl_addr;
	dma_addr_t sq_curr_page_addr;
	dma_addr_t sq_next_page_addr;
	dma_addr_t xferq_pbl_addr;
	void *xferq_pbl_addr_virt_addr;
	dma_addr_t xferq_addr[4];
	void *xferq_addr_virt_addr[4];
	dma_addr_t confq_pbl_addr;
	void *confq_pbl_addr_virt_addr;
	dma_addr_t confq_addr[2];
	void *confq_addr_virt_addr[2];

	dma_addr_t terminate_params;

	u16 dst_mac_addr_lo;
	u16 dst_mac_addr_mid;
	u16 dst_mac_addr_hi;
	u16 src_mac_addr_lo;
	u16 src_mac_addr_mid;
	u16 src_mac_addr_hi;

	u16 tx_max_fc_pay_len;
	u16 e_d_tov_timer_val;
	u16 rec_tov_timer_val;
	u16 rx_max_fc_pay_len;
	u16 vlan_tag;
	u16 physical_q0;

	struct fc_addr_nw s_id;
	u8 max_conc_seqs_c3;
	struct fc_addr_nw d_id;
	u8 flags;
	u8 def_q_idx;
};

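/* Build and post the FCoE "init function" ramrod: the PF-wide FCoE
 * parameters (MTU, task count, CQ/CMDQ layout and BDQ thresholds) are
 * copied from pf_params.fcoe_pf_params into the ramrod data.
 */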
static int
qed_sp_fcoe_func_start(struct qed_hwfn *p_hwfn,
		       enum spq_mode comp_mode,
		       struct qed_spq_comp_cb *p_comp_addr)
{
	struct qed_fcoe_pf_params *fcoe_pf_params = NULL;
	struct fcoe_init_ramrod_params *p_ramrod = NULL;
	struct fcoe_init_func_ramrod_data *p_data;
	struct fcoe_conn_context *p_cxt = NULL;
	struct qed_spq_entry *p_ent = NULL;
	struct qed_sp_init_data init_data;
	struct qed_cxt_info cxt_info;
	u32 dummy_cid;
	int rc = 0;
	u16 tmp;
	u8 i;

	/* Get SPQ entry */
	memset(&init_data, 0, sizeof(init_data));
	init_data.cid = qed_spq_get_cid(p_hwfn);
	init_data.opaque_fid = p_hwfn->hw_info.opaque_fid;
	init_data.comp_mode = comp_mode;
	init_data.p_comp_data = p_comp_addr;

	rc = qed_sp_init_request(p_hwfn, &p_ent,
				 FCOE_RAMROD_CMD_ID_INIT_FUNC,
				 PROTOCOLID_FCOE, &init_data);
	if (rc)
		return rc;

	p_ramrod = &p_ent->ramrod.fcoe_init;
	p_data = &p_ramrod->init_ramrod_data;
	fcoe_pf_params = &p_hwfn->pf_params.fcoe_pf_params;

	p_data->mtu = cpu_to_le16(fcoe_pf_params->mtu);
	tmp = cpu_to_le16(fcoe_pf_params->sq_num_pbl_pages);
	p_data->sq_num_pages_in_pbl = tmp;

	rc = qed_cxt_acquire_cid(p_hwfn, PROTOCOLID_FCOE, &dummy_cid);
	if (rc)
		return rc;

	cxt_info.iid = dummy_cid;
	rc = qed_cxt_get_cid_info(p_hwfn, &cxt_info);
	if (rc) {
		DP_NOTICE(p_hwfn, "Cannot find context info for dummy cid=%d\n",
			  dummy_cid);
		return rc;
	}
	p_cxt = cxt_info.p_cxt;
	SET_FIELD(p_cxt->tstorm_ag_context.flags3,
		  TSTORM_FCOE_CONN_AG_CTX_DUMMY_TIMER_CF_EN, 1);

	fcoe_pf_params->dummy_icid = (u16)dummy_cid;

	tmp = cpu_to_le16(fcoe_pf_params->num_tasks);
	p_data->func_params.num_tasks = tmp;
	p_data->func_params.log_page_size = fcoe_pf_params->log_page_size;
	p_data->func_params.debug_mode = fcoe_pf_params->debug_mode;

	DMA_REGPAIR_LE(p_data->q_params.glbl_q_params_addr,
		       fcoe_pf_params->glbl_q_params_addr);

	tmp = cpu_to_le16(fcoe_pf_params->cq_num_entries);
	p_data->q_params.cq_num_entries = tmp;

	tmp = cpu_to_le16(fcoe_pf_params->cmdq_num_entries);
	p_data->q_params.cmdq_num_entries = tmp;

	tmp = fcoe_pf_params->num_cqs;
	p_data->q_params.num_queues = (u8)tmp;

	tmp = (u16)p_hwfn->hw_info.resc_start[QED_CMDQS_CQS];
	p_data->q_params.queue_relative_offset = (u8)tmp;

	for (i = 0; i < fcoe_pf_params->num_cqs; i++) {
		tmp = cpu_to_le16(p_hwfn->sbs_info[i]->igu_sb_id);
		p_data->q_params.cq_cmdq_sb_num_arr[i] = tmp;
	}

	p_data->q_params.cq_sb_pi = fcoe_pf_params->gl_rq_pi;
	p_data->q_params.cmdq_sb_pi = fcoe_pf_params->gl_cmd_pi;

	p_data->q_params.bdq_resource_id = FCOE_BDQ_ID(p_hwfn->port_id);

	DMA_REGPAIR_LE(p_data->q_params.bdq_pbl_base_address[BDQ_ID_RQ],
		       fcoe_pf_params->bdq_pbl_base_addr[BDQ_ID_RQ]);
	p_data->q_params.bdq_pbl_num_entries[BDQ_ID_RQ] =
	    fcoe_pf_params->bdq_pbl_num_entries[BDQ_ID_RQ];
	tmp = fcoe_pf_params->bdq_xoff_threshold[BDQ_ID_RQ];
	p_data->q_params.bdq_xoff_threshold[BDQ_ID_RQ] = cpu_to_le16(tmp);
	tmp = fcoe_pf_params->bdq_xon_threshold[BDQ_ID_RQ];
	p_data->q_params.bdq_xon_threshold[BDQ_ID_RQ] = cpu_to_le16(tmp);

	DMA_REGPAIR_LE(p_data->q_params.bdq_pbl_base_address[BDQ_ID_IMM_DATA],
		       fcoe_pf_params->bdq_pbl_base_addr[BDQ_ID_IMM_DATA]);
	p_data->q_params.bdq_pbl_num_entries[BDQ_ID_IMM_DATA] =
	    fcoe_pf_params->bdq_pbl_num_entries[BDQ_ID_IMM_DATA];
	tmp = fcoe_pf_params->bdq_xoff_threshold[BDQ_ID_IMM_DATA];
	p_data->q_params.bdq_xoff_threshold[BDQ_ID_IMM_DATA] = cpu_to_le16(tmp);
	tmp = fcoe_pf_params->bdq_xon_threshold[BDQ_ID_IMM_DATA];
	p_data->q_params.bdq_xon_threshold[BDQ_ID_IMM_DATA] = cpu_to_le16(tmp);
	tmp = fcoe_pf_params->rq_buffer_size;
	p_data->q_params.rq_buffer_size = cpu_to_le16(tmp);

	if (fcoe_pf_params->is_target) {
		SET_FIELD(p_data->q_params.q_validity,
			  SCSI_INIT_FUNC_QUEUES_RQ_VALID, 1);
		if (p_data->q_params.bdq_pbl_num_entries[BDQ_ID_IMM_DATA])
			SET_FIELD(p_data->q_params.q_validity,
				  SCSI_INIT_FUNC_QUEUES_IMM_DATA_VALID, 1);
		SET_FIELD(p_data->q_params.q_validity,
			  SCSI_INIT_FUNC_QUEUES_CMD_VALID, 1);
	} else {
		SET_FIELD(p_data->q_params.q_validity,
			  SCSI_INIT_FUNC_QUEUES_RQ_VALID, 1);
	}

	rc = qed_spq_post(p_hwfn, p_ent, NULL);

	return rc;
}

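/* Post the per-connection "offload" ramrod: pass the SQ/XFERQ/CONFQ page
 * addresses, the source/destination MAC and FC addresses, and the timer
 * and payload-length values for this connection to the firmware.
 */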
static int
qed_sp_fcoe_conn_offload(struct qed_hwfn *p_hwfn,
			 struct qed_fcoe_conn *p_conn,
			 enum spq_mode comp_mode,
			 struct qed_spq_comp_cb *p_comp_addr)
{
	struct fcoe_conn_offload_ramrod_params *p_ramrod = NULL;
	struct fcoe_conn_offload_ramrod_data *p_data;
	struct qed_spq_entry *p_ent = NULL;
	struct qed_sp_init_data init_data;
	u16 pq_id = 0, tmp;
	int rc;

	/* Get SPQ entry */
	memset(&init_data, 0, sizeof(init_data));
	init_data.cid = p_conn->icid;
	init_data.opaque_fid = p_hwfn->hw_info.opaque_fid;
	init_data.comp_mode = comp_mode;
	init_data.p_comp_data = p_comp_addr;

	rc = qed_sp_init_request(p_hwfn, &p_ent,
				 FCOE_RAMROD_CMD_ID_OFFLOAD_CONN,
				 PROTOCOLID_FCOE, &init_data);
	if (rc)
		return rc;

	p_ramrod = &p_ent->ramrod.fcoe_conn_ofld;
	p_data = &p_ramrod->offload_ramrod_data;

	/* Transmission PQ is the first of the PF */
	pq_id = qed_get_qm_pq(p_hwfn, PROTOCOLID_FCOE, NULL);
	p_conn->physical_q0 = cpu_to_le16(pq_id);
	p_data->physical_q0 = cpu_to_le16(pq_id);

	p_data->conn_id = cpu_to_le16(p_conn->conn_id);
	DMA_REGPAIR_LE(p_data->sq_pbl_addr, p_conn->sq_pbl_addr);
	DMA_REGPAIR_LE(p_data->sq_curr_page_addr, p_conn->sq_curr_page_addr);
	DMA_REGPAIR_LE(p_data->sq_next_page_addr, p_conn->sq_next_page_addr);
	DMA_REGPAIR_LE(p_data->xferq_pbl_addr, p_conn->xferq_pbl_addr);
	DMA_REGPAIR_LE(p_data->xferq_curr_page_addr, p_conn->xferq_addr[0]);
	DMA_REGPAIR_LE(p_data->xferq_next_page_addr, p_conn->xferq_addr[1]);

	DMA_REGPAIR_LE(p_data->respq_pbl_addr, p_conn->confq_pbl_addr);
	DMA_REGPAIR_LE(p_data->respq_curr_page_addr, p_conn->confq_addr[0]);
	DMA_REGPAIR_LE(p_data->respq_next_page_addr, p_conn->confq_addr[1]);

	p_data->dst_mac_addr_lo = cpu_to_le16(p_conn->dst_mac_addr_lo);
	p_data->dst_mac_addr_mid = cpu_to_le16(p_conn->dst_mac_addr_mid);
	p_data->dst_mac_addr_hi = cpu_to_le16(p_conn->dst_mac_addr_hi);
	p_data->src_mac_addr_lo = cpu_to_le16(p_conn->src_mac_addr_lo);
	p_data->src_mac_addr_mid = cpu_to_le16(p_conn->src_mac_addr_mid);
	p_data->src_mac_addr_hi = cpu_to_le16(p_conn->src_mac_addr_hi);

	tmp = cpu_to_le16(p_conn->tx_max_fc_pay_len);
	p_data->tx_max_fc_pay_len = tmp;
	tmp = cpu_to_le16(p_conn->e_d_tov_timer_val);
	p_data->e_d_tov_timer_val = tmp;
	tmp = cpu_to_le16(p_conn->rec_tov_timer_val);
	p_data->rec_rr_tov_timer_val = tmp;
	tmp = cpu_to_le16(p_conn->rx_max_fc_pay_len);
	p_data->rx_max_fc_pay_len = tmp;

	p_data->vlan_tag = cpu_to_le16(p_conn->vlan_tag);
	p_data->s_id.addr_hi = p_conn->s_id.addr_hi;
	p_data->s_id.addr_mid = p_conn->s_id.addr_mid;
	p_data->s_id.addr_lo = p_conn->s_id.addr_lo;
	p_data->max_conc_seqs_c3 = p_conn->max_conc_seqs_c3;
	p_data->d_id.addr_hi = p_conn->d_id.addr_hi;
	p_data->d_id.addr_mid = p_conn->d_id.addr_mid;
	p_data->d_id.addr_lo = p_conn->d_id.addr_lo;
	p_data->flags = p_conn->flags;
	p_data->def_q_idx = p_conn->def_q_idx;

	return qed_spq_post(p_hwfn, p_ent, NULL);
}

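/* Post the "terminate connection" ramrod; the caller-supplied
 * terminate_params DMA address is handed to the firmware in the
 * ramrod data.
 */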
static int
qed_sp_fcoe_conn_destroy(struct qed_hwfn *p_hwfn,
			 struct qed_fcoe_conn *p_conn,
			 enum spq_mode comp_mode,
			 struct qed_spq_comp_cb *p_comp_addr)
{
	struct fcoe_conn_terminate_ramrod_params *p_ramrod = NULL;
	struct qed_spq_entry *p_ent = NULL;
	struct qed_sp_init_data init_data;
	int rc = 0;

	/* Get SPQ entry */
	memset(&init_data, 0, sizeof(init_data));
	init_data.cid = p_conn->icid;
	init_data.opaque_fid = p_hwfn->hw_info.opaque_fid;
	init_data.comp_mode = comp_mode;
	init_data.p_comp_data = p_comp_addr;

	rc = qed_sp_init_request(p_hwfn, &p_ent,
				 FCOE_RAMROD_CMD_ID_TERMINATE_CONN,
				 PROTOCOLID_FCOE, &init_data);
	if (rc)
		return rc;

	p_ramrod = &p_ent->ramrod.fcoe_conn_terminate;
	DMA_REGPAIR_LE(p_ramrod->terminate_ramrod_data.terminate_params_addr,
		       p_conn->terminate_params);

	return qed_spq_post(p_hwfn, p_ent, NULL);
}

static int
qed_sp_fcoe_func_stop(struct qed_hwfn *p_hwfn,
		      enum spq_mode comp_mode,
		      struct qed_spq_comp_cb *p_comp_addr)
{
	struct qed_ptt *p_ptt = p_hwfn->p_main_ptt;
	struct qed_spq_entry *p_ent = NULL;
	struct qed_sp_init_data init_data;
	u32 active_segs = 0;
	int rc = 0;

	/* Get SPQ entry */
	memset(&init_data, 0, sizeof(init_data));
	init_data.cid = p_hwfn->pf_params.fcoe_pf_params.dummy_icid;
	init_data.opaque_fid = p_hwfn->hw_info.opaque_fid;
	init_data.comp_mode = comp_mode;
	init_data.p_comp_data = p_comp_addr;

	rc = qed_sp_init_request(p_hwfn, &p_ent,
				 FCOE_RAMROD_CMD_ID_DESTROY_FUNC,
				 PROTOCOLID_FCOE, &init_data);
	if (rc)
		return rc;

	active_segs = qed_rd(p_hwfn, p_ptt, TM_REG_PF_ENABLE_TASK);
	active_segs &= ~BIT(QED_CXT_FCOE_TID_SEG);
	qed_wr(p_hwfn, p_ptt, TM_REG_PF_ENABLE_TASK, active_segs);

	return qed_spq_post(p_hwfn, p_ent, NULL);
}

static int
qed_fcoe_allocate_connection(struct qed_hwfn *p_hwfn,
			     struct qed_fcoe_conn **p_out_conn)
{
	struct qed_fcoe_conn *p_conn = NULL;
	void *p_addr;
	u32 i;

	spin_lock_bh(&p_hwfn->p_fcoe_info->lock);
	if (!list_empty(&p_hwfn->p_fcoe_info->free_list))
		p_conn =
		    list_first_entry(&p_hwfn->p_fcoe_info->free_list,
				     struct qed_fcoe_conn, list_entry);
	if (p_conn) {
		list_del(&p_conn->list_entry);
		spin_unlock_bh(&p_hwfn->p_fcoe_info->lock);
		*p_out_conn = p_conn;
		return 0;
	}
	spin_unlock_bh(&p_hwfn->p_fcoe_info->lock);

	p_conn = kzalloc(sizeof(*p_conn), GFP_KERNEL);
	if (!p_conn)
		return -ENOMEM;

	p_addr = dma_alloc_coherent(&p_hwfn->cdev->pdev->dev,
				    QED_CHAIN_PAGE_SIZE,
				    &p_conn->xferq_pbl_addr, GFP_KERNEL);
	if (!p_addr)
		goto nomem_pbl_xferq;
	p_conn->xferq_pbl_addr_virt_addr = p_addr;

	for (i = 0; i < ARRAY_SIZE(p_conn->xferq_addr); i++) {
		p_addr = dma_alloc_coherent(&p_hwfn->cdev->pdev->dev,
					    QED_CHAIN_PAGE_SIZE,
					    &p_conn->xferq_addr[i], GFP_KERNEL);
		if (!p_addr)
			goto nomem_xferq;
		p_conn->xferq_addr_virt_addr[i] = p_addr;

		p_addr = p_conn->xferq_pbl_addr_virt_addr;
		((dma_addr_t *)p_addr)[i] = p_conn->xferq_addr[i];
	}

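	/* Allocate the CONFQ PBL and its pages the same way as the XFERQ,
	 * recording each page's DMA address in the PBL.
	 */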
	p_addr = dma_alloc_coherent(&p_hwfn->cdev->pdev->dev,
				    QED_CHAIN_PAGE_SIZE,
				    &p_conn->confq_pbl_addr, GFP_KERNEL);
	if (!p_addr)
		goto nomem_xferq;
	p_conn->confq_pbl_addr_virt_addr = p_addr;

	for (i = 0; i < ARRAY_SIZE(p_conn->confq_addr); i++) {
		p_addr = dma_alloc_coherent(&p_hwfn->cdev->pdev->dev,
					    QED_CHAIN_PAGE_SIZE,
					    &p_conn->confq_addr[i], GFP_KERNEL);
		if (!p_addr)
			goto nomem_confq;
		p_conn->confq_addr_virt_addr[i] = p_addr;

		p_addr = p_conn->confq_pbl_addr_virt_addr;
		((dma_addr_t *)p_addr)[i] = p_conn->confq_addr[i];
	}

	p_conn->free_on_delete = true;
	*p_out_conn = p_conn;
	return 0;

nomem_confq:
	dma_free_coherent(&p_hwfn->cdev->pdev->dev,
			  QED_CHAIN_PAGE_SIZE,
			  p_conn->confq_pbl_addr_virt_addr,
			  p_conn->confq_pbl_addr);
	for (i = 0; i < ARRAY_SIZE(p_conn->confq_addr); i++)
		if (p_conn->confq_addr_virt_addr[i])
			dma_free_coherent(&p_hwfn->cdev->pdev->dev,
					  QED_CHAIN_PAGE_SIZE,
					  p_conn->confq_addr_virt_addr[i],
					  p_conn->confq_addr[i]);
nomem_xferq:
	dma_free_coherent(&p_hwfn->cdev->pdev->dev,
			  QED_CHAIN_PAGE_SIZE,
			  p_conn->xferq_pbl_addr_virt_addr,
			  p_conn->xferq_pbl_addr);
	for (i = 0; i < ARRAY_SIZE(p_conn->xferq_addr); i++)
		if (p_conn->xferq_addr_virt_addr[i])
			dma_free_coherent(&p_hwfn->cdev->pdev->dev,
					  QED_CHAIN_PAGE_SIZE,
					  p_conn->xferq_addr_virt_addr[i],
					  p_conn->xferq_addr[i]);
nomem_pbl_xferq:
	kfree(p_conn);
	return -ENOMEM;
}

static void qed_fcoe_free_connection(struct qed_hwfn *p_hwfn,
				     struct qed_fcoe_conn *p_conn)
{
	u32 i;

	if (!p_conn)
		return;

	if (p_conn->confq_pbl_addr_virt_addr)
		dma_free_coherent(&p_hwfn->cdev->pdev->dev,
				  QED_CHAIN_PAGE_SIZE,
				  p_conn->confq_pbl_addr_virt_addr,
				  p_conn->confq_pbl_addr);

	for (i = 0; i < ARRAY_SIZE(p_conn->confq_addr); i++) {
		if (!p_conn->confq_addr_virt_addr[i])
			continue;
		dma_free_coherent(&p_hwfn->cdev->pdev->dev,
				  QED_CHAIN_PAGE_SIZE,
				  p_conn->confq_addr_virt_addr[i],
				  p_conn->confq_addr[i]);
	}

	if (p_conn->xferq_pbl_addr_virt_addr)
		dma_free_coherent(&p_hwfn->cdev->pdev->dev,
				  QED_CHAIN_PAGE_SIZE,
				  p_conn->xferq_pbl_addr_virt_addr,
				  p_conn->xferq_pbl_addr);

	for (i = 0; i < ARRAY_SIZE(p_conn->xferq_addr); i++) {
		if (!p_conn->xferq_addr_virt_addr[i])
			continue;
		dma_free_coherent(&p_hwfn->cdev->pdev->dev,
				  QED_CHAIN_PAGE_SIZE,
				  p_conn->xferq_addr_virt_addr[i],
				  p_conn->xferq_addr[i]);
	}
	kfree(p_conn);
}

static void __iomem *qed_fcoe_get_db_addr(struct qed_hwfn *p_hwfn, u32 cid)
{
	return (u8 __iomem *)p_hwfn->doorbells +
	       qed_db_addr(cid, DQ_DEMS_LEGACY);
}

static void __iomem *qed_fcoe_get_primary_bdq_prod(struct qed_hwfn *p_hwfn,
						   u8 bdq_id)
{
	u8 bdq_function_id = FCOE_BDQ_ID(p_hwfn->port_id);

	return (u8 __iomem *)p_hwfn->regview + GTT_BAR0_MAP_REG_MSDM_RAM +
	       MSTORM_SCSI_BDQ_EXT_PROD_OFFSET(bdq_function_id, bdq_id);
}

static void __iomem *qed_fcoe_get_secondary_bdq_prod(struct qed_hwfn *p_hwfn,
						     u8 bdq_id)
{
	u8 bdq_function_id = FCOE_BDQ_ID(p_hwfn->port_id);

	return (u8 __iomem *)p_hwfn->regview + GTT_BAR0_MAP_REG_TSDM_RAM +
	       TSTORM_SCSI_BDQ_EXT_PROD_OFFSET(bdq_function_id, bdq_id);
}

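/* Per-hwfn FCoE info lifecycle: qed_fcoe_alloc() creates the structure and
 * its free-connection list, qed_fcoe_setup() initializes the FCoE task
 * contexts, and qed_fcoe_free() releases any pooled connections.
 */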
struct qed_fcoe_info *qed_fcoe_alloc(struct qed_hwfn *p_hwfn)
{
	struct qed_fcoe_info *p_fcoe_info;

	/* Allocate the qed_fcoe_info struct */
	p_fcoe_info = kzalloc(sizeof(*p_fcoe_info), GFP_KERNEL);
	if (!p_fcoe_info) {
		DP_NOTICE(p_hwfn, "Failed to allocate qed_fcoe_info\n");
		return NULL;
	}
	INIT_LIST_HEAD(&p_fcoe_info->free_list);
	return p_fcoe_info;
}

void qed_fcoe_setup(struct qed_hwfn *p_hwfn, struct qed_fcoe_info *p_fcoe_info)
{
	struct fcoe_task_context *p_task_ctx = NULL;
	int rc;
	u32 i;

	spin_lock_init(&p_fcoe_info->lock);
	for (i = 0; i < p_hwfn->pf_params.fcoe_pf_params.num_tasks; i++) {
		rc = qed_cxt_get_task_ctx(p_hwfn, i,
					  QED_CTX_WORKING_MEM,
					  (void **)&p_task_ctx);
		if (rc)
			continue;

		memset(p_task_ctx, 0, sizeof(struct fcoe_task_context));
		SET_FIELD(p_task_ctx->timer_context.logical_client_0,
			  TIMERS_CONTEXT_VALIDLC0, 1);
		SET_FIELD(p_task_ctx->timer_context.logical_client_1,
			  TIMERS_CONTEXT_VALIDLC1, 1);
		SET_FIELD(p_task_ctx->tstorm_ag_context.flags0,
			  TSTORM_FCOE_TASK_AG_CTX_CONNECTION_TYPE, 1);
	}
}

void qed_fcoe_free(struct qed_hwfn *p_hwfn, struct qed_fcoe_info *p_fcoe_info)
{
	struct qed_fcoe_conn *p_conn = NULL;

	if (!p_fcoe_info)
		return;

	while (!list_empty(&p_fcoe_info->free_list)) {
		p_conn = list_first_entry(&p_fcoe_info->free_list,
					  struct qed_fcoe_conn, list_entry);
		if (!p_conn)
			break;
		list_del(&p_conn->list_entry);
		qed_fcoe_free_connection(p_hwfn, p_conn);
	}

	kfree(p_fcoe_info);
}

static int
qed_fcoe_acquire_connection(struct qed_hwfn *p_hwfn,
			    struct qed_fcoe_conn *p_in_conn,
			    struct qed_fcoe_conn **p_out_conn)
{
	struct qed_fcoe_conn *p_conn = NULL;
	int rc = 0;
	u32 icid;

	spin_lock_bh(&p_hwfn->p_fcoe_info->lock);
	rc = qed_cxt_acquire_cid(p_hwfn, PROTOCOLID_FCOE, &icid);
	spin_unlock_bh(&p_hwfn->p_fcoe_info->lock);
	if (rc)
		return rc;

	/* Use input connection [if provided] or allocate a new one */
	if (p_in_conn) {
		p_conn = p_in_conn;
	} else {
		rc = qed_fcoe_allocate_connection(p_hwfn, &p_conn);
		if (rc) {
			spin_lock_bh(&p_hwfn->p_fcoe_info->lock);
			qed_cxt_release_cid(p_hwfn, icid);
			spin_unlock_bh(&p_hwfn->p_fcoe_info->lock);
			return rc;
		}
	}

	p_conn->icid = icid;
	p_conn->fw_cid = (p_hwfn->hw_info.opaque_fid << 16) | icid;
	*p_out_conn = p_conn;

	return rc;
}

static void qed_fcoe_release_connection(struct qed_hwfn *p_hwfn,
					struct qed_fcoe_conn *p_conn)
{
	spin_lock_bh(&p_hwfn->p_fcoe_info->lock);
	list_add_tail(&p_conn->list_entry, &p_hwfn->p_fcoe_info->free_list);
	qed_cxt_release_cid(p_hwfn, p_conn->icid);
	spin_unlock_bh(&p_hwfn->p_fcoe_info->lock);
}

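/* Statistics: Rx counters are read from the TSDM storm RAM and Tx counters
 * from the PSDM storm RAM, then converted from the firmware's little-endian
 * layout into host order.
 */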
static void _qed_fcoe_get_tstats(struct qed_hwfn *p_hwfn,
				 struct qed_ptt *p_ptt,
				 struct qed_fcoe_stats *p_stats)
{
	struct fcoe_rx_stat tstats;
	u32 tstats_addr;

	memset(&tstats, 0, sizeof(tstats));
	tstats_addr = BAR0_MAP_REG_TSDM_RAM +
	    TSTORM_FCOE_RX_STATS_OFFSET(p_hwfn->rel_pf_id);
	qed_memcpy_from(p_hwfn, p_ptt, &tstats, tstats_addr, sizeof(tstats));

	p_stats->fcoe_rx_byte_cnt = HILO_64_REGPAIR(tstats.fcoe_rx_byte_cnt);
	p_stats->fcoe_rx_data_pkt_cnt =
	    HILO_64_REGPAIR(tstats.fcoe_rx_data_pkt_cnt);
	p_stats->fcoe_rx_xfer_pkt_cnt =
	    HILO_64_REGPAIR(tstats.fcoe_rx_xfer_pkt_cnt);
	p_stats->fcoe_rx_other_pkt_cnt =
	    HILO_64_REGPAIR(tstats.fcoe_rx_other_pkt_cnt);

	p_stats->fcoe_silent_drop_pkt_cmdq_full_cnt =
	    le32_to_cpu(tstats.fcoe_silent_drop_pkt_cmdq_full_cnt);
	p_stats->fcoe_silent_drop_pkt_rq_full_cnt =
	    le32_to_cpu(tstats.fcoe_silent_drop_pkt_rq_full_cnt);
	p_stats->fcoe_silent_drop_pkt_crc_error_cnt =
	    le32_to_cpu(tstats.fcoe_silent_drop_pkt_crc_error_cnt);
	p_stats->fcoe_silent_drop_pkt_task_invalid_cnt =
	    le32_to_cpu(tstats.fcoe_silent_drop_pkt_task_invalid_cnt);
	p_stats->fcoe_silent_drop_total_pkt_cnt =
	    le32_to_cpu(tstats.fcoe_silent_drop_total_pkt_cnt);
}

static void _qed_fcoe_get_pstats(struct qed_hwfn *p_hwfn,
				 struct qed_ptt *p_ptt,
				 struct qed_fcoe_stats *p_stats)
{
	struct fcoe_tx_stat pstats;
	u32 pstats_addr;

	memset(&pstats, 0, sizeof(pstats));
	pstats_addr = BAR0_MAP_REG_PSDM_RAM +
	    PSTORM_FCOE_TX_STATS_OFFSET(p_hwfn->rel_pf_id);
	qed_memcpy_from(p_hwfn, p_ptt, &pstats, pstats_addr, sizeof(pstats));

	p_stats->fcoe_tx_byte_cnt = HILO_64_REGPAIR(pstats.fcoe_tx_byte_cnt);
	p_stats->fcoe_tx_data_pkt_cnt =
	    HILO_64_REGPAIR(pstats.fcoe_tx_data_pkt_cnt);
	p_stats->fcoe_tx_xfer_pkt_cnt =
	    HILO_64_REGPAIR(pstats.fcoe_tx_xfer_pkt_cnt);
	p_stats->fcoe_tx_other_pkt_cnt =
	    HILO_64_REGPAIR(pstats.fcoe_tx_other_pkt_cnt);
}

static int qed_fcoe_get_stats(struct qed_hwfn *p_hwfn,
			      struct qed_fcoe_stats *p_stats)
{
	struct qed_ptt *p_ptt;

	memset(p_stats, 0, sizeof(*p_stats));

	p_ptt = qed_ptt_acquire(p_hwfn);

	if (!p_ptt) {
		DP_ERR(p_hwfn, "Failed to acquire ptt\n");
		return -EINVAL;
	}

	_qed_fcoe_get_tstats(p_hwfn, p_ptt, p_stats);
	_qed_fcoe_get_pstats(p_hwfn, p_ptt, p_stats);

	qed_ptt_release(p_hwfn, p_ptt);

	return 0;
}

struct qed_hash_fcoe_con {
	struct hlist_node node;
	struct qed_fcoe_conn *con;
};

static int qed_fill_fcoe_dev_info(struct qed_dev *cdev,
				  struct qed_dev_fcoe_info *info)
{
	struct qed_hwfn *hwfn = QED_LEADING_HWFN(cdev);
	int rc;

	memset(info, 0, sizeof(*info));
	rc = qed_fill_dev_info(cdev, &info->common);

	info->primary_dbq_rq_addr =
	    qed_fcoe_get_primary_bdq_prod(hwfn, BDQ_ID_RQ);
	info->secondary_bdq_rq_addr =
	    qed_fcoe_get_secondary_bdq_prod(hwfn, BDQ_ID_RQ);

	return rc;
}

static void qed_register_fcoe_ops(struct qed_dev *cdev,
				  struct qed_fcoe_cb_ops *ops, void *cookie)
{
	cdev->protocol_ops.fcoe = ops;
	cdev->ops_cookie = cookie;
}

static struct qed_hash_fcoe_con *qed_fcoe_get_hash(struct qed_dev *cdev,
						   u32 handle)
{
	struct qed_hash_fcoe_con *hash_con = NULL;

	if (!(cdev->flags & QED_FLAG_STORAGE_STARTED))
		return NULL;

	hash_for_each_possible(cdev->connections, hash_con, node, handle) {
		if (hash_con->con->icid == handle)
			break;
	}

	if (!hash_con || (hash_con->con->icid != handle))
		return NULL;

	return hash_con;
}

static int qed_fcoe_stop(struct qed_dev *cdev)
{
	int rc;

	if (!(cdev->flags & QED_FLAG_STORAGE_STARTED)) {
		DP_NOTICE(cdev, "fcoe already stopped\n");
		return 0;
	}

	if (!hash_empty(cdev->connections)) {
		DP_NOTICE(cdev,
			  "Can't stop fcoe - not all connections were returned\n");
		return -EINVAL;
	}

	/* Stop the fcoe */
	rc = qed_sp_fcoe_func_stop(QED_LEADING_HWFN(cdev),
				   QED_SPQ_MODE_EBLOCK, NULL);
	cdev->flags &= ~QED_FLAG_STORAGE_STARTED;

	return rc;
}

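/* qed_fcoe_start() posts the FCoE init-function ramrod on the leading hwfn
 * and, when the caller passes a qed_fcoe_tid, copies out the task (TID)
 * block layout gathered from the context manager.
 */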
static int qed_fcoe_start(struct qed_dev *cdev, struct qed_fcoe_tid *tasks)
{
	int rc;

	if (cdev->flags & QED_FLAG_STORAGE_STARTED) {
		DP_NOTICE(cdev, "fcoe already started\n");
		return 0;
	}

	rc = qed_sp_fcoe_func_start(QED_LEADING_HWFN(cdev),
				    QED_SPQ_MODE_EBLOCK, NULL);
	if (rc) {
		DP_NOTICE(cdev, "Failed to start fcoe\n");
		return rc;
	}

	cdev->flags |= QED_FLAG_STORAGE_STARTED;
	hash_init(cdev->connections);

	if (tasks) {
		struct qed_tid_mem *tid_info = kzalloc(sizeof(*tid_info),
						       GFP_ATOMIC);

		if (!tid_info) {
			DP_NOTICE(cdev,
				  "Failed to allocate tasks information\n");
			qed_fcoe_stop(cdev);
			return -ENOMEM;
		}

		rc = qed_cxt_get_tid_mem_info(QED_LEADING_HWFN(cdev), tid_info);
		if (rc) {
			DP_NOTICE(cdev, "Failed to gather task information\n");
			qed_fcoe_stop(cdev);
			kfree(tid_info);
			return rc;
		}

		/* Fill task information */
		tasks->size = tid_info->tid_size;
		tasks->num_tids_per_block = tid_info->num_tids_per_block;
		memcpy(tasks->blocks, tid_info->blocks,
		       MAX_TID_BLOCKS_FCOE * sizeof(u8 *));

		kfree(tid_info);
	}

	return 0;
}

static int qed_fcoe_acquire_conn(struct qed_dev *cdev,
				 u32 *handle,
				 u32 *fw_cid, void __iomem **p_doorbell)
{
	struct qed_hash_fcoe_con *hash_con;
	int rc;

	/* Allocate a hashed connection */
	hash_con = kzalloc(sizeof(*hash_con), GFP_KERNEL);
	if (!hash_con) {
		DP_NOTICE(cdev, "Failed to allocate hashed connection\n");
		return -ENOMEM;
	}

	/* Acquire the connection */
	rc = qed_fcoe_acquire_connection(QED_LEADING_HWFN(cdev), NULL,
					 &hash_con->con);
	if (rc) {
		DP_NOTICE(cdev, "Failed to acquire Connection\n");
		kfree(hash_con);
		return rc;
	}

	/* Add the connection to the hash table */
	*handle = hash_con->con->icid;
	*fw_cid = hash_con->con->fw_cid;
	hash_add(cdev->connections, &hash_con->node, *handle);

	if (p_doorbell)
		*p_doorbell = qed_fcoe_get_db_addr(QED_LEADING_HWFN(cdev),
						   *handle);

	return 0;
}

static int qed_fcoe_release_conn(struct qed_dev *cdev, u32 handle)
{
	struct qed_hash_fcoe_con *hash_con;

	hash_con = qed_fcoe_get_hash(cdev, handle);
	if (!hash_con) {
		DP_NOTICE(cdev, "Failed to find connection for handle %d\n",
			  handle);
		return -EINVAL;
	}

	hlist_del(&hash_con->node);
	qed_fcoe_release_connection(QED_LEADING_HWFN(cdev), hash_con->con);
	kfree(hash_con);

	return 0;
}

static int qed_fcoe_offload_conn(struct qed_dev *cdev,
				 u32 handle,
				 struct qed_fcoe_params_offload *conn_info)
{
	struct qed_hash_fcoe_con *hash_con;
	struct qed_fcoe_conn *con;

	hash_con = qed_fcoe_get_hash(cdev, handle);
	if (!hash_con) {
		DP_NOTICE(cdev, "Failed to find connection for handle %d\n",
			  handle);
		return -EINVAL;
	}

	/* Update the connection with information from the params */
	con = hash_con->con;

	con->sq_pbl_addr = conn_info->sq_pbl_addr;
	con->sq_curr_page_addr = conn_info->sq_curr_page_addr;
	con->sq_next_page_addr = conn_info->sq_next_page_addr;
	con->tx_max_fc_pay_len = conn_info->tx_max_fc_pay_len;
	con->e_d_tov_timer_val = conn_info->e_d_tov_timer_val;
	con->rec_tov_timer_val = conn_info->rec_tov_timer_val;
	con->rx_max_fc_pay_len = conn_info->rx_max_fc_pay_len;
	con->vlan_tag = conn_info->vlan_tag;
	con->max_conc_seqs_c3 = conn_info->max_conc_seqs_c3;
	con->flags = conn_info->flags;
	con->def_q_idx = conn_info->def_q_idx;

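	/* Pack each 6-byte MAC address into the three 16-bit words
	 * (low, middle, high) that the offload ramrod expects.
	 */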
	con->src_mac_addr_hi = (conn_info->src_mac[5] << 8) |
	    conn_info->src_mac[4];
	con->src_mac_addr_mid = (conn_info->src_mac[3] << 8) |
	    conn_info->src_mac[2];
	con->src_mac_addr_lo = (conn_info->src_mac[1] << 8) |
	    conn_info->src_mac[0];
	con->dst_mac_addr_hi = (conn_info->dst_mac[5] << 8) |
	    conn_info->dst_mac[4];
	con->dst_mac_addr_mid = (conn_info->dst_mac[3] << 8) |
	    conn_info->dst_mac[2];
	con->dst_mac_addr_lo = (conn_info->dst_mac[1] << 8) |
	    conn_info->dst_mac[0];

	con->s_id.addr_hi = conn_info->s_id.addr_hi;
	con->s_id.addr_mid = conn_info->s_id.addr_mid;
	con->s_id.addr_lo = conn_info->s_id.addr_lo;
	con->d_id.addr_hi = conn_info->d_id.addr_hi;
	con->d_id.addr_mid = conn_info->d_id.addr_mid;
	con->d_id.addr_lo = conn_info->d_id.addr_lo;

	return qed_sp_fcoe_conn_offload(QED_LEADING_HWFN(cdev), con,
					QED_SPQ_MODE_EBLOCK, NULL);
}

static int qed_fcoe_destroy_conn(struct qed_dev *cdev,
				 u32 handle, dma_addr_t terminate_params)
{
	struct qed_hash_fcoe_con *hash_con;
	struct qed_fcoe_conn *con;

	hash_con = qed_fcoe_get_hash(cdev, handle);
	if (!hash_con) {
		DP_NOTICE(cdev, "Failed to find connection for handle %d\n",
			  handle);
		return -EINVAL;
	}

	/* Update the connection with information from the params */
	con = hash_con->con;
	con->terminate_params = terminate_params;

	return qed_sp_fcoe_conn_destroy(QED_LEADING_HWFN(cdev), con,
					QED_SPQ_MODE_EBLOCK, NULL);
}

static int qed_fcoe_stats(struct qed_dev *cdev, struct qed_fcoe_stats *stats)
{
	return qed_fcoe_get_stats(QED_LEADING_HWFN(cdev), stats);
}

void qed_get_protocol_stats_fcoe(struct qed_dev *cdev,
				 struct qed_mcp_fcoe_stats *stats)
{
	struct qed_fcoe_stats proto_stats;

	/* Retrieve FW statistics */
	memset(&proto_stats, 0, sizeof(proto_stats));
	if (qed_fcoe_stats(cdev, &proto_stats)) {
		DP_VERBOSE(cdev, QED_MSG_STORAGE,
			   "Failed to collect FCoE statistics\n");
		return;
	}

	/* Translate FW statistics into struct */
	stats->rx_pkts = proto_stats.fcoe_rx_data_pkt_cnt +
			 proto_stats.fcoe_rx_xfer_pkt_cnt +
			 proto_stats.fcoe_rx_other_pkt_cnt;
	stats->tx_pkts = proto_stats.fcoe_tx_data_pkt_cnt +
			 proto_stats.fcoe_tx_xfer_pkt_cnt +
			 proto_stats.fcoe_tx_other_pkt_cnt;
	stats->fcs_err = proto_stats.fcoe_silent_drop_pkt_crc_error_cnt;

	/* Request protocol driver to fill-in the rest */
	if (cdev->protocol_ops.fcoe && cdev->ops_cookie) {
		struct qed_fcoe_cb_ops *ops = cdev->protocol_ops.fcoe;
		void *cookie = cdev->ops_cookie;

		if (ops->get_login_failures)
			stats->login_failure = ops->get_login_failures(cookie);
	}
}

static const struct qed_fcoe_ops qed_fcoe_ops_pass = {
	.common = &qed_common_ops_pass,
	.ll2 = &qed_ll2_ops_pass,
	.fill_dev_info = &qed_fill_fcoe_dev_info,
	.start = &qed_fcoe_start,
	.stop = &qed_fcoe_stop,
	.register_ops = &qed_register_fcoe_ops,
	.acquire_conn = &qed_fcoe_acquire_conn,
	.release_conn = &qed_fcoe_release_conn,
	.offload_conn = &qed_fcoe_offload_conn,
	.destroy_conn = &qed_fcoe_destroy_conn,
	.get_stats = &qed_fcoe_stats,
};

const struct qed_fcoe_ops *qed_get_fcoe_ops(void)
{
	return &qed_fcoe_ops_pass;
}
EXPORT_SYMBOL(qed_get_fcoe_ops);

void qed_put_fcoe_ops(void)
{
}
EXPORT_SYMBOL(qed_put_fcoe_ops);