/* bnx2fc_hwi.c: Broadcom NetXtreme II Linux FCoE offload driver.
 * This file contains the code for the low level functions that interact
 * with the 57712 FCoE firmware.
 *
 * Copyright (c) 2008 - 2011 Broadcom Corporation
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation.
 *
 * Written by: Bhanu Prakash Gollapudi (bprakash@broadcom.com)
 */

#include "bnx2fc.h"

DECLARE_PER_CPU(struct bnx2fc_percpu_s, bnx2fc_percpu);

static void bnx2fc_fastpath_notification(struct bnx2fc_hba *hba,
					struct fcoe_kcqe *new_cqe_kcqe);
static void bnx2fc_process_ofld_cmpl(struct bnx2fc_hba *hba,
					struct fcoe_kcqe *ofld_kcqe);
static void bnx2fc_process_enable_conn_cmpl(struct bnx2fc_hba *hba,
						struct fcoe_kcqe *ofld_kcqe);
static void bnx2fc_init_failure(struct bnx2fc_hba *hba, u32 err_code);
static void bnx2fc_process_conn_destroy_cmpl(struct bnx2fc_hba *hba,
					struct fcoe_kcqe *destroy_kcqe);

int bnx2fc_send_stat_req(struct bnx2fc_hba *hba)
{
	struct fcoe_kwqe_stat stat_req;
	struct kwqe *kwqe_arr[2];
	int num_kwqes = 1;
	int rc = 0;

	memset(&stat_req, 0x00, sizeof(struct fcoe_kwqe_stat));
	stat_req.hdr.op_code = FCOE_KWQE_OPCODE_STAT;
	stat_req.hdr.flags =
		(FCOE_KWQE_LAYER_CODE << FCOE_KWQE_HEADER_LAYER_CODE_SHIFT);

	stat_req.stat_params_addr_lo = (u32) hba->stats_buf_dma;
	stat_req.stat_params_addr_hi = (u32) ((u64)hba->stats_buf_dma >> 32);

	kwqe_arr[0] = (struct kwqe *) &stat_req;

	if (hba->cnic && hba->cnic->submit_kwqes)
		rc = hba->cnic->submit_kwqes(hba->cnic, kwqe_arr, num_kwqes);

	return rc;
}
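
/*
 * Note that bnx2fc_send_stat_req() only hands the DMA address of the
 * statistics buffer to the firmware; the buffer is filled
 * asynchronously, and the FCOE_KCQE_OPCODE_STAT_FUNC completion in
 * bnx2fc_indicate_kcqe() below signals hba->stat_req_done once the
 * data is available.
 */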
58 * 59 */ 60 int bnx2fc_send_fw_fcoe_init_msg(struct bnx2fc_hba *hba) 61 { 62 struct fcoe_kwqe_init1 fcoe_init1; 63 struct fcoe_kwqe_init2 fcoe_init2; 64 struct fcoe_kwqe_init3 fcoe_init3; 65 struct kwqe *kwqe_arr[3]; 66 int num_kwqes = 3; 67 int rc = 0; 68 69 if (!hba->cnic) { 70 printk(KERN_ERR PFX "hba->cnic NULL during fcoe fw init\n"); 71 return -ENODEV; 72 } 73 74 /* fill init1 KWQE */ 75 memset(&fcoe_init1, 0x00, sizeof(struct fcoe_kwqe_init1)); 76 fcoe_init1.hdr.op_code = FCOE_KWQE_OPCODE_INIT1; 77 fcoe_init1.hdr.flags = (FCOE_KWQE_LAYER_CODE << 78 FCOE_KWQE_HEADER_LAYER_CODE_SHIFT); 79 80 fcoe_init1.num_tasks = BNX2FC_MAX_TASKS; 81 fcoe_init1.sq_num_wqes = BNX2FC_SQ_WQES_MAX; 82 fcoe_init1.rq_num_wqes = BNX2FC_RQ_WQES_MAX; 83 fcoe_init1.rq_buffer_log_size = BNX2FC_RQ_BUF_LOG_SZ; 84 fcoe_init1.cq_num_wqes = BNX2FC_CQ_WQES_MAX; 85 fcoe_init1.dummy_buffer_addr_lo = (u32) hba->dummy_buf_dma; 86 fcoe_init1.dummy_buffer_addr_hi = (u32) ((u64)hba->dummy_buf_dma >> 32); 87 fcoe_init1.task_list_pbl_addr_lo = (u32) hba->task_ctx_bd_dma; 88 fcoe_init1.task_list_pbl_addr_hi = 89 (u32) ((u64) hba->task_ctx_bd_dma >> 32); 90 fcoe_init1.mtu = BNX2FC_MINI_JUMBO_MTU; 91 92 fcoe_init1.flags = (PAGE_SHIFT << 93 FCOE_KWQE_INIT1_LOG_PAGE_SIZE_SHIFT); 94 95 fcoe_init1.num_sessions_log = BNX2FC_NUM_MAX_SESS_LOG; 96 97 /* fill init2 KWQE */ 98 memset(&fcoe_init2, 0x00, sizeof(struct fcoe_kwqe_init2)); 99 fcoe_init2.hdr.op_code = FCOE_KWQE_OPCODE_INIT2; 100 fcoe_init2.hdr.flags = (FCOE_KWQE_LAYER_CODE << 101 FCOE_KWQE_HEADER_LAYER_CODE_SHIFT); 102 103 fcoe_init2.hsi_major_version = FCOE_HSI_MAJOR_VERSION; 104 fcoe_init2.hsi_minor_version = FCOE_HSI_MINOR_VERSION; 105 106 107 fcoe_init2.hash_tbl_pbl_addr_lo = (u32) hba->hash_tbl_pbl_dma; 108 fcoe_init2.hash_tbl_pbl_addr_hi = (u32) 109 ((u64) hba->hash_tbl_pbl_dma >> 32); 110 111 fcoe_init2.t2_hash_tbl_addr_lo = (u32) hba->t2_hash_tbl_dma; 112 fcoe_init2.t2_hash_tbl_addr_hi = (u32) 113 ((u64) hba->t2_hash_tbl_dma >> 32); 114 115 fcoe_init2.t2_ptr_hash_tbl_addr_lo = (u32) hba->t2_hash_tbl_ptr_dma; 116 fcoe_init2.t2_ptr_hash_tbl_addr_hi = (u32) 117 ((u64) hba->t2_hash_tbl_ptr_dma >> 32); 118 119 fcoe_init2.free_list_count = BNX2FC_NUM_MAX_SESS; 120 121 /* fill init3 KWQE */ 122 memset(&fcoe_init3, 0x00, sizeof(struct fcoe_kwqe_init3)); 123 fcoe_init3.hdr.op_code = FCOE_KWQE_OPCODE_INIT3; 124 fcoe_init3.hdr.flags = (FCOE_KWQE_LAYER_CODE << 125 FCOE_KWQE_HEADER_LAYER_CODE_SHIFT); 126 fcoe_init3.error_bit_map_lo = 0xffffffff; 127 fcoe_init3.error_bit_map_hi = 0xffffffff; 128 129 fcoe_init3.perf_config = 1; 130 131 kwqe_arr[0] = (struct kwqe *) &fcoe_init1; 132 kwqe_arr[1] = (struct kwqe *) &fcoe_init2; 133 kwqe_arr[2] = (struct kwqe *) &fcoe_init3; 134 135 if (hba->cnic && hba->cnic->submit_kwqes) 136 rc = hba->cnic->submit_kwqes(hba->cnic, kwqe_arr, num_kwqes); 137 138 return rc; 139 } 140 int bnx2fc_send_fw_fcoe_destroy_msg(struct bnx2fc_hba *hba) 141 { 142 struct fcoe_kwqe_destroy fcoe_destroy; 143 struct kwqe *kwqe_arr[2]; 144 int num_kwqes = 1; 145 int rc = -1; 146 147 /* fill destroy KWQE */ 148 memset(&fcoe_destroy, 0x00, sizeof(struct fcoe_kwqe_destroy)); 149 fcoe_destroy.hdr.op_code = FCOE_KWQE_OPCODE_DESTROY; 150 fcoe_destroy.hdr.flags = (FCOE_KWQE_LAYER_CODE << 151 FCOE_KWQE_HEADER_LAYER_CODE_SHIFT); 152 kwqe_arr[0] = (struct kwqe *) &fcoe_destroy; 153 154 if (hba->cnic && hba->cnic->submit_kwqes) 155 rc = hba->cnic->submit_kwqes(hba->cnic, kwqe_arr, num_kwqes); 156 return rc; 157 } 158 159 /** 160 * bnx2fc_send_session_ofld_req - initiates FCoE 

/**
 * bnx2fc_send_session_ofld_req - initiates FCoE Session offload process
 *
 * @port: port structure pointer
 * @tgt: bnx2fc_rport structure pointer
 */
int bnx2fc_send_session_ofld_req(struct fcoe_port *port,
					struct bnx2fc_rport *tgt)
{
	struct fc_lport *lport = port->lport;
	struct bnx2fc_interface *interface = port->priv;
	struct fcoe_ctlr *ctlr = bnx2fc_to_ctlr(interface);
	struct bnx2fc_hba *hba = interface->hba;
	struct kwqe *kwqe_arr[4];
	struct fcoe_kwqe_conn_offload1 ofld_req1;
	struct fcoe_kwqe_conn_offload2 ofld_req2;
	struct fcoe_kwqe_conn_offload3 ofld_req3;
	struct fcoe_kwqe_conn_offload4 ofld_req4;
	struct fc_rport_priv *rdata = tgt->rdata;
	struct fc_rport *rport = tgt->rport;
	int num_kwqes = 4;
	u32 port_id;
	int rc = 0;
	u16 conn_id;

	/* Initialize offload request 1 structure */
	memset(&ofld_req1, 0x00, sizeof(struct fcoe_kwqe_conn_offload1));

	ofld_req1.hdr.op_code = FCOE_KWQE_OPCODE_OFFLOAD_CONN1;
	ofld_req1.hdr.flags =
		(FCOE_KWQE_LAYER_CODE << FCOE_KWQE_HEADER_LAYER_CODE_SHIFT);


	conn_id = (u16)tgt->fcoe_conn_id;
	ofld_req1.fcoe_conn_id = conn_id;


	ofld_req1.sq_addr_lo = (u32) tgt->sq_dma;
	ofld_req1.sq_addr_hi = (u32)((u64) tgt->sq_dma >> 32);

	ofld_req1.rq_pbl_addr_lo = (u32) tgt->rq_pbl_dma;
	ofld_req1.rq_pbl_addr_hi = (u32)((u64) tgt->rq_pbl_dma >> 32);

	ofld_req1.rq_first_pbe_addr_lo = (u32) tgt->rq_dma;
	ofld_req1.rq_first_pbe_addr_hi =
				(u32)((u64) tgt->rq_dma >> 32);

	ofld_req1.rq_prod = 0x8000;

	/* Initialize offload request 2 structure */
	memset(&ofld_req2, 0x00, sizeof(struct fcoe_kwqe_conn_offload2));

	ofld_req2.hdr.op_code = FCOE_KWQE_OPCODE_OFFLOAD_CONN2;
	ofld_req2.hdr.flags =
		(FCOE_KWQE_LAYER_CODE << FCOE_KWQE_HEADER_LAYER_CODE_SHIFT);

	ofld_req2.tx_max_fc_pay_len = rdata->maxframe_size;

	ofld_req2.cq_addr_lo = (u32) tgt->cq_dma;
	ofld_req2.cq_addr_hi = (u32)((u64)tgt->cq_dma >> 32);

	ofld_req2.xferq_addr_lo = (u32) tgt->xferq_dma;
	ofld_req2.xferq_addr_hi = (u32)((u64)tgt->xferq_dma >> 32);

	ofld_req2.conn_db_addr_lo = (u32)tgt->conn_db_dma;
	ofld_req2.conn_db_addr_hi = (u32)((u64)tgt->conn_db_dma >> 32);

	/* Initialize offload request 3 structure */
	memset(&ofld_req3, 0x00, sizeof(struct fcoe_kwqe_conn_offload3));

	ofld_req3.hdr.op_code = FCOE_KWQE_OPCODE_OFFLOAD_CONN3;
	ofld_req3.hdr.flags =
		(FCOE_KWQE_LAYER_CODE << FCOE_KWQE_HEADER_LAYER_CODE_SHIFT);

	ofld_req3.vlan_tag = interface->vlan_id <<
				FCOE_KWQE_CONN_OFFLOAD3_VLAN_ID_SHIFT;
	ofld_req3.vlan_tag |= 3 << FCOE_KWQE_CONN_OFFLOAD3_PRIORITY_SHIFT;

	port_id = fc_host_port_id(lport->host);
	if (port_id == 0) {
		BNX2FC_HBA_DBG(lport, "ofld_req: port_id = 0, link down?\n");
		return -EINVAL;
	}

	/*
	 * Store s_id of the initiator for further reference. This will
	 * be used during disable/destroy during linkdown processing,
	 * because when the lport is reset, the port_id is also reset to 0.
	 */
	tgt->sid = port_id;
	ofld_req3.s_id[0] = (port_id & 0x000000FF);
	ofld_req3.s_id[1] = (port_id & 0x0000FF00) >> 8;
	ofld_req3.s_id[2] = (port_id & 0x00FF0000) >> 16;

	port_id = rport->port_id;
	ofld_req3.d_id[0] = (port_id & 0x000000FF);
	ofld_req3.d_id[1] = (port_id & 0x0000FF00) >> 8;
	ofld_req3.d_id[2] = (port_id & 0x00FF0000) >> 16;

	ofld_req3.tx_total_conc_seqs = rdata->max_seq;

	ofld_req3.tx_max_conc_seqs_c3 = rdata->max_seq;
	ofld_req3.rx_max_fc_pay_len = lport->mfs;

	ofld_req3.rx_total_conc_seqs = BNX2FC_MAX_SEQS;
	ofld_req3.rx_max_conc_seqs_c3 = BNX2FC_MAX_SEQS;
	ofld_req3.rx_open_seqs_exch_c3 = 1;

	ofld_req3.confq_first_pbe_addr_lo = tgt->confq_dma;
	ofld_req3.confq_first_pbe_addr_hi = (u32)((u64) tgt->confq_dma >> 32);

	/* set mul_n_port_ids supported flag to 0, until it is supported */
	ofld_req3.flags = 0;
	/*
	ofld_req3.flags |= (((lport->send_sp_features & FC_SP_FT_MNA) ? 1:0) <<
			    FCOE_KWQE_CONN_OFFLOAD3_B_MUL_N_PORT_IDS_SHIFT);
	*/
	/* Info from PLOGI response */
	ofld_req3.flags |= (((rdata->sp_features & FC_SP_FT_EDTR) ? 1 : 0) <<
			     FCOE_KWQE_CONN_OFFLOAD3_B_E_D_TOV_RES_SHIFT);

	ofld_req3.flags |= (((rdata->sp_features & FC_SP_FT_SEQC) ? 1 : 0) <<
			     FCOE_KWQE_CONN_OFFLOAD3_B_CONT_INCR_SEQ_CNT_SHIFT);

	/*
	 * Info from PRLI response, this info is used for sequence level error
	 * recovery support
	 */
	if (tgt->dev_type == TYPE_TAPE) {
		ofld_req3.flags |= 1 <<
				    FCOE_KWQE_CONN_OFFLOAD3_B_CONF_REQ_SHIFT;
		ofld_req3.flags |= (((rdata->flags & FC_RP_FLAGS_REC_SUPPORTED)
				    ? 1 : 0) <<
				    FCOE_KWQE_CONN_OFFLOAD3_B_REC_VALID_SHIFT);
	}
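
	/*
	 * CONF_REQ and REC_VALID above enable the firmware's sequence
	 * level error recovery (confirmation requests and REC support).
	 * They are set only for tape devices, which matches the REC/SRR
	 * handling in the unsolicited error path below.
	 */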

	/* vlan flag */
	ofld_req3.flags |= (interface->vlan_enabled <<
			    FCOE_KWQE_CONN_OFFLOAD3_B_VLAN_FLAG_SHIFT);

	/* C2_VALID and ACK flags are not set as they are not supported */


	/* Initialize offload request 4 structure */
	memset(&ofld_req4, 0x00, sizeof(struct fcoe_kwqe_conn_offload4));
	ofld_req4.hdr.op_code = FCOE_KWQE_OPCODE_OFFLOAD_CONN4;
	ofld_req4.hdr.flags =
		(FCOE_KWQE_LAYER_CODE << FCOE_KWQE_HEADER_LAYER_CODE_SHIFT);

	ofld_req4.e_d_tov_timer_val = lport->e_d_tov / 20;


	ofld_req4.src_mac_addr_lo[0] = port->data_src_addr[5];
							/* local mac */
	ofld_req4.src_mac_addr_lo[1] = port->data_src_addr[4];
	ofld_req4.src_mac_addr_mid[0] = port->data_src_addr[3];
	ofld_req4.src_mac_addr_mid[1] = port->data_src_addr[2];
	ofld_req4.src_mac_addr_hi[0] = port->data_src_addr[1];
	ofld_req4.src_mac_addr_hi[1] = port->data_src_addr[0];
	ofld_req4.dst_mac_addr_lo[0] = ctlr->dest_addr[5];
							/* fcf mac */
	ofld_req4.dst_mac_addr_lo[1] = ctlr->dest_addr[4];
	ofld_req4.dst_mac_addr_mid[0] = ctlr->dest_addr[3];
	ofld_req4.dst_mac_addr_mid[1] = ctlr->dest_addr[2];
	ofld_req4.dst_mac_addr_hi[0] = ctlr->dest_addr[1];
	ofld_req4.dst_mac_addr_hi[1] = ctlr->dest_addr[0];

	ofld_req4.lcq_addr_lo = (u32) tgt->lcq_dma;
	ofld_req4.lcq_addr_hi = (u32)((u64) tgt->lcq_dma >> 32);

	ofld_req4.confq_pbl_base_addr_lo = (u32) tgt->confq_pbl_dma;
	ofld_req4.confq_pbl_base_addr_hi =
					(u32)((u64) tgt->confq_pbl_dma >> 32);

	kwqe_arr[0] = (struct kwqe *) &ofld_req1;
	kwqe_arr[1] = (struct kwqe *) &ofld_req2;
	kwqe_arr[2] = (struct kwqe *) &ofld_req3;
	kwqe_arr[3] = (struct kwqe *) &ofld_req4;

	if (hba->cnic && hba->cnic->submit_kwqes)
		rc = hba->cnic->submit_kwqes(hba->cnic, kwqe_arr, num_kwqes);

	return rc;
}

/**
 * bnx2fc_send_session_enable_req - initiates FCoE Session enablement
 *
 * @port: port structure pointer
 * @tgt: bnx2fc_rport structure pointer
 */
static int bnx2fc_send_session_enable_req(struct fcoe_port *port,
					struct bnx2fc_rport *tgt)
{
	struct kwqe *kwqe_arr[2];
	struct bnx2fc_interface *interface = port->priv;
	struct fcoe_ctlr *ctlr = bnx2fc_to_ctlr(interface);
	struct bnx2fc_hba *hba = interface->hba;
	struct fcoe_kwqe_conn_enable_disable enbl_req;
	struct fc_lport *lport = port->lport;
	struct fc_rport *rport = tgt->rport;
	int num_kwqes = 1;
	int rc = 0;
	u32 port_id;

	memset(&enbl_req, 0x00,
	       sizeof(struct fcoe_kwqe_conn_enable_disable));
	enbl_req.hdr.op_code = FCOE_KWQE_OPCODE_ENABLE_CONN;
	enbl_req.hdr.flags =
		(FCOE_KWQE_LAYER_CODE << FCOE_KWQE_HEADER_LAYER_CODE_SHIFT);

	enbl_req.src_mac_addr_lo[0] = port->data_src_addr[5];
							/* local mac */
	enbl_req.src_mac_addr_lo[1] = port->data_src_addr[4];
	enbl_req.src_mac_addr_mid[0] = port->data_src_addr[3];
	enbl_req.src_mac_addr_mid[1] = port->data_src_addr[2];
	enbl_req.src_mac_addr_hi[0] = port->data_src_addr[1];
	enbl_req.src_mac_addr_hi[1] = port->data_src_addr[0];
	memcpy(tgt->src_addr, port->data_src_addr, ETH_ALEN);

	enbl_req.dst_mac_addr_lo[0] = ctlr->dest_addr[5];
	enbl_req.dst_mac_addr_lo[1] = ctlr->dest_addr[4];
	enbl_req.dst_mac_addr_mid[0] = ctlr->dest_addr[3];
	enbl_req.dst_mac_addr_mid[1] = ctlr->dest_addr[2];
	enbl_req.dst_mac_addr_hi[0] = ctlr->dest_addr[1];
	enbl_req.dst_mac_addr_hi[1] = ctlr->dest_addr[0];
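
	/*
	 * The reversed byte order of the MAC address copies above is
	 * deliberate: the enable/disable KWQE carries each MAC address
	 * as three two-byte lo/mid/hi fields, with the last octet of the
	 * Ethernet address (data_src_addr[5]) landing in the low byte of
	 * the low field.  The offload and disable requests use the same
	 * layout.
	 */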

	port_id = fc_host_port_id(lport->host);
	if (port_id != tgt->sid) {
		printk(KERN_ERR PFX "WARN: enable_req port_id = 0x%x, "
				"sid = 0x%x\n", port_id, tgt->sid);
		port_id = tgt->sid;
	}
	enbl_req.s_id[0] = (port_id & 0x000000FF);
	enbl_req.s_id[1] = (port_id & 0x0000FF00) >> 8;
	enbl_req.s_id[2] = (port_id & 0x00FF0000) >> 16;

	port_id = rport->port_id;
	enbl_req.d_id[0] = (port_id & 0x000000FF);
	enbl_req.d_id[1] = (port_id & 0x0000FF00) >> 8;
	enbl_req.d_id[2] = (port_id & 0x00FF0000) >> 16;
	enbl_req.vlan_tag = interface->vlan_id <<
				FCOE_KWQE_CONN_ENABLE_DISABLE_VLAN_ID_SHIFT;
	enbl_req.vlan_tag |= 3 << FCOE_KWQE_CONN_ENABLE_DISABLE_PRIORITY_SHIFT;
	enbl_req.vlan_flag = interface->vlan_enabled;
	enbl_req.context_id = tgt->context_id;
	enbl_req.conn_id = tgt->fcoe_conn_id;

	kwqe_arr[0] = (struct kwqe *) &enbl_req;

	if (hba->cnic && hba->cnic->submit_kwqes)
		rc = hba->cnic->submit_kwqes(hba->cnic, kwqe_arr, num_kwqes);
	return rc;
}

/**
 * bnx2fc_send_session_disable_req - initiates FCoE Session disable
 *
 * @port: port structure pointer
 * @tgt: bnx2fc_rport structure pointer
 */
int bnx2fc_send_session_disable_req(struct fcoe_port *port,
				    struct bnx2fc_rport *tgt)
{
	struct bnx2fc_interface *interface = port->priv;
	struct fcoe_ctlr *ctlr = bnx2fc_to_ctlr(interface);
	struct bnx2fc_hba *hba = interface->hba;
	struct fcoe_kwqe_conn_enable_disable disable_req;
	struct kwqe *kwqe_arr[2];
	struct fc_rport *rport = tgt->rport;
	int num_kwqes = 1;
	int rc = 0;
	u32 port_id;

	memset(&disable_req, 0x00,
	       sizeof(struct fcoe_kwqe_conn_enable_disable));
	disable_req.hdr.op_code = FCOE_KWQE_OPCODE_DISABLE_CONN;
	disable_req.hdr.flags =
		(FCOE_KWQE_LAYER_CODE << FCOE_KWQE_HEADER_LAYER_CODE_SHIFT);

	disable_req.src_mac_addr_lo[0] = tgt->src_addr[5];
	disable_req.src_mac_addr_lo[1] = tgt->src_addr[4];
	disable_req.src_mac_addr_mid[0] = tgt->src_addr[3];
	disable_req.src_mac_addr_mid[1] = tgt->src_addr[2];
	disable_req.src_mac_addr_hi[0] = tgt->src_addr[1];
	disable_req.src_mac_addr_hi[1] = tgt->src_addr[0];

	disable_req.dst_mac_addr_lo[0] = ctlr->dest_addr[5];
	disable_req.dst_mac_addr_lo[1] = ctlr->dest_addr[4];
	disable_req.dst_mac_addr_mid[0] = ctlr->dest_addr[3];
	disable_req.dst_mac_addr_mid[1] = ctlr->dest_addr[2];
	disable_req.dst_mac_addr_hi[0] = ctlr->dest_addr[1];
	disable_req.dst_mac_addr_hi[1] = ctlr->dest_addr[0];

	port_id = tgt->sid;
	disable_req.s_id[0] = (port_id & 0x000000FF);
	disable_req.s_id[1] = (port_id & 0x0000FF00) >> 8;
	disable_req.s_id[2] = (port_id & 0x00FF0000) >> 16;


	port_id = rport->port_id;
	disable_req.d_id[0] = (port_id & 0x000000FF);
	disable_req.d_id[1] = (port_id & 0x0000FF00) >> 8;
	disable_req.d_id[2] = (port_id & 0x00FF0000) >> 16;
	disable_req.context_id = tgt->context_id;
	disable_req.conn_id = tgt->fcoe_conn_id;
	disable_req.vlan_tag = interface->vlan_id <<
				FCOE_KWQE_CONN_ENABLE_DISABLE_VLAN_ID_SHIFT;
	disable_req.vlan_tag |=
			3 << FCOE_KWQE_CONN_ENABLE_DISABLE_PRIORITY_SHIFT;
	disable_req.vlan_flag = interface->vlan_enabled;

	kwqe_arr[0] = (struct kwqe *) &disable_req;

	if (hba->cnic && hba->cnic->submit_kwqes)
		rc = hba->cnic->submit_kwqes(hba->cnic, kwqe_arr, num_kwqes);

	return rc;
}

/**
 * bnx2fc_send_session_destroy_req - initiates FCoE Session destroy
 *
 * @hba: adapter structure pointer
 * @tgt: bnx2fc_rport structure pointer
 */
int bnx2fc_send_session_destroy_req(struct bnx2fc_hba *hba,
					struct bnx2fc_rport *tgt)
{
	struct fcoe_kwqe_conn_destroy destroy_req;
	struct kwqe *kwqe_arr[2];
	int num_kwqes = 1;
	int rc = 0;

	memset(&destroy_req, 0x00, sizeof(struct fcoe_kwqe_conn_destroy));
	destroy_req.hdr.op_code = FCOE_KWQE_OPCODE_DESTROY_CONN;
	destroy_req.hdr.flags =
		(FCOE_KWQE_LAYER_CODE << FCOE_KWQE_HEADER_LAYER_CODE_SHIFT);

	destroy_req.context_id = tgt->context_id;
	destroy_req.conn_id = tgt->fcoe_conn_id;

	kwqe_arr[0] = (struct kwqe *) &destroy_req;

	if (hba->cnic && hba->cnic->submit_kwqes)
		rc = hba->cnic->submit_kwqes(hba->cnic, kwqe_arr, num_kwqes);

	return rc;
}

static bool is_valid_lport(struct bnx2fc_hba *hba, struct fc_lport *lport)
{
	struct bnx2fc_lport *blport;

	spin_lock_bh(&hba->hba_lock);
	list_for_each_entry(blport, &hba->vports, list) {
		if (blport->lport == lport) {
			spin_unlock_bh(&hba->hba_lock);
			return true;
		}
	}
	spin_unlock_bh(&hba->hba_lock);
	return false;
}


static void bnx2fc_unsol_els_work(struct work_struct *work)
{
	struct bnx2fc_unsol_els *unsol_els;
	struct fc_lport *lport;
	struct bnx2fc_hba *hba;
	struct fc_frame *fp;

	unsol_els = container_of(work, struct bnx2fc_unsol_els, unsol_els_work);
	lport = unsol_els->lport;
	fp = unsol_els->fp;
	hba = unsol_els->hba;
	if (is_valid_lport(hba, lport))
		fc_exch_recv(lport, fp);
	kfree(unsol_els);
}

void bnx2fc_process_l2_frame_compl(struct bnx2fc_rport *tgt,
				   unsigned char *buf,
				   u32 frame_len, u16 l2_oxid)
{
	struct fcoe_port *port = tgt->port;
	struct fc_lport *lport = port->lport;
	struct bnx2fc_interface *interface = port->priv;
	struct bnx2fc_unsol_els *unsol_els;
	struct fc_frame_header *fh;
	struct fc_frame *fp;
	struct sk_buff *skb;
	u32 payload_len;
	u32 crc;
	u8 op;


	unsol_els = kzalloc(sizeof(*unsol_els), GFP_ATOMIC);
	if (!unsol_els) {
		BNX2FC_TGT_DBG(tgt, "Unable to allocate unsol_work\n");
		return;
	}

	BNX2FC_TGT_DBG(tgt, "l2_frame_compl l2_oxid = 0x%x, frame_len = %d\n",
		l2_oxid, frame_len);

	payload_len = frame_len - sizeof(struct fc_frame_header);

	fp = fc_frame_alloc(lport, payload_len);
	if (!fp) {
		printk(KERN_ERR PFX "fc_frame_alloc failure\n");
		kfree(unsol_els);
		return;
	}

	fh = (struct fc_frame_header *) fc_frame_header_get(fp);
	/* Copy FC Frame header and payload into the frame */
	memcpy(fh, buf, frame_len);

	if (l2_oxid != FC_XID_UNKNOWN)
		fh->fh_ox_id = htons(l2_oxid);

	skb = fp_skb(fp);

	if ((fh->fh_r_ctl == FC_RCTL_ELS_REQ) ||
	    (fh->fh_r_ctl == FC_RCTL_ELS_REP)) {

		if (fh->fh_type == FC_TYPE_ELS) {
			op = fc_frame_payload_op(fp);
			if ((op == ELS_TEST) || (op == ELS_ESTC) ||
			    (op == ELS_FAN) || (op == ELS_CSU)) {
				/*
				 * No need to reply for these
				 * ELS requests
				 */
				printk(KERN_ERR PFX "dropping ELS 0x%x\n", op);
				kfree_skb(skb);
				kfree(unsol_els);
				return;
			}
		}
		crc = fcoe_fc_crc(fp);
		fc_frame_init(fp);
		fr_dev(fp) = lport;
		fr_sof(fp) = FC_SOF_I3;
		fr_eof(fp) = FC_EOF_T;
		fr_crc(fp) = cpu_to_le32(~crc);
		unsol_els->lport = lport;
		unsol_els->hba = interface->hba;
		unsol_els->fp = fp;
		INIT_WORK(&unsol_els->unsol_els_work, bnx2fc_unsol_els_work);
		queue_work(bnx2fc_wq, &unsol_els->unsol_els_work);
	} else {
		BNX2FC_HBA_DBG(lport, "fh_r_ctl = 0x%x\n", fh->fh_r_ctl);
		kfree_skb(skb);
		kfree(unsol_els);
	}
}

static void bnx2fc_process_unsol_compl(struct bnx2fc_rport *tgt, u16 wqe)
{
	u8 num_rq;
	struct fcoe_err_report_entry *err_entry;
	unsigned char *rq_data;
	unsigned char *buf = NULL, *buf1;
	int i;
	u16 xid;
	u32 frame_len, len;
	struct bnx2fc_cmd *io_req = NULL;
	struct fcoe_task_ctx_entry *task, *task_page;
	struct bnx2fc_interface *interface = tgt->port->priv;
	struct bnx2fc_hba *hba = interface->hba;
	int task_idx, index;
	int rc = 0;
	u64 err_warn_bit_map;
	u8 err_warn = 0xff;


	BNX2FC_TGT_DBG(tgt, "Entered UNSOL COMPLETION wqe = 0x%x\n", wqe);
	switch (wqe & FCOE_UNSOLICITED_CQE_SUBTYPE) {
	case FCOE_UNSOLICITED_FRAME_CQE_TYPE:
		frame_len = (wqe & FCOE_UNSOLICITED_CQE_PKT_LEN) >>
			     FCOE_UNSOLICITED_CQE_PKT_LEN_SHIFT;

		num_rq = (frame_len + BNX2FC_RQ_BUF_SZ - 1) / BNX2FC_RQ_BUF_SZ;

		spin_lock_bh(&tgt->tgt_lock);
		rq_data = (unsigned char *)bnx2fc_get_next_rqe(tgt, num_rq);
		spin_unlock_bh(&tgt->tgt_lock);

		if (rq_data) {
			buf = rq_data;
		} else {
			buf1 = buf = kmalloc((num_rq * BNX2FC_RQ_BUF_SZ),
					      GFP_ATOMIC);

			if (!buf1) {
				BNX2FC_TGT_DBG(tgt, "Memory alloc failure\n");
				break;
			}

			for (i = 0; i < num_rq; i++) {
				spin_lock_bh(&tgt->tgt_lock);
				rq_data = (unsigned char *)
					   bnx2fc_get_next_rqe(tgt, 1);
				spin_unlock_bh(&tgt->tgt_lock);
				len = BNX2FC_RQ_BUF_SZ;
				memcpy(buf1, rq_data, len);
				buf1 += len;
			}
		}
		bnx2fc_process_l2_frame_compl(tgt, buf, frame_len,
					      FC_XID_UNKNOWN);

		if (buf != rq_data)
			kfree(buf);
		spin_lock_bh(&tgt->tgt_lock);
		bnx2fc_return_rqe(tgt, num_rq);
		spin_unlock_bh(&tgt->tgt_lock);
		break;

	case FCOE_ERROR_DETECTION_CQE_TYPE:
		/*
		 * In case of error reporting CQE a single RQ entry
		 * is consumed.
		 */
		spin_lock_bh(&tgt->tgt_lock);
		num_rq = 1;
		err_entry = (struct fcoe_err_report_entry *)
			     bnx2fc_get_next_rqe(tgt, 1);
		xid = err_entry->fc_hdr.ox_id;
		BNX2FC_TGT_DBG(tgt, "Unsol Error Frame OX_ID = 0x%x\n", xid);
		BNX2FC_TGT_DBG(tgt, "err_warn_bitmap = %08x:%08x\n",
			err_entry->data.err_warn_bitmap_hi,
			err_entry->data.err_warn_bitmap_lo);
		BNX2FC_TGT_DBG(tgt, "buf_offsets - tx = 0x%x, rx = 0x%x\n",
			err_entry->data.tx_buf_off, err_entry->data.rx_buf_off);


		if (xid > BNX2FC_MAX_XID) {
			BNX2FC_TGT_DBG(tgt, "xid(0x%x) out of FW range\n",
				   xid);
			goto ret_err_rqe;
		}

		task_idx = xid / BNX2FC_TASKS_PER_PAGE;
		index = xid % BNX2FC_TASKS_PER_PAGE;
		task_page = (struct fcoe_task_ctx_entry *)
					hba->task_ctx[task_idx];
		task = &(task_page[index]);

		io_req = (struct bnx2fc_cmd *)hba->cmd_mgr->cmds[xid];
		if (!io_req)
			goto ret_err_rqe;

		if (io_req->cmd_type != BNX2FC_SCSI_CMD) {
			printk(KERN_ERR PFX "err_warn: Not a SCSI cmd\n");
			goto ret_err_rqe;
		}

		if (test_and_clear_bit(BNX2FC_FLAG_IO_CLEANUP,
				       &io_req->req_flags)) {
			BNX2FC_IO_DBG(io_req, "unsol_err: cleanup in "
				"progress.. ignore unsol err\n");
			goto ret_err_rqe;
		}
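
		/*
		 * The firmware reports errors as a 64-bit bitmap; the
		 * scan below takes the lowest set bit as the error code
		 * to act on.
		 */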
		err_warn_bit_map = (u64)
			((u64)err_entry->data.err_warn_bitmap_hi << 32) |
			(u64)err_entry->data.err_warn_bitmap_lo;
		for (i = 0; i < BNX2FC_NUM_ERR_BITS; i++) {
			if (err_warn_bit_map & (u64)((u64)1 << i)) {
				err_warn = i;
				break;
			}
		}

		/*
		 * If ABTS is already in progress, and FW error is
		 * received after that, do not cancel the timeout_work
		 * and let the error recovery continue by explicitly
		 * logging out the target, when the ABTS eventually
		 * times out.
		 */
		if (test_bit(BNX2FC_FLAG_ISSUE_ABTS, &io_req->req_flags)) {
			printk(KERN_ERR PFX "err_warn: io_req (0x%x) already "
					    "in ABTS processing\n", xid);
			goto ret_err_rqe;
		}
		BNX2FC_TGT_DBG(tgt, "err = 0x%x\n", err_warn);
		if (tgt->dev_type != TYPE_TAPE)
			goto skip_rec;
		switch (err_warn) {
		case FCOE_ERROR_CODE_REC_TOV_TIMER_EXPIRATION:
		case FCOE_ERROR_CODE_DATA_OOO_RO:
		case FCOE_ERROR_CODE_COMMON_INCORRECT_SEQ_CNT:
		case FCOE_ERROR_CODE_DATA_SOFI3_SEQ_ACTIVE_SET:
		case FCOE_ERROR_CODE_FCP_RSP_OPENED_SEQ:
		case FCOE_ERROR_CODE_DATA_SOFN_SEQ_ACTIVE_RESET:
			BNX2FC_TGT_DBG(tgt, "REC TOV popped for xid - 0x%x\n",
				   xid);
			memset(&io_req->err_entry, 0,
			       sizeof(struct fcoe_err_report_entry));
			memcpy(&io_req->err_entry, err_entry,
			       sizeof(struct fcoe_err_report_entry));
			if (!test_bit(BNX2FC_FLAG_SRR_SENT,
				      &io_req->req_flags)) {
				spin_unlock_bh(&tgt->tgt_lock);
				rc = bnx2fc_send_rec(io_req);
				spin_lock_bh(&tgt->tgt_lock);

				if (rc)
					goto skip_rec;
			} else
				printk(KERN_ERR PFX "SRR in progress\n");
			goto ret_err_rqe;
			break;
		default:
			break;
		}

skip_rec:
		set_bit(BNX2FC_FLAG_ISSUE_ABTS, &io_req->req_flags);
		/*
		 * Cancel the timeout_work, as we received IO
		 * completion with FW error.
		 */
		if (cancel_delayed_work(&io_req->timeout_work))
			kref_put(&io_req->refcount, bnx2fc_cmd_release);

		rc = bnx2fc_initiate_abts(io_req);
		if (rc != SUCCESS) {
			printk(KERN_ERR PFX "err_warn: initiate_abts "
				"failed xid = 0x%x. issue cleanup\n",
				io_req->xid);
			bnx2fc_initiate_cleanup(io_req);
		}
ret_err_rqe:
		bnx2fc_return_rqe(tgt, 1);
		spin_unlock_bh(&tgt->tgt_lock);
		break;
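
	/*
	 * Unlike error CQEs, which drive ABTS (and, for tape devices,
	 * REC/SRR) based recovery above, warning CQEs are only logged
	 * and recorded in the io_req; no recovery action is taken for
	 * them.
	 */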
	case FCOE_WARNING_DETECTION_CQE_TYPE:
		/*
		 * In case of warning reporting CQE a single RQ entry
		 * is consumed.
		 */
		spin_lock_bh(&tgt->tgt_lock);
		num_rq = 1;
		err_entry = (struct fcoe_err_report_entry *)
			     bnx2fc_get_next_rqe(tgt, 1);
		xid = cpu_to_be16(err_entry->fc_hdr.ox_id);
		BNX2FC_TGT_DBG(tgt, "Unsol Warning Frame OX_ID = 0x%x\n", xid);
		BNX2FC_TGT_DBG(tgt, "err_warn_bitmap = %08x:%08x\n",
			err_entry->data.err_warn_bitmap_hi,
			err_entry->data.err_warn_bitmap_lo);
		BNX2FC_TGT_DBG(tgt, "buf_offsets - tx = 0x%x, rx = 0x%x\n",
			err_entry->data.tx_buf_off, err_entry->data.rx_buf_off);

		if (xid > BNX2FC_MAX_XID) {
			BNX2FC_TGT_DBG(tgt, "xid(0x%x) out of FW range\n", xid);
			goto ret_warn_rqe;
		}

		err_warn_bit_map = (u64)
			((u64)err_entry->data.err_warn_bitmap_hi << 32) |
			(u64)err_entry->data.err_warn_bitmap_lo;
		for (i = 0; i < BNX2FC_NUM_ERR_BITS; i++) {
			if (err_warn_bit_map & ((u64)1 << i)) {
				err_warn = i;
				break;
			}
		}
		BNX2FC_TGT_DBG(tgt, "warn = 0x%x\n", err_warn);

		task_idx = xid / BNX2FC_TASKS_PER_PAGE;
		index = xid % BNX2FC_TASKS_PER_PAGE;
		task_page = (struct fcoe_task_ctx_entry *)
			     interface->hba->task_ctx[task_idx];
		task = &(task_page[index]);
		io_req = (struct bnx2fc_cmd *)hba->cmd_mgr->cmds[xid];
		if (!io_req)
			goto ret_warn_rqe;

		if (io_req->cmd_type != BNX2FC_SCSI_CMD) {
			printk(KERN_ERR PFX "err_warn: Not a SCSI cmd\n");
			goto ret_warn_rqe;
		}

		memset(&io_req->err_entry, 0,
		       sizeof(struct fcoe_err_report_entry));
		memcpy(&io_req->err_entry, err_entry,
		       sizeof(struct fcoe_err_report_entry));

		if (err_warn == FCOE_ERROR_CODE_REC_TOV_TIMER_EXPIRATION)
			/* REC_TOV is not a warning code */
			BUG_ON(1);
		else
			BNX2FC_TGT_DBG(tgt, "Unsolicited warning\n");
ret_warn_rqe:
		bnx2fc_return_rqe(tgt, 1);
		spin_unlock_bh(&tgt->tgt_lock);
		break;

	default:
		printk(KERN_ERR PFX "Unsol Compl: Invalid CQE Subtype\n");
		break;
	}
}

void bnx2fc_process_cq_compl(struct bnx2fc_rport *tgt, u16 wqe)
{
	struct fcoe_task_ctx_entry *task;
	struct fcoe_task_ctx_entry *task_page;
	struct fcoe_port *port = tgt->port;
	struct bnx2fc_interface *interface = port->priv;
	struct bnx2fc_hba *hba = interface->hba;
	struct bnx2fc_cmd *io_req;
	int task_idx, index;
	u16 xid;
	u8 cmd_type;
	u8 rx_state = 0;
	u8 num_rq;

	spin_lock_bh(&tgt->tgt_lock);
	xid = wqe & FCOE_PEND_WQ_CQE_TASK_ID;
	if (xid >= BNX2FC_MAX_TASKS) {
		printk(KERN_ERR PFX "ERROR:xid out of range\n");
		spin_unlock_bh(&tgt->tgt_lock);
		return;
	}
	task_idx = xid / BNX2FC_TASKS_PER_PAGE;
	index = xid % BNX2FC_TASKS_PER_PAGE;
	task_page = (struct fcoe_task_ctx_entry *)hba->task_ctx[task_idx];
	task = &(task_page[index]);

	num_rq = ((task->rxwr_txrd.var_ctx.rx_flags &
		   FCOE_TCE_RX_WR_TX_RD_VAR_NUM_RQ_WQE) >>
		   FCOE_TCE_RX_WR_TX_RD_VAR_NUM_RQ_WQE_SHIFT);

	io_req = (struct bnx2fc_cmd *)hba->cmd_mgr->cmds[xid];

	if (io_req == NULL) {
		printk(KERN_ERR PFX "ERROR? cq_compl - io_req is NULL\n");
		spin_unlock_bh(&tgt->tgt_lock);
		return;
	}

	/* Timestamp IO completion time */
	cmd_type = io_req->cmd_type;

	rx_state = ((task->rxwr_txrd.var_ctx.rx_flags &
		    FCOE_TCE_RX_WR_TX_RD_VAR_RX_STATE) >>
		    FCOE_TCE_RX_WR_TX_RD_VAR_RX_STATE_SHIFT);

	/* Process other IO completion types */
	switch (cmd_type) {
	case BNX2FC_SCSI_CMD:
		if (rx_state == FCOE_TASK_RX_STATE_COMPLETED) {
			bnx2fc_process_scsi_cmd_compl(io_req, task, num_rq);
			spin_unlock_bh(&tgt->tgt_lock);
			return;
		}

		if (rx_state == FCOE_TASK_RX_STATE_ABTS_COMPLETED)
			bnx2fc_process_abts_compl(io_req, task, num_rq);
		else if (rx_state ==
			 FCOE_TASK_RX_STATE_EXCHANGE_CLEANUP_COMPLETED)
			bnx2fc_process_cleanup_compl(io_req, task, num_rq);
		else
			printk(KERN_ERR PFX "Invalid rx state - %d\n",
				rx_state);
		break;

	case BNX2FC_TASK_MGMT_CMD:
		BNX2FC_IO_DBG(io_req, "Processing TM complete\n");
		bnx2fc_process_tm_compl(io_req, task, num_rq);
		break;

	case BNX2FC_ABTS:
		/*
		 * ABTS request received by firmware. ABTS response
		 * will be delivered to the task belonging to the IO
		 * that was aborted
		 */
		BNX2FC_IO_DBG(io_req, "cq_compl- ABTS sent out by fw\n");
		kref_put(&io_req->refcount, bnx2fc_cmd_release);
		break;

	case BNX2FC_ELS:
		if (rx_state == FCOE_TASK_RX_STATE_COMPLETED)
			bnx2fc_process_els_compl(io_req, task, num_rq);
		else if (rx_state == FCOE_TASK_RX_STATE_ABTS_COMPLETED)
			bnx2fc_process_abts_compl(io_req, task, num_rq);
		else if (rx_state ==
			 FCOE_TASK_RX_STATE_EXCHANGE_CLEANUP_COMPLETED)
			bnx2fc_process_cleanup_compl(io_req, task, num_rq);
		else
			printk(KERN_ERR PFX "Invalid rx state = %d\n",
				rx_state);
		break;

	case BNX2FC_CLEANUP:
		BNX2FC_IO_DBG(io_req, "cq_compl- cleanup resp rcvd\n");
		kref_put(&io_req->refcount, bnx2fc_cmd_release);
		break;

	case BNX2FC_SEQ_CLEANUP:
		BNX2FC_IO_DBG(io_req, "cq_compl(0x%x) - seq cleanup resp\n",
			      io_req->xid);
		bnx2fc_process_seq_cleanup_compl(io_req, task, rx_state);
		kref_put(&io_req->refcount, bnx2fc_cmd_release);
		break;

	default:
		printk(KERN_ERR PFX "Invalid cmd_type %d\n", cmd_type);
		break;
	}
	spin_unlock_bh(&tgt->tgt_lock);
}

void bnx2fc_arm_cq(struct bnx2fc_rport *tgt)
{
	struct b577xx_fcoe_rx_doorbell *rx_db = &tgt->rx_db;
	u32 msg;

	wmb();
	rx_db->doorbell_cq_cons = tgt->cq_cons_idx | (tgt->cq_curr_toggle_bit <<
			FCOE_CQE_TOGGLE_BIT_SHIFT);
	msg = *((u32 *)rx_db);
	writel(cpu_to_le32(msg), tgt->ctx_base);
	mmiowb();
}

struct bnx2fc_work *bnx2fc_alloc_work(struct bnx2fc_rport *tgt, u16 wqe)
{
	struct bnx2fc_work *work;
	work = kzalloc(sizeof(struct bnx2fc_work), GFP_ATOMIC);
	if (!work)
		return NULL;

	INIT_LIST_HEAD(&work->list);
	work->tgt = tgt;
	work->wqe = wqe;
	return work;
}

int bnx2fc_process_new_cqes(struct bnx2fc_rport *tgt)
{
	struct fcoe_cqe *cq;
	u32 cq_cons;
	struct fcoe_cqe *cqe;
	u32 num_free_sqes = 0;
	u32 num_cqes = 0;
	u16 wqe;

	/*
	 * cq_lock is a low contention lock used to protect
	 * the CQ data structure from being freed up during
	 * the upload operation
	 */
	spin_lock_bh(&tgt->cq_lock);

	if (!tgt->cq) {
		printk(KERN_ERR PFX "process_new_cqes: cq is NULL\n");
		spin_unlock_bh(&tgt->cq_lock);
		return 0;
	}
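
	/*
	 * Ownership of a CQE is tracked with a toggle bit that the
	 * firmware flips on each pass around the ring: an entry belongs
	 * to the current pass only while its toggle bit matches
	 * cq_curr_toggle_bit, which is why the loop below compares that
	 * bit instead of consulting a producer index in host memory.
	 */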
	cq = tgt->cq;
	cq_cons = tgt->cq_cons_idx;
	cqe = &cq[cq_cons];

	while (((wqe = cqe->wqe) & FCOE_CQE_TOGGLE_BIT) ==
	       (tgt->cq_curr_toggle_bit <<
	       FCOE_CQE_TOGGLE_BIT_SHIFT)) {

		/* new entry on the cq */
		if (wqe & FCOE_CQE_CQE_TYPE) {
			/* Unsolicited event notification */
			bnx2fc_process_unsol_compl(tgt, wqe);
		} else {
			/* Pending work request completion */
			struct bnx2fc_work *work = NULL;
			struct bnx2fc_percpu_s *fps = NULL;
			unsigned int cpu = wqe % num_possible_cpus();

			fps = &per_cpu(bnx2fc_percpu, cpu);
			spin_lock_bh(&fps->fp_work_lock);
			if (unlikely(!fps->iothread))
				goto unlock;

			work = bnx2fc_alloc_work(tgt, wqe);
			if (work)
				list_add_tail(&work->list,
					      &fps->work_list);
unlock:
			spin_unlock_bh(&fps->fp_work_lock);

			/* Wake the io thread if work was queued,
			 * else process the completion inline */
			if (fps->iothread && work)
				wake_up_process(fps->iothread);
			else
				bnx2fc_process_cq_compl(tgt, wqe);
			num_free_sqes++;
		}
		cqe++;
		tgt->cq_cons_idx++;
		num_cqes++;

		if (tgt->cq_cons_idx == BNX2FC_CQ_WQES_MAX) {
			tgt->cq_cons_idx = 0;
			cqe = cq;
			tgt->cq_curr_toggle_bit =
				1 - tgt->cq_curr_toggle_bit;
		}
	}
	if (num_cqes) {
		/* Arm CQ only if doorbell is mapped */
		if (tgt->ctx_base)
			bnx2fc_arm_cq(tgt);
		atomic_add(num_free_sqes, &tgt->free_sqes);
	}
	spin_unlock_bh(&tgt->cq_lock);
	return 0;
}

/**
 * bnx2fc_fastpath_notification - process global event queue (KCQ)
 *
 * @hba: adapter structure pointer
 * @new_cqe_kcqe: pointer to newly DMA'd KCQ entry
 *
 * Fast path event notification handler
 */
static void bnx2fc_fastpath_notification(struct bnx2fc_hba *hba,
					struct fcoe_kcqe *new_cqe_kcqe)
{
	u32 conn_id = new_cqe_kcqe->fcoe_conn_id;
	struct bnx2fc_rport *tgt = hba->tgt_ofld_list[conn_id];

	if (!tgt) {
		printk(KERN_ERR PFX "conn_id 0x%x not valid\n", conn_id);
		return;
	}

	bnx2fc_process_new_cqes(tgt);
}

/**
 * bnx2fc_process_ofld_cmpl - process FCoE session offload completion
 *
 * @hba: adapter structure pointer
 * @ofld_kcqe: connection offload kcqe pointer
 *
 * handle session offload completion, enable the session if offload is
 * successful.
 */
static void bnx2fc_process_ofld_cmpl(struct bnx2fc_hba *hba,
					struct fcoe_kcqe *ofld_kcqe)
{
	struct bnx2fc_rport *tgt;
	struct fcoe_port *port;
	struct bnx2fc_interface *interface;
	u32 conn_id;
	u32 context_id;
	int rc;

	conn_id = ofld_kcqe->fcoe_conn_id;
	context_id = ofld_kcqe->fcoe_conn_context_id;
	tgt = hba->tgt_ofld_list[conn_id];
	if (!tgt) {
		printk(KERN_ALERT PFX "ERROR:ofld_cmpl: No pending ofld req\n");
		return;
	}
	BNX2FC_TGT_DBG(tgt, "Entered ofld compl - context_id = 0x%x\n",
		ofld_kcqe->fcoe_conn_context_id);
	port = tgt->port;
	interface = tgt->port->priv;
	if (hba != interface->hba) {
		printk(KERN_ERR PFX "ERROR:ofld_cmpl: HBA mis-match\n");
		goto ofld_cmpl_err;
	}
	/*
	 * cnic has allocated a context_id for this session; use this
	 * while enabling the session.
	 */
	tgt->context_id = context_id;
	if (ofld_kcqe->completion_status) {
		if (ofld_kcqe->completion_status ==
		    FCOE_KCQE_COMPLETION_STATUS_CTX_ALLOC_FAILURE) {
			printk(KERN_ERR PFX "unable to allocate FCoE context "
				"resources\n");
			set_bit(BNX2FC_FLAG_CTX_ALLOC_FAILURE, &tgt->flags);
		}
		goto ofld_cmpl_err;
	} else {

		/* now enable the session */
		rc = bnx2fc_send_session_enable_req(port, tgt);
		if (rc) {
			printk(KERN_ERR PFX "enable session failed\n");
			goto ofld_cmpl_err;
		}
	}
	return;
ofld_cmpl_err:
	set_bit(BNX2FC_FLAG_OFLD_REQ_CMPL, &tgt->flags);
	wake_up_interruptible(&tgt->ofld_wait);
}

/**
 * bnx2fc_process_enable_conn_cmpl - process FCoE session enable completion
 *
 * @hba: adapter structure pointer
 * @ofld_kcqe: connection enable kcqe pointer
 *
 * handle session enable completion, mark the rport as ready
 */
static void bnx2fc_process_enable_conn_cmpl(struct bnx2fc_hba *hba,
						struct fcoe_kcqe *ofld_kcqe)
{
	struct bnx2fc_rport *tgt;
	struct bnx2fc_interface *interface;
	u32 conn_id;
	u32 context_id;

	context_id = ofld_kcqe->fcoe_conn_context_id;
	conn_id = ofld_kcqe->fcoe_conn_id;
	tgt = hba->tgt_ofld_list[conn_id];
	if (!tgt) {
		printk(KERN_ERR PFX "ERROR:enbl_cmpl: No pending ofld req\n");
		return;
	}

	BNX2FC_TGT_DBG(tgt, "Enable compl - context_id = 0x%x\n",
		ofld_kcqe->fcoe_conn_context_id);

	/*
	 * context_id should be the same for this target during offload
	 * and enable
	 */
	if (tgt->context_id != context_id) {
		printk(KERN_ERR PFX "context id mis-match\n");
		return;
	}
	interface = tgt->port->priv;
	if (hba != interface->hba) {
		printk(KERN_ERR PFX "bnx2fc-enbl_cmpl: HBA mis-match\n");
		goto enbl_cmpl_err;
	}
	if (ofld_kcqe->completion_status)
		goto enbl_cmpl_err;
	else {
		/* enable successful - rport ready for issuing IOs */
		set_bit(BNX2FC_FLAG_OFFLOADED, &tgt->flags);
		set_bit(BNX2FC_FLAG_OFLD_REQ_CMPL, &tgt->flags);
		wake_up_interruptible(&tgt->ofld_wait);
	}
	return;

enbl_cmpl_err:
	set_bit(BNX2FC_FLAG_OFLD_REQ_CMPL, &tgt->flags);
	wake_up_interruptible(&tgt->ofld_wait);
}

static void bnx2fc_process_conn_disable_cmpl(struct bnx2fc_hba *hba,
					struct fcoe_kcqe *disable_kcqe)
{

	struct bnx2fc_rport *tgt;
	u32 conn_id;

	conn_id = disable_kcqe->fcoe_conn_id;
	tgt = hba->tgt_ofld_list[conn_id];
	if (!tgt) {
		printk(KERN_ERR PFX "ERROR: disable_cmpl: No disable req\n");
		return;
	}

	BNX2FC_TGT_DBG(tgt, "disable_cmpl: conn_id %d\n", conn_id);

	if (disable_kcqe->completion_status) {
		printk(KERN_ERR PFX "Disable failed with cmpl status %d\n",
			disable_kcqe->completion_status);
		set_bit(BNX2FC_FLAG_DISABLE_FAILED, &tgt->flags);
		set_bit(BNX2FC_FLAG_UPLD_REQ_COMPL, &tgt->flags);
		wake_up_interruptible(&tgt->upld_wait);
	} else {
		/* disable successful */
		BNX2FC_TGT_DBG(tgt, "disable successful\n");
		clear_bit(BNX2FC_FLAG_OFFLOADED, &tgt->flags);
		set_bit(BNX2FC_FLAG_DISABLED, &tgt->flags);
		set_bit(BNX2FC_FLAG_UPLD_REQ_COMPL, &tgt->flags);
		wake_up_interruptible(&tgt->upld_wait);
	}
}
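
/*
 * Session upload is a two-step handshake: a DISABLE_CONN request
 * quiesces the connection and a DESTROY_CONN request then releases its
 * context.  The disable handler above and the destroy handler below
 * both wake the upload waiter on tgt->upld_wait through
 * BNX2FC_FLAG_UPLD_REQ_COMPL (the destroy handler only on success).
 */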

static void bnx2fc_process_conn_destroy_cmpl(struct bnx2fc_hba *hba,
					struct fcoe_kcqe *destroy_kcqe)
{
	struct bnx2fc_rport *tgt;
	u32 conn_id;

	conn_id = destroy_kcqe->fcoe_conn_id;
	tgt = hba->tgt_ofld_list[conn_id];
	if (!tgt) {
		printk(KERN_ERR PFX "destroy_cmpl: No destroy req\n");
		return;
	}

	BNX2FC_TGT_DBG(tgt, "destroy_cmpl: conn_id %d\n", conn_id);

	if (destroy_kcqe->completion_status) {
		printk(KERN_ERR PFX "Destroy conn failed, cmpl status %d\n",
			destroy_kcqe->completion_status);
		return;
	} else {
		/* destroy successful */
		BNX2FC_TGT_DBG(tgt, "upload successful\n");
		clear_bit(BNX2FC_FLAG_DISABLED, &tgt->flags);
		set_bit(BNX2FC_FLAG_DESTROYED, &tgt->flags);
		set_bit(BNX2FC_FLAG_UPLD_REQ_COMPL, &tgt->flags);
		wake_up_interruptible(&tgt->upld_wait);
	}
}

static void bnx2fc_init_failure(struct bnx2fc_hba *hba, u32 err_code)
{
	switch (err_code) {
	case FCOE_KCQE_COMPLETION_STATUS_INVALID_OPCODE:
		printk(KERN_ERR PFX "init_failure due to invalid opcode\n");
		break;

	case FCOE_KCQE_COMPLETION_STATUS_CTX_ALLOC_FAILURE:
		printk(KERN_ERR PFX "init failed due to ctx alloc failure\n");
		break;

	case FCOE_KCQE_COMPLETION_STATUS_NIC_ERROR:
		printk(KERN_ERR PFX "init_failure due to NIC error\n");
		break;
	case FCOE_KCQE_COMPLETION_STATUS_ERROR:
		printk(KERN_ERR PFX "init failure due to compl status err\n");
		break;
	case FCOE_KCQE_COMPLETION_STATUS_WRONG_HSI_VERSION:
		printk(KERN_ERR PFX "init failure due to HSI mismatch\n");
		break;
	default:
		printk(KERN_ERR PFX "Unknown Error code %d\n", err_code);
	}
}

/**
 * bnx2fc_indicate_kcqe - process KCQE
 *
 * @context: adapter structure pointer
 * @kcq: array of kcqe pointers
 * @num_cqe: Number of completion queue elements
 *
 * Generic KCQ event handler
 */
void bnx2fc_indicate_kcqe(void *context, struct kcqe *kcq[],
					u32 num_cqe)
{
	struct bnx2fc_hba *hba = (struct bnx2fc_hba *)context;
	int i = 0;
	struct fcoe_kcqe *kcqe = NULL;

	while (i < num_cqe) {
		kcqe = (struct fcoe_kcqe *) kcq[i++];

		switch (kcqe->op_code) {
		case FCOE_KCQE_OPCODE_CQ_EVENT_NOTIFICATION:
			bnx2fc_fastpath_notification(hba, kcqe);
			break;

		case FCOE_KCQE_OPCODE_OFFLOAD_CONN:
			bnx2fc_process_ofld_cmpl(hba, kcqe);
			break;

		case FCOE_KCQE_OPCODE_ENABLE_CONN:
			bnx2fc_process_enable_conn_cmpl(hba, kcqe);
			break;

		case FCOE_KCQE_OPCODE_INIT_FUNC:
			if (kcqe->completion_status !=
					FCOE_KCQE_COMPLETION_STATUS_SUCCESS) {
				bnx2fc_init_failure(hba,
						kcqe->completion_status);
			} else {
				set_bit(ADAPTER_STATE_UP, &hba->adapter_state);
				bnx2fc_get_link_state(hba);
				printk(KERN_INFO PFX "[%.2x]: FCOE_INIT passed\n",
					(u8)hba->pcidev->bus->number);
			}
			break;

		case FCOE_KCQE_OPCODE_DESTROY_FUNC:
			if (kcqe->completion_status !=
					FCOE_KCQE_COMPLETION_STATUS_SUCCESS) {

				printk(KERN_ERR PFX "DESTROY failed\n");
			} else {
				printk(KERN_ERR PFX "DESTROY success\n");
			}
			set_bit(BNX2FC_FLAG_DESTROY_CMPL, &hba->flags);
			wake_up_interruptible(&hba->destroy_wait);
			break;

		case FCOE_KCQE_OPCODE_DISABLE_CONN:
			bnx2fc_process_conn_disable_cmpl(hba, kcqe);
			break;

		case FCOE_KCQE_OPCODE_DESTROY_CONN:
			bnx2fc_process_conn_destroy_cmpl(hba, kcqe);
			break;

		case FCOE_KCQE_OPCODE_STAT_FUNC:
			if (kcqe->completion_status !=
			    FCOE_KCQE_COMPLETION_STATUS_SUCCESS)
				printk(KERN_ERR PFX "STAT failed\n");
			complete(&hba->stat_req_done);
			break;

		case FCOE_KCQE_OPCODE_FCOE_ERROR:
			/* fall thru */
		default:
			printk(KERN_ERR PFX "unknown opcode 0x%x\n",
				kcqe->op_code);
		}
	}
}

void bnx2fc_add_2_sq(struct bnx2fc_rport *tgt, u16 xid)
{
	struct fcoe_sqe *sqe;

	sqe = &tgt->sq[tgt->sq_prod_idx];

	/* Fill SQ WQE */
	sqe->wqe = xid << FCOE_SQE_TASK_ID_SHIFT;
	sqe->wqe |= tgt->sq_curr_toggle_bit << FCOE_SQE_TOGGLE_BIT_SHIFT;

	/* Advance SQ Prod Idx */
	if (++tgt->sq_prod_idx == BNX2FC_SQ_WQES_MAX) {
		tgt->sq_prod_idx = 0;
		tgt->sq_curr_toggle_bit = 1 - tgt->sq_curr_toggle_bit;
	}
}

void bnx2fc_ring_doorbell(struct bnx2fc_rport *tgt)
{
	struct b577xx_doorbell_set_prod *sq_db = &tgt->sq_db;
	u32 msg;

	wmb();
	sq_db->prod = tgt->sq_prod_idx |
				(tgt->sq_curr_toggle_bit << 15);
	msg = *((u32 *)sq_db);
	writel(cpu_to_le32(msg), tgt->ctx_base);
	mmiowb();
}

int bnx2fc_map_doorbell(struct bnx2fc_rport *tgt)
{
	u32 context_id = tgt->context_id;
	struct fcoe_port *port = tgt->port;
	u32 reg_off;
	resource_size_t reg_base;
	struct bnx2fc_interface *interface = port->priv;
	struct bnx2fc_hba *hba = interface->hba;

	reg_base = pci_resource_start(hba->pcidev,
					BNX2X_DOORBELL_PCI_BAR);
	reg_off = BNX2FC_5771X_DB_PAGE_SIZE *
			(context_id & 0x1FFFF) + DPM_TRIGER_TYPE;
	tgt->ctx_base = ioremap_nocache(reg_base + reg_off, 4);
	if (!tgt->ctx_base)
		return -ENOMEM;
	return 0;
}

char *bnx2fc_get_next_rqe(struct bnx2fc_rport *tgt, u8 num_items)
{
	char *buf = (char *)tgt->rq + (tgt->rq_cons_idx * BNX2FC_RQ_BUF_SZ);

	if (tgt->rq_cons_idx + num_items > BNX2FC_RQ_WQES_MAX)
		return NULL;

	tgt->rq_cons_idx += num_items;

	if (tgt->rq_cons_idx >= BNX2FC_RQ_WQES_MAX)
		tgt->rq_cons_idx -= BNX2FC_RQ_WQES_MAX;

	return buf;
}
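
/*
 * A note on bnx2fc_return_rqe() below: the RQ producer index carries a
 * wrap marker in bit 15 (the offload request initializes rq_prod to
 * 0x8000 for the same reason), so only the low 15 bits are compared
 * against BNX2FC_RQ_WQES_MAX, and on wrap the index is advanced past
 * the ring size rather than simply reset to zero.
 */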

void bnx2fc_return_rqe(struct bnx2fc_rport *tgt, u8 num_items)
{
	/* return the rq buffer */
	u32 next_prod_idx = tgt->rq_prod_idx + num_items;
	if ((next_prod_idx & 0x7fff) == BNX2FC_RQ_WQES_MAX) {
		/* Wrap around RQ */
		next_prod_idx += 0x8000 - BNX2FC_RQ_WQES_MAX;
	}
	tgt->rq_prod_idx = next_prod_idx;
	tgt->conn_db->rq_prod = tgt->rq_prod_idx;
}

void bnx2fc_init_seq_cleanup_task(struct bnx2fc_cmd *seq_clnp_req,
				  struct fcoe_task_ctx_entry *task,
				  struct bnx2fc_cmd *orig_io_req,
				  u32 offset)
{
	struct scsi_cmnd *sc_cmd = orig_io_req->sc_cmd;
	struct bnx2fc_rport *tgt = seq_clnp_req->tgt;
	struct bnx2fc_interface *interface = tgt->port->priv;
	struct fcoe_bd_ctx *bd = orig_io_req->bd_tbl->bd_tbl;
	struct fcoe_task_ctx_entry *orig_task;
	struct fcoe_task_ctx_entry *task_page;
	struct fcoe_ext_mul_sges_ctx *sgl;
	u8 task_type = FCOE_TASK_TYPE_SEQUENCE_CLEANUP;
	u8 orig_task_type;
	u16 orig_xid = orig_io_req->xid;
	u32 context_id = tgt->context_id;
	u64 phys_addr = (u64)orig_io_req->bd_tbl->bd_tbl_dma;
	u32 orig_offset = offset;
	int bd_count;
	int orig_task_idx, index;
	int i;

	memset(task, 0, sizeof(struct fcoe_task_ctx_entry));

	if (sc_cmd->sc_data_direction == DMA_TO_DEVICE)
		orig_task_type = FCOE_TASK_TYPE_WRITE;
	else
		orig_task_type = FCOE_TASK_TYPE_READ;

	/* Tx flags */
	task->txwr_rxrd.const_ctx.tx_flags =
				FCOE_TASK_TX_STATE_SEQUENCE_CLEANUP <<
				FCOE_TCE_TX_WR_RX_RD_CONST_TX_STATE_SHIFT;
	/* init flags */
	task->txwr_rxrd.const_ctx.init_flags = task_type <<
				FCOE_TCE_TX_WR_RX_RD_CONST_TASK_TYPE_SHIFT;
	task->txwr_rxrd.const_ctx.init_flags |= FCOE_TASK_CLASS_TYPE_3 <<
				FCOE_TCE_TX_WR_RX_RD_CONST_CLASS_TYPE_SHIFT;
	task->rxwr_txrd.const_ctx.init_flags = context_id <<
				FCOE_TCE_RX_WR_TX_RD_CONST_CID_SHIFT;

	task->txwr_rxrd.union_ctx.cleanup.ctx.cleaned_task_id = orig_xid;

	task->txwr_rxrd.union_ctx.cleanup.ctx.rolled_tx_seq_cnt = 0;
	task->txwr_rxrd.union_ctx.cleanup.ctx.rolled_tx_data_offset = offset;

	bd_count = orig_io_req->bd_tbl->bd_valid;

	/* obtain the appropriate bd entry from relative offset */
	for (i = 0; i < bd_count; i++) {
		if (offset < bd[i].buf_len)
			break;
		offset -= bd[i].buf_len;
	}
	phys_addr += (i * sizeof(struct fcoe_bd_ctx));

	if (orig_task_type == FCOE_TASK_TYPE_WRITE) {
		task->txwr_only.sgl_ctx.sgl.mul_sgl.cur_sge_addr.lo =
				(u32)phys_addr;
		task->txwr_only.sgl_ctx.sgl.mul_sgl.cur_sge_addr.hi =
				(u32)((u64)phys_addr >> 32);
		task->txwr_only.sgl_ctx.sgl.mul_sgl.sgl_size =
				bd_count;
		task->txwr_only.sgl_ctx.sgl.mul_sgl.cur_sge_off =
				offset; /* adjusted offset */
		task->txwr_only.sgl_ctx.sgl.mul_sgl.cur_sge_idx = i;
	} else {
		orig_task_idx = orig_xid / BNX2FC_TASKS_PER_PAGE;
		index = orig_xid % BNX2FC_TASKS_PER_PAGE;

		task_page = (struct fcoe_task_ctx_entry *)
			     interface->hba->task_ctx[orig_task_idx];
		orig_task = &(task_page[index]);

		/* Multiple SGEs were used for this IO */
		sgl = &task->rxwr_only.union_ctx.read_info.sgl_ctx.sgl;
		sgl->mul_sgl.cur_sge_addr.lo = (u32)phys_addr;
		sgl->mul_sgl.cur_sge_addr.hi = (u32)((u64)phys_addr >> 32);
		sgl->mul_sgl.sgl_size = bd_count;
		sgl->mul_sgl.cur_sge_off = offset; /* adjusted offset */
		sgl->mul_sgl.cur_sge_idx = i;

		memset(&task->rxwr_only.rx_seq_ctx, 0,
		       sizeof(struct fcoe_rx_seq_ctx));
		task->rxwr_only.rx_seq_ctx.low_exp_ro = orig_offset;
		task->rxwr_only.rx_seq_ctx.high_exp_ro = orig_offset;
	}
}
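
/*
 * Exchange cleanup is modelled as a task of its own: the new task's
 * cleaned_task_id field points at the XID of the task being cleaned
 * up, and its completion comes back through the BNX2FC_CLEANUP case in
 * bnx2fc_process_cq_compl() above.
 */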

void bnx2fc_init_cleanup_task(struct bnx2fc_cmd *io_req,
			      struct fcoe_task_ctx_entry *task,
			      u16 orig_xid)
{
	u8 task_type = FCOE_TASK_TYPE_EXCHANGE_CLEANUP;
	struct bnx2fc_rport *tgt = io_req->tgt;
	u32 context_id = tgt->context_id;

	memset(task, 0, sizeof(struct fcoe_task_ctx_entry));

	/* Tx Write Rx Read */
	/* init flags */
	task->txwr_rxrd.const_ctx.init_flags = task_type <<
				FCOE_TCE_TX_WR_RX_RD_CONST_TASK_TYPE_SHIFT;
	task->txwr_rxrd.const_ctx.init_flags |= FCOE_TASK_CLASS_TYPE_3 <<
				FCOE_TCE_TX_WR_RX_RD_CONST_CLASS_TYPE_SHIFT;
	if (tgt->dev_type == TYPE_TAPE)
		task->txwr_rxrd.const_ctx.init_flags |=
				FCOE_TASK_DEV_TYPE_TAPE <<
				FCOE_TCE_TX_WR_RX_RD_CONST_DEV_TYPE_SHIFT;
	else
		task->txwr_rxrd.const_ctx.init_flags |=
				FCOE_TASK_DEV_TYPE_DISK <<
				FCOE_TCE_TX_WR_RX_RD_CONST_DEV_TYPE_SHIFT;
	task->txwr_rxrd.union_ctx.cleanup.ctx.cleaned_task_id = orig_xid;

	/* Tx flags */
	task->txwr_rxrd.const_ctx.tx_flags =
				FCOE_TASK_TX_STATE_EXCHANGE_CLEANUP <<
				FCOE_TCE_TX_WR_RX_RD_CONST_TX_STATE_SHIFT;

	/* Rx Read Tx Write */
	task->rxwr_txrd.const_ctx.init_flags = context_id <<
				FCOE_TCE_RX_WR_TX_RD_CONST_CID_SHIFT;
	task->rxwr_txrd.var_ctx.rx_flags |= 1 <<
				FCOE_TCE_RX_WR_TX_RD_VAR_EXP_FIRST_FRAME_SHIFT;
}

void bnx2fc_init_mp_task(struct bnx2fc_cmd *io_req,
			 struct fcoe_task_ctx_entry *task)
{
	struct bnx2fc_mp_req *mp_req = &(io_req->mp_req);
	struct bnx2fc_rport *tgt = io_req->tgt;
	struct fc_frame_header *fc_hdr;
	struct fcoe_ext_mul_sges_ctx *sgl;
	u8 task_type = 0;
	u64 *hdr;
	u64 temp_hdr[3];
	u32 context_id;


	/* Obtain task_type */
	if ((io_req->cmd_type == BNX2FC_TASK_MGMT_CMD) ||
	    (io_req->cmd_type == BNX2FC_ELS)) {
		task_type = FCOE_TASK_TYPE_MIDPATH;
	} else if (io_req->cmd_type == BNX2FC_ABTS) {
		task_type = FCOE_TASK_TYPE_ABTS;
	}

	memset(task, 0, sizeof(struct fcoe_task_ctx_entry));

	/* Setup the task from io_req for easy reference */
	io_req->task = task;

	BNX2FC_IO_DBG(io_req, "Init MP task for cmd_type = %d task_type = %d\n",
		io_req->cmd_type, task_type);

	/* Tx only */
	if ((task_type == FCOE_TASK_TYPE_MIDPATH) ||
	    (task_type == FCOE_TASK_TYPE_UNSOLICITED)) {
		task->txwr_only.sgl_ctx.sgl.mul_sgl.cur_sge_addr.lo =
				(u32)mp_req->mp_req_bd_dma;
		task->txwr_only.sgl_ctx.sgl.mul_sgl.cur_sge_addr.hi =
				(u32)((u64)mp_req->mp_req_bd_dma >> 32);
		task->txwr_only.sgl_ctx.sgl.mul_sgl.sgl_size = 1;
	}

	/* Tx Write Rx Read */
	/* init flags */
	task->txwr_rxrd.const_ctx.init_flags = task_type <<
				FCOE_TCE_TX_WR_RX_RD_CONST_TASK_TYPE_SHIFT;
	if (tgt->dev_type == TYPE_TAPE)
		task->txwr_rxrd.const_ctx.init_flags |=
				FCOE_TASK_DEV_TYPE_TAPE <<
				FCOE_TCE_TX_WR_RX_RD_CONST_DEV_TYPE_SHIFT;
	else
		task->txwr_rxrd.const_ctx.init_flags |=
				FCOE_TASK_DEV_TYPE_DISK <<
				FCOE_TCE_TX_WR_RX_RD_CONST_DEV_TYPE_SHIFT;
	task->txwr_rxrd.const_ctx.init_flags |= FCOE_TASK_CLASS_TYPE_3 <<
				FCOE_TCE_TX_WR_RX_RD_CONST_CLASS_TYPE_SHIFT;

	/* tx flags */
	task->txwr_rxrd.const_ctx.tx_flags = FCOE_TASK_TX_STATE_INIT <<
				FCOE_TCE_TX_WR_RX_RD_CONST_TX_STATE_SHIFT;

	/* Rx Write Tx Read */
	task->rxwr_txrd.const_ctx.data_2_trns = io_req->data_xfer_len;

	/* rx flags */
	task->rxwr_txrd.var_ctx.rx_flags |= 1 <<
				FCOE_TCE_RX_WR_TX_RD_VAR_EXP_FIRST_FRAME_SHIFT;

	context_id = tgt->context_id;
	task->rxwr_txrd.const_ctx.init_flags = context_id <<
				FCOE_TCE_RX_WR_TX_RD_CONST_CID_SHIFT;

	fc_hdr = &(mp_req->req_fc_hdr);
	if (task_type == FCOE_TASK_TYPE_MIDPATH) {
		fc_hdr->fh_ox_id = cpu_to_be16(io_req->xid);
		fc_hdr->fh_rx_id = htons(0xffff);
		task->rxwr_txrd.var_ctx.rx_id = 0xffff;
	} else if (task_type == FCOE_TASK_TYPE_UNSOLICITED) {
		fc_hdr->fh_rx_id = cpu_to_be16(io_req->xid);
	}

	/* Fill FC Header into middle path buffer */
	hdr = (u64 *) &task->txwr_rxrd.union_ctx.tx_frame.fc_hdr;
	memcpy(temp_hdr, fc_hdr, sizeof(temp_hdr));
	hdr[0] = cpu_to_be64(temp_hdr[0]);
	hdr[1] = cpu_to_be64(temp_hdr[1]);
	hdr[2] = cpu_to_be64(temp_hdr[2]);

	/* Rx Only */
	if (task_type == FCOE_TASK_TYPE_MIDPATH) {
		sgl = &task->rxwr_only.union_ctx.read_info.sgl_ctx.sgl;

		sgl->mul_sgl.cur_sge_addr.lo = (u32)mp_req->mp_resp_bd_dma;
		sgl->mul_sgl.cur_sge_addr.hi =
				(u32)((u64)mp_req->mp_resp_bd_dma >> 32);
		sgl->mul_sgl.sgl_size = 1;
	}
}

void bnx2fc_init_task(struct bnx2fc_cmd *io_req,
		      struct fcoe_task_ctx_entry *task)
{
	u8 task_type;
	struct scsi_cmnd *sc_cmd = io_req->sc_cmd;
	struct io_bdt *bd_tbl = io_req->bd_tbl;
	struct bnx2fc_rport *tgt = io_req->tgt;
	struct fcoe_cached_sge_ctx *cached_sge;
	struct fcoe_ext_mul_sges_ctx *sgl;
	int dev_type = tgt->dev_type;
	u64 *fcp_cmnd;
	u64 tmp_fcp_cmnd[4];
	u32 context_id;
	int cnt, i;
	int bd_count;

	memset(task, 0, sizeof(struct fcoe_task_ctx_entry));

	/* Setup the task from io_req for easy reference */
	io_req->task = task;

	if (sc_cmd->sc_data_direction == DMA_TO_DEVICE)
		task_type = FCOE_TASK_TYPE_WRITE;
	else
		task_type = FCOE_TASK_TYPE_READ;

	/* Tx only */
	bd_count = bd_tbl->bd_valid;
	cached_sge = &task->rxwr_only.union_ctx.read_info.sgl_ctx.cached_sge;
	if (task_type == FCOE_TASK_TYPE_WRITE) {
		if ((dev_type == TYPE_DISK) && (bd_count == 1)) {
			struct fcoe_bd_ctx *fcoe_bd_tbl = bd_tbl->bd_tbl;

			task->txwr_only.sgl_ctx.cached_sge.cur_buf_addr.lo =
					cached_sge->cur_buf_addr.lo =
					fcoe_bd_tbl->buf_addr_lo;
			task->txwr_only.sgl_ctx.cached_sge.cur_buf_addr.hi =
					cached_sge->cur_buf_addr.hi =
					fcoe_bd_tbl->buf_addr_hi;
			task->txwr_only.sgl_ctx.cached_sge.cur_buf_rem =
					cached_sge->cur_buf_rem =
					fcoe_bd_tbl->buf_len;

			task->txwr_rxrd.const_ctx.init_flags |= 1 <<
				FCOE_TCE_TX_WR_RX_RD_CONST_CACHED_SGE_SHIFT;
		} else {
			task->txwr_only.sgl_ctx.sgl.mul_sgl.cur_sge_addr.lo =
					(u32)bd_tbl->bd_tbl_dma;
			task->txwr_only.sgl_ctx.sgl.mul_sgl.cur_sge_addr.hi =
					(u32)((u64)bd_tbl->bd_tbl_dma >> 32);
			task->txwr_only.sgl_ctx.sgl.mul_sgl.sgl_size =
					bd_tbl->bd_valid;
		}
	}
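
	/*
	 * For a single-BD write to a disk device the buffer is described
	 * inline through the cached SGE above rather than an external
	 * SGL, presumably sparing the firmware a DMA fetch of the BD
	 * table; the read path at the end of this function applies the
	 * same optimization for one- and two-BD transfers.
	 */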
/**
 * bnx2fc_setup_task_ctx - allocate and map task context
 *
 * @hba: pointer to adapter structure
 *
 * Allocate memory for the task contexts and the associated BD table
 * the firmware uses to locate them.
 */
int bnx2fc_setup_task_ctx(struct bnx2fc_hba *hba)
{
	int rc = 0;
	struct regpair *task_ctx_bdt;
	dma_addr_t addr;
	int i;

	/*
	 * Allocate task context bd table. A page size of bd table
	 * can map 256 buffers. Each buffer contains 32 task context
	 * entries. Hence the limit with one page is 8192 task context
	 * entries.
	 */
	hba->task_ctx_bd_tbl = dma_alloc_coherent(&hba->pcidev->dev,
						  PAGE_SIZE,
						  &hba->task_ctx_bd_dma,
						  GFP_KERNEL);
	if (!hba->task_ctx_bd_tbl) {
		printk(KERN_ERR PFX "unable to allocate task context BDT\n");
		rc = -ENOMEM;
		goto out;
	}
	memset(hba->task_ctx_bd_tbl, 0, PAGE_SIZE);

	/*
	 * Allocate task_ctx which is an array of pointers pointing to
	 * a page containing 32 task contexts
	 */
	hba->task_ctx = kzalloc((BNX2FC_TASK_CTX_ARR_SZ * sizeof(void *)),
				GFP_KERNEL);
	if (!hba->task_ctx) {
		printk(KERN_ERR PFX "unable to allocate task context array\n");
		rc = -ENOMEM;
		goto out1;
	}

	/*
	 * Allocate task_ctx_dma which is an array of dma addresses
	 */
	hba->task_ctx_dma = kmalloc((BNX2FC_TASK_CTX_ARR_SZ *
				     sizeof(dma_addr_t)), GFP_KERNEL);
	if (!hba->task_ctx_dma) {
		printk(KERN_ERR PFX "unable to alloc context mapping array\n");
		rc = -ENOMEM;
		goto out2;
	}

	task_ctx_bdt = (struct regpair *)hba->task_ctx_bd_tbl;
	for (i = 0; i < BNX2FC_TASK_CTX_ARR_SZ; i++) {
		hba->task_ctx[i] = dma_alloc_coherent(&hba->pcidev->dev,
						      PAGE_SIZE,
						      &hba->task_ctx_dma[i],
						      GFP_KERNEL);
		if (!hba->task_ctx[i]) {
			printk(KERN_ERR PFX "unable to alloc task context\n");
			rc = -ENOMEM;
			goto out3;
		}
		memset(hba->task_ctx[i], 0, PAGE_SIZE);
		addr = (u64)hba->task_ctx_dma[i];
		task_ctx_bdt->hi = cpu_to_le32((u64)addr >> 32);
		task_ctx_bdt->lo = cpu_to_le32((u32)addr);
		task_ctx_bdt++;
	}
	return 0;

out3:
	for (i = 0; i < BNX2FC_TASK_CTX_ARR_SZ; i++) {
		if (hba->task_ctx[i]) {
			dma_free_coherent(&hba->pcidev->dev, PAGE_SIZE,
					  hba->task_ctx[i],
					  hba->task_ctx_dma[i]);
			hba->task_ctx[i] = NULL;
		}
	}

	kfree(hba->task_ctx_dma);
	hba->task_ctx_dma = NULL;
out2:
	kfree(hba->task_ctx);
	hba->task_ctx = NULL;
out1:
	dma_free_coherent(&hba->pcidev->dev, PAGE_SIZE,
			  hba->task_ctx_bd_tbl, hba->task_ctx_bd_dma);
	hba->task_ctx_bd_tbl = NULL;
out:
	return rc;
}
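/*
 * Illustrative sketch (assumption: the per-page and per-buffer counts
 * from the allocation comment above). The number of task-context pages
 * is a round-up division of the task count by the tasks a page can
 * hold, which the kernel's DIV_ROUND_UP() expresses directly:
 */
static inline int bnx2fc_task_ctx_pages(u32 max_tasks, u32 tasks_per_page)
{
	/* e.g. 8192 tasks at 32 tasks per page -> 256 pages */
	return DIV_ROUND_UP(max_tasks, tasks_per_page);
}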
void bnx2fc_free_task_ctx(struct bnx2fc_hba *hba)
{
	int i;

	if (hba->task_ctx_bd_tbl) {
		dma_free_coherent(&hba->pcidev->dev, PAGE_SIZE,
				  hba->task_ctx_bd_tbl,
				  hba->task_ctx_bd_dma);
		hba->task_ctx_bd_tbl = NULL;
	}

	if (hba->task_ctx) {
		for (i = 0; i < BNX2FC_TASK_CTX_ARR_SZ; i++) {
			if (hba->task_ctx[i]) {
				dma_free_coherent(&hba->pcidev->dev, PAGE_SIZE,
						  hba->task_ctx[i],
						  hba->task_ctx_dma[i]);
				hba->task_ctx[i] = NULL;
			}
		}
		kfree(hba->task_ctx);
		hba->task_ctx = NULL;
	}

	kfree(hba->task_ctx_dma);
	hba->task_ctx_dma = NULL;
}

static void bnx2fc_free_hash_table(struct bnx2fc_hba *hba)
{
	int i;
	int segment_count;
	u32 *pbl;

	if (hba->hash_tbl_segments) {
		pbl = hba->hash_tbl_pbl;
		segment_count = hba->hash_tbl_segment_count;
		for (i = 0; i < segment_count; ++i) {
			dma_addr_t dma_address;

			/* Each PBL entry is a little-endian lo/hi pair */
			dma_address = le32_to_cpu(*pbl);
			++pbl;
			dma_address += ((u64)le32_to_cpu(*pbl)) << 32;
			++pbl;
			dma_free_coherent(&hba->pcidev->dev,
					  BNX2FC_HASH_TBL_CHUNK_SIZE,
					  hba->hash_tbl_segments[i],
					  dma_address);
		}
		kfree(hba->hash_tbl_segments);
		hba->hash_tbl_segments = NULL;
	}

	if (hba->hash_tbl_pbl) {
		dma_free_coherent(&hba->pcidev->dev, PAGE_SIZE,
				  hba->hash_tbl_pbl,
				  hba->hash_tbl_pbl_dma);
		hba->hash_tbl_pbl = NULL;
	}
}
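/*
 * Illustrative sketch (hypothetical helper): each PBL entry stores a
 * segment's DMA address as two consecutive little-endian 32-bit words,
 * low half first. Rebuilding the 64-bit address from such a pair, as
 * bnx2fc_free_hash_table() does inline:
 */
static inline dma_addr_t bnx2fc_pbl_read_addr(const u32 *pbl)
{
	dma_addr_t addr;

	addr = le32_to_cpu(pbl[0]);
	addr += ((u64)le32_to_cpu(pbl[1])) << 32;
	return addr;
}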
static int bnx2fc_allocate_hash_table(struct bnx2fc_hba *hba)
{
	int i;
	int hash_table_size;
	int segment_count;
	int segment_array_size;
	int dma_segment_array_size;
	dma_addr_t *dma_segment_array;
	u32 *pbl;

	hash_table_size = BNX2FC_NUM_MAX_SESS * BNX2FC_MAX_ROWS_IN_HASH_TBL *
		sizeof(struct fcoe_hash_table_entry);

	/* Round up to a whole number of BNX2FC_HASH_TBL_CHUNK_SIZE chunks */
	segment_count = hash_table_size + BNX2FC_HASH_TBL_CHUNK_SIZE - 1;
	segment_count /= BNX2FC_HASH_TBL_CHUNK_SIZE;
	hba->hash_tbl_segment_count = segment_count;

	segment_array_size = segment_count * sizeof(*hba->hash_tbl_segments);
	hba->hash_tbl_segments = kzalloc(segment_array_size, GFP_KERNEL);
	if (!hba->hash_tbl_segments) {
		printk(KERN_ERR PFX "hash table pointers alloc failed\n");
		return -ENOMEM;
	}

	dma_segment_array_size = segment_count * sizeof(*dma_segment_array);
	dma_segment_array = kzalloc(dma_segment_array_size, GFP_KERNEL);
	if (!dma_segment_array) {
		printk(KERN_ERR PFX "hash table pointers (dma) alloc failed\n");
		goto cleanup_ht;
	}

	for (i = 0; i < segment_count; ++i) {
		hba->hash_tbl_segments[i] =
			dma_alloc_coherent(&hba->pcidev->dev,
					   BNX2FC_HASH_TBL_CHUNK_SIZE,
					   &dma_segment_array[i],
					   GFP_KERNEL);
		if (!hba->hash_tbl_segments[i]) {
			printk(KERN_ERR PFX "hash segment alloc failed\n");
			goto cleanup_dma;
		}
		memset(hba->hash_tbl_segments[i], 0,
		       BNX2FC_HASH_TBL_CHUNK_SIZE);
	}

	hba->hash_tbl_pbl = dma_alloc_coherent(&hba->pcidev->dev,
					       PAGE_SIZE,
					       &hba->hash_tbl_pbl_dma,
					       GFP_KERNEL);
	if (!hba->hash_tbl_pbl) {
		printk(KERN_ERR PFX "hash table pbl alloc failed\n");
		goto cleanup_dma;
	}
	memset(hba->hash_tbl_pbl, 0, PAGE_SIZE);

	/* Publish each segment's DMA address in the PBL as a lo/hi pair */
	pbl = hba->hash_tbl_pbl;
	for (i = 0; i < segment_count; ++i) {
		u64 paddr = dma_segment_array[i];
		*pbl = cpu_to_le32((u32) paddr);
		++pbl;
		*pbl = cpu_to_le32((u32) (paddr >> 32));
		++pbl;
	}
	kfree(dma_segment_array);
	return 0;

cleanup_dma:
	for (i = 0; i < segment_count; ++i) {
		if (hba->hash_tbl_segments[i])
			dma_free_coherent(&hba->pcidev->dev,
					  BNX2FC_HASH_TBL_CHUNK_SIZE,
					  hba->hash_tbl_segments[i],
					  dma_segment_array[i]);
	}
	kfree(dma_segment_array);

cleanup_ht:
	kfree(hba->hash_tbl_segments);
	hba->hash_tbl_segments = NULL;
	return -ENOMEM;
}
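/*
 * Illustrative sketch (hypothetical helper): the write side of the PBL
 * convention used above, storing a DMA address as a little-endian lo/hi
 * word pair. It mirrors the bnx2fc_pbl_read_addr() sketch following
 * bnx2fc_free_hash_table():
 */
static inline void bnx2fc_pbl_write_addr(u32 *pbl, dma_addr_t paddr)
{
	pbl[0] = cpu_to_le32((u32)paddr);
	pbl[1] = cpu_to_le32((u32)((u64)paddr >> 32));
}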
/**
 * bnx2fc_setup_fw_resc - Allocate and map hash table and dummy buffer
 *
 * @hba: Pointer to adapter structure
 *
 */
int bnx2fc_setup_fw_resc(struct bnx2fc_hba *hba)
{
	u64 addr;
	u32 mem_size;
	int i;

	if (bnx2fc_allocate_hash_table(hba))
		return -ENOMEM;

	mem_size = BNX2FC_NUM_MAX_SESS * sizeof(struct regpair);
	hba->t2_hash_tbl_ptr = dma_alloc_coherent(&hba->pcidev->dev, mem_size,
						  &hba->t2_hash_tbl_ptr_dma,
						  GFP_KERNEL);
	if (!hba->t2_hash_tbl_ptr) {
		printk(KERN_ERR PFX "unable to allocate t2 hash table ptr\n");
		bnx2fc_free_fw_resc(hba);
		return -ENOMEM;
	}
	memset(hba->t2_hash_tbl_ptr, 0x00, mem_size);

	mem_size = BNX2FC_NUM_MAX_SESS *
		sizeof(struct fcoe_t2_hash_table_entry);
	hba->t2_hash_tbl = dma_alloc_coherent(&hba->pcidev->dev, mem_size,
					      &hba->t2_hash_tbl_dma,
					      GFP_KERNEL);
	if (!hba->t2_hash_tbl) {
		printk(KERN_ERR PFX "unable to allocate t2 hash table\n");
		bnx2fc_free_fw_resc(hba);
		return -ENOMEM;
	}
	memset(hba->t2_hash_tbl, 0x00, mem_size);
	/* Chain each entry to the DMA address of the next one */
	for (i = 0; i < BNX2FC_NUM_MAX_SESS; i++) {
		addr = (u64)hba->t2_hash_tbl_dma +
			((i + 1) * sizeof(struct fcoe_t2_hash_table_entry));
		hba->t2_hash_tbl[i].next.lo = addr & 0xffffffff;
		hba->t2_hash_tbl[i].next.hi = addr >> 32;
	}

	hba->dummy_buffer = dma_alloc_coherent(&hba->pcidev->dev,
					       PAGE_SIZE, &hba->dummy_buf_dma,
					       GFP_KERNEL);
	if (!hba->dummy_buffer) {
		printk(KERN_ERR PFX "unable to alloc MP Dummy Buffer\n");
		bnx2fc_free_fw_resc(hba);
		return -ENOMEM;
	}

	hba->stats_buffer = dma_alloc_coherent(&hba->pcidev->dev,
					       PAGE_SIZE,
					       &hba->stats_buf_dma,
					       GFP_KERNEL);
	if (!hba->stats_buffer) {
		printk(KERN_ERR PFX "unable to alloc Stats Buffer\n");
		bnx2fc_free_fw_resc(hba);
		return -ENOMEM;
	}
	memset(hba->stats_buffer, 0x00, PAGE_SIZE);

	return 0;
}

void bnx2fc_free_fw_resc(struct bnx2fc_hba *hba)
{
	u32 mem_size;

	if (hba->stats_buffer) {
		dma_free_coherent(&hba->pcidev->dev, PAGE_SIZE,
				  hba->stats_buffer, hba->stats_buf_dma);
		hba->stats_buffer = NULL;
	}

	if (hba->dummy_buffer) {
		dma_free_coherent(&hba->pcidev->dev, PAGE_SIZE,
				  hba->dummy_buffer, hba->dummy_buf_dma);
		hba->dummy_buffer = NULL;
	}

	if (hba->t2_hash_tbl_ptr) {
		mem_size = BNX2FC_NUM_MAX_SESS * sizeof(struct regpair);
		dma_free_coherent(&hba->pcidev->dev, mem_size,
				  hba->t2_hash_tbl_ptr,
				  hba->t2_hash_tbl_ptr_dma);
		hba->t2_hash_tbl_ptr = NULL;
	}

	if (hba->t2_hash_tbl) {
		mem_size = BNX2FC_NUM_MAX_SESS *
			sizeof(struct fcoe_t2_hash_table_entry);
		dma_free_coherent(&hba->pcidev->dev, mem_size,
				  hba->t2_hash_tbl, hba->t2_hash_tbl_dma);
		hba->t2_hash_tbl = NULL;
	}
	bnx2fc_free_hash_table(hba);
}
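/*
 * Illustrative sketch (hypothetical helper): bnx2fc_setup_fw_resc()
 * chains the T2 hash table entries through their 'next' regpairs, so
 * the firmware sees a singly linked free list laid out contiguously in
 * DMA memory, with entry i pointing at the DMA address of entry i + 1:
 */
static inline void bnx2fc_t2_set_next(struct fcoe_t2_hash_table_entry *ent,
				      u64 next_addr)
{
	ent->next.lo = next_addr & 0xffffffff;
	ent->next.hi = next_addr >> 32;
}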