1 // SPDX-License-Identifier: GPL-2.0-only 2 /* 3 * QLogic iSCSI Offload Driver 4 * Copyright (c) 2016 Cavium Inc. 5 */ 6 7 #include <linux/blkdev.h> 8 #include <scsi/scsi_tcq.h> 9 #include <linux/delay.h> 10 11 #include "qedi.h" 12 #include "qedi_iscsi.h" 13 #include "qedi_gbl.h" 14 #include "qedi_fw_iscsi.h" 15 #include "qedi_fw_scsi.h" 16 17 static int qedi_send_iscsi_tmf(struct qedi_conn *qedi_conn, 18 struct iscsi_task *mtask); 19 20 void qedi_iscsi_unmap_sg_list(struct qedi_cmd *cmd) 21 { 22 struct scsi_cmnd *sc = cmd->scsi_cmd; 23 24 if (cmd->io_tbl.sge_valid && sc) { 25 cmd->io_tbl.sge_valid = 0; 26 scsi_dma_unmap(sc); 27 } 28 } 29 30 static void qedi_process_logout_resp(struct qedi_ctx *qedi, 31 union iscsi_cqe *cqe, 32 struct iscsi_task *task, 33 struct qedi_conn *qedi_conn) 34 { 35 struct iscsi_conn *conn = qedi_conn->cls_conn->dd_data; 36 struct iscsi_logout_rsp *resp_hdr; 37 struct iscsi_session *session = conn->session; 38 struct iscsi_logout_response_hdr *cqe_logout_response; 39 struct qedi_cmd *cmd; 40 41 cmd = (struct qedi_cmd *)task->dd_data; 42 cqe_logout_response = &cqe->cqe_common.iscsi_hdr.logout_response; 43 spin_lock(&session->back_lock); 44 resp_hdr = (struct iscsi_logout_rsp *)&qedi_conn->gen_pdu.resp_hdr; 45 memset(resp_hdr, 0, sizeof(struct iscsi_hdr)); 46 resp_hdr->opcode = cqe_logout_response->opcode; 47 resp_hdr->flags = cqe_logout_response->flags; 48 resp_hdr->hlength = 0; 49 50 resp_hdr->itt = build_itt(cqe->cqe_solicited.itid, conn->session->age); 51 resp_hdr->statsn = cpu_to_be32(cqe_logout_response->stat_sn); 52 resp_hdr->exp_cmdsn = cpu_to_be32(cqe_logout_response->exp_cmd_sn); 53 resp_hdr->max_cmdsn = cpu_to_be32(cqe_logout_response->max_cmd_sn); 54 55 resp_hdr->t2wait = cpu_to_be32(cqe_logout_response->time_2_wait); 56 resp_hdr->t2retain = cpu_to_be32(cqe_logout_response->time_2_retain); 57 58 QEDI_INFO(&qedi->dbg_ctx, QEDI_LOG_TID, 59 "Freeing tid=0x%x for cid=0x%x\n", 60 cmd->task_id, qedi_conn->iscsi_conn_id); 61 62 if (likely(cmd->io_cmd_in_list)) { 63 cmd->io_cmd_in_list = false; 64 list_del_init(&cmd->io_cmd); 65 qedi_conn->active_cmd_count--; 66 } else { 67 QEDI_INFO(&qedi->dbg_ctx, QEDI_LOG_INFO, 68 "Active cmd list node already deleted, tid=0x%x, cid=0x%x, io_cmd_node=%p\n", 69 cmd->task_id, qedi_conn->iscsi_conn_id, 70 &cmd->io_cmd); 71 } 72 73 cmd->state = RESPONSE_RECEIVED; 74 qedi_clear_task_idx(qedi, cmd->task_id); 75 __iscsi_complete_pdu(conn, (struct iscsi_hdr *)resp_hdr, NULL, 0); 76 77 spin_unlock(&session->back_lock); 78 } 79 80 static void qedi_process_text_resp(struct qedi_ctx *qedi, 81 union iscsi_cqe *cqe, 82 struct iscsi_task *task, 83 struct qedi_conn *qedi_conn) 84 { 85 struct iscsi_conn *conn = qedi_conn->cls_conn->dd_data; 86 struct iscsi_session *session = conn->session; 87 struct e4_iscsi_task_context *task_ctx; 88 struct iscsi_text_rsp *resp_hdr_ptr; 89 struct iscsi_text_response_hdr *cqe_text_response; 90 struct qedi_cmd *cmd; 91 int pld_len; 92 93 cmd = (struct qedi_cmd *)task->dd_data; 94 task_ctx = qedi_get_task_mem(&qedi->tasks, cmd->task_id); 95 96 cqe_text_response = &cqe->cqe_common.iscsi_hdr.text_response; 97 spin_lock(&session->back_lock); 98 resp_hdr_ptr = (struct iscsi_text_rsp *)&qedi_conn->gen_pdu.resp_hdr; 99 memset(resp_hdr_ptr, 0, sizeof(struct iscsi_hdr)); 100 resp_hdr_ptr->opcode = cqe_text_response->opcode; 101 resp_hdr_ptr->flags = cqe_text_response->flags; 102 resp_hdr_ptr->hlength = 0; 103 104 hton24(resp_hdr_ptr->dlength, 105 (cqe_text_response->hdr_second_dword & 106 ISCSI_TEXT_RESPONSE_HDR_DATA_SEG_LEN_MASK)); 107 108 resp_hdr_ptr->itt = build_itt(cqe->cqe_solicited.itid, 109 conn->session->age); 110 resp_hdr_ptr->ttt = cqe_text_response->ttt; 111 resp_hdr_ptr->statsn = cpu_to_be32(cqe_text_response->stat_sn); 112 resp_hdr_ptr->exp_cmdsn = cpu_to_be32(cqe_text_response->exp_cmd_sn); 113 resp_hdr_ptr->max_cmdsn = cpu_to_be32(cqe_text_response->max_cmd_sn); 114 115 pld_len = cqe_text_response->hdr_second_dword & 116 ISCSI_TEXT_RESPONSE_HDR_DATA_SEG_LEN_MASK; 117 qedi_conn->gen_pdu.resp_wr_ptr = qedi_conn->gen_pdu.resp_buf + pld_len; 118 119 memset(task_ctx, '\0', sizeof(*task_ctx)); 120 121 QEDI_INFO(&qedi->dbg_ctx, QEDI_LOG_TID, 122 "Freeing tid=0x%x for cid=0x%x\n", 123 cmd->task_id, qedi_conn->iscsi_conn_id); 124 125 if (likely(cmd->io_cmd_in_list)) { 126 cmd->io_cmd_in_list = false; 127 list_del_init(&cmd->io_cmd); 128 qedi_conn->active_cmd_count--; 129 } else { 130 QEDI_INFO(&qedi->dbg_ctx, QEDI_LOG_INFO, 131 "Active cmd list node already deleted, tid=0x%x, cid=0x%x, io_cmd_node=%p\n", 132 cmd->task_id, qedi_conn->iscsi_conn_id, 133 &cmd->io_cmd); 134 } 135 136 cmd->state = RESPONSE_RECEIVED; 137 qedi_clear_task_idx(qedi, cmd->task_id); 138 139 __iscsi_complete_pdu(conn, (struct iscsi_hdr *)resp_hdr_ptr, 140 qedi_conn->gen_pdu.resp_buf, 141 (qedi_conn->gen_pdu.resp_wr_ptr - 142 qedi_conn->gen_pdu.resp_buf)); 143 spin_unlock(&session->back_lock); 144 } 145 146 static void qedi_tmf_resp_work(struct work_struct *work) 147 { 148 struct qedi_cmd *qedi_cmd = 149 container_of(work, struct qedi_cmd, tmf_work); 150 struct qedi_conn *qedi_conn = qedi_cmd->conn; 151 struct qedi_ctx *qedi = qedi_conn->qedi; 152 struct iscsi_conn *conn = qedi_conn->cls_conn->dd_data; 153 struct iscsi_session *session = conn->session; 154 struct iscsi_tm_rsp *resp_hdr_ptr; 155 int rval = 0; 156 157 set_bit(QEDI_CONN_FW_CLEANUP, &qedi_conn->flags); 158 resp_hdr_ptr = (struct iscsi_tm_rsp *)qedi_cmd->tmf_resp_buf; 159 160 iscsi_block_session(session->cls_session); 161 rval = qedi_cleanup_all_io(qedi, qedi_conn, qedi_cmd->task, true); 162 if (rval) { 163 qedi_clear_task_idx(qedi, qedi_cmd->task_id); 164 iscsi_unblock_session(session->cls_session); 165 goto exit_tmf_resp; 166 } 167 168 iscsi_unblock_session(session->cls_session); 169 qedi_clear_task_idx(qedi, qedi_cmd->task_id); 170 171 spin_lock(&session->back_lock); 172 __iscsi_complete_pdu(conn, (struct iscsi_hdr *)resp_hdr_ptr, NULL, 0); 173 spin_unlock(&session->back_lock); 174 175 exit_tmf_resp: 176 kfree(resp_hdr_ptr); 177 clear_bit(QEDI_CONN_FW_CLEANUP, &qedi_conn->flags); 178 } 179 180 static void qedi_process_tmf_resp(struct qedi_ctx *qedi, 181 union iscsi_cqe *cqe, 182 struct iscsi_task *task, 183 struct qedi_conn *qedi_conn) 184 185 { 186 struct iscsi_conn *conn = qedi_conn->cls_conn->dd_data; 187 struct iscsi_session *session = conn->session; 188 struct iscsi_tmf_response_hdr *cqe_tmp_response; 189 struct iscsi_tm_rsp *resp_hdr_ptr; 190 struct iscsi_tm *tmf_hdr; 191 struct qedi_cmd *qedi_cmd = NULL; 192 193 cqe_tmp_response = &cqe->cqe_common.iscsi_hdr.tmf_response; 194 195 qedi_cmd = task->dd_data; 196 qedi_cmd->tmf_resp_buf = kzalloc(sizeof(*resp_hdr_ptr), GFP_ATOMIC); 197 if (!qedi_cmd->tmf_resp_buf) { 198 QEDI_ERR(&qedi->dbg_ctx, 199 "Failed to allocate resp buf, cid=0x%x\n", 200 qedi_conn->iscsi_conn_id); 201 return; 202 } 203 204 spin_lock(&session->back_lock); 205 resp_hdr_ptr = (struct iscsi_tm_rsp *)qedi_cmd->tmf_resp_buf; 206 memset(resp_hdr_ptr, 0, sizeof(struct iscsi_tm_rsp)); 207 208 /* Fill up the header */ 209 resp_hdr_ptr->opcode = cqe_tmp_response->opcode; 210 resp_hdr_ptr->flags = cqe_tmp_response->hdr_flags; 211 resp_hdr_ptr->response = cqe_tmp_response->hdr_response; 212 resp_hdr_ptr->hlength = 0; 213 214 hton24(resp_hdr_ptr->dlength, 215 (cqe_tmp_response->hdr_second_dword & 216 ISCSI_TMF_RESPONSE_HDR_DATA_SEG_LEN_MASK)); 217 resp_hdr_ptr->itt = build_itt(cqe->cqe_solicited.itid, 218 conn->session->age); 219 resp_hdr_ptr->statsn = cpu_to_be32(cqe_tmp_response->stat_sn); 220 resp_hdr_ptr->exp_cmdsn = cpu_to_be32(cqe_tmp_response->exp_cmd_sn); 221 resp_hdr_ptr->max_cmdsn = cpu_to_be32(cqe_tmp_response->max_cmd_sn); 222 223 tmf_hdr = (struct iscsi_tm *)qedi_cmd->task->hdr; 224 225 if (likely(qedi_cmd->io_cmd_in_list)) { 226 qedi_cmd->io_cmd_in_list = false; 227 list_del_init(&qedi_cmd->io_cmd); 228 qedi_conn->active_cmd_count--; 229 } 230 231 if (((tmf_hdr->flags & ISCSI_FLAG_TM_FUNC_MASK) == 232 ISCSI_TM_FUNC_LOGICAL_UNIT_RESET) || 233 ((tmf_hdr->flags & ISCSI_FLAG_TM_FUNC_MASK) == 234 ISCSI_TM_FUNC_TARGET_WARM_RESET) || 235 ((tmf_hdr->flags & ISCSI_FLAG_TM_FUNC_MASK) == 236 ISCSI_TM_FUNC_TARGET_COLD_RESET)) { 237 INIT_WORK(&qedi_cmd->tmf_work, qedi_tmf_resp_work); 238 queue_work(qedi->tmf_thread, &qedi_cmd->tmf_work); 239 goto unblock_sess; 240 } 241 242 qedi_clear_task_idx(qedi, qedi_cmd->task_id); 243 244 __iscsi_complete_pdu(conn, (struct iscsi_hdr *)resp_hdr_ptr, NULL, 0); 245 kfree(resp_hdr_ptr); 246 247 unblock_sess: 248 spin_unlock(&session->back_lock); 249 } 250 251 static void qedi_process_login_resp(struct qedi_ctx *qedi, 252 union iscsi_cqe *cqe, 253 struct iscsi_task *task, 254 struct qedi_conn *qedi_conn) 255 { 256 struct iscsi_conn *conn = qedi_conn->cls_conn->dd_data; 257 struct iscsi_session *session = conn->session; 258 struct e4_iscsi_task_context *task_ctx; 259 struct iscsi_login_rsp *resp_hdr_ptr; 260 struct iscsi_login_response_hdr *cqe_login_response; 261 struct qedi_cmd *cmd; 262 int pld_len; 263 264 cmd = (struct qedi_cmd *)task->dd_data; 265 266 cqe_login_response = &cqe->cqe_common.iscsi_hdr.login_response; 267 task_ctx = qedi_get_task_mem(&qedi->tasks, cmd->task_id); 268 269 spin_lock(&session->back_lock); 270 resp_hdr_ptr = (struct iscsi_login_rsp *)&qedi_conn->gen_pdu.resp_hdr; 271 memset(resp_hdr_ptr, 0, sizeof(struct iscsi_login_rsp)); 272 resp_hdr_ptr->opcode = cqe_login_response->opcode; 273 resp_hdr_ptr->flags = cqe_login_response->flags_attr; 274 resp_hdr_ptr->hlength = 0; 275 276 hton24(resp_hdr_ptr->dlength, 277 (cqe_login_response->hdr_second_dword & 278 ISCSI_LOGIN_RESPONSE_HDR_DATA_SEG_LEN_MASK)); 279 resp_hdr_ptr->itt = build_itt(cqe->cqe_solicited.itid, 280 conn->session->age); 281 resp_hdr_ptr->tsih = cqe_login_response->tsih; 282 resp_hdr_ptr->statsn = cpu_to_be32(cqe_login_response->stat_sn); 283 resp_hdr_ptr->exp_cmdsn = cpu_to_be32(cqe_login_response->exp_cmd_sn); 284 resp_hdr_ptr->max_cmdsn = cpu_to_be32(cqe_login_response->max_cmd_sn); 285 resp_hdr_ptr->status_class = cqe_login_response->status_class; 286 resp_hdr_ptr->status_detail = cqe_login_response->status_detail; 287 pld_len = cqe_login_response->hdr_second_dword & 288 ISCSI_LOGIN_RESPONSE_HDR_DATA_SEG_LEN_MASK; 289 qedi_conn->gen_pdu.resp_wr_ptr = qedi_conn->gen_pdu.resp_buf + pld_len; 290 291 if (likely(cmd->io_cmd_in_list)) { 292 cmd->io_cmd_in_list = false; 293 list_del_init(&cmd->io_cmd); 294 qedi_conn->active_cmd_count--; 295 } 296 297 memset(task_ctx, '\0', sizeof(*task_ctx)); 298 299 __iscsi_complete_pdu(conn, (struct iscsi_hdr *)resp_hdr_ptr, 300 qedi_conn->gen_pdu.resp_buf, 301 (qedi_conn->gen_pdu.resp_wr_ptr - 302 qedi_conn->gen_pdu.resp_buf)); 303 304 spin_unlock(&session->back_lock); 305 QEDI_INFO(&qedi->dbg_ctx, QEDI_LOG_TID, 306 "Freeing tid=0x%x for cid=0x%x\n", 307 cmd->task_id, qedi_conn->iscsi_conn_id); 308 cmd->state = RESPONSE_RECEIVED; 309 qedi_clear_task_idx(qedi, cmd->task_id); 310 } 311 312 static void qedi_get_rq_bdq_buf(struct qedi_ctx *qedi, 313 struct iscsi_cqe_unsolicited *cqe, 314 char *ptr, int len) 315 { 316 u16 idx = 0; 317 318 QEDI_INFO(&qedi->dbg_ctx, QEDI_LOG_CONN, 319 "pld_len [%d], bdq_prod_idx [%d], idx [%d]\n", 320 len, qedi->bdq_prod_idx, 321 (qedi->bdq_prod_idx % qedi->rq_num_entries)); 322 323 /* Obtain buffer address from rqe_opaque */ 324 idx = cqe->rqe_opaque; 325 if (idx > (QEDI_BDQ_NUM - 1)) { 326 QEDI_INFO(&qedi->dbg_ctx, QEDI_LOG_CONN, 327 "wrong idx %d returned by FW, dropping the unsolicited pkt\n", 328 idx); 329 return; 330 } 331 332 QEDI_INFO(&qedi->dbg_ctx, QEDI_LOG_CONN, 333 "rqe_opaque [0x%p], idx [%d]\n", cqe->rqe_opaque, idx); 334 335 QEDI_INFO(&qedi->dbg_ctx, QEDI_LOG_CONN, 336 "unsol_cqe_type = %d\n", cqe->unsol_cqe_type); 337 switch (cqe->unsol_cqe_type) { 338 case ISCSI_CQE_UNSOLICITED_SINGLE: 339 case ISCSI_CQE_UNSOLICITED_FIRST: 340 if (len) 341 memcpy(ptr, (void *)qedi->bdq[idx].buf_addr, len); 342 break; 343 case ISCSI_CQE_UNSOLICITED_MIDDLE: 344 case ISCSI_CQE_UNSOLICITED_LAST: 345 break; 346 default: 347 break; 348 } 349 } 350 351 static void qedi_put_rq_bdq_buf(struct qedi_ctx *qedi, 352 struct iscsi_cqe_unsolicited *cqe, 353 int count) 354 { 355 u16 tmp; 356 u16 idx = 0; 357 struct scsi_bd *pbl; 358 359 /* Obtain buffer address from rqe_opaque */ 360 idx = cqe->rqe_opaque; 361 if (idx > (QEDI_BDQ_NUM - 1)) { 362 QEDI_INFO(&qedi->dbg_ctx, QEDI_LOG_CONN, 363 "wrong idx %d returned by FW, dropping the unsolicited pkt\n", 364 idx); 365 return; 366 } 367 368 pbl = (struct scsi_bd *)qedi->bdq_pbl; 369 pbl += (qedi->bdq_prod_idx % qedi->rq_num_entries); 370 pbl->address.hi = cpu_to_le32(QEDI_U64_HI(qedi->bdq[idx].buf_dma)); 371 pbl->address.lo = cpu_to_le32(QEDI_U64_LO(qedi->bdq[idx].buf_dma)); 372 QEDI_INFO(&qedi->dbg_ctx, QEDI_LOG_CONN, 373 "pbl [0x%p] pbl->address hi [0x%llx] lo [0x%llx] idx [%d]\n", 374 pbl, pbl->address.hi, pbl->address.lo, idx); 375 pbl->opaque.iscsi_opaque.reserved_zero[0] = 0; 376 pbl->opaque.iscsi_opaque.reserved_zero[1] = 0; 377 pbl->opaque.iscsi_opaque.reserved_zero[2] = 0; 378 pbl->opaque.iscsi_opaque.opaque = cpu_to_le32(idx); 379 380 /* Increment producer to let f/w know we've handled the frame */ 381 qedi->bdq_prod_idx += count; 382 383 writew(qedi->bdq_prod_idx, qedi->bdq_primary_prod); 384 tmp = readw(qedi->bdq_primary_prod); 385 386 writew(qedi->bdq_prod_idx, qedi->bdq_secondary_prod); 387 tmp = readw(qedi->bdq_secondary_prod); 388 } 389 390 static void qedi_unsol_pdu_adjust_bdq(struct qedi_ctx *qedi, 391 struct iscsi_cqe_unsolicited *cqe, 392 u32 pdu_len, u32 num_bdqs, 393 char *bdq_data) 394 { 395 QEDI_INFO(&qedi->dbg_ctx, QEDI_LOG_CONN, 396 "num_bdqs [%d]\n", num_bdqs); 397 398 qedi_get_rq_bdq_buf(qedi, cqe, bdq_data, pdu_len); 399 qedi_put_rq_bdq_buf(qedi, cqe, (num_bdqs + 1)); 400 } 401 402 static int qedi_process_nopin_mesg(struct qedi_ctx *qedi, 403 union iscsi_cqe *cqe, 404 struct iscsi_task *task, 405 struct qedi_conn *qedi_conn, u16 que_idx) 406 { 407 struct iscsi_conn *conn = qedi_conn->cls_conn->dd_data; 408 struct iscsi_session *session = conn->session; 409 struct iscsi_nop_in_hdr *cqe_nop_in; 410 struct iscsi_nopin *hdr; 411 struct qedi_cmd *cmd; 412 int tgt_async_nop = 0; 413 u32 lun[2]; 414 u32 pdu_len, num_bdqs; 415 char bdq_data[QEDI_BDQ_BUF_SIZE]; 416 unsigned long flags; 417 418 spin_lock_bh(&session->back_lock); 419 cqe_nop_in = &cqe->cqe_common.iscsi_hdr.nop_in; 420 421 pdu_len = cqe_nop_in->hdr_second_dword & 422 ISCSI_NOP_IN_HDR_DATA_SEG_LEN_MASK; 423 num_bdqs = pdu_len / QEDI_BDQ_BUF_SIZE; 424 425 hdr = (struct iscsi_nopin *)&qedi_conn->gen_pdu.resp_hdr; 426 memset(hdr, 0, sizeof(struct iscsi_hdr)); 427 hdr->opcode = cqe_nop_in->opcode; 428 hdr->max_cmdsn = cpu_to_be32(cqe_nop_in->max_cmd_sn); 429 hdr->exp_cmdsn = cpu_to_be32(cqe_nop_in->exp_cmd_sn); 430 hdr->statsn = cpu_to_be32(cqe_nop_in->stat_sn); 431 hdr->ttt = cpu_to_be32(cqe_nop_in->ttt); 432 433 if (cqe->cqe_common.cqe_type == ISCSI_CQE_TYPE_UNSOLICITED) { 434 spin_lock_irqsave(&qedi->hba_lock, flags); 435 qedi_unsol_pdu_adjust_bdq(qedi, &cqe->cqe_unsolicited, 436 pdu_len, num_bdqs, bdq_data); 437 hdr->itt = RESERVED_ITT; 438 tgt_async_nop = 1; 439 spin_unlock_irqrestore(&qedi->hba_lock, flags); 440 goto done; 441 } 442 443 /* Response to one of our nop-outs */ 444 if (task) { 445 cmd = task->dd_data; 446 hdr->flags = ISCSI_FLAG_CMD_FINAL; 447 hdr->itt = build_itt(cqe->cqe_solicited.itid, 448 conn->session->age); 449 lun[0] = 0xffffffff; 450 lun[1] = 0xffffffff; 451 memcpy(&hdr->lun, lun, sizeof(struct scsi_lun)); 452 QEDI_INFO(&qedi->dbg_ctx, QEDI_LOG_TID, 453 "Freeing tid=0x%x for cid=0x%x\n", 454 cmd->task_id, qedi_conn->iscsi_conn_id); 455 cmd->state = RESPONSE_RECEIVED; 456 spin_lock(&qedi_conn->list_lock); 457 if (likely(cmd->io_cmd_in_list)) { 458 cmd->io_cmd_in_list = false; 459 list_del_init(&cmd->io_cmd); 460 qedi_conn->active_cmd_count--; 461 } 462 463 spin_unlock(&qedi_conn->list_lock); 464 qedi_clear_task_idx(qedi, cmd->task_id); 465 } 466 467 done: 468 __iscsi_complete_pdu(conn, (struct iscsi_hdr *)hdr, bdq_data, pdu_len); 469 470 spin_unlock_bh(&session->back_lock); 471 return tgt_async_nop; 472 } 473 474 static void qedi_process_async_mesg(struct qedi_ctx *qedi, 475 union iscsi_cqe *cqe, 476 struct iscsi_task *task, 477 struct qedi_conn *qedi_conn, 478 u16 que_idx) 479 { 480 struct iscsi_conn *conn = qedi_conn->cls_conn->dd_data; 481 struct iscsi_session *session = conn->session; 482 struct iscsi_async_msg_hdr *cqe_async_msg; 483 struct iscsi_async *resp_hdr; 484 u32 lun[2]; 485 u32 pdu_len, num_bdqs; 486 char bdq_data[QEDI_BDQ_BUF_SIZE]; 487 unsigned long flags; 488 489 spin_lock_bh(&session->back_lock); 490 491 cqe_async_msg = &cqe->cqe_common.iscsi_hdr.async_msg; 492 pdu_len = cqe_async_msg->hdr_second_dword & 493 ISCSI_ASYNC_MSG_HDR_DATA_SEG_LEN_MASK; 494 num_bdqs = pdu_len / QEDI_BDQ_BUF_SIZE; 495 496 if (cqe->cqe_common.cqe_type == ISCSI_CQE_TYPE_UNSOLICITED) { 497 spin_lock_irqsave(&qedi->hba_lock, flags); 498 qedi_unsol_pdu_adjust_bdq(qedi, &cqe->cqe_unsolicited, 499 pdu_len, num_bdqs, bdq_data); 500 spin_unlock_irqrestore(&qedi->hba_lock, flags); 501 } 502 503 resp_hdr = (struct iscsi_async *)&qedi_conn->gen_pdu.resp_hdr; 504 memset(resp_hdr, 0, sizeof(struct iscsi_hdr)); 505 resp_hdr->opcode = cqe_async_msg->opcode; 506 resp_hdr->flags = 0x80; 507 508 lun[0] = cpu_to_be32(cqe_async_msg->lun.lo); 509 lun[1] = cpu_to_be32(cqe_async_msg->lun.hi); 510 memcpy(&resp_hdr->lun, lun, sizeof(struct scsi_lun)); 511 resp_hdr->exp_cmdsn = cpu_to_be32(cqe_async_msg->exp_cmd_sn); 512 resp_hdr->max_cmdsn = cpu_to_be32(cqe_async_msg->max_cmd_sn); 513 resp_hdr->statsn = cpu_to_be32(cqe_async_msg->stat_sn); 514 515 resp_hdr->async_event = cqe_async_msg->async_event; 516 resp_hdr->async_vcode = cqe_async_msg->async_vcode; 517 518 resp_hdr->param1 = cpu_to_be16(cqe_async_msg->param1_rsrv); 519 resp_hdr->param2 = cpu_to_be16(cqe_async_msg->param2_rsrv); 520 resp_hdr->param3 = cpu_to_be16(cqe_async_msg->param3_rsrv); 521 522 __iscsi_complete_pdu(conn, (struct iscsi_hdr *)resp_hdr, bdq_data, 523 pdu_len); 524 525 spin_unlock_bh(&session->back_lock); 526 } 527 528 static void qedi_process_reject_mesg(struct qedi_ctx *qedi, 529 union iscsi_cqe *cqe, 530 struct iscsi_task *task, 531 struct qedi_conn *qedi_conn, 532 uint16_t que_idx) 533 { 534 struct iscsi_conn *conn = qedi_conn->cls_conn->dd_data; 535 struct iscsi_session *session = conn->session; 536 struct iscsi_reject_hdr *cqe_reject; 537 struct iscsi_reject *hdr; 538 u32 pld_len, num_bdqs; 539 unsigned long flags; 540 541 spin_lock_bh(&session->back_lock); 542 cqe_reject = &cqe->cqe_common.iscsi_hdr.reject; 543 pld_len = cqe_reject->hdr_second_dword & 544 ISCSI_REJECT_HDR_DATA_SEG_LEN_MASK; 545 num_bdqs = pld_len / QEDI_BDQ_BUF_SIZE; 546 547 if (cqe->cqe_common.cqe_type == ISCSI_CQE_TYPE_UNSOLICITED) { 548 spin_lock_irqsave(&qedi->hba_lock, flags); 549 qedi_unsol_pdu_adjust_bdq(qedi, &cqe->cqe_unsolicited, 550 pld_len, num_bdqs, conn->data); 551 spin_unlock_irqrestore(&qedi->hba_lock, flags); 552 } 553 hdr = (struct iscsi_reject *)&qedi_conn->gen_pdu.resp_hdr; 554 memset(hdr, 0, sizeof(struct iscsi_hdr)); 555 hdr->opcode = cqe_reject->opcode; 556 hdr->reason = cqe_reject->hdr_reason; 557 hdr->flags = cqe_reject->hdr_flags; 558 hton24(hdr->dlength, (cqe_reject->hdr_second_dword & 559 ISCSI_REJECT_HDR_DATA_SEG_LEN_MASK)); 560 hdr->max_cmdsn = cpu_to_be32(cqe_reject->max_cmd_sn); 561 hdr->exp_cmdsn = cpu_to_be32(cqe_reject->exp_cmd_sn); 562 hdr->statsn = cpu_to_be32(cqe_reject->stat_sn); 563 hdr->ffffffff = cpu_to_be32(0xffffffff); 564 565 __iscsi_complete_pdu(conn, (struct iscsi_hdr *)hdr, 566 conn->data, pld_len); 567 spin_unlock_bh(&session->back_lock); 568 } 569 570 static void qedi_scsi_completion(struct qedi_ctx *qedi, 571 union iscsi_cqe *cqe, 572 struct iscsi_task *task, 573 struct iscsi_conn *conn) 574 { 575 struct scsi_cmnd *sc_cmd; 576 struct qedi_cmd *cmd = task->dd_data; 577 struct iscsi_session *session = conn->session; 578 struct iscsi_scsi_rsp *hdr; 579 struct iscsi_data_in_hdr *cqe_data_in; 580 int datalen = 0; 581 struct qedi_conn *qedi_conn; 582 u32 iscsi_cid; 583 u8 cqe_err_bits = 0; 584 585 iscsi_cid = cqe->cqe_common.conn_id; 586 qedi_conn = qedi->cid_que.conn_cid_tbl[iscsi_cid]; 587 588 cqe_data_in = &cqe->cqe_common.iscsi_hdr.data_in; 589 cqe_err_bits = 590 cqe->cqe_common.error_bitmap.error_bits.cqe_error_status_bits; 591 592 spin_lock_bh(&session->back_lock); 593 /* get the scsi command */ 594 sc_cmd = cmd->scsi_cmd; 595 596 if (!sc_cmd) { 597 QEDI_WARN(&qedi->dbg_ctx, "sc_cmd is NULL!\n"); 598 goto error; 599 } 600 601 if (!sc_cmd->SCp.ptr) { 602 QEDI_WARN(&qedi->dbg_ctx, 603 "SCp.ptr is NULL, returned in another context.\n"); 604 goto error; 605 } 606 607 if (!sc_cmd->request) { 608 QEDI_WARN(&qedi->dbg_ctx, 609 "sc_cmd->request is NULL, sc_cmd=%p.\n", 610 sc_cmd); 611 goto error; 612 } 613 614 if (!sc_cmd->request->q) { 615 QEDI_WARN(&qedi->dbg_ctx, 616 "request->q is NULL so request is not valid, sc_cmd=%p.\n", 617 sc_cmd); 618 goto error; 619 } 620 621 qedi_iscsi_unmap_sg_list(cmd); 622 623 hdr = (struct iscsi_scsi_rsp *)task->hdr; 624 hdr->opcode = cqe_data_in->opcode; 625 hdr->max_cmdsn = cpu_to_be32(cqe_data_in->max_cmd_sn); 626 hdr->exp_cmdsn = cpu_to_be32(cqe_data_in->exp_cmd_sn); 627 hdr->itt = build_itt(cqe->cqe_solicited.itid, conn->session->age); 628 hdr->response = cqe_data_in->reserved1; 629 hdr->cmd_status = cqe_data_in->status_rsvd; 630 hdr->flags = cqe_data_in->flags; 631 hdr->residual_count = cpu_to_be32(cqe_data_in->residual_count); 632 633 if (hdr->cmd_status == SAM_STAT_CHECK_CONDITION) { 634 datalen = cqe_data_in->reserved2 & 635 ISCSI_COMMON_HDR_DATA_SEG_LEN_MASK; 636 memcpy((char *)conn->data, (char *)cmd->sense_buffer, datalen); 637 } 638 639 /* If f/w reports data underrun err then set residual to IO transfer 640 * length, set Underrun flag and clear Overrun flag explicitly 641 */ 642 if (unlikely(cqe_err_bits && 643 GET_FIELD(cqe_err_bits, CQE_ERROR_BITMAP_UNDER_RUN_ERR))) { 644 QEDI_INFO(&qedi->dbg_ctx, QEDI_LOG_INFO, 645 "Under flow itt=0x%x proto flags=0x%x tid=0x%x cid 0x%x fw resid 0x%x sc dlen 0x%x\n", 646 hdr->itt, cqe_data_in->flags, cmd->task_id, 647 qedi_conn->iscsi_conn_id, hdr->residual_count, 648 scsi_bufflen(sc_cmd)); 649 hdr->residual_count = cpu_to_be32(scsi_bufflen(sc_cmd)); 650 hdr->flags |= ISCSI_FLAG_CMD_UNDERFLOW; 651 hdr->flags &= (~ISCSI_FLAG_CMD_OVERFLOW); 652 } 653 654 spin_lock(&qedi_conn->list_lock); 655 if (likely(cmd->io_cmd_in_list)) { 656 cmd->io_cmd_in_list = false; 657 list_del_init(&cmd->io_cmd); 658 qedi_conn->active_cmd_count--; 659 } 660 spin_unlock(&qedi_conn->list_lock); 661 662 QEDI_INFO(&qedi->dbg_ctx, QEDI_LOG_TID, 663 "Freeing tid=0x%x for cid=0x%x\n", 664 cmd->task_id, qedi_conn->iscsi_conn_id); 665 cmd->state = RESPONSE_RECEIVED; 666 if (qedi_io_tracing) 667 qedi_trace_io(qedi, task, cmd->task_id, QEDI_IO_TRACE_RSP); 668 669 qedi_clear_task_idx(qedi, cmd->task_id); 670 __iscsi_complete_pdu(conn, (struct iscsi_hdr *)hdr, 671 conn->data, datalen); 672 error: 673 spin_unlock_bh(&session->back_lock); 674 } 675 676 static void qedi_mtask_completion(struct qedi_ctx *qedi, 677 union iscsi_cqe *cqe, 678 struct iscsi_task *task, 679 struct qedi_conn *conn, uint16_t que_idx) 680 { 681 struct iscsi_conn *iscsi_conn; 682 u32 hdr_opcode; 683 684 hdr_opcode = cqe->cqe_common.iscsi_hdr.common.hdr_first_byte; 685 iscsi_conn = conn->cls_conn->dd_data; 686 687 switch (hdr_opcode) { 688 case ISCSI_OPCODE_SCSI_RESPONSE: 689 case ISCSI_OPCODE_DATA_IN: 690 qedi_scsi_completion(qedi, cqe, task, iscsi_conn); 691 break; 692 case ISCSI_OPCODE_LOGIN_RESPONSE: 693 qedi_process_login_resp(qedi, cqe, task, conn); 694 break; 695 case ISCSI_OPCODE_TMF_RESPONSE: 696 qedi_process_tmf_resp(qedi, cqe, task, conn); 697 break; 698 case ISCSI_OPCODE_TEXT_RESPONSE: 699 qedi_process_text_resp(qedi, cqe, task, conn); 700 break; 701 case ISCSI_OPCODE_LOGOUT_RESPONSE: 702 qedi_process_logout_resp(qedi, cqe, task, conn); 703 break; 704 case ISCSI_OPCODE_NOP_IN: 705 qedi_process_nopin_mesg(qedi, cqe, task, conn, que_idx); 706 break; 707 default: 708 QEDI_ERR(&qedi->dbg_ctx, "unknown opcode\n"); 709 } 710 } 711 712 static void qedi_process_nopin_local_cmpl(struct qedi_ctx *qedi, 713 struct iscsi_cqe_solicited *cqe, 714 struct iscsi_task *task, 715 struct qedi_conn *qedi_conn) 716 { 717 struct iscsi_conn *conn = qedi_conn->cls_conn->dd_data; 718 struct iscsi_session *session = conn->session; 719 struct qedi_cmd *cmd = task->dd_data; 720 721 QEDI_INFO(&qedi->dbg_ctx, QEDI_LOG_UNSOL, 722 "itid=0x%x, cmd task id=0x%x\n", 723 cqe->itid, cmd->task_id); 724 725 cmd->state = RESPONSE_RECEIVED; 726 qedi_clear_task_idx(qedi, cmd->task_id); 727 728 spin_lock_bh(&session->back_lock); 729 __iscsi_put_task(task); 730 spin_unlock_bh(&session->back_lock); 731 } 732 733 static void qedi_process_cmd_cleanup_resp(struct qedi_ctx *qedi, 734 struct iscsi_cqe_solicited *cqe, 735 struct iscsi_task *task, 736 struct iscsi_conn *conn) 737 { 738 struct qedi_work_map *work, *work_tmp; 739 u32 proto_itt = cqe->itid; 740 u32 ptmp_itt = 0; 741 itt_t protoitt = 0; 742 int found = 0; 743 struct qedi_cmd *qedi_cmd = NULL; 744 u32 rtid = 0; 745 u32 iscsi_cid; 746 struct qedi_conn *qedi_conn; 747 struct qedi_cmd *dbg_cmd; 748 struct iscsi_task *mtask; 749 struct iscsi_tm *tmf_hdr = NULL; 750 751 iscsi_cid = cqe->conn_id; 752 qedi_conn = qedi->cid_que.conn_cid_tbl[iscsi_cid]; 753 if (!qedi_conn) { 754 QEDI_INFO(&qedi->dbg_ctx, QEDI_LOG_INFO, 755 "icid not found 0x%x\n", cqe->conn_id); 756 return; 757 } 758 759 /* Based on this itt get the corresponding qedi_cmd */ 760 spin_lock_bh(&qedi_conn->tmf_work_lock); 761 list_for_each_entry_safe(work, work_tmp, &qedi_conn->tmf_work_list, 762 list) { 763 if (work->rtid == proto_itt) { 764 /* We found the command */ 765 qedi_cmd = work->qedi_cmd; 766 if (!qedi_cmd->list_tmf_work) { 767 QEDI_INFO(&qedi->dbg_ctx, QEDI_LOG_SCSI_TM, 768 "TMF work not found, cqe->tid=0x%x, cid=0x%x\n", 769 proto_itt, qedi_conn->iscsi_conn_id); 770 WARN_ON(1); 771 } 772 found = 1; 773 mtask = qedi_cmd->task; 774 tmf_hdr = (struct iscsi_tm *)mtask->hdr; 775 rtid = work->rtid; 776 777 list_del_init(&work->list); 778 kfree(work); 779 qedi_cmd->list_tmf_work = NULL; 780 } 781 } 782 spin_unlock_bh(&qedi_conn->tmf_work_lock); 783 784 if (found) { 785 QEDI_INFO(&qedi->dbg_ctx, QEDI_LOG_SCSI_TM, 786 "TMF work, cqe->tid=0x%x, tmf flags=0x%x, cid=0x%x\n", 787 proto_itt, tmf_hdr->flags, qedi_conn->iscsi_conn_id); 788 789 if ((tmf_hdr->flags & ISCSI_FLAG_TM_FUNC_MASK) == 790 ISCSI_TM_FUNC_ABORT_TASK) { 791 spin_lock_bh(&conn->session->back_lock); 792 793 protoitt = build_itt(get_itt(tmf_hdr->rtt), 794 conn->session->age); 795 task = iscsi_itt_to_task(conn, protoitt); 796 797 spin_unlock_bh(&conn->session->back_lock); 798 799 if (!task) { 800 QEDI_NOTICE(&qedi->dbg_ctx, 801 "IO task completed, tmf rtt=0x%x, cid=0x%x\n", 802 get_itt(tmf_hdr->rtt), 803 qedi_conn->iscsi_conn_id); 804 return; 805 } 806 807 dbg_cmd = task->dd_data; 808 809 QEDI_INFO(&qedi->dbg_ctx, QEDI_LOG_SCSI_TM, 810 "Abort tmf rtt=0x%x, i/o itt=0x%x, i/o tid=0x%x, cid=0x%x\n", 811 get_itt(tmf_hdr->rtt), get_itt(task->itt), 812 dbg_cmd->task_id, qedi_conn->iscsi_conn_id); 813 814 if (qedi_cmd->state == CLEANUP_WAIT_FAILED) 815 qedi_cmd->state = CLEANUP_RECV; 816 817 qedi_clear_task_idx(qedi_conn->qedi, rtid); 818 819 spin_lock(&qedi_conn->list_lock); 820 list_del_init(&dbg_cmd->io_cmd); 821 qedi_conn->active_cmd_count--; 822 spin_unlock(&qedi_conn->list_lock); 823 qedi_cmd->state = CLEANUP_RECV; 824 wake_up_interruptible(&qedi_conn->wait_queue); 825 } 826 } else if (qedi_conn->cmd_cleanup_req > 0) { 827 spin_lock_bh(&conn->session->back_lock); 828 qedi_get_proto_itt(qedi, cqe->itid, &ptmp_itt); 829 protoitt = build_itt(ptmp_itt, conn->session->age); 830 task = iscsi_itt_to_task(conn, protoitt); 831 QEDI_INFO(&qedi->dbg_ctx, QEDI_LOG_SCSI_TM, 832 "cleanup io itid=0x%x, protoitt=0x%x, cmd_cleanup_cmpl=%d, cid=0x%x\n", 833 cqe->itid, protoitt, qedi_conn->cmd_cleanup_cmpl, 834 qedi_conn->iscsi_conn_id); 835 836 spin_unlock_bh(&conn->session->back_lock); 837 if (!task) { 838 QEDI_NOTICE(&qedi->dbg_ctx, 839 "task is null, itid=0x%x, cid=0x%x\n", 840 cqe->itid, qedi_conn->iscsi_conn_id); 841 return; 842 } 843 qedi_conn->cmd_cleanup_cmpl++; 844 wake_up(&qedi_conn->wait_queue); 845 846 QEDI_INFO(&qedi->dbg_ctx, QEDI_LOG_TID, 847 "Freeing tid=0x%x for cid=0x%x\n", 848 cqe->itid, qedi_conn->iscsi_conn_id); 849 qedi_clear_task_idx(qedi_conn->qedi, cqe->itid); 850 851 } else { 852 qedi_get_proto_itt(qedi, cqe->itid, &ptmp_itt); 853 protoitt = build_itt(ptmp_itt, conn->session->age); 854 task = iscsi_itt_to_task(conn, protoitt); 855 QEDI_ERR(&qedi->dbg_ctx, 856 "Delayed or untracked cleanup response, itt=0x%x, tid=0x%x, cid=0x%x, task=%p\n", 857 protoitt, cqe->itid, qedi_conn->iscsi_conn_id, task); 858 } 859 } 860 861 void qedi_fp_process_cqes(struct qedi_work *work) 862 { 863 struct qedi_ctx *qedi = work->qedi; 864 union iscsi_cqe *cqe = &work->cqe; 865 struct iscsi_task *task = NULL; 866 struct iscsi_nopout *nopout_hdr; 867 struct qedi_conn *q_conn; 868 struct iscsi_conn *conn; 869 struct qedi_cmd *qedi_cmd; 870 u32 comp_type; 871 u32 iscsi_cid; 872 u32 hdr_opcode; 873 u16 que_idx = work->que_idx; 874 u8 cqe_err_bits = 0; 875 876 comp_type = cqe->cqe_common.cqe_type; 877 hdr_opcode = cqe->cqe_common.iscsi_hdr.common.hdr_first_byte; 878 cqe_err_bits = 879 cqe->cqe_common.error_bitmap.error_bits.cqe_error_status_bits; 880 881 QEDI_INFO(&qedi->dbg_ctx, QEDI_LOG_CONN, 882 "fw_cid=0x%x, cqe type=0x%x, opcode=0x%x\n", 883 cqe->cqe_common.conn_id, comp_type, hdr_opcode); 884 885 if (comp_type >= MAX_ISCSI_CQES_TYPE) { 886 QEDI_WARN(&qedi->dbg_ctx, "Invalid CqE type\n"); 887 return; 888 } 889 890 iscsi_cid = cqe->cqe_common.conn_id; 891 q_conn = qedi->cid_que.conn_cid_tbl[iscsi_cid]; 892 if (!q_conn) { 893 QEDI_WARN(&qedi->dbg_ctx, 894 "Session no longer exists for cid=0x%x!!\n", 895 iscsi_cid); 896 return; 897 } 898 899 conn = q_conn->cls_conn->dd_data; 900 901 if (unlikely(cqe_err_bits && 902 GET_FIELD(cqe_err_bits, 903 CQE_ERROR_BITMAP_DATA_DIGEST_ERR))) { 904 iscsi_conn_failure(conn, ISCSI_ERR_DATA_DGST); 905 return; 906 } 907 908 switch (comp_type) { 909 case ISCSI_CQE_TYPE_SOLICITED: 910 case ISCSI_CQE_TYPE_SOLICITED_WITH_SENSE: 911 qedi_cmd = container_of(work, struct qedi_cmd, cqe_work); 912 task = qedi_cmd->task; 913 if (!task) { 914 QEDI_WARN(&qedi->dbg_ctx, "task is NULL\n"); 915 return; 916 } 917 918 /* Process NOPIN local completion */ 919 nopout_hdr = (struct iscsi_nopout *)task->hdr; 920 if ((nopout_hdr->itt == RESERVED_ITT) && 921 (cqe->cqe_solicited.itid != (u16)RESERVED_ITT)) { 922 qedi_process_nopin_local_cmpl(qedi, &cqe->cqe_solicited, 923 task, q_conn); 924 } else { 925 cqe->cqe_solicited.itid = 926 qedi_get_itt(cqe->cqe_solicited); 927 /* Process other solicited responses */ 928 qedi_mtask_completion(qedi, cqe, task, q_conn, que_idx); 929 } 930 break; 931 case ISCSI_CQE_TYPE_UNSOLICITED: 932 switch (hdr_opcode) { 933 case ISCSI_OPCODE_NOP_IN: 934 qedi_process_nopin_mesg(qedi, cqe, task, q_conn, 935 que_idx); 936 break; 937 case ISCSI_OPCODE_ASYNC_MSG: 938 qedi_process_async_mesg(qedi, cqe, task, q_conn, 939 que_idx); 940 break; 941 case ISCSI_OPCODE_REJECT: 942 qedi_process_reject_mesg(qedi, cqe, task, q_conn, 943 que_idx); 944 break; 945 } 946 goto exit_fp_process; 947 case ISCSI_CQE_TYPE_DUMMY: 948 QEDI_INFO(&qedi->dbg_ctx, QEDI_LOG_SCSI_TM, "Dummy CqE\n"); 949 goto exit_fp_process; 950 case ISCSI_CQE_TYPE_TASK_CLEANUP: 951 QEDI_INFO(&qedi->dbg_ctx, QEDI_LOG_SCSI_TM, "CleanUp CqE\n"); 952 qedi_process_cmd_cleanup_resp(qedi, &cqe->cqe_solicited, task, 953 conn); 954 goto exit_fp_process; 955 default: 956 QEDI_ERR(&qedi->dbg_ctx, "Error cqe.\n"); 957 break; 958 } 959 960 exit_fp_process: 961 return; 962 } 963 964 static void qedi_ring_doorbell(struct qedi_conn *qedi_conn) 965 { 966 struct iscsi_db_data dbell = { 0 }; 967 968 dbell.agg_flags = 0; 969 970 dbell.params |= DB_DEST_XCM << ISCSI_DB_DATA_DEST_SHIFT; 971 dbell.params |= DB_AGG_CMD_SET << ISCSI_DB_DATA_AGG_CMD_SHIFT; 972 dbell.params |= 973 DQ_XCM_ISCSI_SQ_PROD_CMD << ISCSI_DB_DATA_AGG_VAL_SEL_SHIFT; 974 975 dbell.sq_prod = qedi_conn->ep->fw_sq_prod_idx; 976 writel(*(u32 *)&dbell, qedi_conn->ep->p_doorbell); 977 978 /* Make sure fw write idx is coherent, and include both memory barriers 979 * as a failsafe as for some architectures the call is the same but on 980 * others they are two different assembly operations. 981 */ 982 wmb(); 983 QEDI_INFO(&qedi_conn->qedi->dbg_ctx, QEDI_LOG_MP_REQ, 984 "prod_idx=0x%x, fw_prod_idx=0x%x, cid=0x%x\n", 985 qedi_conn->ep->sq_prod_idx, qedi_conn->ep->fw_sq_prod_idx, 986 qedi_conn->iscsi_conn_id); 987 } 988 989 static u16 qedi_get_wqe_idx(struct qedi_conn *qedi_conn) 990 { 991 struct qedi_endpoint *ep; 992 u16 rval; 993 994 ep = qedi_conn->ep; 995 rval = ep->sq_prod_idx; 996 997 /* Increament SQ index */ 998 ep->sq_prod_idx++; 999 ep->fw_sq_prod_idx++; 1000 if (ep->sq_prod_idx == QEDI_SQ_SIZE) 1001 ep->sq_prod_idx = 0; 1002 1003 return rval; 1004 } 1005 1006 int qedi_send_iscsi_login(struct qedi_conn *qedi_conn, 1007 struct iscsi_task *task) 1008 { 1009 struct iscsi_login_req_hdr login_req_pdu_header; 1010 struct scsi_sgl_task_params tx_sgl_task_params; 1011 struct scsi_sgl_task_params rx_sgl_task_params; 1012 struct iscsi_task_params task_params; 1013 struct e4_iscsi_task_context *fw_task_ctx; 1014 struct qedi_ctx *qedi = qedi_conn->qedi; 1015 struct iscsi_login_req *login_hdr; 1016 struct scsi_sge *resp_sge = NULL; 1017 struct qedi_cmd *qedi_cmd; 1018 struct qedi_endpoint *ep; 1019 s16 tid = 0; 1020 u16 sq_idx = 0; 1021 int rval = 0; 1022 1023 resp_sge = (struct scsi_sge *)qedi_conn->gen_pdu.resp_bd_tbl; 1024 qedi_cmd = (struct qedi_cmd *)task->dd_data; 1025 ep = qedi_conn->ep; 1026 login_hdr = (struct iscsi_login_req *)task->hdr; 1027 1028 tid = qedi_get_task_idx(qedi); 1029 if (tid == -1) 1030 return -ENOMEM; 1031 1032 fw_task_ctx = 1033 (struct e4_iscsi_task_context *)qedi_get_task_mem(&qedi->tasks, 1034 tid); 1035 memset(fw_task_ctx, 0, sizeof(struct e4_iscsi_task_context)); 1036 1037 qedi_cmd->task_id = tid; 1038 1039 memset(&task_params, 0, sizeof(task_params)); 1040 memset(&login_req_pdu_header, 0, sizeof(login_req_pdu_header)); 1041 memset(&tx_sgl_task_params, 0, sizeof(tx_sgl_task_params)); 1042 memset(&rx_sgl_task_params, 0, sizeof(rx_sgl_task_params)); 1043 /* Update header info */ 1044 login_req_pdu_header.opcode = login_hdr->opcode; 1045 login_req_pdu_header.version_min = login_hdr->min_version; 1046 login_req_pdu_header.version_max = login_hdr->max_version; 1047 login_req_pdu_header.flags_attr = login_hdr->flags; 1048 login_req_pdu_header.isid_tabc = swab32p((u32 *)login_hdr->isid); 1049 login_req_pdu_header.isid_d = swab16p((u16 *)&login_hdr->isid[4]); 1050 1051 login_req_pdu_header.tsih = login_hdr->tsih; 1052 login_req_pdu_header.hdr_second_dword = ntoh24(login_hdr->dlength); 1053 1054 qedi_update_itt_map(qedi, tid, task->itt, qedi_cmd); 1055 login_req_pdu_header.itt = qedi_set_itt(tid, get_itt(task->itt)); 1056 login_req_pdu_header.cid = qedi_conn->iscsi_conn_id; 1057 login_req_pdu_header.cmd_sn = be32_to_cpu(login_hdr->cmdsn); 1058 login_req_pdu_header.exp_stat_sn = be32_to_cpu(login_hdr->exp_statsn); 1059 login_req_pdu_header.exp_stat_sn = 0; 1060 1061 /* Fill tx AHS and rx buffer */ 1062 tx_sgl_task_params.sgl = 1063 (struct scsi_sge *)qedi_conn->gen_pdu.req_bd_tbl; 1064 tx_sgl_task_params.sgl_phys_addr.lo = 1065 (u32)(qedi_conn->gen_pdu.req_dma_addr); 1066 tx_sgl_task_params.sgl_phys_addr.hi = 1067 (u32)((u64)qedi_conn->gen_pdu.req_dma_addr >> 32); 1068 tx_sgl_task_params.total_buffer_size = ntoh24(login_hdr->dlength); 1069 tx_sgl_task_params.num_sges = 1; 1070 1071 rx_sgl_task_params.sgl = 1072 (struct scsi_sge *)qedi_conn->gen_pdu.resp_bd_tbl; 1073 rx_sgl_task_params.sgl_phys_addr.lo = 1074 (u32)(qedi_conn->gen_pdu.resp_dma_addr); 1075 rx_sgl_task_params.sgl_phys_addr.hi = 1076 (u32)((u64)qedi_conn->gen_pdu.resp_dma_addr >> 32); 1077 rx_sgl_task_params.total_buffer_size = resp_sge->sge_len; 1078 rx_sgl_task_params.num_sges = 1; 1079 1080 /* Fill fw input params */ 1081 task_params.context = fw_task_ctx; 1082 task_params.conn_icid = (u16)qedi_conn->iscsi_conn_id; 1083 task_params.itid = tid; 1084 task_params.cq_rss_number = 0; 1085 task_params.tx_io_size = ntoh24(login_hdr->dlength); 1086 task_params.rx_io_size = resp_sge->sge_len; 1087 1088 sq_idx = qedi_get_wqe_idx(qedi_conn); 1089 task_params.sqe = &ep->sq[sq_idx]; 1090 1091 memset(task_params.sqe, 0, sizeof(struct iscsi_wqe)); 1092 rval = init_initiator_login_request_task(&task_params, 1093 &login_req_pdu_header, 1094 &tx_sgl_task_params, 1095 &rx_sgl_task_params); 1096 if (rval) 1097 return -1; 1098 1099 spin_lock(&qedi_conn->list_lock); 1100 list_add_tail(&qedi_cmd->io_cmd, &qedi_conn->active_cmd_list); 1101 qedi_cmd->io_cmd_in_list = true; 1102 qedi_conn->active_cmd_count++; 1103 spin_unlock(&qedi_conn->list_lock); 1104 1105 qedi_ring_doorbell(qedi_conn); 1106 return 0; 1107 } 1108 1109 int qedi_send_iscsi_logout(struct qedi_conn *qedi_conn, 1110 struct iscsi_task *task) 1111 { 1112 struct iscsi_logout_req_hdr logout_pdu_header; 1113 struct scsi_sgl_task_params tx_sgl_task_params; 1114 struct scsi_sgl_task_params rx_sgl_task_params; 1115 struct iscsi_task_params task_params; 1116 struct e4_iscsi_task_context *fw_task_ctx; 1117 struct iscsi_logout *logout_hdr = NULL; 1118 struct qedi_ctx *qedi = qedi_conn->qedi; 1119 struct qedi_cmd *qedi_cmd; 1120 struct qedi_endpoint *ep; 1121 s16 tid = 0; 1122 u16 sq_idx = 0; 1123 int rval = 0; 1124 1125 qedi_cmd = (struct qedi_cmd *)task->dd_data; 1126 logout_hdr = (struct iscsi_logout *)task->hdr; 1127 ep = qedi_conn->ep; 1128 1129 tid = qedi_get_task_idx(qedi); 1130 if (tid == -1) 1131 return -ENOMEM; 1132 1133 fw_task_ctx = 1134 (struct e4_iscsi_task_context *)qedi_get_task_mem(&qedi->tasks, 1135 tid); 1136 memset(fw_task_ctx, 0, sizeof(struct e4_iscsi_task_context)); 1137 1138 qedi_cmd->task_id = tid; 1139 1140 memset(&task_params, 0, sizeof(task_params)); 1141 memset(&logout_pdu_header, 0, sizeof(logout_pdu_header)); 1142 memset(&tx_sgl_task_params, 0, sizeof(tx_sgl_task_params)); 1143 memset(&rx_sgl_task_params, 0, sizeof(rx_sgl_task_params)); 1144 1145 /* Update header info */ 1146 logout_pdu_header.opcode = logout_hdr->opcode; 1147 logout_pdu_header.reason_code = 0x80 | logout_hdr->flags; 1148 qedi_update_itt_map(qedi, tid, task->itt, qedi_cmd); 1149 logout_pdu_header.itt = qedi_set_itt(tid, get_itt(task->itt)); 1150 logout_pdu_header.exp_stat_sn = be32_to_cpu(logout_hdr->exp_statsn); 1151 logout_pdu_header.cmd_sn = be32_to_cpu(logout_hdr->cmdsn); 1152 logout_pdu_header.cid = qedi_conn->iscsi_conn_id; 1153 1154 /* Fill fw input params */ 1155 task_params.context = fw_task_ctx; 1156 task_params.conn_icid = (u16)qedi_conn->iscsi_conn_id; 1157 task_params.itid = tid; 1158 task_params.cq_rss_number = 0; 1159 task_params.tx_io_size = 0; 1160 task_params.rx_io_size = 0; 1161 1162 sq_idx = qedi_get_wqe_idx(qedi_conn); 1163 task_params.sqe = &ep->sq[sq_idx]; 1164 memset(task_params.sqe, 0, sizeof(struct iscsi_wqe)); 1165 1166 rval = init_initiator_logout_request_task(&task_params, 1167 &logout_pdu_header, 1168 NULL, NULL); 1169 if (rval) 1170 return -1; 1171 1172 spin_lock(&qedi_conn->list_lock); 1173 list_add_tail(&qedi_cmd->io_cmd, &qedi_conn->active_cmd_list); 1174 qedi_cmd->io_cmd_in_list = true; 1175 qedi_conn->active_cmd_count++; 1176 spin_unlock(&qedi_conn->list_lock); 1177 1178 qedi_ring_doorbell(qedi_conn); 1179 return 0; 1180 } 1181 1182 int qedi_cleanup_all_io(struct qedi_ctx *qedi, struct qedi_conn *qedi_conn, 1183 struct iscsi_task *task, bool in_recovery) 1184 { 1185 int rval; 1186 struct iscsi_task *ctask; 1187 struct qedi_cmd *cmd, *cmd_tmp; 1188 struct iscsi_tm *tmf_hdr; 1189 unsigned int lun = 0; 1190 bool lun_reset = false; 1191 struct iscsi_conn *conn = qedi_conn->cls_conn->dd_data; 1192 struct iscsi_session *session = conn->session; 1193 1194 /* From recovery, task is NULL or from tmf resp valid task */ 1195 if (task) { 1196 tmf_hdr = (struct iscsi_tm *)task->hdr; 1197 1198 if ((tmf_hdr->flags & ISCSI_FLAG_TM_FUNC_MASK) == 1199 ISCSI_TM_FUNC_LOGICAL_UNIT_RESET) { 1200 lun_reset = true; 1201 lun = scsilun_to_int(&tmf_hdr->lun); 1202 } 1203 } 1204 1205 qedi_conn->cmd_cleanup_req = 0; 1206 qedi_conn->cmd_cleanup_cmpl = 0; 1207 1208 QEDI_INFO(&qedi->dbg_ctx, QEDI_LOG_SCSI_TM, 1209 "active_cmd_count=%d, cid=0x%x, in_recovery=%d, lun_reset=%d\n", 1210 qedi_conn->active_cmd_count, qedi_conn->iscsi_conn_id, 1211 in_recovery, lun_reset); 1212 1213 if (lun_reset) 1214 spin_lock_bh(&session->back_lock); 1215 1216 spin_lock(&qedi_conn->list_lock); 1217 1218 list_for_each_entry_safe(cmd, cmd_tmp, &qedi_conn->active_cmd_list, 1219 io_cmd) { 1220 ctask = cmd->task; 1221 if (ctask == task) 1222 continue; 1223 1224 if (lun_reset) { 1225 if (cmd->scsi_cmd && cmd->scsi_cmd->device) { 1226 QEDI_INFO(&qedi->dbg_ctx, QEDI_LOG_SCSI_TM, 1227 "tid=0x%x itt=0x%x scsi_cmd_ptr=%p device=%p task_state=%d cmd_state=0%x cid=0x%x\n", 1228 cmd->task_id, get_itt(ctask->itt), 1229 cmd->scsi_cmd, cmd->scsi_cmd->device, 1230 ctask->state, cmd->state, 1231 qedi_conn->iscsi_conn_id); 1232 if (cmd->scsi_cmd->device->lun != lun) 1233 continue; 1234 } 1235 } 1236 qedi_conn->cmd_cleanup_req++; 1237 qedi_iscsi_cleanup_task(ctask, true); 1238 1239 list_del_init(&cmd->io_cmd); 1240 qedi_conn->active_cmd_count--; 1241 QEDI_WARN(&qedi->dbg_ctx, 1242 "Deleted active cmd list node io_cmd=%p, cid=0x%x\n", 1243 &cmd->io_cmd, qedi_conn->iscsi_conn_id); 1244 } 1245 1246 spin_unlock(&qedi_conn->list_lock); 1247 1248 if (lun_reset) 1249 spin_unlock_bh(&session->back_lock); 1250 1251 QEDI_INFO(&qedi->dbg_ctx, QEDI_LOG_SCSI_TM, 1252 "cmd_cleanup_req=%d, cid=0x%x\n", 1253 qedi_conn->cmd_cleanup_req, 1254 qedi_conn->iscsi_conn_id); 1255 1256 rval = wait_event_interruptible_timeout(qedi_conn->wait_queue, 1257 ((qedi_conn->cmd_cleanup_req == 1258 qedi_conn->cmd_cleanup_cmpl) || 1259 qedi_conn->ep), 1260 5 * HZ); 1261 if (rval) { 1262 QEDI_INFO(&qedi->dbg_ctx, QEDI_LOG_SCSI_TM, 1263 "i/o cmd_cleanup_req=%d, equal to cmd_cleanup_cmpl=%d, cid=0x%x\n", 1264 qedi_conn->cmd_cleanup_req, 1265 qedi_conn->cmd_cleanup_cmpl, 1266 qedi_conn->iscsi_conn_id); 1267 1268 return 0; 1269 } 1270 1271 QEDI_INFO(&qedi->dbg_ctx, QEDI_LOG_SCSI_TM, 1272 "i/o cmd_cleanup_req=%d, not equal to cmd_cleanup_cmpl=%d, cid=0x%x\n", 1273 qedi_conn->cmd_cleanup_req, 1274 qedi_conn->cmd_cleanup_cmpl, 1275 qedi_conn->iscsi_conn_id); 1276 1277 iscsi_host_for_each_session(qedi->shost, 1278 qedi_mark_device_missing); 1279 qedi_ops->common->drain(qedi->cdev); 1280 1281 /* Enable IOs for all other sessions except current.*/ 1282 if (!wait_event_interruptible_timeout(qedi_conn->wait_queue, 1283 (qedi_conn->cmd_cleanup_req == 1284 qedi_conn->cmd_cleanup_cmpl), 1285 5 * HZ)) { 1286 iscsi_host_for_each_session(qedi->shost, 1287 qedi_mark_device_available); 1288 return -1; 1289 } 1290 1291 iscsi_host_for_each_session(qedi->shost, 1292 qedi_mark_device_available); 1293 1294 return 0; 1295 } 1296 1297 void qedi_clearsq(struct qedi_ctx *qedi, struct qedi_conn *qedi_conn, 1298 struct iscsi_task *task) 1299 { 1300 struct qedi_endpoint *qedi_ep; 1301 int rval; 1302 1303 qedi_ep = qedi_conn->ep; 1304 qedi_conn->cmd_cleanup_req = 0; 1305 qedi_conn->cmd_cleanup_cmpl = 0; 1306 1307 if (!qedi_ep) { 1308 QEDI_WARN(&qedi->dbg_ctx, 1309 "Cannot proceed, ep already disconnected, cid=0x%x\n", 1310 qedi_conn->iscsi_conn_id); 1311 return; 1312 } 1313 1314 QEDI_INFO(&qedi->dbg_ctx, QEDI_LOG_INFO, 1315 "Clearing SQ for cid=0x%x, conn=%p, ep=%p\n", 1316 qedi_conn->iscsi_conn_id, qedi_conn, qedi_ep); 1317 1318 qedi_ops->clear_sq(qedi->cdev, qedi_ep->handle); 1319 1320 rval = qedi_cleanup_all_io(qedi, qedi_conn, task, true); 1321 if (rval) { 1322 QEDI_ERR(&qedi->dbg_ctx, 1323 "fatal error, need hard reset, cid=0x%x\n", 1324 qedi_conn->iscsi_conn_id); 1325 WARN_ON(1); 1326 } 1327 } 1328 1329 static int qedi_wait_for_cleanup_request(struct qedi_ctx *qedi, 1330 struct qedi_conn *qedi_conn, 1331 struct iscsi_task *task, 1332 struct qedi_cmd *qedi_cmd, 1333 struct qedi_work_map *list_work) 1334 { 1335 struct qedi_cmd *cmd = (struct qedi_cmd *)task->dd_data; 1336 int wait; 1337 1338 wait = wait_event_interruptible_timeout(qedi_conn->wait_queue, 1339 ((qedi_cmd->state == 1340 CLEANUP_RECV) || 1341 ((qedi_cmd->type == TYPEIO) && 1342 (cmd->state == 1343 RESPONSE_RECEIVED))), 1344 5 * HZ); 1345 if (!wait) { 1346 qedi_cmd->state = CLEANUP_WAIT_FAILED; 1347 1348 QEDI_INFO(&qedi->dbg_ctx, QEDI_LOG_SCSI_TM, 1349 "Cleanup timedout tid=0x%x, issue connection recovery, cid=0x%x\n", 1350 cmd->task_id, qedi_conn->iscsi_conn_id); 1351 1352 return -1; 1353 } 1354 return 0; 1355 } 1356 1357 static void qedi_tmf_work(struct work_struct *work) 1358 { 1359 struct qedi_cmd *qedi_cmd = 1360 container_of(work, struct qedi_cmd, tmf_work); 1361 struct qedi_conn *qedi_conn = qedi_cmd->conn; 1362 struct qedi_ctx *qedi = qedi_conn->qedi; 1363 struct iscsi_conn *conn = qedi_conn->cls_conn->dd_data; 1364 struct qedi_work_map *list_work = NULL; 1365 struct iscsi_task *mtask; 1366 struct qedi_cmd *cmd; 1367 struct iscsi_task *ctask; 1368 struct iscsi_tm *tmf_hdr; 1369 s16 rval = 0; 1370 s16 tid = 0; 1371 1372 mtask = qedi_cmd->task; 1373 tmf_hdr = (struct iscsi_tm *)mtask->hdr; 1374 set_bit(QEDI_CONN_FW_CLEANUP, &qedi_conn->flags); 1375 1376 ctask = iscsi_itt_to_task(conn, tmf_hdr->rtt); 1377 if (!ctask || !ctask->sc) { 1378 QEDI_ERR(&qedi->dbg_ctx, "Task already completed\n"); 1379 goto abort_ret; 1380 } 1381 1382 cmd = (struct qedi_cmd *)ctask->dd_data; 1383 QEDI_INFO(&qedi->dbg_ctx, QEDI_LOG_INFO, 1384 "Abort tmf rtt=0x%x, cmd itt=0x%x, cmd tid=0x%x, cid=0x%x\n", 1385 get_itt(tmf_hdr->rtt), get_itt(ctask->itt), cmd->task_id, 1386 qedi_conn->iscsi_conn_id); 1387 1388 if (qedi_do_not_recover) { 1389 QEDI_ERR(&qedi->dbg_ctx, "DONT SEND CLEANUP/ABORT %d\n", 1390 qedi_do_not_recover); 1391 goto abort_ret; 1392 } 1393 1394 list_work = kzalloc(sizeof(*list_work), GFP_ATOMIC); 1395 if (!list_work) { 1396 QEDI_ERR(&qedi->dbg_ctx, "Memory allocation failed\n"); 1397 goto abort_ret; 1398 } 1399 1400 qedi_cmd->type = TYPEIO; 1401 list_work->qedi_cmd = qedi_cmd; 1402 list_work->rtid = cmd->task_id; 1403 list_work->state = QEDI_WORK_SCHEDULED; 1404 qedi_cmd->list_tmf_work = list_work; 1405 1406 QEDI_INFO(&qedi->dbg_ctx, QEDI_LOG_SCSI_TM, 1407 "Queue tmf work=%p, list node=%p, cid=0x%x, tmf flags=0x%x\n", 1408 list_work->ptr_tmf_work, list_work, qedi_conn->iscsi_conn_id, 1409 tmf_hdr->flags); 1410 1411 spin_lock_bh(&qedi_conn->tmf_work_lock); 1412 list_add_tail(&list_work->list, &qedi_conn->tmf_work_list); 1413 spin_unlock_bh(&qedi_conn->tmf_work_lock); 1414 1415 qedi_iscsi_cleanup_task(ctask, false); 1416 1417 rval = qedi_wait_for_cleanup_request(qedi, qedi_conn, ctask, qedi_cmd, 1418 list_work); 1419 if (rval == -1) { 1420 QEDI_INFO(&qedi->dbg_ctx, QEDI_LOG_INFO, 1421 "FW cleanup got escalated, cid=0x%x\n", 1422 qedi_conn->iscsi_conn_id); 1423 goto ldel_exit; 1424 } 1425 1426 tid = qedi_get_task_idx(qedi); 1427 if (tid == -1) { 1428 QEDI_ERR(&qedi->dbg_ctx, "Invalid tid, cid=0x%x\n", 1429 qedi_conn->iscsi_conn_id); 1430 goto ldel_exit; 1431 } 1432 1433 qedi_cmd->task_id = tid; 1434 qedi_send_iscsi_tmf(qedi_conn, qedi_cmd->task); 1435 1436 abort_ret: 1437 clear_bit(QEDI_CONN_FW_CLEANUP, &qedi_conn->flags); 1438 return; 1439 1440 ldel_exit: 1441 spin_lock_bh(&qedi_conn->tmf_work_lock); 1442 if (!qedi_cmd->list_tmf_work) { 1443 list_del_init(&list_work->list); 1444 qedi_cmd->list_tmf_work = NULL; 1445 kfree(list_work); 1446 } 1447 spin_unlock_bh(&qedi_conn->tmf_work_lock); 1448 1449 spin_lock(&qedi_conn->list_lock); 1450 list_del_init(&cmd->io_cmd); 1451 qedi_conn->active_cmd_count--; 1452 spin_unlock(&qedi_conn->list_lock); 1453 1454 clear_bit(QEDI_CONN_FW_CLEANUP, &qedi_conn->flags); 1455 } 1456 1457 static int qedi_send_iscsi_tmf(struct qedi_conn *qedi_conn, 1458 struct iscsi_task *mtask) 1459 { 1460 struct iscsi_tmf_request_hdr tmf_pdu_header; 1461 struct iscsi_task_params task_params; 1462 struct qedi_ctx *qedi = qedi_conn->qedi; 1463 struct e4_iscsi_task_context *fw_task_ctx; 1464 struct iscsi_conn *conn = qedi_conn->cls_conn->dd_data; 1465 struct iscsi_task *ctask; 1466 struct iscsi_tm *tmf_hdr; 1467 struct qedi_cmd *qedi_cmd; 1468 struct qedi_cmd *cmd; 1469 struct qedi_endpoint *ep; 1470 u32 scsi_lun[2]; 1471 s16 tid = 0; 1472 u16 sq_idx = 0; 1473 int rval = 0; 1474 1475 tmf_hdr = (struct iscsi_tm *)mtask->hdr; 1476 qedi_cmd = (struct qedi_cmd *)mtask->dd_data; 1477 ep = qedi_conn->ep; 1478 if (!ep) 1479 return -ENODEV; 1480 1481 tid = qedi_get_task_idx(qedi); 1482 if (tid == -1) 1483 return -ENOMEM; 1484 1485 fw_task_ctx = 1486 (struct e4_iscsi_task_context *)qedi_get_task_mem(&qedi->tasks, 1487 tid); 1488 memset(fw_task_ctx, 0, sizeof(struct e4_iscsi_task_context)); 1489 1490 qedi_cmd->task_id = tid; 1491 1492 memset(&task_params, 0, sizeof(task_params)); 1493 memset(&tmf_pdu_header, 0, sizeof(tmf_pdu_header)); 1494 1495 /* Update header info */ 1496 qedi_update_itt_map(qedi, tid, mtask->itt, qedi_cmd); 1497 tmf_pdu_header.itt = qedi_set_itt(tid, get_itt(mtask->itt)); 1498 tmf_pdu_header.cmd_sn = be32_to_cpu(tmf_hdr->cmdsn); 1499 1500 memcpy(scsi_lun, &tmf_hdr->lun, sizeof(struct scsi_lun)); 1501 tmf_pdu_header.lun.lo = be32_to_cpu(scsi_lun[0]); 1502 tmf_pdu_header.lun.hi = be32_to_cpu(scsi_lun[1]); 1503 1504 if ((tmf_hdr->flags & ISCSI_FLAG_TM_FUNC_MASK) == 1505 ISCSI_TM_FUNC_ABORT_TASK) { 1506 ctask = iscsi_itt_to_task(conn, tmf_hdr->rtt); 1507 if (!ctask || !ctask->sc) { 1508 QEDI_ERR(&qedi->dbg_ctx, 1509 "Could not get reference task\n"); 1510 return 0; 1511 } 1512 cmd = (struct qedi_cmd *)ctask->dd_data; 1513 tmf_pdu_header.rtt = 1514 qedi_set_itt(cmd->task_id, 1515 get_itt(tmf_hdr->rtt)); 1516 } else { 1517 tmf_pdu_header.rtt = ISCSI_RESERVED_TAG; 1518 } 1519 1520 tmf_pdu_header.opcode = tmf_hdr->opcode; 1521 tmf_pdu_header.function = tmf_hdr->flags; 1522 tmf_pdu_header.hdr_second_dword = ntoh24(tmf_hdr->dlength); 1523 tmf_pdu_header.ref_cmd_sn = be32_to_cpu(tmf_hdr->refcmdsn); 1524 1525 /* Fill fw input params */ 1526 task_params.context = fw_task_ctx; 1527 task_params.conn_icid = (u16)qedi_conn->iscsi_conn_id; 1528 task_params.itid = tid; 1529 task_params.cq_rss_number = 0; 1530 task_params.tx_io_size = 0; 1531 task_params.rx_io_size = 0; 1532 1533 sq_idx = qedi_get_wqe_idx(qedi_conn); 1534 task_params.sqe = &ep->sq[sq_idx]; 1535 1536 memset(task_params.sqe, 0, sizeof(struct iscsi_wqe)); 1537 rval = init_initiator_tmf_request_task(&task_params, 1538 &tmf_pdu_header); 1539 if (rval) 1540 return -1; 1541 1542 spin_lock(&qedi_conn->list_lock); 1543 list_add_tail(&qedi_cmd->io_cmd, &qedi_conn->active_cmd_list); 1544 qedi_cmd->io_cmd_in_list = true; 1545 qedi_conn->active_cmd_count++; 1546 spin_unlock(&qedi_conn->list_lock); 1547 1548 qedi_ring_doorbell(qedi_conn); 1549 return 0; 1550 } 1551 1552 int qedi_iscsi_abort_work(struct qedi_conn *qedi_conn, 1553 struct iscsi_task *mtask) 1554 { 1555 struct qedi_ctx *qedi = qedi_conn->qedi; 1556 struct iscsi_tm *tmf_hdr; 1557 struct qedi_cmd *qedi_cmd = (struct qedi_cmd *)mtask->dd_data; 1558 s16 tid = 0; 1559 1560 tmf_hdr = (struct iscsi_tm *)mtask->hdr; 1561 qedi_cmd->task = mtask; 1562 1563 /* If abort task then schedule the work and return */ 1564 if ((tmf_hdr->flags & ISCSI_FLAG_TM_FUNC_MASK) == 1565 ISCSI_TM_FUNC_ABORT_TASK) { 1566 qedi_cmd->state = CLEANUP_WAIT; 1567 INIT_WORK(&qedi_cmd->tmf_work, qedi_tmf_work); 1568 queue_work(qedi->tmf_thread, &qedi_cmd->tmf_work); 1569 1570 } else if (((tmf_hdr->flags & ISCSI_FLAG_TM_FUNC_MASK) == 1571 ISCSI_TM_FUNC_LOGICAL_UNIT_RESET) || 1572 ((tmf_hdr->flags & ISCSI_FLAG_TM_FUNC_MASK) == 1573 ISCSI_TM_FUNC_TARGET_WARM_RESET) || 1574 ((tmf_hdr->flags & ISCSI_FLAG_TM_FUNC_MASK) == 1575 ISCSI_TM_FUNC_TARGET_COLD_RESET)) { 1576 tid = qedi_get_task_idx(qedi); 1577 if (tid == -1) { 1578 QEDI_ERR(&qedi->dbg_ctx, "Invalid tid, cid=0x%x\n", 1579 qedi_conn->iscsi_conn_id); 1580 return -1; 1581 } 1582 qedi_cmd->task_id = tid; 1583 1584 qedi_send_iscsi_tmf(qedi_conn, qedi_cmd->task); 1585 1586 } else { 1587 QEDI_ERR(&qedi->dbg_ctx, "Invalid tmf, cid=0x%x\n", 1588 qedi_conn->iscsi_conn_id); 1589 return -1; 1590 } 1591 1592 return 0; 1593 } 1594 1595 int qedi_send_iscsi_text(struct qedi_conn *qedi_conn, 1596 struct iscsi_task *task) 1597 { 1598 struct iscsi_text_request_hdr text_request_pdu_header; 1599 struct scsi_sgl_task_params tx_sgl_task_params; 1600 struct scsi_sgl_task_params rx_sgl_task_params; 1601 struct iscsi_task_params task_params; 1602 struct e4_iscsi_task_context *fw_task_ctx; 1603 struct qedi_ctx *qedi = qedi_conn->qedi; 1604 struct iscsi_text *text_hdr; 1605 struct scsi_sge *req_sge = NULL; 1606 struct scsi_sge *resp_sge = NULL; 1607 struct qedi_cmd *qedi_cmd; 1608 struct qedi_endpoint *ep; 1609 s16 tid = 0; 1610 u16 sq_idx = 0; 1611 int rval = 0; 1612 1613 req_sge = (struct scsi_sge *)qedi_conn->gen_pdu.req_bd_tbl; 1614 resp_sge = (struct scsi_sge *)qedi_conn->gen_pdu.resp_bd_tbl; 1615 qedi_cmd = (struct qedi_cmd *)task->dd_data; 1616 text_hdr = (struct iscsi_text *)task->hdr; 1617 ep = qedi_conn->ep; 1618 1619 tid = qedi_get_task_idx(qedi); 1620 if (tid == -1) 1621 return -ENOMEM; 1622 1623 fw_task_ctx = 1624 (struct e4_iscsi_task_context *)qedi_get_task_mem(&qedi->tasks, 1625 tid); 1626 memset(fw_task_ctx, 0, sizeof(struct e4_iscsi_task_context)); 1627 1628 qedi_cmd->task_id = tid; 1629 1630 memset(&task_params, 0, sizeof(task_params)); 1631 memset(&text_request_pdu_header, 0, sizeof(text_request_pdu_header)); 1632 memset(&tx_sgl_task_params, 0, sizeof(tx_sgl_task_params)); 1633 memset(&rx_sgl_task_params, 0, sizeof(rx_sgl_task_params)); 1634 1635 /* Update header info */ 1636 text_request_pdu_header.opcode = text_hdr->opcode; 1637 text_request_pdu_header.flags_attr = text_hdr->flags; 1638 1639 qedi_update_itt_map(qedi, tid, task->itt, qedi_cmd); 1640 text_request_pdu_header.itt = qedi_set_itt(tid, get_itt(task->itt)); 1641 text_request_pdu_header.ttt = text_hdr->ttt; 1642 text_request_pdu_header.cmd_sn = be32_to_cpu(text_hdr->cmdsn); 1643 text_request_pdu_header.exp_stat_sn = be32_to_cpu(text_hdr->exp_statsn); 1644 text_request_pdu_header.hdr_second_dword = ntoh24(text_hdr->dlength); 1645 1646 /* Fill tx AHS and rx buffer */ 1647 tx_sgl_task_params.sgl = 1648 (struct scsi_sge *)qedi_conn->gen_pdu.req_bd_tbl; 1649 tx_sgl_task_params.sgl_phys_addr.lo = 1650 (u32)(qedi_conn->gen_pdu.req_dma_addr); 1651 tx_sgl_task_params.sgl_phys_addr.hi = 1652 (u32)((u64)qedi_conn->gen_pdu.req_dma_addr >> 32); 1653 tx_sgl_task_params.total_buffer_size = req_sge->sge_len; 1654 tx_sgl_task_params.num_sges = 1; 1655 1656 rx_sgl_task_params.sgl = 1657 (struct scsi_sge *)qedi_conn->gen_pdu.resp_bd_tbl; 1658 rx_sgl_task_params.sgl_phys_addr.lo = 1659 (u32)(qedi_conn->gen_pdu.resp_dma_addr); 1660 rx_sgl_task_params.sgl_phys_addr.hi = 1661 (u32)((u64)qedi_conn->gen_pdu.resp_dma_addr >> 32); 1662 rx_sgl_task_params.total_buffer_size = resp_sge->sge_len; 1663 rx_sgl_task_params.num_sges = 1; 1664 1665 /* Fill fw input params */ 1666 task_params.context = fw_task_ctx; 1667 task_params.conn_icid = (u16)qedi_conn->iscsi_conn_id; 1668 task_params.itid = tid; 1669 task_params.cq_rss_number = 0; 1670 task_params.tx_io_size = ntoh24(text_hdr->dlength); 1671 task_params.rx_io_size = resp_sge->sge_len; 1672 1673 sq_idx = qedi_get_wqe_idx(qedi_conn); 1674 task_params.sqe = &ep->sq[sq_idx]; 1675 1676 memset(task_params.sqe, 0, sizeof(struct iscsi_wqe)); 1677 rval = init_initiator_text_request_task(&task_params, 1678 &text_request_pdu_header, 1679 &tx_sgl_task_params, 1680 &rx_sgl_task_params); 1681 if (rval) 1682 return -1; 1683 1684 spin_lock(&qedi_conn->list_lock); 1685 list_add_tail(&qedi_cmd->io_cmd, &qedi_conn->active_cmd_list); 1686 qedi_cmd->io_cmd_in_list = true; 1687 qedi_conn->active_cmd_count++; 1688 spin_unlock(&qedi_conn->list_lock); 1689 1690 qedi_ring_doorbell(qedi_conn); 1691 return 0; 1692 } 1693 1694 int qedi_send_iscsi_nopout(struct qedi_conn *qedi_conn, 1695 struct iscsi_task *task, 1696 char *datap, int data_len, int unsol) 1697 { 1698 struct iscsi_nop_out_hdr nop_out_pdu_header; 1699 struct scsi_sgl_task_params tx_sgl_task_params; 1700 struct scsi_sgl_task_params rx_sgl_task_params; 1701 struct iscsi_task_params task_params; 1702 struct qedi_ctx *qedi = qedi_conn->qedi; 1703 struct e4_iscsi_task_context *fw_task_ctx; 1704 struct iscsi_nopout *nopout_hdr; 1705 struct scsi_sge *resp_sge = NULL; 1706 struct qedi_cmd *qedi_cmd; 1707 struct qedi_endpoint *ep; 1708 u32 scsi_lun[2]; 1709 s16 tid = 0; 1710 u16 sq_idx = 0; 1711 int rval = 0; 1712 1713 resp_sge = (struct scsi_sge *)qedi_conn->gen_pdu.resp_bd_tbl; 1714 qedi_cmd = (struct qedi_cmd *)task->dd_data; 1715 nopout_hdr = (struct iscsi_nopout *)task->hdr; 1716 ep = qedi_conn->ep; 1717 1718 tid = qedi_get_task_idx(qedi); 1719 if (tid == -1) 1720 return -ENOMEM; 1721 1722 fw_task_ctx = 1723 (struct e4_iscsi_task_context *)qedi_get_task_mem(&qedi->tasks, 1724 tid); 1725 memset(fw_task_ctx, 0, sizeof(struct e4_iscsi_task_context)); 1726 1727 qedi_cmd->task_id = tid; 1728 1729 memset(&task_params, 0, sizeof(task_params)); 1730 memset(&nop_out_pdu_header, 0, sizeof(nop_out_pdu_header)); 1731 memset(&tx_sgl_task_params, 0, sizeof(tx_sgl_task_params)); 1732 memset(&rx_sgl_task_params, 0, sizeof(rx_sgl_task_params)); 1733 1734 /* Update header info */ 1735 nop_out_pdu_header.opcode = nopout_hdr->opcode; 1736 SET_FIELD(nop_out_pdu_header.flags_attr, ISCSI_NOP_OUT_HDR_CONST1, 1); 1737 SET_FIELD(nop_out_pdu_header.flags_attr, ISCSI_NOP_OUT_HDR_RSRV, 0); 1738 1739 memcpy(scsi_lun, &nopout_hdr->lun, sizeof(struct scsi_lun)); 1740 nop_out_pdu_header.lun.lo = be32_to_cpu(scsi_lun[0]); 1741 nop_out_pdu_header.lun.hi = be32_to_cpu(scsi_lun[1]); 1742 nop_out_pdu_header.cmd_sn = be32_to_cpu(nopout_hdr->cmdsn); 1743 nop_out_pdu_header.exp_stat_sn = be32_to_cpu(nopout_hdr->exp_statsn); 1744 1745 qedi_update_itt_map(qedi, tid, task->itt, qedi_cmd); 1746 1747 if (nopout_hdr->ttt != ISCSI_TTT_ALL_ONES) { 1748 nop_out_pdu_header.itt = be32_to_cpu(nopout_hdr->itt); 1749 nop_out_pdu_header.ttt = be32_to_cpu(nopout_hdr->ttt); 1750 } else { 1751 nop_out_pdu_header.itt = qedi_set_itt(tid, get_itt(task->itt)); 1752 nop_out_pdu_header.ttt = ISCSI_TTT_ALL_ONES; 1753 1754 spin_lock(&qedi_conn->list_lock); 1755 list_add_tail(&qedi_cmd->io_cmd, &qedi_conn->active_cmd_list); 1756 qedi_cmd->io_cmd_in_list = true; 1757 qedi_conn->active_cmd_count++; 1758 spin_unlock(&qedi_conn->list_lock); 1759 } 1760 1761 /* Fill tx AHS and rx buffer */ 1762 if (data_len) { 1763 tx_sgl_task_params.sgl = 1764 (struct scsi_sge *)qedi_conn->gen_pdu.req_bd_tbl; 1765 tx_sgl_task_params.sgl_phys_addr.lo = 1766 (u32)(qedi_conn->gen_pdu.req_dma_addr); 1767 tx_sgl_task_params.sgl_phys_addr.hi = 1768 (u32)((u64)qedi_conn->gen_pdu.req_dma_addr >> 32); 1769 tx_sgl_task_params.total_buffer_size = data_len; 1770 tx_sgl_task_params.num_sges = 1; 1771 1772 rx_sgl_task_params.sgl = 1773 (struct scsi_sge *)qedi_conn->gen_pdu.resp_bd_tbl; 1774 rx_sgl_task_params.sgl_phys_addr.lo = 1775 (u32)(qedi_conn->gen_pdu.resp_dma_addr); 1776 rx_sgl_task_params.sgl_phys_addr.hi = 1777 (u32)((u64)qedi_conn->gen_pdu.resp_dma_addr >> 32); 1778 rx_sgl_task_params.total_buffer_size = resp_sge->sge_len; 1779 rx_sgl_task_params.num_sges = 1; 1780 } 1781 1782 /* Fill fw input params */ 1783 task_params.context = fw_task_ctx; 1784 task_params.conn_icid = (u16)qedi_conn->iscsi_conn_id; 1785 task_params.itid = tid; 1786 task_params.cq_rss_number = 0; 1787 task_params.tx_io_size = data_len; 1788 task_params.rx_io_size = resp_sge->sge_len; 1789 1790 sq_idx = qedi_get_wqe_idx(qedi_conn); 1791 task_params.sqe = &ep->sq[sq_idx]; 1792 1793 memset(task_params.sqe, 0, sizeof(struct iscsi_wqe)); 1794 rval = init_initiator_nop_out_task(&task_params, 1795 &nop_out_pdu_header, 1796 &tx_sgl_task_params, 1797 &rx_sgl_task_params); 1798 if (rval) 1799 return -1; 1800 1801 qedi_ring_doorbell(qedi_conn); 1802 return 0; 1803 } 1804 1805 static int qedi_split_bd(struct qedi_cmd *cmd, u64 addr, int sg_len, 1806 int bd_index) 1807 { 1808 struct scsi_sge *bd = cmd->io_tbl.sge_tbl; 1809 int frag_size, sg_frags; 1810 1811 sg_frags = 0; 1812 1813 while (sg_len) { 1814 if (addr % QEDI_PAGE_SIZE) 1815 frag_size = 1816 (QEDI_PAGE_SIZE - (addr % QEDI_PAGE_SIZE)); 1817 else 1818 frag_size = (sg_len > QEDI_BD_SPLIT_SZ) ? 0 : 1819 (sg_len % QEDI_BD_SPLIT_SZ); 1820 1821 if (frag_size == 0) 1822 frag_size = QEDI_BD_SPLIT_SZ; 1823 1824 bd[bd_index + sg_frags].sge_addr.lo = (addr & 0xffffffff); 1825 bd[bd_index + sg_frags].sge_addr.hi = (addr >> 32); 1826 bd[bd_index + sg_frags].sge_len = (u16)frag_size; 1827 QEDI_INFO(&cmd->conn->qedi->dbg_ctx, QEDI_LOG_IO, 1828 "split sge %d: addr=%llx, len=%x", 1829 (bd_index + sg_frags), addr, frag_size); 1830 1831 addr += (u64)frag_size; 1832 sg_frags++; 1833 sg_len -= frag_size; 1834 } 1835 return sg_frags; 1836 } 1837 1838 static int qedi_map_scsi_sg(struct qedi_ctx *qedi, struct qedi_cmd *cmd) 1839 { 1840 struct scsi_cmnd *sc = cmd->scsi_cmd; 1841 struct scsi_sge *bd = cmd->io_tbl.sge_tbl; 1842 struct scatterlist *sg; 1843 int byte_count = 0; 1844 int bd_count = 0; 1845 int sg_count; 1846 int sg_len; 1847 int sg_frags; 1848 u64 addr, end_addr; 1849 int i; 1850 1851 WARN_ON(scsi_sg_count(sc) > QEDI_ISCSI_MAX_BDS_PER_CMD); 1852 1853 sg_count = dma_map_sg(&qedi->pdev->dev, scsi_sglist(sc), 1854 scsi_sg_count(sc), sc->sc_data_direction); 1855 1856 /* 1857 * New condition to send single SGE as cached-SGL. 1858 * Single SGE with length less than 64K. 1859 */ 1860 sg = scsi_sglist(sc); 1861 if ((sg_count == 1) && (sg_dma_len(sg) <= MAX_SGLEN_FOR_CACHESGL)) { 1862 sg_len = sg_dma_len(sg); 1863 addr = (u64)sg_dma_address(sg); 1864 1865 bd[bd_count].sge_addr.lo = (addr & 0xffffffff); 1866 bd[bd_count].sge_addr.hi = (addr >> 32); 1867 bd[bd_count].sge_len = (u16)sg_len; 1868 1869 QEDI_INFO(&qedi->dbg_ctx, QEDI_LOG_IO, 1870 "single-cached-sgl: bd_count:%d addr=%llx, len=%x", 1871 sg_count, addr, sg_len); 1872 1873 return ++bd_count; 1874 } 1875 1876 scsi_for_each_sg(sc, sg, sg_count, i) { 1877 sg_len = sg_dma_len(sg); 1878 addr = (u64)sg_dma_address(sg); 1879 end_addr = (addr + sg_len); 1880 1881 /* 1882 * first sg elem in the 'list', 1883 * check if end addr is page-aligned. 1884 */ 1885 if ((i == 0) && (sg_count > 1) && (end_addr % QEDI_PAGE_SIZE)) 1886 cmd->use_slowpath = true; 1887 1888 /* 1889 * last sg elem in the 'list', 1890 * check if start addr is page-aligned. 1891 */ 1892 else if ((i == (sg_count - 1)) && 1893 (sg_count > 1) && (addr % QEDI_PAGE_SIZE)) 1894 cmd->use_slowpath = true; 1895 1896 /* 1897 * middle sg elements in list, 1898 * check if start and end addr is page-aligned 1899 */ 1900 else if ((i != 0) && (i != (sg_count - 1)) && 1901 ((addr % QEDI_PAGE_SIZE) || 1902 (end_addr % QEDI_PAGE_SIZE))) 1903 cmd->use_slowpath = true; 1904 1905 QEDI_INFO(&qedi->dbg_ctx, QEDI_LOG_IO, "sg[%d] size=0x%x", 1906 i, sg_len); 1907 1908 if (sg_len > QEDI_BD_SPLIT_SZ) { 1909 sg_frags = qedi_split_bd(cmd, addr, sg_len, bd_count); 1910 } else { 1911 sg_frags = 1; 1912 bd[bd_count].sge_addr.lo = addr & 0xffffffff; 1913 bd[bd_count].sge_addr.hi = addr >> 32; 1914 bd[bd_count].sge_len = sg_len; 1915 } 1916 byte_count += sg_len; 1917 bd_count += sg_frags; 1918 } 1919 1920 if (byte_count != scsi_bufflen(sc)) 1921 QEDI_ERR(&qedi->dbg_ctx, 1922 "byte_count = %d != scsi_bufflen = %d\n", byte_count, 1923 scsi_bufflen(sc)); 1924 else 1925 QEDI_INFO(&qedi->dbg_ctx, QEDI_LOG_IO, "byte_count = %d\n", 1926 byte_count); 1927 1928 WARN_ON(byte_count != scsi_bufflen(sc)); 1929 1930 return bd_count; 1931 } 1932 1933 static void qedi_iscsi_map_sg_list(struct qedi_cmd *cmd) 1934 { 1935 int bd_count; 1936 struct scsi_cmnd *sc = cmd->scsi_cmd; 1937 1938 if (scsi_sg_count(sc)) { 1939 bd_count = qedi_map_scsi_sg(cmd->conn->qedi, cmd); 1940 if (bd_count == 0) 1941 return; 1942 } else { 1943 struct scsi_sge *bd = cmd->io_tbl.sge_tbl; 1944 1945 bd[0].sge_addr.lo = 0; 1946 bd[0].sge_addr.hi = 0; 1947 bd[0].sge_len = 0; 1948 bd_count = 0; 1949 } 1950 cmd->io_tbl.sge_valid = bd_count; 1951 } 1952 1953 static void qedi_cpy_scsi_cdb(struct scsi_cmnd *sc, u32 *dstp) 1954 { 1955 u32 dword; 1956 int lpcnt; 1957 u8 *srcp; 1958 1959 lpcnt = sc->cmd_len / sizeof(dword); 1960 srcp = (u8 *)sc->cmnd; 1961 while (lpcnt--) { 1962 memcpy(&dword, (const void *)srcp, 4); 1963 *dstp = cpu_to_be32(dword); 1964 srcp += 4; 1965 dstp++; 1966 } 1967 if (sc->cmd_len & 0x3) { 1968 dword = (u32)srcp[0] | ((u32)srcp[1] << 8); 1969 *dstp = cpu_to_be32(dword); 1970 } 1971 } 1972 1973 void qedi_trace_io(struct qedi_ctx *qedi, struct iscsi_task *task, 1974 u16 tid, int8_t direction) 1975 { 1976 struct qedi_io_log *io_log; 1977 struct iscsi_conn *conn = task->conn; 1978 struct qedi_conn *qedi_conn = conn->dd_data; 1979 struct scsi_cmnd *sc_cmd = task->sc; 1980 unsigned long flags; 1981 1982 spin_lock_irqsave(&qedi->io_trace_lock, flags); 1983 1984 io_log = &qedi->io_trace_buf[qedi->io_trace_idx]; 1985 io_log->direction = direction; 1986 io_log->task_id = tid; 1987 io_log->cid = qedi_conn->iscsi_conn_id; 1988 io_log->lun = sc_cmd->device->lun; 1989 io_log->op = sc_cmd->cmnd[0]; 1990 io_log->lba[0] = sc_cmd->cmnd[2]; 1991 io_log->lba[1] = sc_cmd->cmnd[3]; 1992 io_log->lba[2] = sc_cmd->cmnd[4]; 1993 io_log->lba[3] = sc_cmd->cmnd[5]; 1994 io_log->bufflen = scsi_bufflen(sc_cmd); 1995 io_log->sg_count = scsi_sg_count(sc_cmd); 1996 io_log->fast_sgs = qedi->fast_sgls; 1997 io_log->cached_sgs = qedi->cached_sgls; 1998 io_log->slow_sgs = qedi->slow_sgls; 1999 io_log->cached_sge = qedi->use_cached_sge; 2000 io_log->slow_sge = qedi->use_slow_sge; 2001 io_log->fast_sge = qedi->use_fast_sge; 2002 io_log->result = sc_cmd->result; 2003 io_log->jiffies = jiffies; 2004 io_log->blk_req_cpu = smp_processor_id(); 2005 2006 if (direction == QEDI_IO_TRACE_REQ) { 2007 /* For requests we only care about the submission CPU */ 2008 io_log->req_cpu = smp_processor_id() % qedi->num_queues; 2009 io_log->intr_cpu = 0; 2010 io_log->blk_rsp_cpu = 0; 2011 } else if (direction == QEDI_IO_TRACE_RSP) { 2012 io_log->req_cpu = smp_processor_id() % qedi->num_queues; 2013 io_log->intr_cpu = qedi->intr_cpu; 2014 io_log->blk_rsp_cpu = smp_processor_id(); 2015 } 2016 2017 qedi->io_trace_idx++; 2018 if (qedi->io_trace_idx == QEDI_IO_TRACE_SIZE) 2019 qedi->io_trace_idx = 0; 2020 2021 qedi->use_cached_sge = false; 2022 qedi->use_slow_sge = false; 2023 qedi->use_fast_sge = false; 2024 2025 spin_unlock_irqrestore(&qedi->io_trace_lock, flags); 2026 } 2027 2028 int qedi_iscsi_send_ioreq(struct iscsi_task *task) 2029 { 2030 struct iscsi_conn *conn = task->conn; 2031 struct iscsi_session *session = conn->session; 2032 struct Scsi_Host *shost = iscsi_session_to_shost(session->cls_session); 2033 struct qedi_ctx *qedi = iscsi_host_priv(shost); 2034 struct qedi_conn *qedi_conn = conn->dd_data; 2035 struct qedi_cmd *cmd = task->dd_data; 2036 struct scsi_cmnd *sc = task->sc; 2037 struct iscsi_cmd_hdr cmd_pdu_header; 2038 struct scsi_sgl_task_params tx_sgl_task_params; 2039 struct scsi_sgl_task_params rx_sgl_task_params; 2040 struct scsi_sgl_task_params *prx_sgl = NULL; 2041 struct scsi_sgl_task_params *ptx_sgl = NULL; 2042 struct iscsi_task_params task_params; 2043 struct iscsi_conn_params conn_params; 2044 struct scsi_initiator_cmd_params cmd_params; 2045 struct e4_iscsi_task_context *fw_task_ctx; 2046 struct iscsi_cls_conn *cls_conn; 2047 struct iscsi_scsi_req *hdr = (struct iscsi_scsi_req *)task->hdr; 2048 enum iscsi_task_type task_type = MAX_ISCSI_TASK_TYPE; 2049 struct qedi_endpoint *ep; 2050 u32 scsi_lun[2]; 2051 s16 tid = 0; 2052 u16 sq_idx = 0; 2053 u16 cq_idx; 2054 int rval = 0; 2055 2056 ep = qedi_conn->ep; 2057 cls_conn = qedi_conn->cls_conn; 2058 conn = cls_conn->dd_data; 2059 2060 qedi_iscsi_map_sg_list(cmd); 2061 int_to_scsilun(sc->device->lun, (struct scsi_lun *)scsi_lun); 2062 2063 tid = qedi_get_task_idx(qedi); 2064 if (tid == -1) 2065 return -ENOMEM; 2066 2067 fw_task_ctx = 2068 (struct e4_iscsi_task_context *)qedi_get_task_mem(&qedi->tasks, 2069 tid); 2070 memset(fw_task_ctx, 0, sizeof(struct e4_iscsi_task_context)); 2071 2072 cmd->task_id = tid; 2073 2074 memset(&task_params, 0, sizeof(task_params)); 2075 memset(&cmd_pdu_header, 0, sizeof(cmd_pdu_header)); 2076 memset(&tx_sgl_task_params, 0, sizeof(tx_sgl_task_params)); 2077 memset(&rx_sgl_task_params, 0, sizeof(rx_sgl_task_params)); 2078 memset(&conn_params, 0, sizeof(conn_params)); 2079 memset(&cmd_params, 0, sizeof(cmd_params)); 2080 2081 cq_idx = smp_processor_id() % qedi->num_queues; 2082 /* Update header info */ 2083 SET_FIELD(cmd_pdu_header.flags_attr, ISCSI_CMD_HDR_ATTR, 2084 ISCSI_ATTR_SIMPLE); 2085 if (hdr->cdb[0] != TEST_UNIT_READY) { 2086 if (sc->sc_data_direction == DMA_TO_DEVICE) { 2087 SET_FIELD(cmd_pdu_header.flags_attr, 2088 ISCSI_CMD_HDR_WRITE, 1); 2089 task_type = ISCSI_TASK_TYPE_INITIATOR_WRITE; 2090 } else { 2091 SET_FIELD(cmd_pdu_header.flags_attr, 2092 ISCSI_CMD_HDR_READ, 1); 2093 task_type = ISCSI_TASK_TYPE_INITIATOR_READ; 2094 } 2095 } 2096 2097 cmd_pdu_header.lun.lo = be32_to_cpu(scsi_lun[0]); 2098 cmd_pdu_header.lun.hi = be32_to_cpu(scsi_lun[1]); 2099 2100 qedi_update_itt_map(qedi, tid, task->itt, cmd); 2101 cmd_pdu_header.itt = qedi_set_itt(tid, get_itt(task->itt)); 2102 cmd_pdu_header.expected_transfer_length = cpu_to_be32(hdr->data_length); 2103 cmd_pdu_header.hdr_second_dword = ntoh24(hdr->dlength); 2104 cmd_pdu_header.cmd_sn = be32_to_cpu(hdr->cmdsn); 2105 cmd_pdu_header.hdr_first_byte = hdr->opcode; 2106 qedi_cpy_scsi_cdb(sc, (u32 *)cmd_pdu_header.cdb); 2107 2108 /* Fill tx AHS and rx buffer */ 2109 if (task_type == ISCSI_TASK_TYPE_INITIATOR_WRITE) { 2110 tx_sgl_task_params.sgl = cmd->io_tbl.sge_tbl; 2111 tx_sgl_task_params.sgl_phys_addr.lo = 2112 (u32)(cmd->io_tbl.sge_tbl_dma); 2113 tx_sgl_task_params.sgl_phys_addr.hi = 2114 (u32)((u64)cmd->io_tbl.sge_tbl_dma >> 32); 2115 tx_sgl_task_params.total_buffer_size = scsi_bufflen(sc); 2116 tx_sgl_task_params.num_sges = cmd->io_tbl.sge_valid; 2117 if (cmd->use_slowpath) 2118 tx_sgl_task_params.small_mid_sge = true; 2119 } else if (task_type == ISCSI_TASK_TYPE_INITIATOR_READ) { 2120 rx_sgl_task_params.sgl = cmd->io_tbl.sge_tbl; 2121 rx_sgl_task_params.sgl_phys_addr.lo = 2122 (u32)(cmd->io_tbl.sge_tbl_dma); 2123 rx_sgl_task_params.sgl_phys_addr.hi = 2124 (u32)((u64)cmd->io_tbl.sge_tbl_dma >> 32); 2125 rx_sgl_task_params.total_buffer_size = scsi_bufflen(sc); 2126 rx_sgl_task_params.num_sges = cmd->io_tbl.sge_valid; 2127 } 2128 2129 /* Add conn param */ 2130 conn_params.first_burst_length = conn->session->first_burst; 2131 conn_params.max_send_pdu_length = conn->max_xmit_dlength; 2132 conn_params.max_burst_length = conn->session->max_burst; 2133 if (conn->session->initial_r2t_en) 2134 conn_params.initial_r2t = true; 2135 if (conn->session->imm_data_en) 2136 conn_params.immediate_data = true; 2137 2138 /* Add cmd params */ 2139 cmd_params.sense_data_buffer_phys_addr.lo = (u32)cmd->sense_buffer_dma; 2140 cmd_params.sense_data_buffer_phys_addr.hi = 2141 (u32)((u64)cmd->sense_buffer_dma >> 32); 2142 /* Fill fw input params */ 2143 task_params.context = fw_task_ctx; 2144 task_params.conn_icid = (u16)qedi_conn->iscsi_conn_id; 2145 task_params.itid = tid; 2146 task_params.cq_rss_number = cq_idx; 2147 if (task_type == ISCSI_TASK_TYPE_INITIATOR_WRITE) 2148 task_params.tx_io_size = scsi_bufflen(sc); 2149 else if (task_type == ISCSI_TASK_TYPE_INITIATOR_READ) 2150 task_params.rx_io_size = scsi_bufflen(sc); 2151 2152 sq_idx = qedi_get_wqe_idx(qedi_conn); 2153 task_params.sqe = &ep->sq[sq_idx]; 2154 2155 QEDI_INFO(&qedi->dbg_ctx, QEDI_LOG_IO, 2156 "%s: %s-SGL: sg_len=0x%x num_sges=0x%x first-sge-lo=0x%x first-sge-hi=0x%x\n", 2157 (task_type == ISCSI_TASK_TYPE_INITIATOR_WRITE) ? 2158 "Write " : "Read ", (cmd->io_tbl.sge_valid == 1) ? 2159 "Single" : (cmd->use_slowpath ? "SLOW" : "FAST"), 2160 (u16)cmd->io_tbl.sge_valid, scsi_bufflen(sc), 2161 (u32)(cmd->io_tbl.sge_tbl_dma), 2162 (u32)((u64)cmd->io_tbl.sge_tbl_dma >> 32)); 2163 2164 memset(task_params.sqe, 0, sizeof(struct iscsi_wqe)); 2165 2166 if (task_params.tx_io_size != 0) 2167 ptx_sgl = &tx_sgl_task_params; 2168 if (task_params.rx_io_size != 0) 2169 prx_sgl = &rx_sgl_task_params; 2170 2171 rval = init_initiator_rw_iscsi_task(&task_params, &conn_params, 2172 &cmd_params, &cmd_pdu_header, 2173 ptx_sgl, prx_sgl, 2174 NULL); 2175 if (rval) 2176 return -1; 2177 2178 spin_lock(&qedi_conn->list_lock); 2179 list_add_tail(&cmd->io_cmd, &qedi_conn->active_cmd_list); 2180 cmd->io_cmd_in_list = true; 2181 qedi_conn->active_cmd_count++; 2182 spin_unlock(&qedi_conn->list_lock); 2183 2184 qedi_ring_doorbell(qedi_conn); 2185 return 0; 2186 } 2187 2188 int qedi_iscsi_cleanup_task(struct iscsi_task *task, bool mark_cmd_node_deleted) 2189 { 2190 struct iscsi_task_params task_params; 2191 struct qedi_endpoint *ep; 2192 struct iscsi_conn *conn = task->conn; 2193 struct qedi_conn *qedi_conn = conn->dd_data; 2194 struct qedi_cmd *cmd = task->dd_data; 2195 u16 sq_idx = 0; 2196 int rval = 0; 2197 2198 QEDI_INFO(&qedi_conn->qedi->dbg_ctx, QEDI_LOG_SCSI_TM, 2199 "issue cleanup tid=0x%x itt=0x%x task_state=%d cmd_state=0%x cid=0x%x\n", 2200 cmd->task_id, get_itt(task->itt), task->state, 2201 cmd->state, qedi_conn->iscsi_conn_id); 2202 2203 memset(&task_params, 0, sizeof(task_params)); 2204 ep = qedi_conn->ep; 2205 2206 sq_idx = qedi_get_wqe_idx(qedi_conn); 2207 2208 task_params.sqe = &ep->sq[sq_idx]; 2209 memset(task_params.sqe, 0, sizeof(struct iscsi_wqe)); 2210 task_params.itid = cmd->task_id; 2211 2212 rval = init_cleanup_task(&task_params); 2213 if (rval) 2214 return rval; 2215 2216 qedi_ring_doorbell(qedi_conn); 2217 return 0; 2218 } 2219