Lines matching the full identifier "cmd" in the NVMe-over-TCP target driver, drivers/nvme/target/tcp.c. Each entry gives the file line number, the matching source line, the enclosing function, and (where the indexer tagged it) the identifier's role: member, argument, or local.

150 	struct nvmet_tcp_cmd	*cmd;  member
195 static void nvmet_tcp_free_cmd_buffers(struct nvmet_tcp_cmd *cmd);
198 struct nvmet_tcp_cmd *cmd) in nvmet_tcp_cmd_tag() argument
205 return cmd - queue->cmds; in nvmet_tcp_cmd_tag()
208 static inline bool nvmet_tcp_has_data_in(struct nvmet_tcp_cmd *cmd) in nvmet_tcp_has_data_in() argument
210 return nvme_is_write(cmd->req.cmd) && in nvmet_tcp_has_data_in()
211 cmd->rbytes_done < cmd->req.transfer_len; in nvmet_tcp_has_data_in()
214 static inline bool nvmet_tcp_need_data_in(struct nvmet_tcp_cmd *cmd) in nvmet_tcp_need_data_in() argument
216 return nvmet_tcp_has_data_in(cmd) && !cmd->req.cqe->status; in nvmet_tcp_need_data_in()
219 static inline bool nvmet_tcp_need_data_out(struct nvmet_tcp_cmd *cmd) in nvmet_tcp_need_data_out() argument
221 return !nvme_is_write(cmd->req.cmd) && in nvmet_tcp_need_data_out()
222 cmd->req.transfer_len > 0 && in nvmet_tcp_need_data_out()
223 !cmd->req.cqe->status; in nvmet_tcp_need_data_out()
226 static inline bool nvmet_tcp_has_inline_data(struct nvmet_tcp_cmd *cmd) in nvmet_tcp_has_inline_data() argument
228 return nvme_is_write(cmd->req.cmd) && cmd->pdu_len && in nvmet_tcp_has_inline_data()
229 !cmd->rbytes_done; in nvmet_tcp_has_inline_data()
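
The four helpers above classify a command by transfer direction and progress. A minimal userspace sketch of the same predicate logic, assuming a simplified command struct (an is_write flag stands in for nvme_is_write(cmd->req.cmd), and status for cmd->req.cqe->status):

#include <stdbool.h>
#include <stdint.h>

struct fake_cmd {
        bool     is_write;      /* stands in for nvme_is_write(cmd->req.cmd) */
        uint32_t rbytes_done;   /* host-to-controller bytes received so far */
        uint32_t transfer_len;  /* total expected payload length */
        uint32_t pdu_len;       /* payload carried inline in the command PDU */
        uint16_t status;        /* stands in for cmd->req.cqe->status */
};

/* Write command that is still owed payload from the host. */
static bool has_data_in(const struct fake_cmd *c)
{
        return c->is_write && c->rbytes_done < c->transfer_len;
}

/* Only solicit more data if the command has not already failed. */
static bool need_data_in(const struct fake_cmd *c)
{
        return has_data_in(c) && !c->status;
}

/* Successful read command with a payload to push back to the host. */
static bool need_data_out(const struct fake_cmd *c)
{
        return !c->is_write && c->transfer_len > 0 && !c->status;
}

/* First bytes of a write arriving inside the command PDU itself. */
static bool has_inline_data(const struct fake_cmd *c)
{
        return c->is_write && c->pdu_len && !c->rbytes_done;
}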
235 struct nvmet_tcp_cmd *cmd; in nvmet_tcp_get_cmd() local
237 cmd = list_first_entry_or_null(&queue->free_list, in nvmet_tcp_get_cmd()
239 if (!cmd) in nvmet_tcp_get_cmd()
241 list_del_init(&cmd->entry); in nvmet_tcp_get_cmd()
243 cmd->rbytes_done = cmd->wbytes_done = 0; in nvmet_tcp_get_cmd()
244 cmd->pdu_len = 0; in nvmet_tcp_get_cmd()
245 cmd->pdu_recv = 0; in nvmet_tcp_get_cmd()
246 cmd->iov = NULL; in nvmet_tcp_get_cmd()
247 cmd->flags = 0; in nvmet_tcp_get_cmd()
248 return cmd; in nvmet_tcp_get_cmd()
251 static inline void nvmet_tcp_put_cmd(struct nvmet_tcp_cmd *cmd) in nvmet_tcp_put_cmd() argument
253 if (unlikely(cmd == &cmd->queue->connect)) in nvmet_tcp_put_cmd()
256 list_add_tail(&cmd->entry, &cmd->queue->free_list); in nvmet_tcp_put_cmd()
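
nvmet_tcp_get_cmd() and nvmet_tcp_put_cmd() implement a per-queue command pool: slots are preallocated, handed out from a free list, reset on reuse, and returned on completion, with the dedicated connect command never going back on the list. A hedged userspace sketch of that pattern (names and the TAILQ plumbing are illustrative, not the kernel's list_head API; assumes TAILQ_INIT was run at setup):

#include <stddef.h>
#include <sys/queue.h>

struct demo_cmd {
        TAILQ_ENTRY(demo_cmd) entry;
        size_t rbytes_done, wbytes_done, pdu_len;
        int flags;
};

TAILQ_HEAD(cmd_list, demo_cmd);

struct demo_queue {
        struct cmd_list free_list;
        struct demo_cmd connect;        /* dedicated slot, lives outside the pool */
};

static struct demo_cmd *demo_get_cmd(struct demo_queue *q)
{
        struct demo_cmd *c = TAILQ_FIRST(&q->free_list);

        if (!c)
                return NULL;            /* pool exhausted: caller backs off */
        TAILQ_REMOVE(&q->free_list, c, entry);
        /* Reset per-command state before reuse, as nvmet_tcp_get_cmd() does. */
        c->rbytes_done = c->wbytes_done = 0;
        c->pdu_len = 0;
        c->flags = 0;
        return c;
}

static void demo_put_cmd(struct demo_queue *q, struct demo_cmd *c)
{
        if (c == &q->connect)           /* mirrors the unlikely() check above */
                return;
        TAILQ_INSERT_TAIL(&q->free_list, c, entry);
}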
327 /* If cmd buffers are NULL, no operation is performed */
328 static void nvmet_tcp_free_cmd_buffers(struct nvmet_tcp_cmd *cmd) in nvmet_tcp_free_cmd_buffers() argument
330 kfree(cmd->iov); in nvmet_tcp_free_cmd_buffers()
331 sgl_free(cmd->req.sg); in nvmet_tcp_free_cmd_buffers()
332 cmd->iov = NULL; in nvmet_tcp_free_cmd_buffers()
333 cmd->req.sg = NULL; in nvmet_tcp_free_cmd_buffers()
336 static void nvmet_tcp_build_pdu_iovec(struct nvmet_tcp_cmd *cmd) in nvmet_tcp_build_pdu_iovec() argument
338 struct bio_vec *iov = cmd->iov; in nvmet_tcp_build_pdu_iovec()
343 length = cmd->pdu_len; in nvmet_tcp_build_pdu_iovec()
345 offset = cmd->rbytes_done; in nvmet_tcp_build_pdu_iovec()
346 cmd->sg_idx = offset / PAGE_SIZE; in nvmet_tcp_build_pdu_iovec()
348 sg = &cmd->req.sg[cmd->sg_idx]; in nvmet_tcp_build_pdu_iovec()
362 iov_iter_bvec(&cmd->recv_msg.msg_iter, ITER_DEST, cmd->iov, in nvmet_tcp_build_pdu_iovec()
363 nr_pages, cmd->pdu_len); in nvmet_tcp_build_pdu_iovec()
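
nvmet_tcp_build_pdu_iovec() points a bio_vec array at the slice of the command's scatterlist that the incoming payload covers, starting at rbytes_done (hence sg_idx = offset / PAGE_SIZE) and capping each entry at a page. A rough userspace analogue that slices a paged buffer the same way, assuming plain struct iovec in place of bio_vec:

#include <stddef.h>
#include <sys/uio.h>

#define DEMO_PAGE_SIZE 4096u

static int build_iovec(struct iovec *iov, int max_iov,
                       char *buf, size_t offset, size_t length)
{
        int n = 0;

        while (length && n < max_iov) {
                size_t page_off = offset % DEMO_PAGE_SIZE;
                size_t chunk = DEMO_PAGE_SIZE - page_off;

                if (chunk > length)
                        chunk = length;
                iov[n].iov_base = buf + offset; /* this page's slice */
                iov[n].iov_len = chunk;
                offset += chunk;
                length -= chunk;
                n++;
        }
        return n;       /* entry count, like nr_pages fed to iov_iter_bvec() */
}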
384 static int nvmet_tcp_map_data(struct nvmet_tcp_cmd *cmd) in nvmet_tcp_map_data() argument
386 struct nvme_sgl_desc *sgl = &cmd->req.cmd->common.dptr.sgl; in nvmet_tcp_map_data()
394 if (!nvme_is_write(cmd->req.cmd)) in nvmet_tcp_map_data()
397 if (len > cmd->req.port->inline_data_size) in nvmet_tcp_map_data()
399 cmd->pdu_len = len; in nvmet_tcp_map_data()
401 cmd->req.transfer_len += len; in nvmet_tcp_map_data()
403 cmd->req.sg = sgl_alloc(len, GFP_KERNEL, &cmd->req.sg_cnt); in nvmet_tcp_map_data()
404 if (!cmd->req.sg) in nvmet_tcp_map_data()
406 cmd->cur_sg = cmd->req.sg; in nvmet_tcp_map_data()
408 if (nvmet_tcp_has_data_in(cmd)) { in nvmet_tcp_map_data()
409 cmd->iov = kmalloc_array(cmd->req.sg_cnt, in nvmet_tcp_map_data()
410 sizeof(*cmd->iov), GFP_KERNEL); in nvmet_tcp_map_data()
411 if (!cmd->iov) in nvmet_tcp_map_data()
417 nvmet_tcp_free_cmd_buffers(cmd); in nvmet_tcp_map_data()
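
The mapping path distinguishes reads (nothing inline to map) from writes, and rejects writes whose SGL length exceeds the port's advertised inline_data_size before allocating the scatterlist and, for data-in commands, the iovec array. A simplified sketch of just that decision, with made-up field names and -EPROTO standing in for the NVMe status the kernel actually returns (the real function also inspects the SGL descriptor type, omitted here):

#include <stdbool.h>
#include <stdint.h>
#include <errno.h>

struct map_demo {
        bool is_write;
        uint32_t sgl_len;               /* from the command's SGL descriptor */
        uint32_t inline_data_size;      /* per-port limit advertised at connect */
        uint32_t pdu_len;
        uint32_t transfer_len;
};

static int demo_map_data(struct map_demo *m)
{
        if (!m->is_write)
                return 0;                       /* reads carry no inline data */
        if (m->sgl_len > m->inline_data_size)
                return -EPROTO;                 /* host exceeded the inline limit */
        m->pdu_len = m->sgl_len;                /* payload rides in this PDU */
        m->transfer_len += m->sgl_len;
        return 0;
}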
422 struct nvmet_tcp_cmd *cmd) in nvmet_tcp_calc_ddgst() argument
424 ahash_request_set_crypt(hash, cmd->req.sg, in nvmet_tcp_calc_ddgst()
425 (void *)&cmd->exp_ddgst, cmd->req.transfer_len); in nvmet_tcp_calc_ddgst()
429 static void nvmet_setup_c2h_data_pdu(struct nvmet_tcp_cmd *cmd) in nvmet_setup_c2h_data_pdu() argument
431 struct nvme_tcp_data_pdu *pdu = cmd->data_pdu; in nvmet_setup_c2h_data_pdu()
432 struct nvmet_tcp_queue *queue = cmd->queue; in nvmet_setup_c2h_data_pdu()
433 u8 hdgst = nvmet_tcp_hdgst_len(cmd->queue); in nvmet_setup_c2h_data_pdu()
434 u8 ddgst = nvmet_tcp_ddgst_len(cmd->queue); in nvmet_setup_c2h_data_pdu()
436 cmd->offset = 0; in nvmet_setup_c2h_data_pdu()
437 cmd->state = NVMET_TCP_SEND_DATA_PDU; in nvmet_setup_c2h_data_pdu()
446 cmd->req.transfer_len + ddgst); in nvmet_setup_c2h_data_pdu()
447 pdu->command_id = cmd->req.cqe->command_id; in nvmet_setup_c2h_data_pdu()
448 pdu->data_length = cpu_to_le32(cmd->req.transfer_len); in nvmet_setup_c2h_data_pdu()
449 pdu->data_offset = cpu_to_le32(cmd->wbytes_done); in nvmet_setup_c2h_data_pdu()
453 nvmet_tcp_calc_ddgst(queue->snd_hash, cmd); in nvmet_setup_c2h_data_pdu()
456 if (cmd->queue->hdr_digest) { in nvmet_setup_c2h_data_pdu()
462 static void nvmet_setup_r2t_pdu(struct nvmet_tcp_cmd *cmd) in nvmet_setup_r2t_pdu() argument
464 struct nvme_tcp_r2t_pdu *pdu = cmd->r2t_pdu; in nvmet_setup_r2t_pdu()
465 struct nvmet_tcp_queue *queue = cmd->queue; in nvmet_setup_r2t_pdu()
466 u8 hdgst = nvmet_tcp_hdgst_len(cmd->queue); in nvmet_setup_r2t_pdu()
468 cmd->offset = 0; in nvmet_setup_r2t_pdu()
469 cmd->state = NVMET_TCP_SEND_R2T; in nvmet_setup_r2t_pdu()
477 pdu->command_id = cmd->req.cmd->common.command_id; in nvmet_setup_r2t_pdu()
478 pdu->ttag = nvmet_tcp_cmd_tag(cmd->queue, cmd); in nvmet_setup_r2t_pdu()
479 pdu->r2t_length = cpu_to_le32(cmd->req.transfer_len - cmd->rbytes_done); in nvmet_setup_r2t_pdu()
480 pdu->r2t_offset = cpu_to_le32(cmd->rbytes_done); in nvmet_setup_r2t_pdu()
481 if (cmd->queue->hdr_digest) { in nvmet_setup_r2t_pdu()
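
An R2T asks the host to transmit the bytes still owed, resuming at rbytes_done, and tags the request with the command's slot index (nvmet_tcp_cmd_tag() is just cmd - queue->cmds) so the later H2CData PDU can be routed back via queue->cmds[ttag]. A sketch of filling the interesting fields, leaving out the cpu_to_le32() conversions the wire format requires:

#include <stdint.h>

struct demo_r2t {
        uint16_t ttag;
        uint32_t r2t_offset;
        uint32_t r2t_length;
};

static void demo_setup_r2t(struct demo_r2t *pdu, uint16_t slot,
                           uint32_t transfer_len, uint32_t rbytes_done)
{
        pdu->ttag = slot;                       /* index into queue->cmds */
        pdu->r2t_offset = rbytes_done;          /* resume point */
        pdu->r2t_length = transfer_len - rbytes_done;   /* bytes still owed */
}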
487 static void nvmet_setup_response_pdu(struct nvmet_tcp_cmd *cmd) in nvmet_setup_response_pdu() argument
489 struct nvme_tcp_rsp_pdu *pdu = cmd->rsp_pdu; in nvmet_setup_response_pdu()
490 struct nvmet_tcp_queue *queue = cmd->queue; in nvmet_setup_response_pdu()
491 u8 hdgst = nvmet_tcp_hdgst_len(cmd->queue); in nvmet_setup_response_pdu()
493 cmd->offset = 0; in nvmet_setup_response_pdu()
494 cmd->state = NVMET_TCP_SEND_RESPONSE; in nvmet_setup_response_pdu()
501 if (cmd->queue->hdr_digest) { in nvmet_setup_response_pdu()
510 struct nvmet_tcp_cmd *cmd; in nvmet_tcp_process_resp_list() local
513 cmd = llist_entry(node, struct nvmet_tcp_cmd, lentry); in nvmet_tcp_process_resp_list()
514 list_add(&cmd->entry, &queue->resp_send_list); in nvmet_tcp_process_resp_list()
547 struct nvmet_tcp_cmd *cmd = in nvmet_tcp_queue_response() local
549 struct nvmet_tcp_queue *queue = cmd->queue; in nvmet_tcp_queue_response()
557 queue_cmd = READ_ONCE(queue->cmd); in nvmet_tcp_queue_response()
559 if (unlikely(cmd == queue_cmd)) { in nvmet_tcp_queue_response()
560 sgl = &cmd->req.cmd->common.dptr.sgl; in nvmet_tcp_queue_response()
569 len && len <= cmd->req.port->inline_data_size && in nvmet_tcp_queue_response()
570 nvme_is_write(cmd->req.cmd)) in nvmet_tcp_queue_response()
574 llist_add(&cmd->lentry, &queue->resp_list); in nvmet_tcp_queue_response()
575 queue_work_on(queue_cpu(queue), nvmet_tcp_wq, &cmd->queue->io_work); in nvmet_tcp_queue_response()
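
Completions can fire from any context, so nvmet_tcp_queue_response() pushes onto a lock-free LIFO with llist_add() and io_work later drains the whole batch into the ordered send list (nvmet_tcp_process_resp_list() above). A C11-atomics sketch of that producer/consumer split, standing in for the kernel's llist API:

#include <stdatomic.h>
#include <stddef.h>

struct resp_node {
        struct resp_node *next;
};

static _Atomic(struct resp_node *) resp_list;

/* Producer side: what llist_add() does in nvmet_tcp_queue_response(). */
static void push_resp(struct resp_node *n)
{
        n->next = atomic_load_explicit(&resp_list, memory_order_relaxed);
        while (!atomic_compare_exchange_weak_explicit(&resp_list, &n->next, n,
                                                      memory_order_release,
                                                      memory_order_relaxed))
                ;       /* CAS failure reloads n->next; retry */
}

/* Consumer side: like llist_del_all(), grab the whole batch in one exchange. */
static struct resp_node *pop_all(void)
{
        return atomic_exchange_explicit(&resp_list, NULL, memory_order_acquire);
}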
578 static void nvmet_tcp_execute_request(struct nvmet_tcp_cmd *cmd) in nvmet_tcp_execute_request() argument
580 if (unlikely(cmd->flags & NVMET_TCP_F_INIT_FAILED)) in nvmet_tcp_execute_request()
581 nvmet_tcp_queue_response(&cmd->req); in nvmet_tcp_execute_request()
583 cmd->req.execute(&cmd->req); in nvmet_tcp_execute_request()
586 static int nvmet_try_send_data_pdu(struct nvmet_tcp_cmd *cmd) in nvmet_try_send_data_pdu() argument
592 u8 hdgst = nvmet_tcp_hdgst_len(cmd->queue); in nvmet_try_send_data_pdu()
593 int left = sizeof(*cmd->data_pdu) - cmd->offset + hdgst; in nvmet_try_send_data_pdu()
596 bvec_set_virt(&bvec, (void *)cmd->data_pdu + cmd->offset, left); in nvmet_try_send_data_pdu()
598 ret = sock_sendmsg(cmd->queue->sock, &msg); in nvmet_try_send_data_pdu()
602 cmd->offset += ret; in nvmet_try_send_data_pdu()
608 cmd->state = NVMET_TCP_SEND_DATA; in nvmet_try_send_data_pdu()
609 cmd->offset = 0; in nvmet_try_send_data_pdu()
613 static int nvmet_try_send_data(struct nvmet_tcp_cmd *cmd, bool last_in_batch) in nvmet_try_send_data() argument
615 struct nvmet_tcp_queue *queue = cmd->queue; in nvmet_try_send_data()
618 while (cmd->cur_sg) { in nvmet_try_send_data()
622 struct page *page = sg_page(cmd->cur_sg); in nvmet_try_send_data()
624 u32 left = cmd->cur_sg->length - cmd->offset; in nvmet_try_send_data()
626 if ((!last_in_batch && cmd->queue->send_list_len) || in nvmet_try_send_data()
627 cmd->wbytes_done + left < cmd->req.transfer_len || in nvmet_try_send_data()
631 bvec_set_page(&bvec, page, left, cmd->offset); in nvmet_try_send_data()
633 ret = sock_sendmsg(cmd->queue->sock, &msg); in nvmet_try_send_data()
637 cmd->offset += ret; in nvmet_try_send_data()
638 cmd->wbytes_done += ret; in nvmet_try_send_data()
641 if (cmd->offset == cmd->cur_sg->length) { in nvmet_try_send_data()
642 cmd->cur_sg = sg_next(cmd->cur_sg); in nvmet_try_send_data()
643 cmd->offset = 0; in nvmet_try_send_data()
648 cmd->state = NVMET_TCP_SEND_DDGST; in nvmet_try_send_data()
649 cmd->offset = 0; in nvmet_try_send_data()
652 cmd->queue->snd_cmd = NULL; in nvmet_try_send_data()
653 nvmet_tcp_put_cmd(cmd); in nvmet_try_send_data()
655 nvmet_setup_response_pdu(cmd); in nvmet_try_send_data()
660 nvmet_tcp_free_cmd_buffers(cmd); in nvmet_try_send_data()
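
The condition at file lines 626-627 keeps the TCP stack corked while more sends are queued behind this command or while the command itself still has payload left, and flushes on the final chunk. A small sketch of that flag decision, assuming Linux's MSG_MORE semantics (the real code sets the flag on the msghdr passed to sock_sendmsg()):

#include <stdbool.h>
#include <stddef.h>
#include <sys/socket.h>

static int msg_flags_for_chunk(bool last_in_batch, int send_list_len,
                               size_t wbytes_done, size_t left,
                               size_t transfer_len)
{
        int flags = MSG_DONTWAIT;

        if ((!last_in_batch && send_list_len) ||
            wbytes_done + left < transfer_len)
                flags |= MSG_MORE;      /* more follows: let TCP coalesce */
        return flags;
}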
666 static int nvmet_try_send_response(struct nvmet_tcp_cmd *cmd, in nvmet_try_send_response() argument
671 u8 hdgst = nvmet_tcp_hdgst_len(cmd->queue); in nvmet_try_send_response()
672 int left = sizeof(*cmd->rsp_pdu) - cmd->offset + hdgst; in nvmet_try_send_response()
675 if (!last_in_batch && cmd->queue->send_list_len) in nvmet_try_send_response()
680 bvec_set_virt(&bvec, (void *)cmd->rsp_pdu + cmd->offset, left); in nvmet_try_send_response()
682 ret = sock_sendmsg(cmd->queue->sock, &msg); in nvmet_try_send_response()
685 cmd->offset += ret; in nvmet_try_send_response()
691 nvmet_tcp_free_cmd_buffers(cmd); in nvmet_try_send_response()
692 cmd->queue->snd_cmd = NULL; in nvmet_try_send_response()
693 nvmet_tcp_put_cmd(cmd); in nvmet_try_send_response()
697 static int nvmet_try_send_r2t(struct nvmet_tcp_cmd *cmd, bool last_in_batch) in nvmet_try_send_r2t() argument
701 u8 hdgst = nvmet_tcp_hdgst_len(cmd->queue); in nvmet_try_send_r2t()
702 int left = sizeof(*cmd->r2t_pdu) - cmd->offset + hdgst; in nvmet_try_send_r2t()
705 if (!last_in_batch && cmd->queue->send_list_len) in nvmet_try_send_r2t()
710 bvec_set_virt(&bvec, (void *)cmd->r2t_pdu + cmd->offset, left); in nvmet_try_send_r2t()
712 ret = sock_sendmsg(cmd->queue->sock, &msg); in nvmet_try_send_r2t()
715 cmd->offset += ret; in nvmet_try_send_r2t()
721 cmd->queue->snd_cmd = NULL; in nvmet_try_send_r2t()
725 static int nvmet_try_send_ddgst(struct nvmet_tcp_cmd *cmd, bool last_in_batch) in nvmet_try_send_ddgst() argument
727 struct nvmet_tcp_queue *queue = cmd->queue; in nvmet_try_send_ddgst()
728 int left = NVME_TCP_DIGEST_LENGTH - cmd->offset; in nvmet_try_send_ddgst()
731 .iov_base = (u8 *)&cmd->exp_ddgst + cmd->offset, in nvmet_try_send_ddgst()
736 if (!last_in_batch && cmd->queue->send_list_len) in nvmet_try_send_ddgst()
745 cmd->offset += ret; in nvmet_try_send_ddgst()
752 cmd->queue->snd_cmd = NULL; in nvmet_try_send_ddgst()
753 nvmet_tcp_put_cmd(cmd); in nvmet_try_send_ddgst()
755 nvmet_setup_response_pdu(cmd); in nvmet_try_send_ddgst()
763 struct nvmet_tcp_cmd *cmd = queue->snd_cmd; in nvmet_tcp_try_send_one() local
766 if (!cmd || queue->state == NVMET_TCP_Q_DISCONNECTING) { in nvmet_tcp_try_send_one()
767 cmd = nvmet_tcp_fetch_cmd(queue); in nvmet_tcp_try_send_one()
768 if (unlikely(!cmd)) in nvmet_tcp_try_send_one()
772 if (cmd->state == NVMET_TCP_SEND_DATA_PDU) { in nvmet_tcp_try_send_one()
773 ret = nvmet_try_send_data_pdu(cmd); in nvmet_tcp_try_send_one()
778 if (cmd->state == NVMET_TCP_SEND_DATA) { in nvmet_tcp_try_send_one()
779 ret = nvmet_try_send_data(cmd, last_in_batch); in nvmet_tcp_try_send_one()
784 if (cmd->state == NVMET_TCP_SEND_DDGST) { in nvmet_tcp_try_send_one()
785 ret = nvmet_try_send_ddgst(cmd, last_in_batch); in nvmet_tcp_try_send_one()
790 if (cmd->state == NVMET_TCP_SEND_R2T) { in nvmet_tcp_try_send_one()
791 ret = nvmet_try_send_r2t(cmd, last_in_batch); in nvmet_tcp_try_send_one()
796 if (cmd->state == NVMET_TCP_SEND_RESPONSE) in nvmet_tcp_try_send_one()
797 ret = nvmet_try_send_response(cmd, last_in_batch); in nvmet_tcp_try_send_one()
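
nvmet_tcp_try_send_one() is a resumable state machine: each stage writes as much as the socket accepts, returns early on a short write so the command resumes from the same state on the next pass, and otherwise falls through to the next stage (DATA_PDU -> DATA -> DDGST -> RESPONSE, or R2T for writes awaiting data). A toy model of that fall-through structure, with a fake per-call byte budget in place of sock_sendmsg():

enum demo_state { ST_DATA_PDU, ST_DATA, ST_DDGST, ST_RESPONSE, ST_DONE };

struct demo_send {
        enum demo_state state;
        int left;       /* bytes remaining in the current stage */
};

/* Send up to 'budget' bytes; advance to 'next' only when the stage drains. */
static int demo_push(struct demo_send *c, int budget, enum demo_state next)
{
        int sent = c->left < budget ? c->left : budget;

        c->left -= sent;
        if (c->left)
                return 0;       /* short write: stay in this state */
        c->state = next;
        c->left = 8;            /* arbitrary size for the next stage */
        return 1;
}

static int demo_try_send_one(struct demo_send *c, int budget)
{
        if (c->state == ST_DATA_PDU && !demo_push(c, budget, ST_DATA))
                return 0;
        if (c->state == ST_DATA && !demo_push(c, budget, ST_DDGST))
                return 0;
        if (c->state == ST_DDGST && !demo_push(c, budget, ST_RESPONSE))
                return 0;
        if (c->state == ST_RESPONSE && !demo_push(c, budget, ST_DONE))
                return 0;
        return 1;       /* one full PDU sequence went out */
}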
832 WRITE_ONCE(queue->cmd, NULL); in nvmet_prepare_receive_pdu()
833 /* Ensure rcv_state is visible only after queue->cmd is set */ in nvmet_prepare_receive_pdu()
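
The comment at file line 833 is about publication order: queue->cmd must be cleared before the receive state changes, so a reader that observes the new rcv_state also observes the NULL cmd. A C11 release-store sketch of that pairing (which kernel primitives the driver uses here is an assumption; the snippet only models the ordering the comment demands):

#include <stdatomic.h>
#include <stddef.h>

static _Atomic(void *) queue_cmd;
static atomic_int rcv_state;

static void demo_prepare_receive_pdu(void)
{
        atomic_store_explicit(&queue_cmd, NULL, memory_order_relaxed);
        /* Release: the NULL cmd is visible before the state change is. */
        atomic_store_explicit(&rcv_state, 1 /* RECV_PDU */,
                              memory_order_release);
}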
932 struct nvmet_tcp_cmd *cmd, struct nvmet_req *req) in nvmet_tcp_handle_req_failure() argument
934 size_t data_len = le32_to_cpu(req->cmd->common.dptr.sgl.length); in nvmet_tcp_handle_req_failure()
944 if (!nvme_is_write(cmd->req.cmd) || !data_len || in nvmet_tcp_handle_req_failure()
945 data_len > cmd->req.port->inline_data_size) { in nvmet_tcp_handle_req_failure()
950 ret = nvmet_tcp_map_data(cmd); in nvmet_tcp_handle_req_failure()
958 nvmet_tcp_build_pdu_iovec(cmd); in nvmet_tcp_handle_req_failure()
959 cmd->flags |= NVMET_TCP_F_INIT_FAILED; in nvmet_tcp_handle_req_failure()
965 struct nvmet_tcp_cmd *cmd; in nvmet_tcp_handle_h2c_data_pdu() local
975 cmd = &queue->cmds[data->ttag]; in nvmet_tcp_handle_h2c_data_pdu()
977 cmd = &queue->connect; in nvmet_tcp_handle_h2c_data_pdu()
980 if (le32_to_cpu(data->data_offset) != cmd->rbytes_done) { in nvmet_tcp_handle_h2c_data_pdu()
983 cmd->rbytes_done); in nvmet_tcp_handle_h2c_data_pdu()
994 cmd->pdu_len = le32_to_cpu(data->data_length); in nvmet_tcp_handle_h2c_data_pdu()
995 if (unlikely(cmd->pdu_len != exp_data_len || in nvmet_tcp_handle_h2c_data_pdu()
996 cmd->pdu_len == 0 || in nvmet_tcp_handle_h2c_data_pdu()
997 cmd->pdu_len > NVMET_TCP_MAXH2CDATA)) { in nvmet_tcp_handle_h2c_data_pdu()
998 pr_err("H2CData PDU len %u is invalid\n", cmd->pdu_len); in nvmet_tcp_handle_h2c_data_pdu()
1003 cmd->pdu_recv = 0; in nvmet_tcp_handle_h2c_data_pdu()
1004 nvmet_tcp_build_pdu_iovec(cmd); in nvmet_tcp_handle_h2c_data_pdu()
1005 queue->cmd = cmd; in nvmet_tcp_handle_h2c_data_pdu()
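
Before accepting H2CData, the handler routes the PDU back to its command via queue->cmds[data->ttag] and validates it: the offset must equal what has already been received, and the length must match the expected remainder and stay within the MAXH2CDATA bound. A boolean sketch of those checks, with an illustrative limit value:

#include <stdbool.h>
#include <stdint.h>

#define DEMO_MAXH2CDATA 0x10000u        /* illustrative; see NVMET_TCP_MAXH2CDATA */

struct demo_h2c_cmd {
        uint32_t rbytes_done;
};

static bool demo_h2c_pdu_ok(const struct demo_h2c_cmd *cmd,
                            uint32_t data_offset, uint32_t data_length,
                            uint32_t exp_data_len)
{
        if (data_offset != cmd->rbytes_done)
                return false;   /* out-of-order or replayed data */
        if (data_length == 0 || data_length != exp_data_len ||
            data_length > DEMO_MAXH2CDATA)
                return false;   /* malformed or oversized PDU */
        return true;
}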
1013 struct nvme_tcp_hdr *hdr = &queue->pdu.cmd.hdr; in nvmet_tcp_done_recv_pdu()
1014 struct nvme_command *nvme_cmd = &queue->pdu.cmd.cmd; in nvmet_tcp_done_recv_pdu()
1042 queue->cmd = nvmet_tcp_get_cmd(queue); in nvmet_tcp_done_recv_pdu()
1043 if (unlikely(!queue->cmd)) { in nvmet_tcp_done_recv_pdu()
1052 req = &queue->cmd->req; in nvmet_tcp_done_recv_pdu()
1053 memcpy(req->cmd, nvme_cmd, sizeof(*nvme_cmd)); in nvmet_tcp_done_recv_pdu()
1057 pr_err("failed cmd %p id %d opcode %d, data_len: %d\n", in nvmet_tcp_done_recv_pdu()
1058 req->cmd, req->cmd->common.command_id, in nvmet_tcp_done_recv_pdu()
1059 req->cmd->common.opcode, in nvmet_tcp_done_recv_pdu()
1060 le32_to_cpu(req->cmd->common.dptr.sgl.length)); in nvmet_tcp_done_recv_pdu()
1062 nvmet_tcp_handle_req_failure(queue, queue->cmd, req); in nvmet_tcp_done_recv_pdu()
1066 ret = nvmet_tcp_map_data(queue->cmd); in nvmet_tcp_done_recv_pdu()
1069 if (nvmet_tcp_has_inline_data(queue->cmd)) in nvmet_tcp_done_recv_pdu()
1077 if (nvmet_tcp_need_data_in(queue->cmd)) { in nvmet_tcp_done_recv_pdu()
1078 if (nvmet_tcp_has_inline_data(queue->cmd)) { in nvmet_tcp_done_recv_pdu()
1080 nvmet_tcp_build_pdu_iovec(queue->cmd); in nvmet_tcp_done_recv_pdu()
1084 nvmet_tcp_queue_response(&queue->cmd->req); in nvmet_tcp_done_recv_pdu()
1088 queue->cmd->req.execute(&queue->cmd->req); in nvmet_tcp_done_recv_pdu()
1124 struct nvme_tcp_hdr *hdr = &queue->pdu.cmd.hdr; in nvmet_tcp_try_recv_pdu()
1175 static void nvmet_tcp_prep_recv_ddgst(struct nvmet_tcp_cmd *cmd) in nvmet_tcp_prep_recv_ddgst() argument
1177 struct nvmet_tcp_queue *queue = cmd->queue; in nvmet_tcp_prep_recv_ddgst()
1179 nvmet_tcp_calc_ddgst(queue->rcv_hash, cmd); in nvmet_tcp_prep_recv_ddgst()
1187 struct nvmet_tcp_cmd *cmd = queue->cmd; in nvmet_tcp_try_recv_data() local
1190 while (msg_data_left(&cmd->recv_msg)) { in nvmet_tcp_try_recv_data()
1191 ret = sock_recvmsg(cmd->queue->sock, &cmd->recv_msg, in nvmet_tcp_try_recv_data()
1192 cmd->recv_msg.msg_flags); in nvmet_tcp_try_recv_data()
1196 cmd->pdu_recv += ret; in nvmet_tcp_try_recv_data()
1197 cmd->rbytes_done += ret; in nvmet_tcp_try_recv_data()
1201 nvmet_tcp_prep_recv_ddgst(cmd); in nvmet_tcp_try_recv_data()
1205 if (cmd->rbytes_done == cmd->req.transfer_len) in nvmet_tcp_try_recv_data()
1206 nvmet_tcp_execute_request(cmd); in nvmet_tcp_try_recv_data()
1214 struct nvmet_tcp_cmd *cmd = queue->cmd; in nvmet_tcp_try_recv_ddgst() local
1218 .iov_base = (void *)&cmd->recv_ddgst + queue->offset, in nvmet_tcp_try_recv_ddgst()
1232 if (queue->data_digest && cmd->exp_ddgst != cmd->recv_ddgst) { in nvmet_tcp_try_recv_ddgst()
1233 pr_err("queue %d: cmd %d pdu (%d) data digest error: recv %#x expected %#x\n", in nvmet_tcp_try_recv_ddgst()
1234 queue->idx, cmd->req.cmd->common.command_id, in nvmet_tcp_try_recv_ddgst()
1235 queue->pdu.cmd.hdr.type, le32_to_cpu(cmd->recv_ddgst), in nvmet_tcp_try_recv_ddgst()
1236 le32_to_cpu(cmd->exp_ddgst)); in nvmet_tcp_try_recv_ddgst()
1237 nvmet_req_uninit(&cmd->req); in nvmet_tcp_try_recv_ddgst()
1238 nvmet_tcp_free_cmd_buffers(cmd); in nvmet_tcp_try_recv_ddgst()
1244 if (cmd->rbytes_done == cmd->req.transfer_len) in nvmet_tcp_try_recv_ddgst()
1245 nvmet_tcp_execute_request(cmd); in nvmet_tcp_try_recv_ddgst()
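
NVMe/TCP's data digest is CRC32C: the target computes the expected digest over the payload (the kernel drives an ahash across the scatterlist via nvmet_tcp_calc_ddgst()) and compares it with the 4-byte trailer read off the wire, tearing the request down on mismatch as shown above. A self-contained bitwise CRC32C for reference (real implementations use table-driven or hardware CRC, and the wire value is little-endian):

#include <stdbool.h>
#include <stddef.h>
#include <stdint.h>

static uint32_t crc32c(const uint8_t *p, size_t len)
{
        uint32_t crc = 0xffffffffu;

        while (len--) {
                crc ^= *p++;
                for (int i = 0; i < 8; i++)     /* reflected poly 0x82f63b78 */
                        crc = (crc >> 1) ^ ((crc & 1u) ? 0x82f63b78u : 0);
        }
        return ~crc;
}

static bool demo_ddgst_ok(const uint8_t *payload, size_t len,
                          uint32_t recv_ddgst)
{
        return crc32c(payload, len) == recv_ddgst;
}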
1377 c->req.cmd = &c->cmd_pdu->cmd; in nvmet_tcp_alloc_cmd()
1472 struct nvmet_tcp_cmd *cmd = queue->cmds; in nvmet_tcp_uninit_data_in_cmds() local
1475 for (i = 0; i < queue->nr_cmds; i++, cmd++) { in nvmet_tcp_uninit_data_in_cmds()
1476 if (nvmet_tcp_need_data_in(cmd)) in nvmet_tcp_uninit_data_in_cmds()
1477 nvmet_req_uninit(&cmd->req); in nvmet_tcp_uninit_data_in_cmds()
1488 struct nvmet_tcp_cmd *cmd = queue->cmds; in nvmet_tcp_free_cmd_data_in_buffers() local
1491 for (i = 0; i < queue->nr_cmds; i++, cmd++) in nvmet_tcp_free_cmd_data_in_buffers()
1492 nvmet_tcp_free_cmd_buffers(cmd); in nvmet_tcp_free_cmd_data_in_buffers()
1885 struct nvmet_tcp_cmd *cmd = in nvmet_tcp_disc_port_addr() local
1887 struct nvmet_tcp_queue *queue = cmd->queue; in nvmet_tcp_disc_port_addr()