Lines Matching full:queue

60  * queue before determining it to be idle.  This optional module behavior
94 struct nvmet_tcp_queue *queue; member
197 static inline u16 nvmet_tcp_cmd_tag(struct nvmet_tcp_queue *queue, in nvmet_tcp_cmd_tag() argument
200 if (unlikely(!queue->nr_cmds)) { in nvmet_tcp_cmd_tag()
205 return cmd - queue->cmds; in nvmet_tcp_cmd_tag()
233 nvmet_tcp_get_cmd(struct nvmet_tcp_queue *queue) in nvmet_tcp_get_cmd() argument
237 cmd = list_first_entry_or_null(&queue->free_list, in nvmet_tcp_get_cmd()
253 if (unlikely(cmd == &cmd->queue->connect)) in nvmet_tcp_put_cmd()
256 list_add_tail(&cmd->entry, &cmd->queue->free_list); in nvmet_tcp_put_cmd()
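
The matches from nvmet_tcp_cmd_tag() through nvmet_tcp_put_cmd() outline the per-queue command pool: the transfer tag is simply the command's index in the cmds array (pointer subtraction), a free list hands commands out, and the statically embedded connect command, used before the pool is sized, is never returned to the list. A minimal userspace sketch of that pattern, with illustrative names and a free list simplified to LIFO (the driver uses list_add_tail):

    struct cmd {
        struct cmd *next;       /* free-list link */
    };

    struct queue {
        struct cmd *cmds;       /* array of nr_cmds commands */
        unsigned    nr_cmds;
        struct cmd *free_head;
        struct cmd  connect;    /* bootstrap command, never pooled */
    };

    /* The transfer tag is the command's index in the array; before the
     * pool is sized (nr_cmds == 0) only the connect command exists. */
    static unsigned cmd_tag(struct queue *q, struct cmd *c)
    {
        if (q->nr_cmds == 0)
            return 0;
        return (unsigned)(c - q->cmds);
    }

    static struct cmd *get_cmd(struct queue *q)
    {
        struct cmd *c = q->free_head;

        if (c)
            q->free_head = c->next;
        return c;               /* NULL: pool exhausted, back-pressure */
    }

    static void put_cmd(struct queue *q, struct cmd *c)
    {
        if (c == &q->connect)   /* matches the check at line 253 */
            return;
        c->next = q->free_head;
        q->free_head = c;
    }
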
259 static inline int queue_cpu(struct nvmet_tcp_queue *queue) in queue_cpu() argument
261 return queue->sock->sk->sk_incoming_cpu; in queue_cpu()
264 static inline u8 nvmet_tcp_hdgst_len(struct nvmet_tcp_queue *queue) in nvmet_tcp_hdgst_len() argument
266 return queue->hdr_digest ? NVME_TCP_DIGEST_LENGTH : 0; in nvmet_tcp_hdgst_len()
269 static inline u8 nvmet_tcp_ddgst_len(struct nvmet_tcp_queue *queue) in nvmet_tcp_ddgst_len() argument
271 return queue->data_digest ? NVME_TCP_DIGEST_LENGTH : 0; in nvmet_tcp_ddgst_len()
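
nvmet_tcp_hdgst_len() and nvmet_tcp_ddgst_len() return either 0 or NVME_TCP_DIGEST_LENGTH (4 bytes) depending on what was negotiated at connect time, and every PDU length computation in the file folds them in. A sketch of that length math, under assumed field names:

    #include <stdint.h>
    #include <stdbool.h>

    #define DIGEST_LEN 4   /* NVMe/TCP digests are 4-byte CRC32C values */

    struct queue_cfg {
        bool hdr_digest;    /* negotiated in ICReq/ICResp */
        bool data_digest;
    };

    static uint8_t hdgst_len(const struct queue_cfg *q)
    {
        return q->hdr_digest ? DIGEST_LEN : 0;
    }

    static uint8_t ddgst_len(const struct queue_cfg *q)
    {
        return q->data_digest ? DIGEST_LEN : 0;
    }

    /* Total on-wire PDU length: header, optional header digest,
     * payload, optional payload digest (only when there is data). */
    static uint32_t pdu_wire_len(const struct queue_cfg *q,
                                 uint8_t hlen, uint32_t data_len)
    {
        return hlen + hdgst_len(q) + data_len +
               (data_len ? ddgst_len(q) : 0);
    }
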
284 static int nvmet_tcp_verify_hdgst(struct nvmet_tcp_queue *queue, in nvmet_tcp_verify_hdgst() argument
292 pr_err("queue %d: header digest enabled but no header digest\n", in nvmet_tcp_verify_hdgst()
293 queue->idx); in nvmet_tcp_verify_hdgst()
298 nvmet_tcp_hdgst(queue->rcv_hash, pdu, len); in nvmet_tcp_verify_hdgst()
301 pr_err("queue %d: header digest error: recv %#x expected %#x\n", in nvmet_tcp_verify_hdgst()
302 queue->idx, le32_to_cpu(recv_digest), in nvmet_tcp_verify_hdgst()
310 static int nvmet_tcp_check_ddgst(struct nvmet_tcp_queue *queue, void *pdu) in nvmet_tcp_check_ddgst() argument
313 u8 digest_len = nvmet_tcp_hdgst_len(queue); in nvmet_tcp_check_ddgst()
320 pr_err("queue %d: data digest flag is cleared\n", queue->idx); in nvmet_tcp_check_ddgst()
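
NVMe/TCP header and data digests are CRC32C values appended to the bytes they protect; the driver computes them through the kernel crypto API (queue->rcv_hash / queue->snd_hash), so the bitwise reference implementation below is only a model of what nvmet_tcp_verify_hdgst() checks:

    #include <stdint.h>
    #include <stddef.h>

    /* Reference bitwise CRC32C (Castagnoli, reflected poly 0x82F63B78). */
    static uint32_t crc32c(const void *buf, size_t len)
    {
        const uint8_t *p = buf;
        uint32_t crc = 0xFFFFFFFFu;

        while (len--) {
            crc ^= *p++;
            for (int i = 0; i < 8; i++)
                crc = (crc >> 1) ^ (0x82F63B78u & -(crc & 1u));
        }
        return ~crc;
    }

    /* A header is valid when the CRC over its hlen bytes matches the
     * little-endian digest stored immediately after them; the caller
     * must guarantee the buffer holds hlen + 4 bytes. */
    static int verify_hdgst(const uint8_t *pdu, size_t hlen)
    {
        uint32_t recv = (uint32_t)pdu[hlen]           |
                        (uint32_t)pdu[hlen + 1] <<  8 |
                        (uint32_t)pdu[hlen + 2] << 16 |
                        (uint32_t)pdu[hlen + 3] << 24;

        return crc32c(pdu, hlen) == recv ? 0 : -1; /* -EPROTO in the driver */
    }
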
366 static void nvmet_tcp_fatal_error(struct nvmet_tcp_queue *queue) in nvmet_tcp_fatal_error() argument
368 queue->rcv_state = NVMET_TCP_RECV_ERR; in nvmet_tcp_fatal_error()
369 if (queue->nvme_sq.ctrl) in nvmet_tcp_fatal_error()
370 nvmet_ctrl_fatal_error(queue->nvme_sq.ctrl); in nvmet_tcp_fatal_error()
372 kernel_sock_shutdown(queue->sock, SHUT_RDWR); in nvmet_tcp_fatal_error()
375 static void nvmet_tcp_socket_error(struct nvmet_tcp_queue *queue, int status) in nvmet_tcp_socket_error() argument
377 queue->rcv_state = NVMET_TCP_RECV_ERR; in nvmet_tcp_socket_error()
379 kernel_sock_shutdown(queue->sock, SHUT_RDWR); in nvmet_tcp_socket_error()
381 nvmet_tcp_fatal_error(queue); in nvmet_tcp_socket_error()
432 struct nvmet_tcp_queue *queue = cmd->queue; in nvmet_setup_c2h_data_pdu() local
433 u8 hdgst = nvmet_tcp_hdgst_len(cmd->queue); in nvmet_setup_c2h_data_pdu()
434 u8 ddgst = nvmet_tcp_ddgst_len(cmd->queue); in nvmet_setup_c2h_data_pdu()
440 pdu->hdr.flags = NVME_TCP_F_DATA_LAST | (queue->nvme_sq.sqhd_disabled ? in nvmet_setup_c2h_data_pdu()
451 if (queue->data_digest) { in nvmet_setup_c2h_data_pdu()
453 nvmet_tcp_calc_ddgst(queue->snd_hash, cmd); in nvmet_setup_c2h_data_pdu()
456 if (cmd->queue->hdr_digest) { in nvmet_setup_c2h_data_pdu()
458 nvmet_tcp_hdgst(queue->snd_hash, pdu, sizeof(*pdu)); in nvmet_setup_c2h_data_pdu()
465 struct nvmet_tcp_queue *queue = cmd->queue; in nvmet_setup_r2t_pdu() local
466 u8 hdgst = nvmet_tcp_hdgst_len(cmd->queue); in nvmet_setup_r2t_pdu()
478 pdu->ttag = nvmet_tcp_cmd_tag(cmd->queue, cmd); in nvmet_setup_r2t_pdu()
481 if (cmd->queue->hdr_digest) { in nvmet_setup_r2t_pdu()
483 nvmet_tcp_hdgst(queue->snd_hash, pdu, sizeof(*pdu)); in nvmet_setup_r2t_pdu()
490 struct nvmet_tcp_queue *queue = cmd->queue; in nvmet_setup_response_pdu() local
491 u8 hdgst = nvmet_tcp_hdgst_len(cmd->queue); in nvmet_setup_response_pdu()
501 if (cmd->queue->hdr_digest) { in nvmet_setup_response_pdu()
503 nvmet_tcp_hdgst(queue->snd_hash, pdu, sizeof(*pdu)); in nvmet_setup_response_pdu()
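
The three setup helpers above each fill a preallocated PDU header and, when header digest is on, stamp a digest over it. A sketch of the C2H data header arithmetic, with illustrative flag values (the real constants live in include/linux/nvme-tcp.h):

    #include <stdint.h>
    #include <string.h>
    #include <stdbool.h>

    /* Simplified C2H DATA header, modeled on struct nvme_tcp_data_pdu. */
    struct c2h_hdr {
        uint8_t  type;      /* c2h_data opcode */
        uint8_t  flags;
        uint8_t  hlen;      /* header length, before digests */
        uint8_t  pdo;       /* payload data offset from start of PDU */
        uint32_t plen;      /* total PDU length on the wire */
    };

    enum { F_DATA_LAST = 1 << 0, F_SUCCESS = 1 << 1 }; /* illustrative bits */

    static void setup_c2h(struct c2h_hdr *h, uint32_t data_len,
                          uint8_t hdgst, uint8_t ddgst, bool sqhd_disabled)
    {
        memset(h, 0, sizeof(*h));
        h->flags = F_DATA_LAST;
        /* With SQ head reporting disabled, the SUCCESS flag lets the
         * host skip waiting for a separate response capsule. */
        if (sqhd_disabled)
            h->flags |= F_SUCCESS;
        h->hlen = sizeof(*h);
        h->pdo  = h->hlen + hdgst;      /* data begins after header+digest */
        h->plen = h->pdo + data_len + ddgst;
    }
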
507 static void nvmet_tcp_process_resp_list(struct nvmet_tcp_queue *queue) in nvmet_tcp_process_resp_list() argument
512 for (node = llist_del_all(&queue->resp_list); node; node = node->next) { in nvmet_tcp_process_resp_list()
514 list_add(&cmd->entry, &queue->resp_send_list); in nvmet_tcp_process_resp_list()
515 queue->send_list_len++; in nvmet_tcp_process_resp_list()
519 static struct nvmet_tcp_cmd *nvmet_tcp_fetch_cmd(struct nvmet_tcp_queue *queue) in nvmet_tcp_fetch_cmd() argument
521 queue->snd_cmd = list_first_entry_or_null(&queue->resp_send_list, in nvmet_tcp_fetch_cmd()
523 if (!queue->snd_cmd) { in nvmet_tcp_fetch_cmd()
524 nvmet_tcp_process_resp_list(queue); in nvmet_tcp_fetch_cmd()
525 queue->snd_cmd = in nvmet_tcp_fetch_cmd()
526 list_first_entry_or_null(&queue->resp_send_list, in nvmet_tcp_fetch_cmd()
528 if (unlikely(!queue->snd_cmd)) in nvmet_tcp_fetch_cmd()
532 list_del_init(&queue->snd_cmd->entry); in nvmet_tcp_fetch_cmd()
533 queue->send_list_len--; in nvmet_tcp_fetch_cmd()
535 if (nvmet_tcp_need_data_out(queue->snd_cmd)) in nvmet_tcp_fetch_cmd()
536 nvmet_setup_c2h_data_pdu(queue->snd_cmd); in nvmet_tcp_fetch_cmd()
537 else if (nvmet_tcp_need_data_in(queue->snd_cmd)) in nvmet_tcp_fetch_cmd()
538 nvmet_setup_r2t_pdu(queue->snd_cmd); in nvmet_tcp_fetch_cmd()
540 nvmet_setup_response_pdu(queue->snd_cmd); in nvmet_tcp_fetch_cmd()
542 return queue->snd_cmd; in nvmet_tcp_fetch_cmd()
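
nvmet_tcp_process_resp_list() and nvmet_tcp_fetch_cmd() implement a two-stage response queue: completions are pushed onto a lock-free llist from any context, and the single io_work consumer drains the whole stack at once, prepending each node so the newest-first stack comes out oldest-first. A self-contained C11 model of that pattern:

    #include <stdatomic.h>
    #include <stddef.h>

    struct node { struct node *next; };

    /* Producers push completed commands with a lock-free LIFO
     * (the kernel's llist_add). */
    static _Atomic(struct node *) resp_list;

    static void resp_push(struct node *n)     /* many producers */
    {
        struct node *head = atomic_load(&resp_list);
        do {
            n->next = head;
        } while (!atomic_compare_exchange_weak(&resp_list, &head, n));
    }

    /* Consumer-private ordered send list; no locking needed since only
     * io_work touches it. */
    static struct node *send_list;

    static void drain_to_send_list(void)      /* single consumer */
    {
        struct node *n = atomic_exchange(&resp_list, NULL);

        while (n) {
            struct node *next = n->next;
            n->next   = send_list;  /* prepend: reverses LIFO into FIFO */
            send_list = n;
            n = next;
        }
    }
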
549 struct nvmet_tcp_queue *queue = cmd->queue; in nvmet_tcp_queue_response() local
556 queue_state = smp_load_acquire(&queue->rcv_state); in nvmet_tcp_queue_response()
557 queue_cmd = READ_ONCE(queue->cmd); in nvmet_tcp_queue_response()
574 llist_add(&cmd->lentry, &queue->resp_list); in nvmet_tcp_queue_response()
575 queue_work_on(queue_cpu(queue), nvmet_tcp_wq, &cmd->queue->io_work); in nvmet_tcp_queue_response()
592 u8 hdgst = nvmet_tcp_hdgst_len(cmd->queue); in nvmet_try_send_data_pdu()
598 ret = sock_sendmsg(cmd->queue->sock, &msg); in nvmet_try_send_data_pdu()
615 struct nvmet_tcp_queue *queue = cmd->queue; in nvmet_try_send_data() local
626 if ((!last_in_batch && cmd->queue->send_list_len) || in nvmet_try_send_data()
628 queue->data_digest || !queue->nvme_sq.sqhd_disabled) in nvmet_try_send_data()
633 ret = sock_sendmsg(cmd->queue->sock, &msg); in nvmet_try_send_data()
647 if (queue->data_digest) { in nvmet_try_send_data()
651 if (queue->nvme_sq.sqhd_disabled) { in nvmet_try_send_data()
652 cmd->queue->snd_cmd = NULL; in nvmet_try_send_data()
659 if (queue->nvme_sq.sqhd_disabled) in nvmet_try_send_data()
671 u8 hdgst = nvmet_tcp_hdgst_len(cmd->queue); in nvmet_try_send_response()
675 if (!last_in_batch && cmd->queue->send_list_len) in nvmet_try_send_response()
682 ret = sock_sendmsg(cmd->queue->sock, &msg); in nvmet_try_send_response()
692 cmd->queue->snd_cmd = NULL; in nvmet_try_send_response()
701 u8 hdgst = nvmet_tcp_hdgst_len(cmd->queue); in nvmet_try_send_r2t()
705 if (!last_in_batch && cmd->queue->send_list_len) in nvmet_try_send_r2t()
712 ret = sock_sendmsg(cmd->queue->sock, &msg); in nvmet_try_send_r2t()
721 cmd->queue->snd_cmd = NULL; in nvmet_try_send_r2t()
727 struct nvmet_tcp_queue *queue = cmd->queue; in nvmet_try_send_ddgst() local
736 if (!last_in_batch && cmd->queue->send_list_len) in nvmet_try_send_ddgst()
741 ret = kernel_sendmsg(queue->sock, &msg, &iov, 1, iov.iov_len); in nvmet_try_send_ddgst()
751 if (queue->nvme_sq.sqhd_disabled) { in nvmet_try_send_ddgst()
752 cmd->queue->snd_cmd = NULL; in nvmet_try_send_ddgst()
760 static int nvmet_tcp_try_send_one(struct nvmet_tcp_queue *queue, in nvmet_tcp_try_send_one() argument
763 struct nvmet_tcp_cmd *cmd = queue->snd_cmd; in nvmet_tcp_try_send_one()
766 if (!cmd || queue->state == NVMET_TCP_Q_DISCONNECTING) { in nvmet_tcp_try_send_one()
767 cmd = nvmet_tcp_fetch_cmd(queue); in nvmet_tcp_try_send_one()
809 static int nvmet_tcp_try_send(struct nvmet_tcp_queue *queue, in nvmet_tcp_try_send() argument
815 ret = nvmet_tcp_try_send_one(queue, i == budget - 1); in nvmet_tcp_try_send()
817 nvmet_tcp_socket_error(queue, ret); in nvmet_tcp_try_send()
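
nvmet_tcp_try_send() caps the work done per io_work pass with a budget, and passes last_in_batch down so the final send of a batch can stop requesting MSG_MORE-style coalescing (see the send_list_len checks at lines 626, 675, 705, 736). A sketch of the loop's control flow, with the helpers left as assumed externs:

    struct queue;   /* opaque here */
    extern int  try_send_one(struct queue *q, int last_in_batch);
    extern void socket_error(struct queue *q, int status);

    /* Process at most `budget` responses per pass so one busy queue
     * cannot monopolize the worker; the caller re-queues the work item
     * while ops indicate the queue is still active. */
    static int try_send(struct queue *q, int budget, int *ops)
    {
        int i, ret = 0;

        for (i = 0; i < budget; i++) {
            ret = try_send_one(q, i == budget - 1);
            if (ret <= 0) {
                if (ret < 0)
                    socket_error(q, ret);  /* poison state + shutdown */
                break;                     /* 0: nothing left to send */
            }
            (*ops)++;
        }
        return ret;
    }
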
828 static void nvmet_prepare_receive_pdu(struct nvmet_tcp_queue *queue) in nvmet_prepare_receive_pdu() argument
830 queue->offset = 0; in nvmet_prepare_receive_pdu()
831 queue->left = sizeof(struct nvme_tcp_hdr); in nvmet_prepare_receive_pdu()
832 WRITE_ONCE(queue->cmd, NULL); in nvmet_prepare_receive_pdu()
833 /* Ensure rcv_state is visible only after queue->cmd is set */ in nvmet_prepare_receive_pdu()
834 smp_store_release(&queue->rcv_state, NVMET_TCP_RECV_PDU); in nvmet_prepare_receive_pdu()
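
The comment at line 833 names the ordering contract: nvmet_prepare_receive_pdu() publishes queue->cmd before releasing rcv_state, and nvmet_tcp_queue_response() (the smp_load_acquire at line 556) observes them in the same order. The same pairing in portable C11 atomics:

    #include <stdatomic.h>
    #include <stddef.h>

    enum rcv_state { RECV_PDU, RECV_DATA, RECV_DDGST, RECV_ERR };

    struct queue {
        void *cmd;                        /* written before the release */
        _Atomic enum rcv_state rcv_state;
    };

    static void prepare_receive_pdu(struct queue *q)
    {
        q->cmd = NULL;
        /* release: the write to q->cmd is visible before the new state */
        atomic_store_explicit(&q->rcv_state, RECV_PDU,
                              memory_order_release);
    }

    static void *observe(struct queue *q)
    {
        /* acquire: if we see RECV_PDU, we also see q->cmd == NULL */
        if (atomic_load_explicit(&q->rcv_state,
                                 memory_order_acquire) == RECV_PDU)
            return q->cmd;
        return NULL;
    }
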
837 static void nvmet_tcp_free_crypto(struct nvmet_tcp_queue *queue) in nvmet_tcp_free_crypto() argument
839 struct crypto_ahash *tfm = crypto_ahash_reqtfm(queue->rcv_hash); in nvmet_tcp_free_crypto()
841 ahash_request_free(queue->rcv_hash); in nvmet_tcp_free_crypto()
842 ahash_request_free(queue->snd_hash); in nvmet_tcp_free_crypto()
846 static int nvmet_tcp_alloc_crypto(struct nvmet_tcp_queue *queue) in nvmet_tcp_alloc_crypto() argument
854 queue->snd_hash = ahash_request_alloc(tfm, GFP_KERNEL); in nvmet_tcp_alloc_crypto()
855 if (!queue->snd_hash) in nvmet_tcp_alloc_crypto()
857 ahash_request_set_callback(queue->snd_hash, 0, NULL, NULL); in nvmet_tcp_alloc_crypto()
859 queue->rcv_hash = ahash_request_alloc(tfm, GFP_KERNEL); in nvmet_tcp_alloc_crypto()
860 if (!queue->rcv_hash) in nvmet_tcp_alloc_crypto()
862 ahash_request_set_callback(queue->rcv_hash, 0, NULL, NULL); in nvmet_tcp_alloc_crypto()
866 ahash_request_free(queue->snd_hash); in nvmet_tcp_alloc_crypto()
873 static int nvmet_tcp_handle_icreq(struct nvmet_tcp_queue *queue) in nvmet_tcp_handle_icreq() argument
875 struct nvme_tcp_icreq_pdu *icreq = &queue->pdu.icreq; in nvmet_tcp_handle_icreq()
876 struct nvme_tcp_icresp_pdu *icresp = &queue->pdu.icresp; in nvmet_tcp_handle_icreq()
884 nvmet_tcp_fatal_error(queue); in nvmet_tcp_handle_icreq()
889 pr_err("queue %d: bad pfv %d\n", queue->idx, icreq->pfv); in nvmet_tcp_handle_icreq()
894 pr_err("queue %d: unsupported hpda %d\n", queue->idx, in nvmet_tcp_handle_icreq()
899 queue->hdr_digest = !!(icreq->digest & NVME_TCP_HDR_DIGEST_ENABLE); in nvmet_tcp_handle_icreq()
900 queue->data_digest = !!(icreq->digest & NVME_TCP_DATA_DIGEST_ENABLE); in nvmet_tcp_handle_icreq()
901 if (queue->hdr_digest || queue->data_digest) { in nvmet_tcp_handle_icreq()
902 ret = nvmet_tcp_alloc_crypto(queue); in nvmet_tcp_handle_icreq()
915 if (queue->hdr_digest) in nvmet_tcp_handle_icreq()
917 if (queue->data_digest) in nvmet_tcp_handle_icreq()
922 ret = kernel_sendmsg(queue->sock, &msg, &iov, 1, iov.iov_len); in nvmet_tcp_handle_icreq()
924 return ret; /* queue removal will clean up */ in nvmet_tcp_handle_icreq()
926 queue->state = NVMET_TCP_Q_LIVE; in nvmet_tcp_handle_icreq()
927 nvmet_prepare_receive_pdu(queue); in nvmet_tcp_handle_icreq()
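
nvmet_tcp_handle_icreq() validates the host's connection request, PFV must be 1.0 and HPDA must be 0, then latches the digest bits before answering with an ICResp and marking the queue LIVE. A trimmed sketch of those checks, with illustrative constants:

    #include <stdint.h>
    #include <stdbool.h>

    /* Cut-down ICReq fields relevant to the checks above. */
    struct icreq {
        uint16_t pfv;       /* PDU format version */
        uint8_t  hpda;      /* host PDU data alignment */
        uint8_t  digest;    /* bit 0: header digest, bit 1: data digest */
    };

    enum { HDR_DIGEST = 1 << 0, DATA_DIGEST = 1 << 1 };

    struct negotiated { bool hdr_digest, data_digest; };

    /* Returns 0 on success, negative on a fatal protocol error. */
    static int handle_icreq(const struct icreq *req, struct negotiated *out)
    {
        if (req->pfv != 0)
            return -1;      /* only format version 1.0 is supported */
        if (req->hpda != 0)
            return -1;      /* only zero data alignment is supported */
        out->hdr_digest  = req->digest & HDR_DIGEST;
        out->data_digest = req->digest & DATA_DIGEST;
        return 0;           /* caller sends ICResp and goes LIVE */
    }
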
931 static void nvmet_tcp_handle_req_failure(struct nvmet_tcp_queue *queue, in nvmet_tcp_handle_req_failure() argument
946 nvmet_prepare_receive_pdu(queue); in nvmet_tcp_handle_req_failure()
952 pr_err("queue %d: failed to map data\n", queue->idx); in nvmet_tcp_handle_req_failure()
953 nvmet_tcp_fatal_error(queue); in nvmet_tcp_handle_req_failure()
957 queue->rcv_state = NVMET_TCP_RECV_DATA; in nvmet_tcp_handle_req_failure()
962 static int nvmet_tcp_handle_h2c_data_pdu(struct nvmet_tcp_queue *queue) in nvmet_tcp_handle_h2c_data_pdu() argument
964 struct nvme_tcp_data_pdu *data = &queue->pdu.data; in nvmet_tcp_handle_h2c_data_pdu()
968 if (likely(queue->nr_cmds)) { in nvmet_tcp_handle_h2c_data_pdu()
969 if (unlikely(data->ttag >= queue->nr_cmds)) { in nvmet_tcp_handle_h2c_data_pdu()
970 pr_err("queue %d: received out-of-bounds ttag %u, nr_cmds %u\n", in nvmet_tcp_handle_h2c_data_pdu()
971 queue->idx, data->ttag, queue->nr_cmds); in nvmet_tcp_handle_h2c_data_pdu()
972 nvmet_tcp_fatal_error(queue); in nvmet_tcp_handle_h2c_data_pdu()
975 cmd = &queue->cmds[data->ttag]; in nvmet_tcp_handle_h2c_data_pdu()
977 cmd = &queue->connect; in nvmet_tcp_handle_h2c_data_pdu()
985 nvmet_tcp_fatal_error(queue); in nvmet_tcp_handle_h2c_data_pdu()
990 nvmet_tcp_hdgst_len(queue) - in nvmet_tcp_handle_h2c_data_pdu()
991 nvmet_tcp_ddgst_len(queue) - in nvmet_tcp_handle_h2c_data_pdu()
1000 nvmet_tcp_fatal_error(queue); in nvmet_tcp_handle_h2c_data_pdu()
1005 queue->cmd = cmd; in nvmet_tcp_handle_h2c_data_pdu()
1006 queue->rcv_state = NVMET_TCP_RECV_DATA; in nvmet_tcp_handle_h2c_data_pdu()
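
The ttag in an H2CData PDU is remote-supplied, so nvmet_tcp_handle_h2c_data_pdu() bounds-checks it against nr_cmds before indexing the command array, falling back to the embedded connect command while the pool is still unsized. The essential check, sketched:

    #include <stdint.h>

    struct cmd { int unused; };

    struct queue {
        struct cmd *cmds;
        uint32_t    nr_cmds;
        struct cmd  connect;    /* used before the pool exists */
    };

    /* Never index cmds[] with an unchecked remote-supplied tag. */
    static struct cmd *lookup_by_ttag(struct queue *q, uint32_t ttag)
    {
        if (q->nr_cmds) {
            if (ttag >= q->nr_cmds)
                return NULL;    /* out of bounds: tear the queue down */
            return &q->cmds[ttag];
        }
        return &q->connect;
    }
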
1011 static int nvmet_tcp_done_recv_pdu(struct nvmet_tcp_queue *queue) in nvmet_tcp_done_recv_pdu() argument
1013 struct nvme_tcp_hdr *hdr = &queue->pdu.cmd.hdr; in nvmet_tcp_done_recv_pdu()
1014 struct nvme_command *nvme_cmd = &queue->pdu.cmd.cmd; in nvmet_tcp_done_recv_pdu()
1018 if (unlikely(queue->state == NVMET_TCP_Q_CONNECTING)) { in nvmet_tcp_done_recv_pdu()
1022 nvmet_tcp_fatal_error(queue); in nvmet_tcp_done_recv_pdu()
1025 return nvmet_tcp_handle_icreq(queue); in nvmet_tcp_done_recv_pdu()
1029 pr_err("queue %d: received icreq pdu in state %d\n", in nvmet_tcp_done_recv_pdu()
1030 queue->idx, queue->state); in nvmet_tcp_done_recv_pdu()
1031 nvmet_tcp_fatal_error(queue); in nvmet_tcp_done_recv_pdu()
1036 ret = nvmet_tcp_handle_h2c_data_pdu(queue); in nvmet_tcp_done_recv_pdu()
1042 queue->cmd = nvmet_tcp_get_cmd(queue); in nvmet_tcp_done_recv_pdu()
1043 if (unlikely(!queue->cmd)) { in nvmet_tcp_done_recv_pdu()
1045 pr_err("queue %d: out of commands (%d) send_list_len: %d, opcode: %d", in nvmet_tcp_done_recv_pdu()
1046 queue->idx, queue->nr_cmds, queue->send_list_len, in nvmet_tcp_done_recv_pdu()
1048 nvmet_tcp_fatal_error(queue); in nvmet_tcp_done_recv_pdu()
1052 req = &queue->cmd->req; in nvmet_tcp_done_recv_pdu()
1055 if (unlikely(!nvmet_req_init(req, &queue->nvme_cq, in nvmet_tcp_done_recv_pdu()
1056 &queue->nvme_sq, &nvmet_tcp_ops))) { in nvmet_tcp_done_recv_pdu()
1062 nvmet_tcp_handle_req_failure(queue, queue->cmd, req); in nvmet_tcp_done_recv_pdu()
1066 ret = nvmet_tcp_map_data(queue->cmd); in nvmet_tcp_done_recv_pdu()
1068 pr_err("queue %d: failed to map data\n", queue->idx); in nvmet_tcp_done_recv_pdu()
1069 if (nvmet_tcp_has_inline_data(queue->cmd)) in nvmet_tcp_done_recv_pdu()
1070 nvmet_tcp_fatal_error(queue); in nvmet_tcp_done_recv_pdu()
1077 if (nvmet_tcp_need_data_in(queue->cmd)) { in nvmet_tcp_done_recv_pdu()
1078 if (nvmet_tcp_has_inline_data(queue->cmd)) { in nvmet_tcp_done_recv_pdu()
1079 queue->rcv_state = NVMET_TCP_RECV_DATA; in nvmet_tcp_done_recv_pdu()
1080 nvmet_tcp_build_pdu_iovec(queue->cmd); in nvmet_tcp_done_recv_pdu()
1084 nvmet_tcp_queue_response(&queue->cmd->req); in nvmet_tcp_done_recv_pdu()
1088 queue->cmd->req.execute(&queue->cmd->req); in nvmet_tcp_done_recv_pdu()
1090 nvmet_prepare_receive_pdu(queue); in nvmet_tcp_done_recv_pdu()
1122 static int nvmet_tcp_try_recv_pdu(struct nvmet_tcp_queue *queue) in nvmet_tcp_try_recv_pdu() argument
1124 struct nvme_tcp_hdr *hdr = &queue->pdu.cmd.hdr; in nvmet_tcp_try_recv_pdu()
1130 iov.iov_base = (void *)&queue->pdu + queue->offset; in nvmet_tcp_try_recv_pdu()
1131 iov.iov_len = queue->left; in nvmet_tcp_try_recv_pdu()
1132 len = kernel_recvmsg(queue->sock, &msg, &iov, 1, in nvmet_tcp_try_recv_pdu()
1137 queue->offset += len; in nvmet_tcp_try_recv_pdu()
1138 queue->left -= len; in nvmet_tcp_try_recv_pdu()
1139 if (queue->left) in nvmet_tcp_try_recv_pdu()
1142 if (queue->offset == sizeof(struct nvme_tcp_hdr)) { in nvmet_tcp_try_recv_pdu()
1143 u8 hdgst = nvmet_tcp_hdgst_len(queue); in nvmet_tcp_try_recv_pdu()
1147 nvmet_tcp_fatal_error(queue); in nvmet_tcp_try_recv_pdu()
1156 queue->left = hdr->hlen - queue->offset + hdgst; in nvmet_tcp_try_recv_pdu()
1160 if (queue->hdr_digest && in nvmet_tcp_try_recv_pdu()
1161 nvmet_tcp_verify_hdgst(queue, &queue->pdu, hdr->hlen)) { in nvmet_tcp_try_recv_pdu()
1162 nvmet_tcp_fatal_error(queue); /* fatal */ in nvmet_tcp_try_recv_pdu()
1166 if (queue->data_digest && in nvmet_tcp_try_recv_pdu()
1167 nvmet_tcp_check_ddgst(queue, &queue->pdu)) { in nvmet_tcp_try_recv_pdu()
1168 nvmet_tcp_fatal_error(queue); /* fatal */ in nvmet_tcp_try_recv_pdu()
1172 return nvmet_tcp_done_recv_pdu(queue); in nvmet_tcp_try_recv_pdu()
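
Partial reads are the norm on TCP, so nvmet_tcp_try_recv_pdu() keeps an (offset, left) pair, first collecting sizeof(struct nvme_tcp_hdr), then extending left to cover hlen plus the optional header digest. A userspace model of that resumable accumulation:

    #include <stddef.h>
    #include <string.h>

    /* Resumable accumulation across short reads; assumes the header
     * fits in the buffer. */
    struct recv_ctx {
        unsigned char buf[128];
        size_t offset;          /* bytes gathered so far */
        size_t left;            /* bytes still missing */
    };

    /* Feed whatever the socket returned; 1 = complete, 0 = need more. */
    static int feed(struct recv_ctx *c, const void *data, size_t len)
    {
        size_t n = len < c->left ? len : c->left;

        memcpy(c->buf + c->offset, data, n);
        c->offset += n;
        c->left   -= n;
        return c->left == 0;
    }

    /* Once the fixed part is in, widen the read to the full header plus
     * optional digest, mirroring left = hlen - offset + hdgst. */
    static void extend_to_full_header(struct recv_ctx *c,
                                      size_t hlen, size_t hdgst)
    {
        c->left = hlen - c->offset + hdgst;
    }
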
1177 struct nvmet_tcp_queue *queue = cmd->queue; in nvmet_tcp_prep_recv_ddgst() local
1179 nvmet_tcp_calc_ddgst(queue->rcv_hash, cmd); in nvmet_tcp_prep_recv_ddgst()
1180 queue->offset = 0; in nvmet_tcp_prep_recv_ddgst()
1181 queue->left = NVME_TCP_DIGEST_LENGTH; in nvmet_tcp_prep_recv_ddgst()
1182 queue->rcv_state = NVMET_TCP_RECV_DDGST; in nvmet_tcp_prep_recv_ddgst()
1185 static int nvmet_tcp_try_recv_data(struct nvmet_tcp_queue *queue) in nvmet_tcp_try_recv_data() argument
1187 struct nvmet_tcp_cmd *cmd = queue->cmd; in nvmet_tcp_try_recv_data()
1191 ret = sock_recvmsg(cmd->queue->sock, &cmd->recv_msg, in nvmet_tcp_try_recv_data()
1200 if (queue->data_digest) { in nvmet_tcp_try_recv_data()
1208 nvmet_prepare_receive_pdu(queue); in nvmet_tcp_try_recv_data()
1212 static int nvmet_tcp_try_recv_ddgst(struct nvmet_tcp_queue *queue) in nvmet_tcp_try_recv_ddgst() argument
1214 struct nvmet_tcp_cmd *cmd = queue->cmd; in nvmet_tcp_try_recv_ddgst()
1218 .iov_base = (void *)&cmd->recv_ddgst + queue->offset, in nvmet_tcp_try_recv_ddgst()
1219 .iov_len = queue->left in nvmet_tcp_try_recv_ddgst()
1222 ret = kernel_recvmsg(queue->sock, &msg, &iov, 1, in nvmet_tcp_try_recv_ddgst()
1227 queue->offset += ret; in nvmet_tcp_try_recv_ddgst()
1228 queue->left -= ret; in nvmet_tcp_try_recv_ddgst()
1229 if (queue->left) in nvmet_tcp_try_recv_ddgst()
1232 if (queue->data_digest && cmd->exp_ddgst != cmd->recv_ddgst) { in nvmet_tcp_try_recv_ddgst()
1233 pr_err("queue %d: cmd %d pdu (%d) data digest error: recv %#x expected %#x\n", in nvmet_tcp_try_recv_ddgst()
1234 queue->idx, cmd->req.cmd->common.command_id, in nvmet_tcp_try_recv_ddgst()
1235 queue->pdu.cmd.hdr.type, le32_to_cpu(cmd->recv_ddgst), in nvmet_tcp_try_recv_ddgst()
1239 nvmet_tcp_fatal_error(queue); in nvmet_tcp_try_recv_ddgst()
1249 nvmet_prepare_receive_pdu(queue); in nvmet_tcp_try_recv_ddgst()
1253 static int nvmet_tcp_try_recv_one(struct nvmet_tcp_queue *queue) in nvmet_tcp_try_recv_one() argument
1257 if (unlikely(queue->rcv_state == NVMET_TCP_RECV_ERR)) in nvmet_tcp_try_recv_one()
1260 if (queue->rcv_state == NVMET_TCP_RECV_PDU) { in nvmet_tcp_try_recv_one()
1261 result = nvmet_tcp_try_recv_pdu(queue); in nvmet_tcp_try_recv_one()
1266 if (queue->rcv_state == NVMET_TCP_RECV_DATA) { in nvmet_tcp_try_recv_one()
1267 result = nvmet_tcp_try_recv_data(queue); in nvmet_tcp_try_recv_one()
1272 if (queue->rcv_state == NVMET_TCP_RECV_DDGST) { in nvmet_tcp_try_recv_one()
1273 result = nvmet_tcp_try_recv_ddgst(queue); in nvmet_tcp_try_recv_one()
1287 static int nvmet_tcp_try_recv(struct nvmet_tcp_queue *queue, in nvmet_tcp_try_recv() argument
1293 ret = nvmet_tcp_try_recv_one(queue); in nvmet_tcp_try_recv()
1295 nvmet_tcp_socket_error(queue, ret); in nvmet_tcp_try_recv()
1306 static void nvmet_tcp_schedule_release_queue(struct nvmet_tcp_queue *queue) in nvmet_tcp_schedule_release_queue() argument
1308 spin_lock(&queue->state_lock); in nvmet_tcp_schedule_release_queue()
1309 if (queue->state != NVMET_TCP_Q_DISCONNECTING) { in nvmet_tcp_schedule_release_queue()
1310 queue->state = NVMET_TCP_Q_DISCONNECTING; in nvmet_tcp_schedule_release_queue()
1311 queue_work(nvmet_wq, &queue->release_work); in nvmet_tcp_schedule_release_queue()
1313 spin_unlock(&queue->state_lock); in nvmet_tcp_schedule_release_queue()
1316 static inline void nvmet_tcp_arm_queue_deadline(struct nvmet_tcp_queue *queue) in nvmet_tcp_arm_queue_deadline() argument
1318 queue->poll_end = jiffies + usecs_to_jiffies(idle_poll_period_usecs); in nvmet_tcp_arm_queue_deadline()
1321 static bool nvmet_tcp_check_queue_deadline(struct nvmet_tcp_queue *queue, in nvmet_tcp_check_queue_deadline() argument
1328 nvmet_tcp_arm_queue_deadline(queue); in nvmet_tcp_check_queue_deadline()
1330 return !time_after(jiffies, queue->poll_end); in nvmet_tcp_check_queue_deadline()
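
With the idle_poll_period_usecs module parameter set, the queue is polled until a deadline passes with no activity: any completed op re-arms poll_end, and io_work keeps rescheduling itself while jiffies has not passed it. An equivalent sketch on a monotonic clock:

    #include <stdbool.h>
    #include <time.h>

    static struct timespec poll_end;

    static void arm_deadline(long idle_usecs)
    {
        clock_gettime(CLOCK_MONOTONIC, &poll_end);
        poll_end.tv_nsec += idle_usecs * 1000L;
        poll_end.tv_sec  += poll_end.tv_nsec / 1000000000L;
        poll_end.tv_nsec %= 1000000000L;
    }

    /* Activity pushes the deadline out; keep polling until it passes. */
    static bool keep_polling(int ops_this_pass, long idle_usecs)
    {
        struct timespec now;

        if (ops_this_pass > 0)
            arm_deadline(idle_usecs);
        clock_gettime(CLOCK_MONOTONIC, &now);
        return now.tv_sec < poll_end.tv_sec ||
               (now.tv_sec == poll_end.tv_sec &&
                now.tv_nsec <= poll_end.tv_nsec);
    }
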
1335 struct nvmet_tcp_queue *queue = in nvmet_tcp_io_work() local
1343 ret = nvmet_tcp_try_recv(queue, NVMET_TCP_RECV_BUDGET, &ops); in nvmet_tcp_io_work()
1349 ret = nvmet_tcp_try_send(queue, NVMET_TCP_SEND_BUDGET, &ops); in nvmet_tcp_io_work()
1361 if (nvmet_tcp_check_queue_deadline(queue, ops) || pending) in nvmet_tcp_io_work()
1362 queue_work_on(queue_cpu(queue), nvmet_tcp_wq, &queue->io_work); in nvmet_tcp_io_work()
1365 static int nvmet_tcp_alloc_cmd(struct nvmet_tcp_queue *queue, in nvmet_tcp_alloc_cmd() argument
1368 u8 hdgst = nvmet_tcp_hdgst_len(queue); in nvmet_tcp_alloc_cmd()
1370 c->queue = queue; in nvmet_tcp_alloc_cmd()
1371 c->req.port = queue->port->nport; in nvmet_tcp_alloc_cmd()
1373 c->cmd_pdu = page_frag_alloc(&queue->pf_cache, in nvmet_tcp_alloc_cmd()
1379 c->rsp_pdu = page_frag_alloc(&queue->pf_cache, in nvmet_tcp_alloc_cmd()
1385 c->data_pdu = page_frag_alloc(&queue->pf_cache, in nvmet_tcp_alloc_cmd()
1390 c->r2t_pdu = page_frag_alloc(&queue->pf_cache, in nvmet_tcp_alloc_cmd()
1397 list_add_tail(&c->entry, &queue->free_list); in nvmet_tcp_alloc_cmd()
1417 static int nvmet_tcp_alloc_cmds(struct nvmet_tcp_queue *queue) in nvmet_tcp_alloc_cmds() argument
1420 int i, ret = -EINVAL, nr_cmds = queue->nr_cmds; in nvmet_tcp_alloc_cmds()
1427 ret = nvmet_tcp_alloc_cmd(queue, cmds + i); in nvmet_tcp_alloc_cmds()
1432 queue->cmds = cmds; in nvmet_tcp_alloc_cmds()
1443 static void nvmet_tcp_free_cmds(struct nvmet_tcp_queue *queue) in nvmet_tcp_free_cmds() argument
1445 struct nvmet_tcp_cmd *cmds = queue->cmds; in nvmet_tcp_free_cmds()
1448 for (i = 0; i < queue->nr_cmds; i++) in nvmet_tcp_free_cmds()
1451 nvmet_tcp_free_cmd(&queue->connect); in nvmet_tcp_free_cmds()
1455 static void nvmet_tcp_restore_socket_callbacks(struct nvmet_tcp_queue *queue) in nvmet_tcp_restore_socket_callbacks() argument
1457 struct socket *sock = queue->sock; in nvmet_tcp_restore_socket_callbacks()
1459 if (!queue->state_change) in nvmet_tcp_restore_socket_callbacks()
1463 sock->sk->sk_data_ready = queue->data_ready; in nvmet_tcp_restore_socket_callbacks()
1464 sock->sk->sk_state_change = queue->state_change; in nvmet_tcp_restore_socket_callbacks()
1465 sock->sk->sk_write_space = queue->write_space; in nvmet_tcp_restore_socket_callbacks()
1470 static void nvmet_tcp_uninit_data_in_cmds(struct nvmet_tcp_queue *queue) in nvmet_tcp_uninit_data_in_cmds() argument
1472 struct nvmet_tcp_cmd *cmd = queue->cmds; in nvmet_tcp_uninit_data_in_cmds()
1475 for (i = 0; i < queue->nr_cmds; i++, cmd++) { in nvmet_tcp_uninit_data_in_cmds()
1480 if (!queue->nr_cmds && nvmet_tcp_need_data_in(&queue->connect)) { in nvmet_tcp_uninit_data_in_cmds()
1482 nvmet_req_uninit(&queue->connect.req); in nvmet_tcp_uninit_data_in_cmds()
1486 static void nvmet_tcp_free_cmd_data_in_buffers(struct nvmet_tcp_queue *queue) in nvmet_tcp_free_cmd_data_in_buffers() argument
1488 struct nvmet_tcp_cmd *cmd = queue->cmds; in nvmet_tcp_free_cmd_data_in_buffers()
1491 for (i = 0; i < queue->nr_cmds; i++, cmd++) in nvmet_tcp_free_cmd_data_in_buffers()
1493 nvmet_tcp_free_cmd_buffers(&queue->connect); in nvmet_tcp_free_cmd_data_in_buffers()
1499 struct nvmet_tcp_queue *queue = in nvmet_tcp_release_queue_work() local
1503 list_del_init(&queue->queue_list); in nvmet_tcp_release_queue_work()
1506 nvmet_tcp_restore_socket_callbacks(queue); in nvmet_tcp_release_queue_work()
1507 cancel_work_sync(&queue->io_work); in nvmet_tcp_release_queue_work()
1509 queue->rcv_state = NVMET_TCP_RECV_ERR; in nvmet_tcp_release_queue_work()
1511 nvmet_tcp_uninit_data_in_cmds(queue); in nvmet_tcp_release_queue_work()
1512 nvmet_sq_destroy(&queue->nvme_sq); in nvmet_tcp_release_queue_work()
1513 cancel_work_sync(&queue->io_work); in nvmet_tcp_release_queue_work()
1514 nvmet_tcp_free_cmd_data_in_buffers(queue); in nvmet_tcp_release_queue_work()
1515 sock_release(queue->sock); in nvmet_tcp_release_queue_work()
1516 nvmet_tcp_free_cmds(queue); in nvmet_tcp_release_queue_work()
1517 if (queue->hdr_digest || queue->data_digest) in nvmet_tcp_release_queue_work()
1518 nvmet_tcp_free_crypto(queue); in nvmet_tcp_release_queue_work()
1519 ida_free(&nvmet_tcp_queue_ida, queue->idx); in nvmet_tcp_release_queue_work()
1521 page = virt_to_head_page(queue->pf_cache.va); in nvmet_tcp_release_queue_work()
1522 __page_frag_cache_drain(page, queue->pf_cache.pagecnt_bias); in nvmet_tcp_release_queue_work()
1523 kfree(queue); in nvmet_tcp_release_queue_work()
1528 struct nvmet_tcp_queue *queue; in nvmet_tcp_data_ready() local
1533 queue = sk->sk_user_data; in nvmet_tcp_data_ready()
1534 if (likely(queue)) in nvmet_tcp_data_ready()
1535 queue_work_on(queue_cpu(queue), nvmet_tcp_wq, &queue->io_work); in nvmet_tcp_data_ready()
1541 struct nvmet_tcp_queue *queue; in nvmet_tcp_write_space() local
1544 queue = sk->sk_user_data; in nvmet_tcp_write_space()
1545 if (unlikely(!queue)) in nvmet_tcp_write_space()
1548 if (unlikely(queue->state == NVMET_TCP_Q_CONNECTING)) { in nvmet_tcp_write_space()
1549 queue->write_space(sk); in nvmet_tcp_write_space()
1555 queue_work_on(queue_cpu(queue), nvmet_tcp_wq, &queue->io_work); in nvmet_tcp_write_space()
1563 struct nvmet_tcp_queue *queue; in nvmet_tcp_state_change() local
1566 queue = sk->sk_user_data; in nvmet_tcp_state_change()
1567 if (!queue) in nvmet_tcp_state_change()
1578 nvmet_tcp_schedule_release_queue(queue); in nvmet_tcp_state_change()
1581 pr_warn("queue %d unhandled state %d\n", in nvmet_tcp_state_change()
1582 queue->idx, sk->sk_state); in nvmet_tcp_state_change()
1588 static int nvmet_tcp_set_queue_sock(struct nvmet_tcp_queue *queue) in nvmet_tcp_set_queue_sock() argument
1590 struct socket *sock = queue->sock; in nvmet_tcp_set_queue_sock()
1595 (struct sockaddr *)&queue->sockaddr); in nvmet_tcp_set_queue_sock()
1600 (struct sockaddr *)&queue->sockaddr_peer); in nvmet_tcp_set_queue_sock()
1605 * Clean up whatever is sitting in the TCP transmit queue on socket in nvmet_tcp_set_queue_sock()
1627 sock->sk->sk_user_data = queue; in nvmet_tcp_set_queue_sock()
1628 queue->data_ready = sock->sk->sk_data_ready; in nvmet_tcp_set_queue_sock()
1630 queue->state_change = sock->sk->sk_state_change; in nvmet_tcp_set_queue_sock()
1632 queue->write_space = sock->sk->sk_write_space; in nvmet_tcp_set_queue_sock()
1635 nvmet_tcp_arm_queue_deadline(queue); in nvmet_tcp_set_queue_sock()
1636 queue_work_on(queue_cpu(queue), nvmet_tcp_wq, &queue->io_work); in nvmet_tcp_set_queue_sock()
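
nvmet_tcp_set_queue_sock() saves the socket's original sk_data_ready/sk_state_change/sk_write_space callbacks and points them at the driver's own, while nvmet_tcp_restore_socket_callbacks() (lines 1455–1465) undoes this at teardown. The save/install/restore idiom, with illustrative stand-in types:

    typedef void (*sk_cb_t)(void *sk);

    struct fake_sock {
        void    *user_data;     /* sk->sk_user_data */
        sk_cb_t  data_ready;    /* sk->sk_data_ready */
    };

    struct fake_queue {
        struct fake_sock *sk;
        sk_cb_t           saved_data_ready;
    };

    /* Replacement callback: find the queue via user_data and kick its
     * worker (elided here). */
    static void my_data_ready(void *sk)
    {
        struct fake_queue *q = ((struct fake_sock *)sk)->user_data;
        (void)q;    /* would queue_work_on(queue_cpu(q), ...) */
    }

    static void install_callbacks(struct fake_queue *q)
    {
        q->sk->user_data    = q;
        q->saved_data_ready = q->sk->data_ready;  /* stash original */
        q->sk->data_ready   = my_data_ready;
    }

    static void restore_callbacks(struct fake_queue *q)
    {
        if (!q->saved_data_ready)   /* never installed: nothing to undo */
            return;
        q->sk->data_ready = q->saved_data_ready;
        q->sk->user_data  = NULL;
    }
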
1646 struct nvmet_tcp_queue *queue; in nvmet_tcp_alloc_queue() local
1649 queue = kzalloc(sizeof(*queue), GFP_KERNEL); in nvmet_tcp_alloc_queue()
1650 if (!queue) in nvmet_tcp_alloc_queue()
1653 INIT_WORK(&queue->release_work, nvmet_tcp_release_queue_work); in nvmet_tcp_alloc_queue()
1654 INIT_WORK(&queue->io_work, nvmet_tcp_io_work); in nvmet_tcp_alloc_queue()
1655 queue->sock = newsock; in nvmet_tcp_alloc_queue()
1656 queue->port = port; in nvmet_tcp_alloc_queue()
1657 queue->nr_cmds = 0; in nvmet_tcp_alloc_queue()
1658 spin_lock_init(&queue->state_lock); in nvmet_tcp_alloc_queue()
1659 queue->state = NVMET_TCP_Q_CONNECTING; in nvmet_tcp_alloc_queue()
1660 INIT_LIST_HEAD(&queue->free_list); in nvmet_tcp_alloc_queue()
1661 init_llist_head(&queue->resp_list); in nvmet_tcp_alloc_queue()
1662 INIT_LIST_HEAD(&queue->resp_send_list); in nvmet_tcp_alloc_queue()
1664 queue->idx = ida_alloc(&nvmet_tcp_queue_ida, GFP_KERNEL); in nvmet_tcp_alloc_queue()
1665 if (queue->idx < 0) { in nvmet_tcp_alloc_queue()
1666 ret = queue->idx; in nvmet_tcp_alloc_queue()
1670 ret = nvmet_tcp_alloc_cmd(queue, &queue->connect); in nvmet_tcp_alloc_queue()
1674 ret = nvmet_sq_init(&queue->nvme_sq); in nvmet_tcp_alloc_queue()
1678 nvmet_prepare_receive_pdu(queue); in nvmet_tcp_alloc_queue()
1681 list_add_tail(&queue->queue_list, &nvmet_tcp_queue_list); in nvmet_tcp_alloc_queue()
1684 ret = nvmet_tcp_set_queue_sock(queue); in nvmet_tcp_alloc_queue()
1691 list_del_init(&queue->queue_list); in nvmet_tcp_alloc_queue()
1693 nvmet_sq_destroy(&queue->nvme_sq); in nvmet_tcp_alloc_queue()
1695 nvmet_tcp_free_cmd(&queue->connect); in nvmet_tcp_alloc_queue()
1697 ida_free(&nvmet_tcp_queue_ida, queue->idx); in nvmet_tcp_alloc_queue()
1699 kfree(queue); in nvmet_tcp_alloc_queue()
1719 pr_err("failed to allocate queue\n"); in nvmet_tcp_accept_work()
1822 struct nvmet_tcp_queue *queue; in nvmet_tcp_destroy_port_queues() local
1825 list_for_each_entry(queue, &nvmet_tcp_queue_list, queue_list) in nvmet_tcp_destroy_port_queues()
1826 if (queue->port == port) in nvmet_tcp_destroy_port_queues()
1827 kernel_sock_shutdown(queue->sock, SHUT_RDWR); in nvmet_tcp_destroy_port_queues()
1852 struct nvmet_tcp_queue *queue; in nvmet_tcp_delete_ctrl() local
1855 list_for_each_entry(queue, &nvmet_tcp_queue_list, queue_list) in nvmet_tcp_delete_ctrl()
1856 if (queue->nvme_sq.ctrl == ctrl) in nvmet_tcp_delete_ctrl()
1857 kernel_sock_shutdown(queue->sock, SHUT_RDWR); in nvmet_tcp_delete_ctrl()
1863 struct nvmet_tcp_queue *queue = in nvmet_tcp_install_queue() local
1871 queue->nr_cmds = sq->size * 2; in nvmet_tcp_install_queue()
1872 if (nvmet_tcp_alloc_cmds(queue)) { in nvmet_tcp_install_queue()
1873 queue->nr_cmds = 0; in nvmet_tcp_install_queue()
1887 struct nvmet_tcp_queue *queue = cmd->queue; in nvmet_tcp_disc_port_addr() local
1889 sprintf(traddr, "%pISc", (struct sockaddr *)&queue->sockaddr); in nvmet_tcp_disc_port_addr()
1928 struct nvmet_tcp_queue *queue; in nvmet_tcp_exit() local
1934 list_for_each_entry(queue, &nvmet_tcp_queue_list, queue_list) in nvmet_tcp_exit()
1935 kernel_sock_shutdown(queue->sock, SHUT_RDWR); in nvmet_tcp_exit()