// SPDX-License-Identifier: GPL-2.0
/*
 * NVMe over Fabrics TCP target.
 * Copyright (c) 2018 Lightbits Labs. All rights reserved.
 */
#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
#include <linux/module.h>
#include <linux/init.h>
#include <linux/slab.h>
#include <linux/err.h>
#include <linux/nvme-tcp.h>
#include <net/sock.h>
#include <net/tcp.h>
#include <linux/inet.h>
#include <linux/llist.h>
#include <crypto/hash.h>

#include "nvmet.h"

#define NVMET_TCP_DEF_INLINE_DATA_SIZE	(4 * PAGE_SIZE)

/* Define the socket priority to use for connections where it is desirable
 * that the NIC consider performing optimized packet processing or filtering.
 * A non-zero value being sufficient to indicate general consideration of any
 * possible optimization. Making it a module param allows for alternative
 * values that may be unique for some NIC implementations.
 */
static int so_priority;
module_param(so_priority, int, 0644);
MODULE_PARM_DESC(so_priority, "nvmet tcp socket optimize priority");

/* Define a time period (in usecs) that io_work() shall sample an activated
 * queue before determining it to be idle. This optional module behavior
 * can enable NIC solutions that support socket optimized packet processing
 * using advanced interrupt moderation techniques.
 */
static int idle_poll_period_usecs;
module_param(idle_poll_period_usecs, int, 0644);
MODULE_PARM_DESC(idle_poll_period_usecs,
		"nvmet tcp io_work poll till idle time period in usecs");

#define NVMET_TCP_RECV_BUDGET		8
#define NVMET_TCP_SEND_BUDGET		8
#define NVMET_TCP_IO_WORK_BUDGET	64

enum nvmet_tcp_send_state {
	NVMET_TCP_SEND_DATA_PDU,
	NVMET_TCP_SEND_DATA,
	NVMET_TCP_SEND_R2T,
	NVMET_TCP_SEND_DDGST,
	NVMET_TCP_SEND_RESPONSE
};

enum nvmet_tcp_recv_state {
	NVMET_TCP_RECV_PDU,
	NVMET_TCP_RECV_DATA,
	NVMET_TCP_RECV_DDGST,
	NVMET_TCP_RECV_ERR,
};

enum {
	NVMET_TCP_F_INIT_FAILED = (1 << 0),
};

struct nvmet_tcp_cmd {
	struct nvmet_tcp_queue *queue;
	struct nvmet_req req;

	struct nvme_tcp_cmd_pdu *cmd_pdu;
	struct nvme_tcp_rsp_pdu *rsp_pdu;
	struct nvme_tcp_data_pdu *data_pdu;
	struct nvme_tcp_r2t_pdu *r2t_pdu;

	u32 rbytes_done;
	u32 wbytes_done;

	u32 pdu_len;
	u32 pdu_recv;
	int sg_idx;
	int nr_mapped;
	struct msghdr recv_msg;
	struct kvec *iov;
	u32 flags;

	struct list_head entry;
	struct llist_node lentry;

	/* send state */
	u32 offset;
	struct scatterlist *cur_sg;
	enum nvmet_tcp_send_state state;

	__le32 exp_ddgst;
	__le32 recv_ddgst;
};

enum nvmet_tcp_queue_state {
	NVMET_TCP_Q_CONNECTING,
	NVMET_TCP_Q_LIVE,
	NVMET_TCP_Q_DISCONNECTING,
};

struct nvmet_tcp_queue {
	struct socket *sock;
	struct nvmet_tcp_port *port;
	struct work_struct io_work;
	struct nvmet_cq nvme_cq;
	struct nvmet_sq nvme_sq;

	/* send state */
	struct nvmet_tcp_cmd *cmds;
	unsigned int nr_cmds;
	struct list_head free_list;
	struct llist_head resp_list;
	struct list_head resp_send_list;
	int send_list_len;
	struct nvmet_tcp_cmd *snd_cmd;

	/* recv state */
	int offset;
	int left;
	enum nvmet_tcp_recv_state rcv_state;
	struct nvmet_tcp_cmd *cmd;
	union nvme_tcp_pdu pdu;

	/* digest state */
	bool hdr_digest;
	bool data_digest;
	struct ahash_request *snd_hash;
	struct ahash_request *rcv_hash;

	unsigned long poll_end;

	spinlock_t state_lock;
	enum nvmet_tcp_queue_state state;

	struct sockaddr_storage sockaddr;
	struct sockaddr_storage sockaddr_peer;
	struct work_struct release_work;

	int idx;
	struct list_head queue_list;

	struct nvmet_tcp_cmd connect;

	struct page_frag_cache pf_cache;

	void (*data_ready)(struct sock *);
	void (*state_change)(struct sock *);
	void (*write_space)(struct sock *);
};

struct nvmet_tcp_port {
	struct socket *sock;
	struct work_struct accept_work;
	struct nvmet_port *nport;
	struct sockaddr_storage addr;
	void (*data_ready)(struct sock *);
};

static DEFINE_IDA(nvmet_tcp_queue_ida);
static LIST_HEAD(nvmet_tcp_queue_list);
static DEFINE_MUTEX(nvmet_tcp_queue_mutex);

static struct workqueue_struct *nvmet_tcp_wq;
static const struct nvmet_fabrics_ops nvmet_tcp_ops;
static void nvmet_tcp_free_cmd(struct nvmet_tcp_cmd *c);
static void nvmet_tcp_finish_cmd(struct nvmet_tcp_cmd *cmd);
static void nvmet_tcp_free_cmd_buffers(struct nvmet_tcp_cmd *cmd);
static void nvmet_tcp_unmap_pdu_iovec(struct nvmet_tcp_cmd *cmd);

static inline u16 nvmet_tcp_cmd_tag(struct nvmet_tcp_queue *queue,
		struct nvmet_tcp_cmd *cmd)
{
	if (unlikely(!queue->nr_cmds)) {
		/* We didn't allocate cmds yet, send 0xffff */
		return USHRT_MAX;
	}

	return cmd - queue->cmds;
}

static inline bool nvmet_tcp_has_data_in(struct nvmet_tcp_cmd *cmd)
{
	return nvme_is_write(cmd->req.cmd) &&
		cmd->rbytes_done < cmd->req.transfer_len;
}

static inline bool nvmet_tcp_need_data_in(struct nvmet_tcp_cmd *cmd)
{
	return nvmet_tcp_has_data_in(cmd) && !cmd->req.cqe->status;
}

static inline bool nvmet_tcp_need_data_out(struct nvmet_tcp_cmd *cmd)
{
	return !nvme_is_write(cmd->req.cmd) &&
		cmd->req.transfer_len > 0 &&
		!cmd->req.cqe->status;
}

static inline bool nvmet_tcp_has_inline_data(struct nvmet_tcp_cmd *cmd)
{
	return nvme_is_write(cmd->req.cmd) && cmd->pdu_len &&
		!cmd->rbytes_done;
}

static inline struct nvmet_tcp_cmd *
nvmet_tcp_get_cmd(struct nvmet_tcp_queue *queue)
{
	struct nvmet_tcp_cmd *cmd;

	cmd = list_first_entry_or_null(&queue->free_list,
				struct nvmet_tcp_cmd, entry);
	if (!cmd)
		return NULL;
	list_del_init(&cmd->entry);

	cmd->rbytes_done = cmd->wbytes_done = 0;
	cmd->pdu_len = 0;
	cmd->pdu_recv = 0;
	cmd->iov = NULL;
	cmd->flags = 0;
	return cmd;
}

static inline void nvmet_tcp_put_cmd(struct nvmet_tcp_cmd *cmd)
{
	if (unlikely(cmd == &cmd->queue->connect))
		return;

	list_add_tail(&cmd->entry, &cmd->queue->free_list);
}

static inline int queue_cpu(struct nvmet_tcp_queue *queue)
{
	return queue->sock->sk->sk_incoming_cpu;
}

static inline u8 nvmet_tcp_hdgst_len(struct nvmet_tcp_queue *queue)
{
	return queue->hdr_digest ? NVME_TCP_DIGEST_LENGTH : 0;
}

static inline u8 nvmet_tcp_ddgst_len(struct nvmet_tcp_queue *queue)
{
	return queue->data_digest ? NVME_TCP_DIGEST_LENGTH : 0;
}

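/*
 * PDU header and data digests are CRC32C, computed with the crc32c ahash
 * allocated in nvmet_tcp_alloc_crypto(). nvmet_tcp_hdgst() writes the digest
 * directly after the PDU header; nvmet_tcp_verify_hdgst() recomputes it and
 * compares it against what the host sent.
 */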
static inline void nvmet_tcp_hdgst(struct ahash_request *hash,
		void *pdu, size_t len)
{
	struct scatterlist sg;

	sg_init_one(&sg, pdu, len);
	ahash_request_set_crypt(hash, &sg, pdu + len, len);
	crypto_ahash_digest(hash);
}

static int nvmet_tcp_verify_hdgst(struct nvmet_tcp_queue *queue,
		void *pdu, size_t len)
{
	struct nvme_tcp_hdr *hdr = pdu;
	__le32 recv_digest;
	__le32 exp_digest;

	if (unlikely(!(hdr->flags & NVME_TCP_F_HDGST))) {
		pr_err("queue %d: header digest enabled but no header digest\n",
			queue->idx);
		return -EPROTO;
	}

	recv_digest = *(__le32 *)(pdu + hdr->hlen);
	nvmet_tcp_hdgst(queue->rcv_hash, pdu, len);
	exp_digest = *(__le32 *)(pdu + hdr->hlen);
	if (recv_digest != exp_digest) {
		pr_err("queue %d: header digest error: recv %#x expected %#x\n",
			queue->idx, le32_to_cpu(recv_digest),
			le32_to_cpu(exp_digest));
		return -EPROTO;
	}

	return 0;
}

static int nvmet_tcp_check_ddgst(struct nvmet_tcp_queue *queue, void *pdu)
{
	struct nvme_tcp_hdr *hdr = pdu;
	u8 digest_len = nvmet_tcp_hdgst_len(queue);
	u32 len;

	len = le32_to_cpu(hdr->plen) - hdr->hlen -
		(hdr->flags & NVME_TCP_F_HDGST ? digest_len : 0);

	if (unlikely(len && !(hdr->flags & NVME_TCP_F_DDGST))) {
		pr_err("queue %d: data digest flag is cleared\n", queue->idx);
		return -EPROTO;
	}

	return 0;
}

static void nvmet_tcp_free_cmd_buffers(struct nvmet_tcp_cmd *cmd)
{
	WARN_ON(unlikely(cmd->nr_mapped > 0));

	kfree(cmd->iov);
	sgl_free(cmd->req.sg);
	cmd->iov = NULL;
	cmd->req.sg = NULL;
}

static void nvmet_tcp_unmap_pdu_iovec(struct nvmet_tcp_cmd *cmd)
{
	struct scatterlist *sg;
	int i;

	sg = &cmd->req.sg[cmd->sg_idx];

	for (i = 0; i < cmd->nr_mapped; i++)
		kunmap(sg_page(&sg[i]));

	cmd->nr_mapped = 0;
}

static void nvmet_tcp_map_pdu_iovec(struct nvmet_tcp_cmd *cmd)
{
	struct kvec *iov = cmd->iov;
	struct scatterlist *sg;
	u32 length, offset, sg_offset;

	length = cmd->pdu_len;
	cmd->nr_mapped = DIV_ROUND_UP(length, PAGE_SIZE);
	offset = cmd->rbytes_done;
	cmd->sg_idx = offset / PAGE_SIZE;
	sg_offset = offset % PAGE_SIZE;
	sg = &cmd->req.sg[cmd->sg_idx];

	while (length) {
		u32 iov_len = min_t(u32, length, sg->length - sg_offset);

		iov->iov_base = kmap(sg_page(sg)) + sg->offset + sg_offset;
		iov->iov_len = iov_len;

		length -= iov_len;
		sg = sg_next(sg);
		iov++;
		sg_offset = 0;
	}

	iov_iter_kvec(&cmd->recv_msg.msg_iter, READ, cmd->iov,
		cmd->nr_mapped, cmd->pdu_len);
}

static void nvmet_tcp_fatal_error(struct nvmet_tcp_queue *queue)
{
	queue->rcv_state = NVMET_TCP_RECV_ERR;
	if (queue->nvme_sq.ctrl)
		nvmet_ctrl_fatal_error(queue->nvme_sq.ctrl);
	else
		kernel_sock_shutdown(queue->sock, SHUT_RDWR);
}

static void nvmet_tcp_socket_error(struct nvmet_tcp_queue *queue, int status)
{
	if (status == -EPIPE || status == -ECONNRESET)
		kernel_sock_shutdown(queue->sock, SHUT_RDWR);
	else
		nvmet_tcp_fatal_error(queue);
}

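/*
 * Map the command's SGL to a scatterlist. A data block descriptor with the
 * offset subtype means in-capsule (inline) data: it is only valid for writes
 * and must fit within the port's advertised inline_data_size.
 */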
static int nvmet_tcp_map_data(struct nvmet_tcp_cmd *cmd)
{
	struct nvme_sgl_desc *sgl = &cmd->req.cmd->common.dptr.sgl;
	u32 len = le32_to_cpu(sgl->length);

	if (!len)
		return 0;

	if (sgl->type == ((NVME_SGL_FMT_DATA_DESC << 4) |
			  NVME_SGL_FMT_OFFSET)) {
		if (!nvme_is_write(cmd->req.cmd))
			return NVME_SC_INVALID_FIELD | NVME_SC_DNR;

		if (len > cmd->req.port->inline_data_size)
			return NVME_SC_SGL_INVALID_OFFSET | NVME_SC_DNR;
		cmd->pdu_len = len;
	}
	cmd->req.transfer_len += len;

	cmd->req.sg = sgl_alloc(len, GFP_KERNEL, &cmd->req.sg_cnt);
	if (!cmd->req.sg)
		return NVME_SC_INTERNAL;
	cmd->cur_sg = cmd->req.sg;

	if (nvmet_tcp_has_data_in(cmd)) {
		cmd->iov = kmalloc_array(cmd->req.sg_cnt,
				sizeof(*cmd->iov), GFP_KERNEL);
		if (!cmd->iov)
			goto err;
	}

	return 0;
err:
	nvmet_tcp_free_cmd_buffers(cmd);
	return NVME_SC_INTERNAL;
}

static void nvmet_tcp_send_ddgst(struct ahash_request *hash,
		struct nvmet_tcp_cmd *cmd)
{
	ahash_request_set_crypt(hash, cmd->req.sg,
		(void *)&cmd->exp_ddgst, cmd->req.transfer_len);
	crypto_ahash_digest(hash);
}

static void nvmet_tcp_recv_ddgst(struct ahash_request *hash,
		struct nvmet_tcp_cmd *cmd)
{
	struct scatterlist sg;
	struct kvec *iov;
	int i;

	crypto_ahash_init(hash);
	for (i = 0, iov = cmd->iov; i < cmd->nr_mapped; i++, iov++) {
		sg_init_one(&sg, iov->iov_base, iov->iov_len);
		ahash_request_set_crypt(hash, &sg, NULL, iov->iov_len);
		crypto_ahash_update(hash);
	}
	ahash_request_set_crypt(hash, NULL, (void *)&cmd->exp_ddgst, 0);
	crypto_ahash_final(hash);
}

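/*
 * Each completed command is turned into exactly one send-side PDU: C2H_DATA
 * for reads, R2T when more host data is required, or a plain response
 * capsule. nvmet_tcp_fetch_cmd() picks which of the setup helpers below to
 * use.
 */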
static void nvmet_setup_c2h_data_pdu(struct nvmet_tcp_cmd *cmd)
{
	struct nvme_tcp_data_pdu *pdu = cmd->data_pdu;
	struct nvmet_tcp_queue *queue = cmd->queue;
	u8 hdgst = nvmet_tcp_hdgst_len(cmd->queue);
	u8 ddgst = nvmet_tcp_ddgst_len(cmd->queue);

	cmd->offset = 0;
	cmd->state = NVMET_TCP_SEND_DATA_PDU;

	pdu->hdr.type = nvme_tcp_c2h_data;
	pdu->hdr.flags = NVME_TCP_F_DATA_LAST | (queue->nvme_sq.sqhd_disabled ?
						NVME_TCP_F_DATA_SUCCESS : 0);
	pdu->hdr.hlen = sizeof(*pdu);
	pdu->hdr.pdo = pdu->hdr.hlen + hdgst;
	pdu->hdr.plen =
		cpu_to_le32(pdu->hdr.hlen + hdgst +
				cmd->req.transfer_len + ddgst);
	pdu->command_id = cmd->req.cqe->command_id;
	pdu->data_length = cpu_to_le32(cmd->req.transfer_len);
	pdu->data_offset = cpu_to_le32(cmd->wbytes_done);

	if (queue->data_digest) {
		pdu->hdr.flags |= NVME_TCP_F_DDGST;
		nvmet_tcp_send_ddgst(queue->snd_hash, cmd);
	}

	if (cmd->queue->hdr_digest) {
		pdu->hdr.flags |= NVME_TCP_F_HDGST;
		nvmet_tcp_hdgst(queue->snd_hash, pdu, sizeof(*pdu));
	}
}

static void nvmet_setup_r2t_pdu(struct nvmet_tcp_cmd *cmd)
{
	struct nvme_tcp_r2t_pdu *pdu = cmd->r2t_pdu;
	struct nvmet_tcp_queue *queue = cmd->queue;
	u8 hdgst = nvmet_tcp_hdgst_len(cmd->queue);

	cmd->offset = 0;
	cmd->state = NVMET_TCP_SEND_R2T;

	pdu->hdr.type = nvme_tcp_r2t;
	pdu->hdr.flags = 0;
	pdu->hdr.hlen = sizeof(*pdu);
	pdu->hdr.pdo = 0;
	pdu->hdr.plen = cpu_to_le32(pdu->hdr.hlen + hdgst);

	pdu->command_id = cmd->req.cmd->common.command_id;
	pdu->ttag = nvmet_tcp_cmd_tag(cmd->queue, cmd);
	pdu->r2t_length = cpu_to_le32(cmd->req.transfer_len - cmd->rbytes_done);
	pdu->r2t_offset = cpu_to_le32(cmd->rbytes_done);
	if (cmd->queue->hdr_digest) {
		pdu->hdr.flags |= NVME_TCP_F_HDGST;
		nvmet_tcp_hdgst(queue->snd_hash, pdu, sizeof(*pdu));
	}
}

static void nvmet_setup_response_pdu(struct nvmet_tcp_cmd *cmd)
{
	struct nvme_tcp_rsp_pdu *pdu = cmd->rsp_pdu;
	struct nvmet_tcp_queue *queue = cmd->queue;
	u8 hdgst = nvmet_tcp_hdgst_len(cmd->queue);

	cmd->offset = 0;
	cmd->state = NVMET_TCP_SEND_RESPONSE;

	pdu->hdr.type = nvme_tcp_rsp;
	pdu->hdr.flags = 0;
	pdu->hdr.hlen = sizeof(*pdu);
	pdu->hdr.pdo = 0;
	pdu->hdr.plen = cpu_to_le32(pdu->hdr.hlen + hdgst);
	if (cmd->queue->hdr_digest) {
		pdu->hdr.flags |= NVME_TCP_F_HDGST;
		nvmet_tcp_hdgst(queue->snd_hash, pdu, sizeof(*pdu));
	}
}

static void nvmet_tcp_process_resp_list(struct nvmet_tcp_queue *queue)
{
	struct llist_node *node;
	struct nvmet_tcp_cmd *cmd;

	for (node = llist_del_all(&queue->resp_list); node; node = node->next) {
		cmd = llist_entry(node, struct nvmet_tcp_cmd, lentry);
		list_add(&cmd->entry, &queue->resp_send_list);
		queue->send_list_len++;
	}
}

static struct nvmet_tcp_cmd *nvmet_tcp_fetch_cmd(struct nvmet_tcp_queue *queue)
{
	queue->snd_cmd = list_first_entry_or_null(&queue->resp_send_list,
				struct nvmet_tcp_cmd, entry);
	if (!queue->snd_cmd) {
		nvmet_tcp_process_resp_list(queue);
		queue->snd_cmd =
			list_first_entry_or_null(&queue->resp_send_list,
					struct nvmet_tcp_cmd, entry);
		if (unlikely(!queue->snd_cmd))
			return NULL;
	}

	list_del_init(&queue->snd_cmd->entry);
	queue->send_list_len--;

	if (nvmet_tcp_need_data_out(queue->snd_cmd))
		nvmet_setup_c2h_data_pdu(queue->snd_cmd);
	else if (nvmet_tcp_need_data_in(queue->snd_cmd))
		nvmet_setup_r2t_pdu(queue->snd_cmd);
	else
		nvmet_setup_response_pdu(queue->snd_cmd);

	return queue->snd_cmd;
}

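/*
 * Queue a response for transmission. This may be called from any context,
 * so the command is staged on a lockless llist and drained by io_work on
 * the queue's CPU.
 */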
static void nvmet_tcp_queue_response(struct nvmet_req *req)
{
	struct nvmet_tcp_cmd *cmd =
		container_of(req, struct nvmet_tcp_cmd, req);
	struct nvmet_tcp_queue *queue = cmd->queue;
	struct nvme_sgl_desc *sgl;
	u32 len;

	if (unlikely(cmd == queue->cmd)) {
		sgl = &cmd->req.cmd->common.dptr.sgl;
		len = le32_to_cpu(sgl->length);

		/*
		 * Wait for inline data before processing the response.
		 * Avoid using helpers, this might happen before
		 * nvmet_req_init is completed.
		 */
		if (queue->rcv_state == NVMET_TCP_RECV_PDU &&
		    len && len <= cmd->req.port->inline_data_size &&
		    nvme_is_write(cmd->req.cmd))
			return;
	}

	llist_add(&cmd->lentry, &queue->resp_list);
	queue_work_on(queue_cpu(queue), nvmet_tcp_wq, &cmd->queue->io_work);
}

static void nvmet_tcp_execute_request(struct nvmet_tcp_cmd *cmd)
{
	if (unlikely(cmd->flags & NVMET_TCP_F_INIT_FAILED))
		nvmet_tcp_queue_response(&cmd->req);
	else
		cmd->req.execute(&cmd->req);
}

static int nvmet_try_send_data_pdu(struct nvmet_tcp_cmd *cmd)
{
	u8 hdgst = nvmet_tcp_hdgst_len(cmd->queue);
	int left = sizeof(*cmd->data_pdu) - cmd->offset + hdgst;
	int ret;

	ret = kernel_sendpage(cmd->queue->sock, virt_to_page(cmd->data_pdu),
			offset_in_page(cmd->data_pdu) + cmd->offset,
			left, MSG_DONTWAIT | MSG_MORE | MSG_SENDPAGE_NOTLAST);
	if (ret <= 0)
		return ret;

	cmd->offset += ret;
	left -= ret;

	if (left)
		return -EAGAIN;

	cmd->state = NVMET_TCP_SEND_DATA;
	cmd->offset = 0;
	return 1;
}

static int nvmet_try_send_data(struct nvmet_tcp_cmd *cmd, bool last_in_batch)
{
	struct nvmet_tcp_queue *queue = cmd->queue;
	int ret;

	while (cmd->cur_sg) {
		struct page *page = sg_page(cmd->cur_sg);
		u32 left = cmd->cur_sg->length - cmd->offset;
		int flags = MSG_DONTWAIT;

		if ((!last_in_batch && cmd->queue->send_list_len) ||
		    cmd->wbytes_done + left < cmd->req.transfer_len ||
		    queue->data_digest || !queue->nvme_sq.sqhd_disabled)
			flags |= MSG_MORE | MSG_SENDPAGE_NOTLAST;

		ret = kernel_sendpage(cmd->queue->sock, page, cmd->offset,
					left, flags);
		if (ret <= 0)
			return ret;

		cmd->offset += ret;
		cmd->wbytes_done += ret;

		/* Done with sg? */
		if (cmd->offset == cmd->cur_sg->length) {
			cmd->cur_sg = sg_next(cmd->cur_sg);
			cmd->offset = 0;
		}
	}

	if (queue->data_digest) {
		cmd->state = NVMET_TCP_SEND_DDGST;
		cmd->offset = 0;
	} else {
		if (queue->nvme_sq.sqhd_disabled) {
			cmd->queue->snd_cmd = NULL;
			nvmet_tcp_put_cmd(cmd);
		} else {
			nvmet_setup_response_pdu(cmd);
		}
	}

	if (queue->nvme_sq.sqhd_disabled)
		nvmet_tcp_free_cmd_buffers(cmd);

	return 1;
}

static int nvmet_try_send_response(struct nvmet_tcp_cmd *cmd,
		bool last_in_batch)
{
	u8 hdgst = nvmet_tcp_hdgst_len(cmd->queue);
	int left = sizeof(*cmd->rsp_pdu) - cmd->offset + hdgst;
	int flags = MSG_DONTWAIT;
	int ret;

	if (!last_in_batch && cmd->queue->send_list_len)
		flags |= MSG_MORE | MSG_SENDPAGE_NOTLAST;
	else
		flags |= MSG_EOR;

	ret = kernel_sendpage(cmd->queue->sock, virt_to_page(cmd->rsp_pdu),
		offset_in_page(cmd->rsp_pdu) + cmd->offset, left, flags);
	if (ret <= 0)
		return ret;
	cmd->offset += ret;
	left -= ret;

	if (left)
		return -EAGAIN;

	nvmet_tcp_free_cmd_buffers(cmd);
	cmd->queue->snd_cmd = NULL;
	nvmet_tcp_put_cmd(cmd);
	return 1;
}

static int nvmet_try_send_r2t(struct nvmet_tcp_cmd *cmd, bool last_in_batch)
{
	u8 hdgst = nvmet_tcp_hdgst_len(cmd->queue);
	int left = sizeof(*cmd->r2t_pdu) - cmd->offset + hdgst;
	int flags = MSG_DONTWAIT;
	int ret;

	if (!last_in_batch && cmd->queue->send_list_len)
		flags |= MSG_MORE | MSG_SENDPAGE_NOTLAST;
	else
		flags |= MSG_EOR;

	ret = kernel_sendpage(cmd->queue->sock, virt_to_page(cmd->r2t_pdu),
		offset_in_page(cmd->r2t_pdu) + cmd->offset, left, flags);
	if (ret <= 0)
		return ret;
	cmd->offset += ret;
	left -= ret;

	if (left)
		return -EAGAIN;

	cmd->queue->snd_cmd = NULL;
	return 1;
}

static int nvmet_try_send_ddgst(struct nvmet_tcp_cmd *cmd, bool last_in_batch)
{
	struct nvmet_tcp_queue *queue = cmd->queue;
	int left = NVME_TCP_DIGEST_LENGTH - cmd->offset;
	struct msghdr msg = { .msg_flags = MSG_DONTWAIT };
	struct kvec iov = {
		.iov_base = (u8 *)&cmd->exp_ddgst + cmd->offset,
		.iov_len = left
	};
	int ret;

	if (!last_in_batch && cmd->queue->send_list_len)
		msg.msg_flags |= MSG_MORE;
	else
		msg.msg_flags |= MSG_EOR;

	ret = kernel_sendmsg(queue->sock, &msg, &iov, 1, iov.iov_len);
	if (unlikely(ret <= 0))
		return ret;

	cmd->offset += ret;
	left -= ret;

	if (left)
		return -EAGAIN;

	if (queue->nvme_sq.sqhd_disabled) {
		cmd->queue->snd_cmd = NULL;
		nvmet_tcp_put_cmd(cmd);
	} else {
		nvmet_setup_response_pdu(cmd);
	}
	return 1;
}

static int nvmet_tcp_try_send_one(struct nvmet_tcp_queue *queue,
		bool last_in_batch)
{
	struct nvmet_tcp_cmd *cmd = queue->snd_cmd;
	int ret = 0;

	if (!cmd || queue->state == NVMET_TCP_Q_DISCONNECTING) {
		cmd = nvmet_tcp_fetch_cmd(queue);
		if (unlikely(!cmd))
			return 0;
	}

	if (cmd->state == NVMET_TCP_SEND_DATA_PDU) {
		ret = nvmet_try_send_data_pdu(cmd);
		if (ret <= 0)
			goto done_send;
	}

	if (cmd->state == NVMET_TCP_SEND_DATA) {
		ret = nvmet_try_send_data(cmd, last_in_batch);
		if (ret <= 0)
			goto done_send;
	}

	if (cmd->state == NVMET_TCP_SEND_DDGST) {
		ret = nvmet_try_send_ddgst(cmd, last_in_batch);
		if (ret <= 0)
			goto done_send;
	}

	if (cmd->state == NVMET_TCP_SEND_R2T) {
		ret = nvmet_try_send_r2t(cmd, last_in_batch);
		if (ret <= 0)
			goto done_send;
	}

	if (cmd->state == NVMET_TCP_SEND_RESPONSE)
		ret = nvmet_try_send_response(cmd, last_in_batch);

done_send:
	if (ret < 0) {
		if (ret == -EAGAIN)
			return 0;
		return ret;
	}

	return 1;
}

static int nvmet_tcp_try_send(struct nvmet_tcp_queue *queue,
		int budget, int *sends)
{
	int i, ret = 0;

	for (i = 0; i < budget; i++) {
		ret = nvmet_tcp_try_send_one(queue, i == budget - 1);
		if (unlikely(ret < 0)) {
			nvmet_tcp_socket_error(queue, ret);
			goto done;
		} else if (ret == 0) {
			break;
		}
		(*sends)++;
	}
done:
	return ret;
}

static void nvmet_prepare_receive_pdu(struct nvmet_tcp_queue *queue)
{
	queue->offset = 0;
	queue->left = sizeof(struct nvme_tcp_hdr);
	queue->cmd = NULL;
	queue->rcv_state = NVMET_TCP_RECV_PDU;
}

static void nvmet_tcp_free_crypto(struct nvmet_tcp_queue *queue)
{
	struct crypto_ahash *tfm = crypto_ahash_reqtfm(queue->rcv_hash);

	ahash_request_free(queue->rcv_hash);
	ahash_request_free(queue->snd_hash);
	crypto_free_ahash(tfm);
}

static int nvmet_tcp_alloc_crypto(struct nvmet_tcp_queue *queue)
{
	struct crypto_ahash *tfm;

	tfm = crypto_alloc_ahash("crc32c", 0, CRYPTO_ALG_ASYNC);
	if (IS_ERR(tfm))
		return PTR_ERR(tfm);

	queue->snd_hash = ahash_request_alloc(tfm, GFP_KERNEL);
	if (!queue->snd_hash)
		goto free_tfm;
	ahash_request_set_callback(queue->snd_hash, 0, NULL, NULL);

	queue->rcv_hash = ahash_request_alloc(tfm, GFP_KERNEL);
	if (!queue->rcv_hash)
		goto free_snd_hash;
	ahash_request_set_callback(queue->rcv_hash, 0, NULL, NULL);

	return 0;
free_snd_hash:
	ahash_request_free(queue->snd_hash);
free_tfm:
	crypto_free_ahash(tfm);
	return -ENOMEM;
}

static int nvmet_tcp_handle_icreq(struct nvmet_tcp_queue *queue)
{
	struct nvme_tcp_icreq_pdu *icreq = &queue->pdu.icreq;
	struct nvme_tcp_icresp_pdu *icresp = &queue->pdu.icresp;
	struct msghdr msg = {};
	struct kvec iov;
	int ret;

	if (le32_to_cpu(icreq->hdr.plen) != sizeof(struct nvme_tcp_icreq_pdu)) {
		pr_err("bad nvme-tcp pdu length (%d)\n",
			le32_to_cpu(icreq->hdr.plen));
		nvmet_tcp_fatal_error(queue);
	}

	if (icreq->pfv != NVME_TCP_PFV_1_0) {
		pr_err("queue %d: bad pfv %d\n", queue->idx, icreq->pfv);
		return -EPROTO;
	}

	if (icreq->hpda != 0) {
		pr_err("queue %d: unsupported hpda %d\n", queue->idx,
			icreq->hpda);
		return -EPROTO;
	}

	queue->hdr_digest = !!(icreq->digest & NVME_TCP_HDR_DIGEST_ENABLE);
	queue->data_digest = !!(icreq->digest & NVME_TCP_DATA_DIGEST_ENABLE);
	if (queue->hdr_digest || queue->data_digest) {
		ret = nvmet_tcp_alloc_crypto(queue);
		if (ret)
			return ret;
	}

	memset(icresp, 0, sizeof(*icresp));
	icresp->hdr.type = nvme_tcp_icresp;
	icresp->hdr.hlen = sizeof(*icresp);
	icresp->hdr.pdo = 0;
	icresp->hdr.plen = cpu_to_le32(icresp->hdr.hlen);
	icresp->pfv = cpu_to_le16(NVME_TCP_PFV_1_0);
	icresp->maxdata = cpu_to_le32(0x400000); /* 16M arbitrary limit */
	icresp->cpda = 0;
	if (queue->hdr_digest)
		icresp->digest |= NVME_TCP_HDR_DIGEST_ENABLE;
	if (queue->data_digest)
		icresp->digest |= NVME_TCP_DATA_DIGEST_ENABLE;

	iov.iov_base = icresp;
	iov.iov_len = sizeof(*icresp);
	ret = kernel_sendmsg(queue->sock, &msg, &iov, 1, iov.iov_len);
	if (ret < 0)
		goto free_crypto;

	queue->state = NVMET_TCP_Q_LIVE;
	nvmet_prepare_receive_pdu(queue);
	return 0;
free_crypto:
	if (queue->hdr_digest || queue->data_digest)
		nvmet_tcp_free_crypto(queue);
	return ret;
}

static void nvmet_tcp_handle_req_failure(struct nvmet_tcp_queue *queue,
		struct nvmet_tcp_cmd *cmd, struct nvmet_req *req)
{
	size_t data_len = le32_to_cpu(req->cmd->common.dptr.sgl.length);
	int ret;

	if (!nvme_is_write(cmd->req.cmd) ||
	    data_len > cmd->req.port->inline_data_size) {
		nvmet_prepare_receive_pdu(queue);
		return;
	}

	ret = nvmet_tcp_map_data(cmd);
	if (unlikely(ret)) {
		pr_err("queue %d: failed to map data\n", queue->idx);
		nvmet_tcp_fatal_error(queue);
		return;
	}

	queue->rcv_state = NVMET_TCP_RECV_DATA;
	nvmet_tcp_map_pdu_iovec(cmd);
	cmd->flags |= NVMET_TCP_F_INIT_FAILED;
}

static int nvmet_tcp_handle_h2c_data_pdu(struct nvmet_tcp_queue *queue)
{
	struct nvme_tcp_data_pdu *data = &queue->pdu.data;
	struct nvmet_tcp_cmd *cmd;

	if (likely(queue->nr_cmds))
		cmd = &queue->cmds[data->ttag];
	else
		cmd = &queue->connect;

	if (le32_to_cpu(data->data_offset) != cmd->rbytes_done) {
		pr_err("ttag %u unexpected data offset %u (expected %u)\n",
			data->ttag, le32_to_cpu(data->data_offset),
			cmd->rbytes_done);
		/* FIXME: use path and transport errors */
		nvmet_req_complete(&cmd->req,
			NVME_SC_INVALID_FIELD | NVME_SC_DNR);
		return -EPROTO;
	}

	cmd->pdu_len = le32_to_cpu(data->data_length);
	cmd->pdu_recv = 0;
	nvmet_tcp_map_pdu_iovec(cmd);
	queue->cmd = cmd;
	queue->rcv_state = NVMET_TCP_RECV_DATA;

	return 0;
}

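/*
 * A complete PDU header has been received: dispatch it. During connection
 * establishment only an ICReq is accepted; H2CData PDUs are routed to the
 * command identified by their ttag; anything else starts a new command,
 * which may carry inline data or require an R2T before execution.
 */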
static int nvmet_tcp_done_recv_pdu(struct nvmet_tcp_queue *queue)
{
	struct nvme_tcp_hdr *hdr = &queue->pdu.cmd.hdr;
	struct nvme_command *nvme_cmd = &queue->pdu.cmd.cmd;
	struct nvmet_req *req;
	int ret;

	if (unlikely(queue->state == NVMET_TCP_Q_CONNECTING)) {
		if (hdr->type != nvme_tcp_icreq) {
			pr_err("unexpected pdu type (%d) before icreq\n",
				hdr->type);
			nvmet_tcp_fatal_error(queue);
			return -EPROTO;
		}
		return nvmet_tcp_handle_icreq(queue);
	}

	if (hdr->type == nvme_tcp_h2c_data) {
		ret = nvmet_tcp_handle_h2c_data_pdu(queue);
		if (unlikely(ret))
			return ret;
		return 0;
	}

	queue->cmd = nvmet_tcp_get_cmd(queue);
	if (unlikely(!queue->cmd)) {
		/* This should never happen */
		pr_err("queue %d: out of commands (%d) send_list_len: %d, opcode: %d",
			queue->idx, queue->nr_cmds, queue->send_list_len,
			nvme_cmd->common.opcode);
		nvmet_tcp_fatal_error(queue);
		return -ENOMEM;
	}

	req = &queue->cmd->req;
	memcpy(req->cmd, nvme_cmd, sizeof(*nvme_cmd));

	if (unlikely(!nvmet_req_init(req, &queue->nvme_cq,
			&queue->nvme_sq, &nvmet_tcp_ops))) {
		pr_err("failed cmd %p id %d opcode %d, data_len: %d\n",
			req->cmd, req->cmd->common.command_id,
			req->cmd->common.opcode,
			le32_to_cpu(req->cmd->common.dptr.sgl.length));

		nvmet_tcp_handle_req_failure(queue, queue->cmd, req);
		return 0;
	}

	ret = nvmet_tcp_map_data(queue->cmd);
	if (unlikely(ret)) {
		pr_err("queue %d: failed to map data\n", queue->idx);
		if (nvmet_tcp_has_inline_data(queue->cmd))
			nvmet_tcp_fatal_error(queue);
		else
			nvmet_req_complete(req, ret);
		ret = -EAGAIN;
		goto out;
	}

	if (nvmet_tcp_need_data_in(queue->cmd)) {
		if (nvmet_tcp_has_inline_data(queue->cmd)) {
			queue->rcv_state = NVMET_TCP_RECV_DATA;
			nvmet_tcp_map_pdu_iovec(queue->cmd);
			return 0;
		}
		/* send back R2T */
		nvmet_tcp_queue_response(&queue->cmd->req);
		goto out;
	}

	queue->cmd->req.execute(&queue->cmd->req);
out:
	nvmet_prepare_receive_pdu(queue);
	return ret;
}

static const u8 nvme_tcp_pdu_sizes[] = {
	[nvme_tcp_icreq]	= sizeof(struct nvme_tcp_icreq_pdu),
	[nvme_tcp_cmd]		= sizeof(struct nvme_tcp_cmd_pdu),
	[nvme_tcp_h2c_data]	= sizeof(struct nvme_tcp_data_pdu),
};

static inline u8 nvmet_tcp_pdu_size(u8 type)
{
	size_t idx = type;

	return (idx < ARRAY_SIZE(nvme_tcp_pdu_sizes) &&
		nvme_tcp_pdu_sizes[idx]) ?
			nvme_tcp_pdu_sizes[idx] : 0;
}

static inline bool nvmet_tcp_pdu_valid(u8 type)
{
	switch (type) {
	case nvme_tcp_icreq:
	case nvme_tcp_cmd:
	case nvme_tcp_h2c_data:
		/* fallthru */
		return true;
	}

	return false;
}

static int nvmet_tcp_try_recv_pdu(struct nvmet_tcp_queue *queue)
{
	struct nvme_tcp_hdr *hdr = &queue->pdu.cmd.hdr;
	int len;
	struct kvec iov;
	struct msghdr msg = { .msg_flags = MSG_DONTWAIT };

recv:
	iov.iov_base = (void *)&queue->pdu + queue->offset;
	iov.iov_len = queue->left;
	len = kernel_recvmsg(queue->sock, &msg, &iov, 1,
			iov.iov_len, msg.msg_flags);
	if (unlikely(len < 0))
		return len;

	queue->offset += len;
	queue->left -= len;
	if (queue->left)
		return -EAGAIN;

	if (queue->offset == sizeof(struct nvme_tcp_hdr)) {
		u8 hdgst = nvmet_tcp_hdgst_len(queue);

		if (unlikely(!nvmet_tcp_pdu_valid(hdr->type))) {
			pr_err("unexpected pdu type %d\n", hdr->type);
			nvmet_tcp_fatal_error(queue);
			return -EIO;
		}

		if (unlikely(hdr->hlen != nvmet_tcp_pdu_size(hdr->type))) {
			pr_err("pdu %d bad hlen %d\n", hdr->type, hdr->hlen);
			return -EIO;
		}

		queue->left = hdr->hlen - queue->offset + hdgst;
		goto recv;
	}

	if (queue->hdr_digest &&
	    nvmet_tcp_verify_hdgst(queue, &queue->pdu, hdr->hlen)) {
		nvmet_tcp_fatal_error(queue); /* fatal */
		return -EPROTO;
	}

	if (queue->data_digest &&
	    nvmet_tcp_check_ddgst(queue, &queue->pdu)) {
		nvmet_tcp_fatal_error(queue); /* fatal */
		return -EPROTO;
	}

	return nvmet_tcp_done_recv_pdu(queue);
}

static void nvmet_tcp_prep_recv_ddgst(struct nvmet_tcp_cmd *cmd)
{
	struct nvmet_tcp_queue *queue = cmd->queue;

	nvmet_tcp_recv_ddgst(queue->rcv_hash, cmd);
	queue->offset = 0;
	queue->left = NVME_TCP_DIGEST_LENGTH;
	queue->rcv_state = NVMET_TCP_RECV_DDGST;
}

static int nvmet_tcp_try_recv_data(struct nvmet_tcp_queue *queue)
{
	struct nvmet_tcp_cmd *cmd = queue->cmd;
	int ret;

	while (msg_data_left(&cmd->recv_msg)) {
		ret = sock_recvmsg(cmd->queue->sock, &cmd->recv_msg,
			cmd->recv_msg.msg_flags);
		if (ret <= 0)
			return ret;

		cmd->pdu_recv += ret;
		cmd->rbytes_done += ret;
	}

	nvmet_tcp_unmap_pdu_iovec(cmd);
	if (queue->data_digest) {
		nvmet_tcp_prep_recv_ddgst(cmd);
		return 0;
	}

	if (cmd->rbytes_done == cmd->req.transfer_len)
		nvmet_tcp_execute_request(cmd);

	nvmet_prepare_receive_pdu(queue);
	return 0;
}

static int nvmet_tcp_try_recv_ddgst(struct nvmet_tcp_queue *queue)
{
	struct nvmet_tcp_cmd *cmd = queue->cmd;
	int ret;
	struct msghdr msg = { .msg_flags = MSG_DONTWAIT };
	struct kvec iov = {
		.iov_base = (void *)&cmd->recv_ddgst + queue->offset,
		.iov_len = queue->left
	};

	ret = kernel_recvmsg(queue->sock, &msg, &iov, 1,
			iov.iov_len, msg.msg_flags);
	if (unlikely(ret < 0))
		return ret;

	queue->offset += ret;
	queue->left -= ret;
	if (queue->left)
		return -EAGAIN;

	if (queue->data_digest && cmd->exp_ddgst != cmd->recv_ddgst) {
		pr_err("queue %d: cmd %d pdu (%d) data digest error: recv %#x expected %#x\n",
			queue->idx, cmd->req.cmd->common.command_id,
			queue->pdu.cmd.hdr.type, le32_to_cpu(cmd->recv_ddgst),
			le32_to_cpu(cmd->exp_ddgst));
		nvmet_tcp_finish_cmd(cmd);
		nvmet_tcp_fatal_error(queue);
		ret = -EPROTO;
		goto out;
	}

	if (cmd->rbytes_done == cmd->req.transfer_len)
		nvmet_tcp_execute_request(cmd);

	ret = 0;
out:
	nvmet_prepare_receive_pdu(queue);
	return ret;
}

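/*
 * Receive state machine: a PDU header is read first, then any command data
 * and finally the data digest, each step advancing queue->rcv_state.
 */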
static int nvmet_tcp_try_recv_one(struct nvmet_tcp_queue *queue)
{
	int result = 0;

	if (unlikely(queue->rcv_state == NVMET_TCP_RECV_ERR))
		return 0;

	if (queue->rcv_state == NVMET_TCP_RECV_PDU) {
		result = nvmet_tcp_try_recv_pdu(queue);
		if (result != 0)
			goto done_recv;
	}

	if (queue->rcv_state == NVMET_TCP_RECV_DATA) {
		result = nvmet_tcp_try_recv_data(queue);
		if (result != 0)
			goto done_recv;
	}

	if (queue->rcv_state == NVMET_TCP_RECV_DDGST) {
		result = nvmet_tcp_try_recv_ddgst(queue);
		if (result != 0)
			goto done_recv;
	}

done_recv:
	if (result < 0) {
		if (result == -EAGAIN)
			return 0;
		return result;
	}
	return 1;
}

static int nvmet_tcp_try_recv(struct nvmet_tcp_queue *queue,
		int budget, int *recvs)
{
	int i, ret = 0;

	for (i = 0; i < budget; i++) {
		ret = nvmet_tcp_try_recv_one(queue);
		if (unlikely(ret < 0)) {
			nvmet_tcp_socket_error(queue, ret);
			goto done;
		} else if (ret == 0) {
			break;
		}
		(*recvs)++;
	}
done:
	return ret;
}

static void nvmet_tcp_schedule_release_queue(struct nvmet_tcp_queue *queue)
{
	spin_lock(&queue->state_lock);
	if (queue->state != NVMET_TCP_Q_DISCONNECTING) {
		queue->state = NVMET_TCP_Q_DISCONNECTING;
		schedule_work(&queue->release_work);
	}
	spin_unlock(&queue->state_lock);
}

static inline void nvmet_tcp_arm_queue_deadline(struct nvmet_tcp_queue *queue)
{
	queue->poll_end = jiffies + usecs_to_jiffies(idle_poll_period_usecs);
}

static bool nvmet_tcp_check_queue_deadline(struct nvmet_tcp_queue *queue,
		int ops)
{
	if (!idle_poll_period_usecs)
		return false;

	if (ops)
		nvmet_tcp_arm_queue_deadline(queue);

	return !time_after(jiffies, queue->poll_end);
}

static void nvmet_tcp_io_work(struct work_struct *w)
{
	struct nvmet_tcp_queue *queue =
		container_of(w, struct nvmet_tcp_queue, io_work);
	bool pending;
	int ret, ops = 0;

	do {
		pending = false;

		ret = nvmet_tcp_try_recv(queue, NVMET_TCP_RECV_BUDGET, &ops);
		if (ret > 0)
			pending = true;
		else if (ret < 0)
			return;

		ret = nvmet_tcp_try_send(queue, NVMET_TCP_SEND_BUDGET, &ops);
		if (ret > 0)
			pending = true;
		else if (ret < 0)
			return;

	} while (pending && ops < NVMET_TCP_IO_WORK_BUDGET);

	/*
	 * Requeue the worker if idle deadline period is in progress or any
	 * ops activity was recorded during the do-while loop above.
	 */
	if (nvmet_tcp_check_queue_deadline(queue, ops) || pending)
		queue_work_on(queue_cpu(queue), nvmet_tcp_wq, &queue->io_work);
}

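/*
 * Per-command resources: all four PDU headers (command, response, C2H data
 * and R2T) are carved out of the queue's page_frag cache, each with room
 * for an optional header digest.
 */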
static int nvmet_tcp_alloc_cmd(struct nvmet_tcp_queue *queue,
		struct nvmet_tcp_cmd *c)
{
	u8 hdgst = nvmet_tcp_hdgst_len(queue);

	c->queue = queue;
	c->req.port = queue->port->nport;

	c->cmd_pdu = page_frag_alloc(&queue->pf_cache,
			sizeof(*c->cmd_pdu) + hdgst, GFP_KERNEL | __GFP_ZERO);
	if (!c->cmd_pdu)
		return -ENOMEM;
	c->req.cmd = &c->cmd_pdu->cmd;

	c->rsp_pdu = page_frag_alloc(&queue->pf_cache,
			sizeof(*c->rsp_pdu) + hdgst, GFP_KERNEL | __GFP_ZERO);
	if (!c->rsp_pdu)
		goto out_free_cmd;
	c->req.cqe = &c->rsp_pdu->cqe;

	c->data_pdu = page_frag_alloc(&queue->pf_cache,
			sizeof(*c->data_pdu) + hdgst, GFP_KERNEL | __GFP_ZERO);
	if (!c->data_pdu)
		goto out_free_rsp;

	c->r2t_pdu = page_frag_alloc(&queue->pf_cache,
			sizeof(*c->r2t_pdu) + hdgst, GFP_KERNEL | __GFP_ZERO);
	if (!c->r2t_pdu)
		goto out_free_data;

	c->recv_msg.msg_flags = MSG_DONTWAIT | MSG_NOSIGNAL;

	list_add_tail(&c->entry, &queue->free_list);

	return 0;
out_free_data:
	page_frag_free(c->data_pdu);
out_free_rsp:
	page_frag_free(c->rsp_pdu);
out_free_cmd:
	page_frag_free(c->cmd_pdu);
	return -ENOMEM;
}

static void nvmet_tcp_free_cmd(struct nvmet_tcp_cmd *c)
{
	page_frag_free(c->r2t_pdu);
	page_frag_free(c->data_pdu);
	page_frag_free(c->rsp_pdu);
	page_frag_free(c->cmd_pdu);
}

static int nvmet_tcp_alloc_cmds(struct nvmet_tcp_queue *queue)
{
	struct nvmet_tcp_cmd *cmds;
	int i, ret = -EINVAL, nr_cmds = queue->nr_cmds;

	cmds = kcalloc(nr_cmds, sizeof(struct nvmet_tcp_cmd), GFP_KERNEL);
	if (!cmds)
		goto out;

	for (i = 0; i < nr_cmds; i++) {
		ret = nvmet_tcp_alloc_cmd(queue, cmds + i);
		if (ret)
			goto out_free;
	}

	queue->cmds = cmds;

	return 0;
out_free:
	while (--i >= 0)
		nvmet_tcp_free_cmd(cmds + i);
	kfree(cmds);
out:
	return ret;
}

static void nvmet_tcp_free_cmds(struct nvmet_tcp_queue *queue)
{
	struct nvmet_tcp_cmd *cmds = queue->cmds;
	int i;

	for (i = 0; i < queue->nr_cmds; i++)
		nvmet_tcp_free_cmd(cmds + i);

	nvmet_tcp_free_cmd(&queue->connect);
	kfree(cmds);
}

static void nvmet_tcp_restore_socket_callbacks(struct nvmet_tcp_queue *queue)
{
	struct socket *sock = queue->sock;

	write_lock_bh(&sock->sk->sk_callback_lock);
	sock->sk->sk_data_ready = queue->data_ready;
	sock->sk->sk_state_change = queue->state_change;
	sock->sk->sk_write_space = queue->write_space;
	sock->sk->sk_user_data = NULL;
	write_unlock_bh(&sock->sk->sk_callback_lock);
}

static void nvmet_tcp_finish_cmd(struct nvmet_tcp_cmd *cmd)
{
	nvmet_req_uninit(&cmd->req);
	nvmet_tcp_unmap_pdu_iovec(cmd);
	nvmet_tcp_free_cmd_buffers(cmd);
}

static void nvmet_tcp_uninit_data_in_cmds(struct nvmet_tcp_queue *queue)
{
	struct nvmet_tcp_cmd *cmd = queue->cmds;
	int i;

	for (i = 0; i < queue->nr_cmds; i++, cmd++) {
		if (nvmet_tcp_need_data_in(cmd))
			nvmet_req_uninit(&cmd->req);

		nvmet_tcp_unmap_pdu_iovec(cmd);
		nvmet_tcp_free_cmd_buffers(cmd);
	}

	if (!queue->nr_cmds && nvmet_tcp_need_data_in(&queue->connect)) {
		/* failed in connect */
		nvmet_tcp_finish_cmd(&queue->connect);
	}
}

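/*
 * Queue teardown: restore the socket callbacks so no new work gets
 * scheduled, stop io_work, fail any commands still waiting for host data,
 * and only then destroy the submission queue and release the socket.
 */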
static void nvmet_tcp_release_queue_work(struct work_struct *w)
{
	struct page *page;
	struct nvmet_tcp_queue *queue =
		container_of(w, struct nvmet_tcp_queue, release_work);

	mutex_lock(&nvmet_tcp_queue_mutex);
	list_del_init(&queue->queue_list);
	mutex_unlock(&nvmet_tcp_queue_mutex);

	nvmet_tcp_restore_socket_callbacks(queue);
	cancel_work_sync(&queue->io_work);
	/* stop accepting incoming data */
	queue->rcv_state = NVMET_TCP_RECV_ERR;

	nvmet_tcp_uninit_data_in_cmds(queue);
	nvmet_sq_destroy(&queue->nvme_sq);
	cancel_work_sync(&queue->io_work);
	sock_release(queue->sock);
	nvmet_tcp_free_cmds(queue);
	if (queue->hdr_digest || queue->data_digest)
		nvmet_tcp_free_crypto(queue);
	ida_simple_remove(&nvmet_tcp_queue_ida, queue->idx);

	page = virt_to_head_page(queue->pf_cache.va);
	__page_frag_cache_drain(page, queue->pf_cache.pagecnt_bias);
	kfree(queue);
}

static void nvmet_tcp_data_ready(struct sock *sk)
{
	struct nvmet_tcp_queue *queue;

	read_lock_bh(&sk->sk_callback_lock);
	queue = sk->sk_user_data;
	if (likely(queue))
		queue_work_on(queue_cpu(queue), nvmet_tcp_wq, &queue->io_work);
	read_unlock_bh(&sk->sk_callback_lock);
}

static void nvmet_tcp_write_space(struct sock *sk)
{
	struct nvmet_tcp_queue *queue;

	read_lock_bh(&sk->sk_callback_lock);
	queue = sk->sk_user_data;
	if (unlikely(!queue))
		goto out;

	if (unlikely(queue->state == NVMET_TCP_Q_CONNECTING)) {
		queue->write_space(sk);
		goto out;
	}

	if (sk_stream_is_writeable(sk)) {
		clear_bit(SOCK_NOSPACE, &sk->sk_socket->flags);
		queue_work_on(queue_cpu(queue), nvmet_tcp_wq, &queue->io_work);
	}
out:
	read_unlock_bh(&sk->sk_callback_lock);
}

static void nvmet_tcp_state_change(struct sock *sk)
{
	struct nvmet_tcp_queue *queue;

	read_lock_bh(&sk->sk_callback_lock);
	queue = sk->sk_user_data;
	if (!queue)
		goto done;

	switch (sk->sk_state) {
	case TCP_FIN_WAIT1:
	case TCP_CLOSE_WAIT:
	case TCP_CLOSE:
		/* FALLTHRU */
		nvmet_tcp_schedule_release_queue(queue);
		break;
	default:
		pr_warn("queue %d unhandled state %d\n",
			queue->idx, sk->sk_state);
	}
done:
	read_unlock_bh(&sk->sk_callback_lock);
}

static int nvmet_tcp_set_queue_sock(struct nvmet_tcp_queue *queue)
{
	struct socket *sock = queue->sock;
	struct inet_sock *inet = inet_sk(sock->sk);
	int ret;

	ret = kernel_getsockname(sock,
		(struct sockaddr *)&queue->sockaddr);
	if (ret < 0)
		return ret;

	ret = kernel_getpeername(sock,
		(struct sockaddr *)&queue->sockaddr_peer);
	if (ret < 0)
		return ret;

	/*
	 * Cleanup whatever is sitting in the TCP transmit queue on socket
	 * close. This is done to prevent stale data from being sent should
	 * the network connection be restored before TCP times out.
	 */
	sock_no_linger(sock->sk);

	if (so_priority > 0)
		sock_set_priority(sock->sk, so_priority);

	/* Set socket type of service */
	if (inet->rcv_tos > 0)
		ip_sock_set_tos(sock->sk, inet->rcv_tos);

	ret = 0;
	write_lock_bh(&sock->sk->sk_callback_lock);
	if (sock->sk->sk_state != TCP_ESTABLISHED) {
		/*
		 * If the socket is already closing, don't even start
		 * consuming it
		 */
		ret = -ENOTCONN;
	} else {
		sock->sk->sk_user_data = queue;
		queue->data_ready = sock->sk->sk_data_ready;
		sock->sk->sk_data_ready = nvmet_tcp_data_ready;
		queue->state_change = sock->sk->sk_state_change;
		sock->sk->sk_state_change = nvmet_tcp_state_change;
		queue->write_space = sock->sk->sk_write_space;
		sock->sk->sk_write_space = nvmet_tcp_write_space;
		if (idle_poll_period_usecs)
			nvmet_tcp_arm_queue_deadline(queue);
		queue_work_on(queue_cpu(queue), nvmet_tcp_wq, &queue->io_work);
	}
	write_unlock_bh(&sock->sk->sk_callback_lock);

	return ret;
}

static int nvmet_tcp_alloc_queue(struct nvmet_tcp_port *port,
		struct socket *newsock)
{
	struct nvmet_tcp_queue *queue;
	int ret;

	queue = kzalloc(sizeof(*queue), GFP_KERNEL);
	if (!queue)
		return -ENOMEM;

	INIT_WORK(&queue->release_work, nvmet_tcp_release_queue_work);
	INIT_WORK(&queue->io_work, nvmet_tcp_io_work);
	queue->sock = newsock;
	queue->port = port;
	queue->nr_cmds = 0;
	spin_lock_init(&queue->state_lock);
	queue->state = NVMET_TCP_Q_CONNECTING;
	INIT_LIST_HEAD(&queue->free_list);
	init_llist_head(&queue->resp_list);
	INIT_LIST_HEAD(&queue->resp_send_list);

	queue->idx = ida_simple_get(&nvmet_tcp_queue_ida, 0, 0, GFP_KERNEL);
	if (queue->idx < 0) {
		ret = queue->idx;
		goto out_free_queue;
	}

	ret = nvmet_tcp_alloc_cmd(queue, &queue->connect);
	if (ret)
		goto out_ida_remove;

	ret = nvmet_sq_init(&queue->nvme_sq);
	if (ret)
		goto out_free_connect;

	nvmet_prepare_receive_pdu(queue);

	mutex_lock(&nvmet_tcp_queue_mutex);
	list_add_tail(&queue->queue_list, &nvmet_tcp_queue_list);
	mutex_unlock(&nvmet_tcp_queue_mutex);

	ret = nvmet_tcp_set_queue_sock(queue);
	if (ret)
		goto out_destroy_sq;

	return 0;
out_destroy_sq:
	mutex_lock(&nvmet_tcp_queue_mutex);
	list_del_init(&queue->queue_list);
	mutex_unlock(&nvmet_tcp_queue_mutex);
	nvmet_sq_destroy(&queue->nvme_sq);
out_free_connect:
	nvmet_tcp_free_cmd(&queue->connect);
out_ida_remove:
	ida_simple_remove(&nvmet_tcp_queue_ida, queue->idx);
out_free_queue:
	kfree(queue);
	return ret;
}

static void nvmet_tcp_accept_work(struct work_struct *w)
{
	struct nvmet_tcp_port *port =
		container_of(w, struct nvmet_tcp_port, accept_work);
	struct socket *newsock;
	int ret;

	while (true) {
		ret = kernel_accept(port->sock, &newsock, O_NONBLOCK);
		if (ret < 0) {
			if (ret != -EAGAIN)
				pr_warn("failed to accept err=%d\n", ret);
			return;
		}
		ret = nvmet_tcp_alloc_queue(port, newsock);
		if (ret) {
			pr_err("failed to allocate queue\n");
			sock_release(newsock);
		}
	}
}

static void nvmet_tcp_listen_data_ready(struct sock *sk)
{
	struct nvmet_tcp_port *port;

	read_lock_bh(&sk->sk_callback_lock);
	port = sk->sk_user_data;
	if (!port)
		goto out;

	if (sk->sk_state == TCP_LISTEN)
		schedule_work(&port->accept_work);
out:
	read_unlock_bh(&sk->sk_callback_lock);
}

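/*
 * Port setup: create a listening socket for the configured address, hook
 * its data_ready callback to schedule accept_work, and start listening.
 */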
static int nvmet_tcp_add_port(struct nvmet_port *nport)
{
	struct nvmet_tcp_port *port;
	__kernel_sa_family_t af;
	int ret;

	port = kzalloc(sizeof(*port), GFP_KERNEL);
	if (!port)
		return -ENOMEM;

	switch (nport->disc_addr.adrfam) {
	case NVMF_ADDR_FAMILY_IP4:
		af = AF_INET;
		break;
	case NVMF_ADDR_FAMILY_IP6:
		af = AF_INET6;
		break;
	default:
		pr_err("address family %d not supported\n",
			nport->disc_addr.adrfam);
		ret = -EINVAL;
		goto err_port;
	}

	ret = inet_pton_with_scope(&init_net, af, nport->disc_addr.traddr,
			nport->disc_addr.trsvcid, &port->addr);
	if (ret) {
		pr_err("malformed ip/port passed: %s:%s\n",
			nport->disc_addr.traddr, nport->disc_addr.trsvcid);
		goto err_port;
	}

	port->nport = nport;
	INIT_WORK(&port->accept_work, nvmet_tcp_accept_work);
	if (port->nport->inline_data_size < 0)
		port->nport->inline_data_size = NVMET_TCP_DEF_INLINE_DATA_SIZE;

	ret = sock_create(port->addr.ss_family, SOCK_STREAM,
				IPPROTO_TCP, &port->sock);
	if (ret) {
		pr_err("failed to create a socket\n");
		goto err_port;
	}

	port->sock->sk->sk_user_data = port;
	port->data_ready = port->sock->sk->sk_data_ready;
	port->sock->sk->sk_data_ready = nvmet_tcp_listen_data_ready;
	sock_set_reuseaddr(port->sock->sk);
	tcp_sock_set_nodelay(port->sock->sk);
	if (so_priority > 0)
		sock_set_priority(port->sock->sk, so_priority);

	ret = kernel_bind(port->sock, (struct sockaddr *)&port->addr,
			sizeof(port->addr));
	if (ret) {
		pr_err("failed to bind port socket %d\n", ret);
		goto err_sock;
	}

	ret = kernel_listen(port->sock, 128);
	if (ret) {
		pr_err("failed to listen %d on port sock\n", ret);
		goto err_sock;
	}

	nport->priv = port;
	pr_info("enabling port %d (%pISpc)\n",
		le16_to_cpu(nport->disc_addr.portid), &port->addr);

	return 0;

err_sock:
	sock_release(port->sock);
err_port:
	kfree(port);
	return ret;
}

static void nvmet_tcp_destroy_port_queues(struct nvmet_tcp_port *port)
{
	struct nvmet_tcp_queue *queue;

	mutex_lock(&nvmet_tcp_queue_mutex);
	list_for_each_entry(queue, &nvmet_tcp_queue_list, queue_list)
		if (queue->port == port)
			kernel_sock_shutdown(queue->sock, SHUT_RDWR);
	mutex_unlock(&nvmet_tcp_queue_mutex);
}

static void nvmet_tcp_remove_port(struct nvmet_port *nport)
{
	struct nvmet_tcp_port *port = nport->priv;

	write_lock_bh(&port->sock->sk->sk_callback_lock);
	port->sock->sk->sk_data_ready = port->data_ready;
	port->sock->sk->sk_user_data = NULL;
	write_unlock_bh(&port->sock->sk->sk_callback_lock);
	cancel_work_sync(&port->accept_work);
	/*
	 * Destroy the remaining queues, which do not belong to any
	 * controller yet.
	 */
	nvmet_tcp_destroy_port_queues(port);

	sock_release(port->sock);
	kfree(port);
}

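/*
 * Controller removal: shut down every socket that belongs to the
 * controller; the resulting state_change callbacks schedule the per-queue
 * release work.
 */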
static void nvmet_tcp_delete_ctrl(struct nvmet_ctrl *ctrl)
{
	struct nvmet_tcp_queue *queue;

	mutex_lock(&nvmet_tcp_queue_mutex);
	list_for_each_entry(queue, &nvmet_tcp_queue_list, queue_list)
		if (queue->nvme_sq.ctrl == ctrl)
			kernel_sock_shutdown(queue->sock, SHUT_RDWR);
	mutex_unlock(&nvmet_tcp_queue_mutex);
}

static u16 nvmet_tcp_install_queue(struct nvmet_sq *sq)
{
	struct nvmet_tcp_queue *queue =
		container_of(sq, struct nvmet_tcp_queue, nvme_sq);

	if (sq->qid == 0) {
		/* Let inflight controller teardown complete */
		flush_scheduled_work();
	}

	queue->nr_cmds = sq->size * 2;
	if (nvmet_tcp_alloc_cmds(queue))
		return NVME_SC_INTERNAL;
	return 0;
}

static void nvmet_tcp_disc_port_addr(struct nvmet_req *req,
		struct nvmet_port *nport, char *traddr)
{
	struct nvmet_tcp_port *port = nport->priv;

	if (inet_addr_is_any((struct sockaddr *)&port->addr)) {
		struct nvmet_tcp_cmd *cmd =
			container_of(req, struct nvmet_tcp_cmd, req);
		struct nvmet_tcp_queue *queue = cmd->queue;

		sprintf(traddr, "%pISc", (struct sockaddr *)&queue->sockaddr);
	} else {
		memcpy(traddr, nport->disc_addr.traddr, NVMF_TRADDR_SIZE);
	}
}

static const struct nvmet_fabrics_ops nvmet_tcp_ops = {
	.owner			= THIS_MODULE,
	.type			= NVMF_TRTYPE_TCP,
	.msdbd			= 1,
	.add_port		= nvmet_tcp_add_port,
	.remove_port		= nvmet_tcp_remove_port,
	.queue_response		= nvmet_tcp_queue_response,
	.delete_ctrl		= nvmet_tcp_delete_ctrl,
	.install_queue		= nvmet_tcp_install_queue,
	.disc_traddr		= nvmet_tcp_disc_port_addr,
};

static int __init nvmet_tcp_init(void)
{
	int ret;

	nvmet_tcp_wq = alloc_workqueue("nvmet_tcp_wq", WQ_HIGHPRI, 0);
	if (!nvmet_tcp_wq)
		return -ENOMEM;

	ret = nvmet_register_transport(&nvmet_tcp_ops);
	if (ret)
		goto err;

	return 0;
err:
	destroy_workqueue(nvmet_tcp_wq);
	return ret;
}

static void __exit nvmet_tcp_exit(void)
{
	struct nvmet_tcp_queue *queue;

	nvmet_unregister_transport(&nvmet_tcp_ops);

	flush_scheduled_work();
	mutex_lock(&nvmet_tcp_queue_mutex);
	list_for_each_entry(queue, &nvmet_tcp_queue_list, queue_list)
		kernel_sock_shutdown(queue->sock, SHUT_RDWR);
	mutex_unlock(&nvmet_tcp_queue_mutex);
	flush_scheduled_work();

	destroy_workqueue(nvmet_tcp_wq);
}

module_init(nvmet_tcp_init);
module_exit(nvmet_tcp_exit);

MODULE_LICENSE("GPL v2");
MODULE_ALIAS("nvmet-transport-3"); /* 3 == NVMF_TRTYPE_TCP */