// SPDX-License-Identifier: GPL-2.0
/*
 * NVMe over Fabrics TCP target.
 * Copyright (c) 2018 Lightbits Labs. All rights reserved.
 */
#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
#include <linux/module.h>
#include <linux/init.h>
#include <linux/slab.h>
#include <linux/err.h>
#include <linux/nvme-tcp.h>
#include <net/sock.h>
#include <net/tcp.h>
#include <linux/inet.h>
#include <linux/llist.h>
#include <crypto/hash.h>

#include "nvmet.h"

#define NVMET_TCP_DEF_INLINE_DATA_SIZE	(4 * PAGE_SIZE)

/* Define the socket priority to use for connections where it is desirable
 * that the NIC consider performing optimized packet processing or filtering.
 * A non-zero value is sufficient to indicate general consideration of any
 * possible optimization. Making it a module param allows for alternative
 * values that may be unique for some NIC implementations.
 */
static int so_priority;
module_param(so_priority, int, 0644);
MODULE_PARM_DESC(so_priority, "nvmet tcp socket optimize priority");

#define NVMET_TCP_RECV_BUDGET		8
#define NVMET_TCP_SEND_BUDGET		8
#define NVMET_TCP_IO_WORK_BUDGET	64

enum nvmet_tcp_send_state {
	NVMET_TCP_SEND_DATA_PDU,
	NVMET_TCP_SEND_DATA,
	NVMET_TCP_SEND_R2T,
	NVMET_TCP_SEND_DDGST,
	NVMET_TCP_SEND_RESPONSE
};

enum nvmet_tcp_recv_state {
	NVMET_TCP_RECV_PDU,
	NVMET_TCP_RECV_DATA,
	NVMET_TCP_RECV_DDGST,
	NVMET_TCP_RECV_ERR,
};

enum {
	NVMET_TCP_F_INIT_FAILED = (1 << 0),
};

struct nvmet_tcp_cmd {
	struct nvmet_tcp_queue		*queue;
	struct nvmet_req		req;

	struct nvme_tcp_cmd_pdu		*cmd_pdu;
	struct nvme_tcp_rsp_pdu		*rsp_pdu;
	struct nvme_tcp_data_pdu	*data_pdu;
	struct nvme_tcp_r2t_pdu		*r2t_pdu;

	u32				rbytes_done;
	u32				wbytes_done;

	u32				pdu_len;
	u32				pdu_recv;
	int				sg_idx;
	int				nr_mapped;
	struct msghdr			recv_msg;
	struct kvec			*iov;
	u32				flags;

	struct list_head		entry;
	struct llist_node		lentry;

	/* send state */
	u32				offset;
	struct scatterlist		*cur_sg;
	enum nvmet_tcp_send_state	state;

	__le32				exp_ddgst;
	__le32				recv_ddgst;
};

enum nvmet_tcp_queue_state {
	NVMET_TCP_Q_CONNECTING,
	NVMET_TCP_Q_LIVE,
	NVMET_TCP_Q_DISCONNECTING,
};

struct nvmet_tcp_queue {
	struct socket		*sock;
	struct nvmet_tcp_port	*port;
	struct work_struct	io_work;
	int			cpu;
	struct nvmet_cq		nvme_cq;
	struct nvmet_sq		nvme_sq;

	/* send state */
	struct nvmet_tcp_cmd	*cmds;
	unsigned int		nr_cmds;
	struct list_head	free_list;
	struct llist_head	resp_list;
	struct list_head	resp_send_list;
	int			send_list_len;
	struct nvmet_tcp_cmd	*snd_cmd;

	/* recv state */
	int			offset;
	int			left;
	enum nvmet_tcp_recv_state rcv_state;
	struct nvmet_tcp_cmd	*cmd;
	union nvme_tcp_pdu	pdu;

	/* digest state */
	bool			hdr_digest;
	bool			data_digest;
	struct ahash_request	*snd_hash;
	struct ahash_request	*rcv_hash;

	spinlock_t		state_lock;
	enum nvmet_tcp_queue_state state;

	struct sockaddr_storage	sockaddr;
	struct sockaddr_storage	sockaddr_peer;
	struct work_struct	release_work;

	int			idx;
	struct list_head	queue_list;

	struct nvmet_tcp_cmd	connect;

	struct page_frag_cache	pf_cache;

	void (*data_ready)(struct sock *);
	void (*state_change)(struct sock *);
	void (*write_space)(struct sock *);
};

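/*
 * Per-listening-port state: the listening socket and its saved data_ready
 * callback, the accept work that drains incoming connections, and last_cpu
 * used to round-robin new queues across online CPUs.
 */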
struct nvmet_tcp_port {
	struct socket		*sock;
	struct work_struct	accept_work;
	struct nvmet_port	*nport;
	struct sockaddr_storage addr;
	int			last_cpu;
	void (*data_ready)(struct sock *);
};

static DEFINE_IDA(nvmet_tcp_queue_ida);
static LIST_HEAD(nvmet_tcp_queue_list);
static DEFINE_MUTEX(nvmet_tcp_queue_mutex);

static struct workqueue_struct *nvmet_tcp_wq;
static const struct nvmet_fabrics_ops nvmet_tcp_ops;
static void nvmet_tcp_free_cmd(struct nvmet_tcp_cmd *c);
static void nvmet_tcp_finish_cmd(struct nvmet_tcp_cmd *cmd);

static inline u16 nvmet_tcp_cmd_tag(struct nvmet_tcp_queue *queue,
		struct nvmet_tcp_cmd *cmd)
{
	return cmd - queue->cmds;
}

static inline bool nvmet_tcp_has_data_in(struct nvmet_tcp_cmd *cmd)
{
	return nvme_is_write(cmd->req.cmd) &&
		cmd->rbytes_done < cmd->req.transfer_len;
}

static inline bool nvmet_tcp_need_data_in(struct nvmet_tcp_cmd *cmd)
{
	return nvmet_tcp_has_data_in(cmd) && !cmd->req.cqe->status;
}

static inline bool nvmet_tcp_need_data_out(struct nvmet_tcp_cmd *cmd)
{
	return !nvme_is_write(cmd->req.cmd) &&
		cmd->req.transfer_len > 0 &&
		!cmd->req.cqe->status;
}

static inline bool nvmet_tcp_has_inline_data(struct nvmet_tcp_cmd *cmd)
{
	return nvme_is_write(cmd->req.cmd) && cmd->pdu_len &&
		!cmd->rbytes_done;
}

static inline struct nvmet_tcp_cmd *
nvmet_tcp_get_cmd(struct nvmet_tcp_queue *queue)
{
	struct nvmet_tcp_cmd *cmd;

	cmd = list_first_entry_or_null(&queue->free_list,
				struct nvmet_tcp_cmd, entry);
	if (!cmd)
		return NULL;
	list_del_init(&cmd->entry);

	cmd->rbytes_done = cmd->wbytes_done = 0;
	cmd->pdu_len = 0;
	cmd->pdu_recv = 0;
	cmd->iov = NULL;
	cmd->flags = 0;
	return cmd;
}

static inline void nvmet_tcp_put_cmd(struct nvmet_tcp_cmd *cmd)
{
	if (unlikely(cmd == &cmd->queue->connect))
		return;

	list_add_tail(&cmd->entry, &cmd->queue->free_list);
}

static inline u8 nvmet_tcp_hdgst_len(struct nvmet_tcp_queue *queue)
{
	return queue->hdr_digest ? NVME_TCP_DIGEST_LENGTH : 0;
}

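/*
 * Header and data digests are both CRC32C (see nvmet_tcp_alloc_crypto());
 * the helpers below return the extra on-the-wire length when a digest was
 * negotiated and compute/verify the digests with the per-queue ahash
 * requests.
 */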
static inline u8 nvmet_tcp_ddgst_len(struct nvmet_tcp_queue *queue)
{
	return queue->data_digest ? NVME_TCP_DIGEST_LENGTH : 0;
}

static inline void nvmet_tcp_hdgst(struct ahash_request *hash,
		void *pdu, size_t len)
{
	struct scatterlist sg;

	sg_init_one(&sg, pdu, len);
	ahash_request_set_crypt(hash, &sg, pdu + len, len);
	crypto_ahash_digest(hash);
}

static int nvmet_tcp_verify_hdgst(struct nvmet_tcp_queue *queue,
	void *pdu, size_t len)
{
	struct nvme_tcp_hdr *hdr = pdu;
	__le32 recv_digest;
	__le32 exp_digest;

	if (unlikely(!(hdr->flags & NVME_TCP_F_HDGST))) {
		pr_err("queue %d: header digest enabled but no header digest\n",
			queue->idx);
		return -EPROTO;
	}

	recv_digest = *(__le32 *)(pdu + hdr->hlen);
	nvmet_tcp_hdgst(queue->rcv_hash, pdu, len);
	exp_digest = *(__le32 *)(pdu + hdr->hlen);
	if (recv_digest != exp_digest) {
		pr_err("queue %d: header digest error: recv %#x expected %#x\n",
			queue->idx, le32_to_cpu(recv_digest),
			le32_to_cpu(exp_digest));
		return -EPROTO;
	}

	return 0;
}

static int nvmet_tcp_check_ddgst(struct nvmet_tcp_queue *queue, void *pdu)
{
	struct nvme_tcp_hdr *hdr = pdu;
	u8 digest_len = nvmet_tcp_hdgst_len(queue);
	u32 len;

	len = le32_to_cpu(hdr->plen) - hdr->hlen -
		(hdr->flags & NVME_TCP_F_HDGST ? digest_len : 0);

	if (unlikely(len && !(hdr->flags & NVME_TCP_F_DDGST))) {
		pr_err("queue %d: data digest flag is cleared\n", queue->idx);
		return -EPROTO;
	}

	return 0;
}

static void nvmet_tcp_unmap_pdu_iovec(struct nvmet_tcp_cmd *cmd)
{
	struct scatterlist *sg;
	int i;

	sg = &cmd->req.sg[cmd->sg_idx];

	for (i = 0; i < cmd->nr_mapped; i++)
		kunmap(sg_page(&sg[i]));
}

static void nvmet_tcp_map_pdu_iovec(struct nvmet_tcp_cmd *cmd)
{
	struct kvec *iov = cmd->iov;
	struct scatterlist *sg;
	u32 length, offset, sg_offset;

	length = cmd->pdu_len;
	cmd->nr_mapped = DIV_ROUND_UP(length, PAGE_SIZE);
	offset = cmd->rbytes_done;
	cmd->sg_idx = DIV_ROUND_UP(offset, PAGE_SIZE);
	sg_offset = offset % PAGE_SIZE;
	sg = &cmd->req.sg[cmd->sg_idx];

	while (length) {
		u32 iov_len = min_t(u32, length, sg->length - sg_offset);

		iov->iov_base = kmap(sg_page(sg)) + sg->offset + sg_offset;
		iov->iov_len = iov_len;

		length -= iov_len;
		sg = sg_next(sg);
		iov++;
	}

	iov_iter_kvec(&cmd->recv_msg.msg_iter, READ, cmd->iov,
		cmd->nr_mapped, cmd->pdu_len);
}

static void nvmet_tcp_fatal_error(struct nvmet_tcp_queue *queue)
{
	queue->rcv_state = NVMET_TCP_RECV_ERR;
	if (queue->nvme_sq.ctrl)
		nvmet_ctrl_fatal_error(queue->nvme_sq.ctrl);
	else
		kernel_sock_shutdown(queue->sock, SHUT_RDWR);
}

static void nvmet_tcp_socket_error(struct nvmet_tcp_queue *queue, int status)
{
	if (status == -EPIPE || status == -ECONNRESET)
		kernel_sock_shutdown(queue->sock, SHUT_RDWR);
	else
		nvmet_tcp_fatal_error(queue);
}

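/*
 * Map the command's SGL: in-capsule (inline) data is only accepted for
 * writes and is bounded by the port's inline_data_size; an sg list
 * covering transfer_len is allocated, plus a kvec array when data will
 * still be received from the host.
 */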
static int nvmet_tcp_map_data(struct nvmet_tcp_cmd *cmd)
{
	struct nvme_sgl_desc *sgl = &cmd->req.cmd->common.dptr.sgl;
	u32 len = le32_to_cpu(sgl->length);

	if (!len)
		return 0;

	if (sgl->type == ((NVME_SGL_FMT_DATA_DESC << 4) |
			  NVME_SGL_FMT_OFFSET)) {
		if (!nvme_is_write(cmd->req.cmd))
			return NVME_SC_INVALID_FIELD | NVME_SC_DNR;

		if (len > cmd->req.port->inline_data_size)
			return NVME_SC_SGL_INVALID_OFFSET | NVME_SC_DNR;
		cmd->pdu_len = len;
	}
	cmd->req.transfer_len += len;

	cmd->req.sg = sgl_alloc(len, GFP_KERNEL, &cmd->req.sg_cnt);
	if (!cmd->req.sg)
		return NVME_SC_INTERNAL;
	cmd->cur_sg = cmd->req.sg;

	if (nvmet_tcp_has_data_in(cmd)) {
		cmd->iov = kmalloc_array(cmd->req.sg_cnt,
				sizeof(*cmd->iov), GFP_KERNEL);
		if (!cmd->iov)
			goto err;
	}

	return 0;
err:
	sgl_free(cmd->req.sg);
	return NVME_SC_INTERNAL;
}

static void nvmet_tcp_ddgst(struct ahash_request *hash,
		struct nvmet_tcp_cmd *cmd)
{
	ahash_request_set_crypt(hash, cmd->req.sg,
		(void *)&cmd->exp_ddgst, cmd->req.transfer_len);
	crypto_ahash_digest(hash);
}

static void nvmet_setup_c2h_data_pdu(struct nvmet_tcp_cmd *cmd)
{
	struct nvme_tcp_data_pdu *pdu = cmd->data_pdu;
	struct nvmet_tcp_queue *queue = cmd->queue;
	u8 hdgst = nvmet_tcp_hdgst_len(cmd->queue);
	u8 ddgst = nvmet_tcp_ddgst_len(cmd->queue);

	cmd->offset = 0;
	cmd->state = NVMET_TCP_SEND_DATA_PDU;

	pdu->hdr.type = nvme_tcp_c2h_data;
	pdu->hdr.flags = NVME_TCP_F_DATA_LAST | (queue->nvme_sq.sqhd_disabled ?
						NVME_TCP_F_DATA_SUCCESS : 0);
	pdu->hdr.hlen = sizeof(*pdu);
	pdu->hdr.pdo = pdu->hdr.hlen + hdgst;
	pdu->hdr.plen =
		cpu_to_le32(pdu->hdr.hlen + hdgst +
				cmd->req.transfer_len + ddgst);
	pdu->command_id = cmd->req.cqe->command_id;
	pdu->data_length = cpu_to_le32(cmd->req.transfer_len);
	pdu->data_offset = cpu_to_le32(cmd->wbytes_done);

	if (queue->data_digest) {
		pdu->hdr.flags |= NVME_TCP_F_DDGST;
		nvmet_tcp_ddgst(queue->snd_hash, cmd);
	}

	if (cmd->queue->hdr_digest) {
		pdu->hdr.flags |= NVME_TCP_F_HDGST;
		nvmet_tcp_hdgst(queue->snd_hash, pdu, sizeof(*pdu));
	}
}

static void nvmet_setup_r2t_pdu(struct nvmet_tcp_cmd *cmd)
{
	struct nvme_tcp_r2t_pdu *pdu = cmd->r2t_pdu;
	struct nvmet_tcp_queue *queue = cmd->queue;
	u8 hdgst = nvmet_tcp_hdgst_len(cmd->queue);

	cmd->offset = 0;
	cmd->state = NVMET_TCP_SEND_R2T;

	pdu->hdr.type = nvme_tcp_r2t;
	pdu->hdr.flags = 0;
	pdu->hdr.hlen = sizeof(*pdu);
	pdu->hdr.pdo = 0;
	pdu->hdr.plen = cpu_to_le32(pdu->hdr.hlen + hdgst);

	pdu->command_id = cmd->req.cmd->common.command_id;
	pdu->ttag = nvmet_tcp_cmd_tag(cmd->queue, cmd);
	pdu->r2t_length = cpu_to_le32(cmd->req.transfer_len - cmd->rbytes_done);
	pdu->r2t_offset = cpu_to_le32(cmd->rbytes_done);
	if (cmd->queue->hdr_digest) {
		pdu->hdr.flags |= NVME_TCP_F_HDGST;
		nvmet_tcp_hdgst(queue->snd_hash, pdu, sizeof(*pdu));
	}
}

static void nvmet_setup_response_pdu(struct nvmet_tcp_cmd *cmd)
{
	struct nvme_tcp_rsp_pdu *pdu = cmd->rsp_pdu;
	struct nvmet_tcp_queue *queue = cmd->queue;
	u8 hdgst = nvmet_tcp_hdgst_len(cmd->queue);

	cmd->offset = 0;
	cmd->state = NVMET_TCP_SEND_RESPONSE;

	pdu->hdr.type = nvme_tcp_rsp;
	pdu->hdr.flags = 0;
	pdu->hdr.hlen = sizeof(*pdu);
	pdu->hdr.pdo = 0;
	pdu->hdr.plen = cpu_to_le32(pdu->hdr.hlen + hdgst);
	if (cmd->queue->hdr_digest) {
		pdu->hdr.flags |= NVME_TCP_F_HDGST;
		nvmet_tcp_hdgst(queue->snd_hash, pdu, sizeof(*pdu));
	}
}

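/*
 * Completions are queued locklessly on resp_list from
 * nvmet_tcp_queue_response() and spliced onto resp_send_list in io_work
 * context before a new send command is picked up.
 */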
static void nvmet_tcp_process_resp_list(struct nvmet_tcp_queue *queue)
{
	struct llist_node *node;

	node = llist_del_all(&queue->resp_list);
	if (!node)
		return;

	while (node) {
		struct nvmet_tcp_cmd *cmd = llist_entry(node,
					struct nvmet_tcp_cmd, lentry);

		list_add(&cmd->entry, &queue->resp_send_list);
		node = node->next;
		queue->send_list_len++;
	}
}

static struct nvmet_tcp_cmd *nvmet_tcp_fetch_cmd(struct nvmet_tcp_queue *queue)
{
	queue->snd_cmd = list_first_entry_or_null(&queue->resp_send_list,
				struct nvmet_tcp_cmd, entry);
	if (!queue->snd_cmd) {
		nvmet_tcp_process_resp_list(queue);
		queue->snd_cmd =
			list_first_entry_or_null(&queue->resp_send_list,
					struct nvmet_tcp_cmd, entry);
		if (unlikely(!queue->snd_cmd))
			return NULL;
	}

	list_del_init(&queue->snd_cmd->entry);
	queue->send_list_len--;

	if (nvmet_tcp_need_data_out(queue->snd_cmd))
		nvmet_setup_c2h_data_pdu(queue->snd_cmd);
	else if (nvmet_tcp_need_data_in(queue->snd_cmd))
		nvmet_setup_r2t_pdu(queue->snd_cmd);
	else
		nvmet_setup_response_pdu(queue->snd_cmd);

	return queue->snd_cmd;
}

static void nvmet_tcp_queue_response(struct nvmet_req *req)
{
	struct nvmet_tcp_cmd *cmd =
		container_of(req, struct nvmet_tcp_cmd, req);
	struct nvmet_tcp_queue *queue = cmd->queue;

	llist_add(&cmd->lentry, &queue->resp_list);
	queue_work_on(cmd->queue->cpu, nvmet_tcp_wq, &cmd->queue->io_work);
}

static int nvmet_try_send_data_pdu(struct nvmet_tcp_cmd *cmd)
{
	u8 hdgst = nvmet_tcp_hdgst_len(cmd->queue);
	int left = sizeof(*cmd->data_pdu) - cmd->offset + hdgst;
	int ret;

	ret = kernel_sendpage(cmd->queue->sock, virt_to_page(cmd->data_pdu),
			offset_in_page(cmd->data_pdu) + cmd->offset,
			left, MSG_DONTWAIT | MSG_MORE | MSG_SENDPAGE_NOTLAST);
	if (ret <= 0)
		return ret;

	cmd->offset += ret;
	left -= ret;

	if (left)
		return -EAGAIN;

	cmd->state = NVMET_TCP_SEND_DATA;
	cmd->offset = 0;
	return 1;
}

static int nvmet_try_send_data(struct nvmet_tcp_cmd *cmd, bool last_in_batch)
{
	struct nvmet_tcp_queue *queue = cmd->queue;
	int ret;

	while (cmd->cur_sg) {
		struct page *page = sg_page(cmd->cur_sg);
		u32 left = cmd->cur_sg->length - cmd->offset;
		int flags = MSG_DONTWAIT;

		if ((!last_in_batch && cmd->queue->send_list_len) ||
		    cmd->wbytes_done + left < cmd->req.transfer_len ||
		    queue->data_digest || !queue->nvme_sq.sqhd_disabled)
			flags |= MSG_MORE | MSG_SENDPAGE_NOTLAST;

		ret = kernel_sendpage(cmd->queue->sock, page, cmd->offset,
					left, flags);
		if (ret <= 0)
			return ret;

		cmd->offset += ret;
		cmd->wbytes_done += ret;

		/* Done with sg? */
		if (cmd->offset == cmd->cur_sg->length) {
			cmd->cur_sg = sg_next(cmd->cur_sg);
			cmd->offset = 0;
		}
	}

	if (queue->data_digest) {
		cmd->state = NVMET_TCP_SEND_DDGST;
		cmd->offset = 0;
	} else {
		if (queue->nvme_sq.sqhd_disabled) {
			cmd->queue->snd_cmd = NULL;
			nvmet_tcp_put_cmd(cmd);
		} else {
			nvmet_setup_response_pdu(cmd);
		}
	}

	if (queue->nvme_sq.sqhd_disabled) {
		kfree(cmd->iov);
		sgl_free(cmd->req.sg);
	}

	return 1;
}

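/*
 * Send the completion (rsp) PDU. Once it is fully transmitted the
 * command's iovec and sg list are released and the command returns to
 * the queue's free list.
 */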
static int nvmet_try_send_response(struct nvmet_tcp_cmd *cmd,
		bool last_in_batch)
{
	u8 hdgst = nvmet_tcp_hdgst_len(cmd->queue);
	int left = sizeof(*cmd->rsp_pdu) - cmd->offset + hdgst;
	int flags = MSG_DONTWAIT;
	int ret;

	if (!last_in_batch && cmd->queue->send_list_len)
		flags |= MSG_MORE | MSG_SENDPAGE_NOTLAST;
	else
		flags |= MSG_EOR;

	ret = kernel_sendpage(cmd->queue->sock, virt_to_page(cmd->rsp_pdu),
		offset_in_page(cmd->rsp_pdu) + cmd->offset, left, flags);
	if (ret <= 0)
		return ret;
	cmd->offset += ret;
	left -= ret;

	if (left)
		return -EAGAIN;

	kfree(cmd->iov);
	sgl_free(cmd->req.sg);
	cmd->queue->snd_cmd = NULL;
	nvmet_tcp_put_cmd(cmd);
	return 1;
}

static int nvmet_try_send_r2t(struct nvmet_tcp_cmd *cmd, bool last_in_batch)
{
	u8 hdgst = nvmet_tcp_hdgst_len(cmd->queue);
	int left = sizeof(*cmd->r2t_pdu) - cmd->offset + hdgst;
	int flags = MSG_DONTWAIT;
	int ret;

	if (!last_in_batch && cmd->queue->send_list_len)
		flags |= MSG_MORE | MSG_SENDPAGE_NOTLAST;
	else
		flags |= MSG_EOR;

	ret = kernel_sendpage(cmd->queue->sock, virt_to_page(cmd->r2t_pdu),
		offset_in_page(cmd->r2t_pdu) + cmd->offset, left, flags);
	if (ret <= 0)
		return ret;
	cmd->offset += ret;
	left -= ret;

	if (left)
		return -EAGAIN;

	cmd->queue->snd_cmd = NULL;
	return 1;
}

static int nvmet_try_send_ddgst(struct nvmet_tcp_cmd *cmd, bool last_in_batch)
{
	struct nvmet_tcp_queue *queue = cmd->queue;
	struct msghdr msg = { .msg_flags = MSG_DONTWAIT };
	struct kvec iov = {
		.iov_base = &cmd->exp_ddgst + cmd->offset,
		.iov_len = NVME_TCP_DIGEST_LENGTH - cmd->offset
	};
	int ret;

	if (!last_in_batch && cmd->queue->send_list_len)
		msg.msg_flags |= MSG_MORE;
	else
		msg.msg_flags |= MSG_EOR;

	ret = kernel_sendmsg(queue->sock, &msg, &iov, 1, iov.iov_len);
	if (unlikely(ret <= 0))
		return ret;

	cmd->offset += ret;

	if (queue->nvme_sq.sqhd_disabled) {
		cmd->queue->snd_cmd = NULL;
		nvmet_tcp_put_cmd(cmd);
	} else {
		nvmet_setup_response_pdu(cmd);
	}
	return 1;
}

static int nvmet_tcp_try_send_one(struct nvmet_tcp_queue *queue,
		bool last_in_batch)
{
	struct nvmet_tcp_cmd *cmd = queue->snd_cmd;
	int ret = 0;

	if (!cmd || queue->state == NVMET_TCP_Q_DISCONNECTING) {
		cmd = nvmet_tcp_fetch_cmd(queue);
		if (unlikely(!cmd))
			return 0;
	}

	if (cmd->state == NVMET_TCP_SEND_DATA_PDU) {
		ret = nvmet_try_send_data_pdu(cmd);
		if (ret <= 0)
			goto done_send;
	}

	if (cmd->state == NVMET_TCP_SEND_DATA) {
		ret = nvmet_try_send_data(cmd, last_in_batch);
		if (ret <= 0)
			goto done_send;
	}

	if (cmd->state == NVMET_TCP_SEND_DDGST) {
		ret = nvmet_try_send_ddgst(cmd, last_in_batch);
		if (ret <= 0)
			goto done_send;
	}

	if (cmd->state == NVMET_TCP_SEND_R2T) {
		ret = nvmet_try_send_r2t(cmd, last_in_batch);
		if (ret <= 0)
			goto done_send;
	}

	if (cmd->state == NVMET_TCP_SEND_RESPONSE)
		ret = nvmet_try_send_response(cmd, last_in_batch);

done_send:
	if (ret < 0) {
		if (ret == -EAGAIN)
			return 0;
		return ret;
	}

	return 1;
}

static int nvmet_tcp_try_send(struct nvmet_tcp_queue *queue,
		int budget, int *sends)
{
	int i, ret = 0;

	for (i = 0; i < budget; i++) {
		ret = nvmet_tcp_try_send_one(queue, i == budget - 1);
		if (unlikely(ret < 0)) {
			nvmet_tcp_socket_error(queue, ret);
			goto done;
		} else if (ret == 0) {
			break;
		}
		(*sends)++;
	}
done:
	return ret;
}

static void nvmet_prepare_receive_pdu(struct nvmet_tcp_queue *queue)
{
	queue->offset = 0;
	queue->left = sizeof(struct nvme_tcp_hdr);
	queue->cmd = NULL;
	queue->rcv_state = NVMET_TCP_RECV_PDU;
}

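/*
 * Header and data digests share a single crc32c ahash transform per
 * queue, with separate requests for the send and receive paths; both are
 * allocated at ICReq time only when a digest was negotiated.
 */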
static void nvmet_tcp_free_crypto(struct nvmet_tcp_queue *queue)
{
	struct crypto_ahash *tfm = crypto_ahash_reqtfm(queue->rcv_hash);

	ahash_request_free(queue->rcv_hash);
	ahash_request_free(queue->snd_hash);
	crypto_free_ahash(tfm);
}

static int nvmet_tcp_alloc_crypto(struct nvmet_tcp_queue *queue)
{
	struct crypto_ahash *tfm;

	tfm = crypto_alloc_ahash("crc32c", 0, CRYPTO_ALG_ASYNC);
	if (IS_ERR(tfm))
		return PTR_ERR(tfm);

	queue->snd_hash = ahash_request_alloc(tfm, GFP_KERNEL);
	if (!queue->snd_hash)
		goto free_tfm;
	ahash_request_set_callback(queue->snd_hash, 0, NULL, NULL);

	queue->rcv_hash = ahash_request_alloc(tfm, GFP_KERNEL);
	if (!queue->rcv_hash)
		goto free_snd_hash;
	ahash_request_set_callback(queue->rcv_hash, 0, NULL, NULL);

	return 0;
free_snd_hash:
	ahash_request_free(queue->snd_hash);
free_tfm:
	crypto_free_ahash(tfm);
	return -ENOMEM;
}

static int nvmet_tcp_handle_icreq(struct nvmet_tcp_queue *queue)
{
	struct nvme_tcp_icreq_pdu *icreq = &queue->pdu.icreq;
	struct nvme_tcp_icresp_pdu *icresp = &queue->pdu.icresp;
	struct msghdr msg = {};
	struct kvec iov;
	int ret;

	if (le32_to_cpu(icreq->hdr.plen) != sizeof(struct nvme_tcp_icreq_pdu)) {
		pr_err("bad nvme-tcp pdu length (%d)\n",
			le32_to_cpu(icreq->hdr.plen));
		nvmet_tcp_fatal_error(queue);
	}

	if (icreq->pfv != NVME_TCP_PFV_1_0) {
		pr_err("queue %d: bad pfv %d\n", queue->idx, icreq->pfv);
		return -EPROTO;
	}

	if (icreq->hpda != 0) {
		pr_err("queue %d: unsupported hpda %d\n", queue->idx,
			icreq->hpda);
		return -EPROTO;
	}

	queue->hdr_digest = !!(icreq->digest & NVME_TCP_HDR_DIGEST_ENABLE);
	queue->data_digest = !!(icreq->digest & NVME_TCP_DATA_DIGEST_ENABLE);
	if (queue->hdr_digest || queue->data_digest) {
		ret = nvmet_tcp_alloc_crypto(queue);
		if (ret)
			return ret;
	}

	memset(icresp, 0, sizeof(*icresp));
	icresp->hdr.type = nvme_tcp_icresp;
	icresp->hdr.hlen = sizeof(*icresp);
	icresp->hdr.pdo = 0;
	icresp->hdr.plen = cpu_to_le32(icresp->hdr.hlen);
	icresp->pfv = cpu_to_le16(NVME_TCP_PFV_1_0);
	icresp->maxdata = cpu_to_le32(0x400000); /* 16M arbitrary limit */
	icresp->cpda = 0;
	if (queue->hdr_digest)
		icresp->digest |= NVME_TCP_HDR_DIGEST_ENABLE;
	if (queue->data_digest)
		icresp->digest |= NVME_TCP_DATA_DIGEST_ENABLE;

	iov.iov_base = icresp;
	iov.iov_len = sizeof(*icresp);
	ret = kernel_sendmsg(queue->sock, &msg, &iov, 1, iov.iov_len);
	if (ret < 0)
		goto free_crypto;

	queue->state = NVMET_TCP_Q_LIVE;
	nvmet_prepare_receive_pdu(queue);
	return 0;
free_crypto:
	if (queue->hdr_digest || queue->data_digest)
		nvmet_tcp_free_crypto(queue);
	return ret;
}

static void nvmet_tcp_handle_req_failure(struct nvmet_tcp_queue *queue,
		struct nvmet_tcp_cmd *cmd, struct nvmet_req *req)
{
	size_t data_len = le32_to_cpu(req->cmd->common.dptr.sgl.length);
	int ret;

	if (!nvme_is_write(cmd->req.cmd) ||
	    data_len > cmd->req.port->inline_data_size) {
		nvmet_prepare_receive_pdu(queue);
		return;
	}

	ret = nvmet_tcp_map_data(cmd);
	if (unlikely(ret)) {
		pr_err("queue %d: failed to map data\n", queue->idx);
		nvmet_tcp_fatal_error(queue);
		return;
	}

	queue->rcv_state = NVMET_TCP_RECV_DATA;
	nvmet_tcp_map_pdu_iovec(cmd);
	cmd->flags |= NVMET_TCP_F_INIT_FAILED;
}

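/*
 * An H2CData PDU arrives in response to an earlier R2T; its ttag indexes
 * straight into the queue's command array and its data offset must match
 * the bytes already received for that command.
 */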
static int nvmet_tcp_handle_h2c_data_pdu(struct nvmet_tcp_queue *queue)
{
	struct nvme_tcp_data_pdu *data = &queue->pdu.data;
	struct nvmet_tcp_cmd *cmd;

	cmd = &queue->cmds[data->ttag];

	if (le32_to_cpu(data->data_offset) != cmd->rbytes_done) {
		pr_err("ttag %u unexpected data offset %u (expected %u)\n",
			data->ttag, le32_to_cpu(data->data_offset),
			cmd->rbytes_done);
		/* FIXME: use path and transport errors */
		nvmet_req_complete(&cmd->req,
			NVME_SC_INVALID_FIELD | NVME_SC_DNR);
		return -EPROTO;
	}

	cmd->pdu_len = le32_to_cpu(data->data_length);
	cmd->pdu_recv = 0;
	nvmet_tcp_map_pdu_iovec(cmd);
	queue->cmd = cmd;
	queue->rcv_state = NVMET_TCP_RECV_DATA;

	return 0;
}

static int nvmet_tcp_done_recv_pdu(struct nvmet_tcp_queue *queue)
{
	struct nvme_tcp_hdr *hdr = &queue->pdu.cmd.hdr;
	struct nvme_command *nvme_cmd = &queue->pdu.cmd.cmd;
	struct nvmet_req *req;
	int ret;

	if (unlikely(queue->state == NVMET_TCP_Q_CONNECTING)) {
		if (hdr->type != nvme_tcp_icreq) {
			pr_err("unexpected pdu type (%d) before icreq\n",
				hdr->type);
			nvmet_tcp_fatal_error(queue);
			return -EPROTO;
		}
		return nvmet_tcp_handle_icreq(queue);
	}

	if (hdr->type == nvme_tcp_h2c_data) {
		ret = nvmet_tcp_handle_h2c_data_pdu(queue);
		if (unlikely(ret))
			return ret;
		return 0;
	}

	queue->cmd = nvmet_tcp_get_cmd(queue);
	if (unlikely(!queue->cmd)) {
		/* This should never happen */
		pr_err("queue %d: out of commands (%d) send_list_len: %d, opcode: %d",
			queue->idx, queue->nr_cmds, queue->send_list_len,
			nvme_cmd->common.opcode);
		nvmet_tcp_fatal_error(queue);
		return -ENOMEM;
	}

	req = &queue->cmd->req;
	memcpy(req->cmd, nvme_cmd, sizeof(*nvme_cmd));

	if (unlikely(!nvmet_req_init(req, &queue->nvme_cq,
			&queue->nvme_sq, &nvmet_tcp_ops))) {
		pr_err("failed cmd %p id %d opcode %d, data_len: %d\n",
			req->cmd, req->cmd->common.command_id,
			req->cmd->common.opcode,
			le32_to_cpu(req->cmd->common.dptr.sgl.length));

		nvmet_tcp_handle_req_failure(queue, queue->cmd, req);
		return -EAGAIN;
	}

	ret = nvmet_tcp_map_data(queue->cmd);
	if (unlikely(ret)) {
		pr_err("queue %d: failed to map data\n", queue->idx);
		if (nvmet_tcp_has_inline_data(queue->cmd))
			nvmet_tcp_fatal_error(queue);
		else
			nvmet_req_complete(req, ret);
		ret = -EAGAIN;
		goto out;
	}

	if (nvmet_tcp_need_data_in(queue->cmd)) {
		if (nvmet_tcp_has_inline_data(queue->cmd)) {
			queue->rcv_state = NVMET_TCP_RECV_DATA;
			nvmet_tcp_map_pdu_iovec(queue->cmd);
			return 0;
		}
		/* send back R2T */
		nvmet_tcp_queue_response(&queue->cmd->req);
		goto out;
	}

	queue->cmd->req.execute(&queue->cmd->req);
out:
	nvmet_prepare_receive_pdu(queue);
	return ret;
}

static const u8 nvme_tcp_pdu_sizes[] = {
	[nvme_tcp_icreq]	= sizeof(struct nvme_tcp_icreq_pdu),
	[nvme_tcp_cmd]		= sizeof(struct nvme_tcp_cmd_pdu),
	[nvme_tcp_h2c_data]	= sizeof(struct nvme_tcp_data_pdu),
};

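/*
 * Only ICReq, Command and H2CData PDUs are valid on the target's receive
 * side; any other type, or a header length that does not match the table
 * above, is treated as a protocol error.
 */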
static inline u8 nvmet_tcp_pdu_size(u8 type)
{
	size_t idx = type;

	return (idx < ARRAY_SIZE(nvme_tcp_pdu_sizes) &&
		nvme_tcp_pdu_sizes[idx]) ?
			nvme_tcp_pdu_sizes[idx] : 0;
}

static inline bool nvmet_tcp_pdu_valid(u8 type)
{
	switch (type) {
	case nvme_tcp_icreq:
	case nvme_tcp_cmd:
	case nvme_tcp_h2c_data:
		/* fallthru */
		return true;
	}

	return false;
}

static int nvmet_tcp_try_recv_pdu(struct nvmet_tcp_queue *queue)
{
	struct nvme_tcp_hdr *hdr = &queue->pdu.cmd.hdr;
	int len;
	struct kvec iov;
	struct msghdr msg = { .msg_flags = MSG_DONTWAIT };

recv:
	iov.iov_base = (void *)&queue->pdu + queue->offset;
	iov.iov_len = queue->left;
	len = kernel_recvmsg(queue->sock, &msg, &iov, 1,
			iov.iov_len, msg.msg_flags);
	if (unlikely(len < 0))
		return len;

	queue->offset += len;
	queue->left -= len;
	if (queue->left)
		return -EAGAIN;

	if (queue->offset == sizeof(struct nvme_tcp_hdr)) {
		u8 hdgst = nvmet_tcp_hdgst_len(queue);

		if (unlikely(!nvmet_tcp_pdu_valid(hdr->type))) {
			pr_err("unexpected pdu type %d\n", hdr->type);
			nvmet_tcp_fatal_error(queue);
			return -EIO;
		}

		if (unlikely(hdr->hlen != nvmet_tcp_pdu_size(hdr->type))) {
			pr_err("pdu %d bad hlen %d\n", hdr->type, hdr->hlen);
			return -EIO;
		}

		queue->left = hdr->hlen - queue->offset + hdgst;
		goto recv;
	}

	if (queue->hdr_digest &&
	    nvmet_tcp_verify_hdgst(queue, &queue->pdu, queue->offset)) {
		nvmet_tcp_fatal_error(queue); /* fatal */
		return -EPROTO;
	}

	if (queue->data_digest &&
	    nvmet_tcp_check_ddgst(queue, &queue->pdu)) {
		nvmet_tcp_fatal_error(queue); /* fatal */
		return -EPROTO;
	}

	return nvmet_tcp_done_recv_pdu(queue);
}

static void nvmet_tcp_prep_recv_ddgst(struct nvmet_tcp_cmd *cmd)
{
	struct nvmet_tcp_queue *queue = cmd->queue;

	nvmet_tcp_ddgst(queue->rcv_hash, cmd);
	queue->offset = 0;
	queue->left = NVME_TCP_DIGEST_LENGTH;
	queue->rcv_state = NVMET_TCP_RECV_DDGST;
}

static int nvmet_tcp_try_recv_data(struct nvmet_tcp_queue *queue)
{
	struct nvmet_tcp_cmd *cmd = queue->cmd;
	int ret;

	while (msg_data_left(&cmd->recv_msg)) {
		ret = sock_recvmsg(cmd->queue->sock, &cmd->recv_msg,
			cmd->recv_msg.msg_flags);
		if (ret <= 0)
			return ret;

		cmd->pdu_recv += ret;
		cmd->rbytes_done += ret;
	}

	nvmet_tcp_unmap_pdu_iovec(cmd);

	if (!(cmd->flags & NVMET_TCP_F_INIT_FAILED) &&
	    cmd->rbytes_done == cmd->req.transfer_len) {
		if (queue->data_digest) {
			nvmet_tcp_prep_recv_ddgst(cmd);
			return 0;
		}
		cmd->req.execute(&cmd->req);
	}

	nvmet_prepare_receive_pdu(queue);
	return 0;
}

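/*
 * Receive-side data digest: exp_ddgst was computed over the command's sg
 * list when the data phase completed (nvmet_tcp_prep_recv_ddgst); compare
 * it with the 4 digest bytes read from the wire before executing the
 * request.
 */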
static int nvmet_tcp_try_recv_ddgst(struct nvmet_tcp_queue *queue)
{
	struct nvmet_tcp_cmd *cmd = queue->cmd;
	int ret;
	struct msghdr msg = { .msg_flags = MSG_DONTWAIT };
	struct kvec iov = {
		.iov_base = (void *)&cmd->recv_ddgst + queue->offset,
		.iov_len = queue->left
	};

	ret = kernel_recvmsg(queue->sock, &msg, &iov, 1,
			iov.iov_len, msg.msg_flags);
	if (unlikely(ret < 0))
		return ret;

	queue->offset += ret;
	queue->left -= ret;
	if (queue->left)
		return -EAGAIN;

	if (queue->data_digest && cmd->exp_ddgst != cmd->recv_ddgst) {
		pr_err("queue %d: cmd %d pdu (%d) data digest error: recv %#x expected %#x\n",
			queue->idx, cmd->req.cmd->common.command_id,
			queue->pdu.cmd.hdr.type, le32_to_cpu(cmd->recv_ddgst),
			le32_to_cpu(cmd->exp_ddgst));
		nvmet_tcp_finish_cmd(cmd);
		nvmet_tcp_fatal_error(queue);
		ret = -EPROTO;
		goto out;
	}

	if (!(cmd->flags & NVMET_TCP_F_INIT_FAILED) &&
	    cmd->rbytes_done == cmd->req.transfer_len)
		cmd->req.execute(&cmd->req);
	ret = 0;
out:
	nvmet_prepare_receive_pdu(queue);
	return ret;
}

static int nvmet_tcp_try_recv_one(struct nvmet_tcp_queue *queue)
{
	int result = 0;

	if (unlikely(queue->rcv_state == NVMET_TCP_RECV_ERR))
		return 0;

	if (queue->rcv_state == NVMET_TCP_RECV_PDU) {
		result = nvmet_tcp_try_recv_pdu(queue);
		if (result != 0)
			goto done_recv;
	}

	if (queue->rcv_state == NVMET_TCP_RECV_DATA) {
		result = nvmet_tcp_try_recv_data(queue);
		if (result != 0)
			goto done_recv;
	}

	if (queue->rcv_state == NVMET_TCP_RECV_DDGST) {
		result = nvmet_tcp_try_recv_ddgst(queue);
		if (result != 0)
			goto done_recv;
	}

done_recv:
	if (result < 0) {
		if (result == -EAGAIN)
			return 0;
		return result;
	}
	return 1;
}

static int nvmet_tcp_try_recv(struct nvmet_tcp_queue *queue,
		int budget, int *recvs)
{
	int i, ret = 0;

	for (i = 0; i < budget; i++) {
		ret = nvmet_tcp_try_recv_one(queue);
		if (unlikely(ret < 0)) {
			nvmet_tcp_socket_error(queue, ret);
			goto done;
		} else if (ret == 0) {
			break;
		}
		(*recvs)++;
	}
done:
	return ret;
}

static void nvmet_tcp_schedule_release_queue(struct nvmet_tcp_queue *queue)
{
	spin_lock(&queue->state_lock);
	if (queue->state != NVMET_TCP_Q_DISCONNECTING) {
		queue->state = NVMET_TCP_Q_DISCONNECTING;
		schedule_work(&queue->release_work);
	}
	spin_unlock(&queue->state_lock);
}

static void nvmet_tcp_io_work(struct work_struct *w)
{
	struct nvmet_tcp_queue *queue =
		container_of(w, struct nvmet_tcp_queue, io_work);
	bool pending;
	int ret, ops = 0;

	do {
		pending = false;

		ret = nvmet_tcp_try_recv(queue, NVMET_TCP_RECV_BUDGET, &ops);
		if (ret > 0)
			pending = true;
		else if (ret < 0)
			return;

		ret = nvmet_tcp_try_send(queue, NVMET_TCP_SEND_BUDGET, &ops);
		if (ret > 0)
			pending = true;
		else if (ret < 0)
			return;

	} while (pending && ops < NVMET_TCP_IO_WORK_BUDGET);

	/*
	 * We exhausted our budget, requeue ourselves
	 */
	if (pending)
		queue_work_on(queue->cpu, nvmet_tcp_wq, &queue->io_work);
}

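/*
 * Per-command PDU buffers (command, response, C2HData and R2T) are carved
 * out of the queue's page_frag cache, each sized to leave room for an
 * optional header digest.
 */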
static int nvmet_tcp_alloc_cmd(struct nvmet_tcp_queue *queue,
		struct nvmet_tcp_cmd *c)
{
	u8 hdgst = nvmet_tcp_hdgst_len(queue);

	c->queue = queue;
	c->req.port = queue->port->nport;

	c->cmd_pdu = page_frag_alloc(&queue->pf_cache,
			sizeof(*c->cmd_pdu) + hdgst, GFP_KERNEL | __GFP_ZERO);
	if (!c->cmd_pdu)
		return -ENOMEM;
	c->req.cmd = &c->cmd_pdu->cmd;

	c->rsp_pdu = page_frag_alloc(&queue->pf_cache,
			sizeof(*c->rsp_pdu) + hdgst, GFP_KERNEL | __GFP_ZERO);
	if (!c->rsp_pdu)
		goto out_free_cmd;
	c->req.cqe = &c->rsp_pdu->cqe;

	c->data_pdu = page_frag_alloc(&queue->pf_cache,
			sizeof(*c->data_pdu) + hdgst, GFP_KERNEL | __GFP_ZERO);
	if (!c->data_pdu)
		goto out_free_rsp;

	c->r2t_pdu = page_frag_alloc(&queue->pf_cache,
			sizeof(*c->r2t_pdu) + hdgst, GFP_KERNEL | __GFP_ZERO);
	if (!c->r2t_pdu)
		goto out_free_data;

	c->recv_msg.msg_flags = MSG_DONTWAIT | MSG_NOSIGNAL;

	list_add_tail(&c->entry, &queue->free_list);

	return 0;
out_free_data:
	page_frag_free(c->data_pdu);
out_free_rsp:
	page_frag_free(c->rsp_pdu);
out_free_cmd:
	page_frag_free(c->cmd_pdu);
	return -ENOMEM;
}

static void nvmet_tcp_free_cmd(struct nvmet_tcp_cmd *c)
{
	page_frag_free(c->r2t_pdu);
	page_frag_free(c->data_pdu);
	page_frag_free(c->rsp_pdu);
	page_frag_free(c->cmd_pdu);
}

static int nvmet_tcp_alloc_cmds(struct nvmet_tcp_queue *queue)
{
	struct nvmet_tcp_cmd *cmds;
	int i, ret = -EINVAL, nr_cmds = queue->nr_cmds;

	cmds = kcalloc(nr_cmds, sizeof(struct nvmet_tcp_cmd), GFP_KERNEL);
	if (!cmds)
		goto out;

	for (i = 0; i < nr_cmds; i++) {
		ret = nvmet_tcp_alloc_cmd(queue, cmds + i);
		if (ret)
			goto out_free;
	}

	queue->cmds = cmds;

	return 0;
out_free:
	while (--i >= 0)
		nvmet_tcp_free_cmd(cmds + i);
	kfree(cmds);
out:
	return ret;
}

static void nvmet_tcp_free_cmds(struct nvmet_tcp_queue *queue)
{
	struct nvmet_tcp_cmd *cmds = queue->cmds;
	int i;

	for (i = 0; i < queue->nr_cmds; i++)
		nvmet_tcp_free_cmd(cmds + i);

	nvmet_tcp_free_cmd(&queue->connect);
	kfree(cmds);
}

static void nvmet_tcp_restore_socket_callbacks(struct nvmet_tcp_queue *queue)
{
	struct socket *sock = queue->sock;

	write_lock_bh(&sock->sk->sk_callback_lock);
	sock->sk->sk_data_ready = queue->data_ready;
	sock->sk->sk_state_change = queue->state_change;
	sock->sk->sk_write_space = queue->write_space;
	sock->sk->sk_user_data = NULL;
	write_unlock_bh(&sock->sk->sk_callback_lock);
}

static void nvmet_tcp_finish_cmd(struct nvmet_tcp_cmd *cmd)
{
	nvmet_req_uninit(&cmd->req);
	nvmet_tcp_unmap_pdu_iovec(cmd);
	kfree(cmd->iov);
	sgl_free(cmd->req.sg);
}

static void nvmet_tcp_uninit_data_in_cmds(struct nvmet_tcp_queue *queue)
{
	struct nvmet_tcp_cmd *cmd = queue->cmds;
	int i;

	for (i = 0; i < queue->nr_cmds; i++, cmd++) {
		if (nvmet_tcp_need_data_in(cmd))
			nvmet_tcp_finish_cmd(cmd);
	}

	if (!queue->nr_cmds && nvmet_tcp_need_data_in(&queue->connect)) {
		/* failed in connect */
		nvmet_tcp_finish_cmd(&queue->connect);
	}
}

static void nvmet_tcp_release_queue_work(struct work_struct *w)
{
	struct nvmet_tcp_queue *queue =
		container_of(w, struct nvmet_tcp_queue, release_work);

	mutex_lock(&nvmet_tcp_queue_mutex);
	list_del_init(&queue->queue_list);
	mutex_unlock(&nvmet_tcp_queue_mutex);

	nvmet_tcp_restore_socket_callbacks(queue);
	flush_work(&queue->io_work);

	nvmet_tcp_uninit_data_in_cmds(queue);
	nvmet_sq_destroy(&queue->nvme_sq);
	cancel_work_sync(&queue->io_work);
	sock_release(queue->sock);
	nvmet_tcp_free_cmds(queue);
	if (queue->hdr_digest || queue->data_digest)
		nvmet_tcp_free_crypto(queue);
	ida_simple_remove(&nvmet_tcp_queue_ida, queue->idx);

	kfree(queue);
}

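/*
 * Socket callbacks run in softirq context under sk_callback_lock; they
 * only kick io_work on the queue's CPU. The original callbacks saved in
 * nvmet_tcp_set_queue_sock() are restored when the queue is released.
 */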
static void nvmet_tcp_data_ready(struct sock *sk)
{
	struct nvmet_tcp_queue *queue;

	read_lock_bh(&sk->sk_callback_lock);
	queue = sk->sk_user_data;
	if (likely(queue))
		queue_work_on(queue->cpu, nvmet_tcp_wq, &queue->io_work);
	read_unlock_bh(&sk->sk_callback_lock);
}

static void nvmet_tcp_write_space(struct sock *sk)
{
	struct nvmet_tcp_queue *queue;

	read_lock_bh(&sk->sk_callback_lock);
	queue = sk->sk_user_data;
	if (unlikely(!queue))
		goto out;

	if (unlikely(queue->state == NVMET_TCP_Q_CONNECTING)) {
		queue->write_space(sk);
		goto out;
	}

	if (sk_stream_is_writeable(sk)) {
		clear_bit(SOCK_NOSPACE, &sk->sk_socket->flags);
		queue_work_on(queue->cpu, nvmet_tcp_wq, &queue->io_work);
	}
out:
	read_unlock_bh(&sk->sk_callback_lock);
}

static void nvmet_tcp_state_change(struct sock *sk)
{
	struct nvmet_tcp_queue *queue;

	write_lock_bh(&sk->sk_callback_lock);
	queue = sk->sk_user_data;
	if (!queue)
		goto done;

	switch (sk->sk_state) {
	case TCP_FIN_WAIT1:
	case TCP_CLOSE_WAIT:
	case TCP_CLOSE:
		/* FALLTHRU */
		sk->sk_user_data = NULL;
		nvmet_tcp_schedule_release_queue(queue);
		break;
	default:
		pr_warn("queue %d unhandled state %d\n",
			queue->idx, sk->sk_state);
	}
done:
	write_unlock_bh(&sk->sk_callback_lock);
}

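/*
 * Per-connection socket setup: record the local and peer addresses,
 * disable lingering on close, optionally apply so_priority and the TOS
 * received from the peer, then install the nvmet-tcp socket callbacks.
 */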
static int nvmet_tcp_set_queue_sock(struct nvmet_tcp_queue *queue)
{
	struct socket *sock = queue->sock;
	struct inet_sock *inet = inet_sk(sock->sk);
	int ret;

	ret = kernel_getsockname(sock,
		(struct sockaddr *)&queue->sockaddr);
	if (ret < 0)
		return ret;

	ret = kernel_getpeername(sock,
		(struct sockaddr *)&queue->sockaddr_peer);
	if (ret < 0)
		return ret;

	/*
	 * Cleanup whatever is sitting in the TCP transmit queue on socket
	 * close. This is done to prevent stale data from being sent should
	 * the network connection be restored before TCP times out.
	 */
	sock_no_linger(sock->sk);

	if (so_priority > 0)
		sock_set_priority(sock->sk, so_priority);

	/* Set socket type of service */
	if (inet->rcv_tos > 0)
		ip_sock_set_tos(sock->sk, inet->rcv_tos);

	write_lock_bh(&sock->sk->sk_callback_lock);
	sock->sk->sk_user_data = queue;
	queue->data_ready = sock->sk->sk_data_ready;
	sock->sk->sk_data_ready = nvmet_tcp_data_ready;
	queue->state_change = sock->sk->sk_state_change;
	sock->sk->sk_state_change = nvmet_tcp_state_change;
	queue->write_space = sock->sk->sk_write_space;
	sock->sk->sk_write_space = nvmet_tcp_write_space;
	write_unlock_bh(&sock->sk->sk_callback_lock);

	return 0;
}

static int nvmet_tcp_alloc_queue(struct nvmet_tcp_port *port,
		struct socket *newsock)
{
	struct nvmet_tcp_queue *queue;
	int ret;

	queue = kzalloc(sizeof(*queue), GFP_KERNEL);
	if (!queue)
		return -ENOMEM;

	INIT_WORK(&queue->release_work, nvmet_tcp_release_queue_work);
	INIT_WORK(&queue->io_work, nvmet_tcp_io_work);
	queue->sock = newsock;
	queue->port = port;
	queue->nr_cmds = 0;
	spin_lock_init(&queue->state_lock);
	queue->state = NVMET_TCP_Q_CONNECTING;
	INIT_LIST_HEAD(&queue->free_list);
	init_llist_head(&queue->resp_list);
	INIT_LIST_HEAD(&queue->resp_send_list);

	queue->idx = ida_simple_get(&nvmet_tcp_queue_ida, 0, 0, GFP_KERNEL);
	if (queue->idx < 0) {
		ret = queue->idx;
		goto out_free_queue;
	}

	ret = nvmet_tcp_alloc_cmd(queue, &queue->connect);
	if (ret)
		goto out_ida_remove;

	ret = nvmet_sq_init(&queue->nvme_sq);
	if (ret)
		goto out_free_connect;

	port->last_cpu = cpumask_next_wrap(port->last_cpu,
				cpu_online_mask, -1, false);
	queue->cpu = port->last_cpu;
	nvmet_prepare_receive_pdu(queue);

	mutex_lock(&nvmet_tcp_queue_mutex);
	list_add_tail(&queue->queue_list, &nvmet_tcp_queue_list);
	mutex_unlock(&nvmet_tcp_queue_mutex);

	ret = nvmet_tcp_set_queue_sock(queue);
	if (ret)
		goto out_destroy_sq;

	queue_work_on(queue->cpu, nvmet_tcp_wq, &queue->io_work);

	return 0;
out_destroy_sq:
	mutex_lock(&nvmet_tcp_queue_mutex);
	list_del_init(&queue->queue_list);
	mutex_unlock(&nvmet_tcp_queue_mutex);
	nvmet_sq_destroy(&queue->nvme_sq);
out_free_connect:
	nvmet_tcp_free_cmd(&queue->connect);
out_ida_remove:
	ida_simple_remove(&nvmet_tcp_queue_ida, queue->idx);
out_free_queue:
	kfree(queue);
	return ret;
}

static void nvmet_tcp_accept_work(struct work_struct *w)
{
	struct nvmet_tcp_port *port =
		container_of(w, struct nvmet_tcp_port, accept_work);
	struct socket *newsock;
	int ret;

	while (true) {
		ret = kernel_accept(port->sock, &newsock, O_NONBLOCK);
		if (ret < 0) {
			if (ret != -EAGAIN)
				pr_warn("failed to accept err=%d\n", ret);
			return;
		}
		ret = nvmet_tcp_alloc_queue(port, newsock);
		if (ret) {
			pr_err("failed to allocate queue\n");
			sock_release(newsock);
		}
	}
}

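/*
 * data_ready on the listening socket fires for incoming connections;
 * accept_work then drains kernel_accept() until it returns -EAGAIN.
 */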
static void nvmet_tcp_listen_data_ready(struct sock *sk)
{
	struct nvmet_tcp_port *port;

	read_lock_bh(&sk->sk_callback_lock);
	port = sk->sk_user_data;
	if (!port)
		goto out;

	if (sk->sk_state == TCP_LISTEN)
		schedule_work(&port->accept_work);
out:
	read_unlock_bh(&sk->sk_callback_lock);
}

static int nvmet_tcp_add_port(struct nvmet_port *nport)
{
	struct nvmet_tcp_port *port;
	__kernel_sa_family_t af;
	int ret;

	port = kzalloc(sizeof(*port), GFP_KERNEL);
	if (!port)
		return -ENOMEM;

	switch (nport->disc_addr.adrfam) {
	case NVMF_ADDR_FAMILY_IP4:
		af = AF_INET;
		break;
	case NVMF_ADDR_FAMILY_IP6:
		af = AF_INET6;
		break;
	default:
		pr_err("address family %d not supported\n",
				nport->disc_addr.adrfam);
		ret = -EINVAL;
		goto err_port;
	}

	ret = inet_pton_with_scope(&init_net, af, nport->disc_addr.traddr,
			nport->disc_addr.trsvcid, &port->addr);
	if (ret) {
		pr_err("malformed ip/port passed: %s:%s\n",
			nport->disc_addr.traddr, nport->disc_addr.trsvcid);
		goto err_port;
	}

	port->nport = nport;
	port->last_cpu = -1;
	INIT_WORK(&port->accept_work, nvmet_tcp_accept_work);
	if (port->nport->inline_data_size < 0)
		port->nport->inline_data_size = NVMET_TCP_DEF_INLINE_DATA_SIZE;

	ret = sock_create(port->addr.ss_family, SOCK_STREAM,
				IPPROTO_TCP, &port->sock);
	if (ret) {
		pr_err("failed to create a socket\n");
		goto err_port;
	}

	port->sock->sk->sk_user_data = port;
	port->data_ready = port->sock->sk->sk_data_ready;
	port->sock->sk->sk_data_ready = nvmet_tcp_listen_data_ready;
	sock_set_reuseaddr(port->sock->sk);
	tcp_sock_set_nodelay(port->sock->sk);
	if (so_priority > 0)
		sock_set_priority(port->sock->sk, so_priority);

	ret = kernel_bind(port->sock, (struct sockaddr *)&port->addr,
			sizeof(port->addr));
	if (ret) {
		pr_err("failed to bind port socket %d\n", ret);
		goto err_sock;
	}

	ret = kernel_listen(port->sock, 128);
	if (ret) {
		pr_err("failed to listen %d on port sock\n", ret);
		goto err_sock;
	}

	nport->priv = port;
	pr_info("enabling port %d (%pISpc)\n",
		le16_to_cpu(nport->disc_addr.portid), &port->addr);

	return 0;

err_sock:
	sock_release(port->sock);
err_port:
	kfree(port);
	return ret;
}

static void nvmet_tcp_remove_port(struct nvmet_port *nport)
{
	struct nvmet_tcp_port *port = nport->priv;

	write_lock_bh(&port->sock->sk->sk_callback_lock);
	port->sock->sk->sk_data_ready = port->data_ready;
	port->sock->sk->sk_user_data = NULL;
	write_unlock_bh(&port->sock->sk->sk_callback_lock);
	cancel_work_sync(&port->accept_work);

	sock_release(port->sock);
	kfree(port);
}

static void nvmet_tcp_delete_ctrl(struct nvmet_ctrl *ctrl)
{
	struct nvmet_tcp_queue *queue;

	mutex_lock(&nvmet_tcp_queue_mutex);
	list_for_each_entry(queue, &nvmet_tcp_queue_list, queue_list)
		if (queue->nvme_sq.ctrl == ctrl)
			kernel_sock_shutdown(queue->sock, SHUT_RDWR);
	mutex_unlock(&nvmet_tcp_queue_mutex);
}

static u16 nvmet_tcp_install_queue(struct nvmet_sq *sq)
{
	struct nvmet_tcp_queue *queue =
		container_of(sq, struct nvmet_tcp_queue, nvme_sq);

	if (sq->qid == 0) {
		/* Let inflight controller teardown complete */
		flush_scheduled_work();
	}

	queue->nr_cmds = sq->size * 2;
	if (nvmet_tcp_alloc_cmds(queue))
		return NVME_SC_INTERNAL;
	return 0;
}

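/*
 * For discovery log pages: when the port listens on a wildcard address
 * (0.0.0.0 or ::), report the local address the connection actually
 * arrived on instead of the configured traddr.
 */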
static void nvmet_tcp_disc_port_addr(struct nvmet_req *req,
		struct nvmet_port *nport, char *traddr)
{
	struct nvmet_tcp_port *port = nport->priv;

	if (inet_addr_is_any((struct sockaddr *)&port->addr)) {
		struct nvmet_tcp_cmd *cmd =
			container_of(req, struct nvmet_tcp_cmd, req);
		struct nvmet_tcp_queue *queue = cmd->queue;

		sprintf(traddr, "%pISc", (struct sockaddr *)&queue->sockaddr);
	} else {
		memcpy(traddr, nport->disc_addr.traddr, NVMF_TRADDR_SIZE);
	}
}

static const struct nvmet_fabrics_ops nvmet_tcp_ops = {
	.owner			= THIS_MODULE,
	.type			= NVMF_TRTYPE_TCP,
	.msdbd			= 1,
	.has_keyed_sgls		= 0,
	.add_port		= nvmet_tcp_add_port,
	.remove_port		= nvmet_tcp_remove_port,
	.queue_response		= nvmet_tcp_queue_response,
	.delete_ctrl		= nvmet_tcp_delete_ctrl,
	.install_queue		= nvmet_tcp_install_queue,
	.disc_traddr		= nvmet_tcp_disc_port_addr,
};

static int __init nvmet_tcp_init(void)
{
	int ret;

	nvmet_tcp_wq = alloc_workqueue("nvmet_tcp_wq", WQ_HIGHPRI, 0);
	if (!nvmet_tcp_wq)
		return -ENOMEM;

	ret = nvmet_register_transport(&nvmet_tcp_ops);
	if (ret)
		goto err;

	return 0;
err:
	destroy_workqueue(nvmet_tcp_wq);
	return ret;
}

static void __exit nvmet_tcp_exit(void)
{
	struct nvmet_tcp_queue *queue;

	nvmet_unregister_transport(&nvmet_tcp_ops);

	flush_scheduled_work();
	mutex_lock(&nvmet_tcp_queue_mutex);
	list_for_each_entry(queue, &nvmet_tcp_queue_list, queue_list)
		kernel_sock_shutdown(queue->sock, SHUT_RDWR);
	mutex_unlock(&nvmet_tcp_queue_mutex);
	flush_scheduled_work();

	destroy_workqueue(nvmet_tcp_wq);
}

module_init(nvmet_tcp_init);
module_exit(nvmet_tcp_exit);

MODULE_LICENSE("GPL v2");
MODULE_ALIAS("nvmet-transport-3"); /* 3 == NVMF_TRTYPE_TCP */