// SPDX-License-Identifier: GPL-2.0
/*
 * NVMe over Fabrics TCP host.
 * Copyright (c) 2018 Lightbits Labs. All rights reserved.
 */
#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
#include <linux/module.h>
#include <linux/init.h>
#include <linux/slab.h>
#include <linux/err.h>
#include <linux/nvme-tcp.h>
#include <net/sock.h>
#include <net/tcp.h>
#include <linux/blk-mq.h>
#include <crypto/hash.h>
#include <net/busy_poll.h>

#include "nvme.h"
#include "fabrics.h"

struct nvme_tcp_queue;

/* Define the socket priority to use for connections where it is desirable
 * that the NIC consider performing optimized packet processing or filtering.
 * A non-zero value is sufficient to indicate general consideration of any
 * possible optimization. Making it a module param allows for alternative
 * values that may be unique for some NIC implementations.
 */
static int so_priority;
module_param(so_priority, int, 0644);
MODULE_PARM_DESC(so_priority, "nvme tcp socket optimize priority");

#ifdef CONFIG_DEBUG_LOCK_ALLOC
/* lockdep can detect a circular dependency of the form
 *   sk_lock -> mmap_lock (page fault) -> fs locks -> sk_lock
 * because dependencies are tracked for both nvme-tcp and user contexts. Using
 * a separate class prevents lockdep from conflating nvme-tcp socket use with
 * user-space socket API use.
 */
static struct lock_class_key nvme_tcp_sk_key[2];
static struct lock_class_key nvme_tcp_slock_key[2];

static void nvme_tcp_reclassify_socket(struct socket *sock)
{
	struct sock *sk = sock->sk;

	if (WARN_ON_ONCE(!sock_allow_reclassification(sk)))
		return;

	switch (sk->sk_family) {
	case AF_INET:
		sock_lock_init_class_and_name(sk, "slock-AF_INET-NVME",
					&nvme_tcp_slock_key[0],
					"sk_lock-AF_INET-NVME",
					&nvme_tcp_sk_key[0]);
		break;
	case AF_INET6:
		sock_lock_init_class_and_name(sk, "slock-AF_INET6-NVME",
					&nvme_tcp_slock_key[1],
					"sk_lock-AF_INET6-NVME",
					&nvme_tcp_sk_key[1]);
		break;
	default:
		WARN_ON_ONCE(1);
	}
}
#else
static void nvme_tcp_reclassify_socket(struct socket *sock) { }
#endif

enum nvme_tcp_send_state {
	NVME_TCP_SEND_CMD_PDU = 0,
	NVME_TCP_SEND_H2C_PDU,
	NVME_TCP_SEND_DATA,
	NVME_TCP_SEND_DDGST,
};

struct nvme_tcp_request {
	struct nvme_request req;
	void *pdu;
	struct nvme_tcp_queue *queue;
	u32 data_len;
	u32 pdu_len;
	u32 pdu_sent;
	u16 ttag;
	__le16 status;
	struct list_head entry;
	struct llist_node lentry;
	__le32 ddgst;

	struct bio *curr_bio;
	struct iov_iter iter;

	/* send state */
	size_t offset;
	size_t data_sent;
	enum nvme_tcp_send_state state;
};

enum nvme_tcp_queue_flags {
	NVME_TCP_Q_ALLOCATED = 0,
	NVME_TCP_Q_LIVE = 1,
	NVME_TCP_Q_POLLING = 2,
};

enum nvme_tcp_recv_state {
	NVME_TCP_RECV_PDU = 0,
	NVME_TCP_RECV_DATA,
	NVME_TCP_RECV_DDGST,
};

struct nvme_tcp_ctrl;
struct nvme_tcp_queue {
	struct socket *sock;
	struct work_struct io_work;
	int io_cpu;

	struct mutex queue_lock;
	struct mutex send_mutex;
	struct llist_head req_list;
	struct list_head send_list;
	bool more_requests;

	/* recv state */
	void *pdu;
	int pdu_remaining;
	int pdu_offset;
	size_t data_remaining;
	size_t ddgst_remaining;
	unsigned int nr_cqe;

	/* send state */
	struct nvme_tcp_request *request;

	int queue_size;
	size_t cmnd_capsule_len;
struct nvme_tcp_ctrl *ctrl; 138 unsigned long flags; 139 bool rd_enabled; 140 141 bool hdr_digest; 142 bool data_digest; 143 struct ahash_request *rcv_hash; 144 struct ahash_request *snd_hash; 145 __le32 exp_ddgst; 146 __le32 recv_ddgst; 147 148 struct page_frag_cache pf_cache; 149 150 void (*state_change)(struct sock *); 151 void (*data_ready)(struct sock *); 152 void (*write_space)(struct sock *); 153 }; 154 155 struct nvme_tcp_ctrl { 156 /* read only in the hot path */ 157 struct nvme_tcp_queue *queues; 158 struct blk_mq_tag_set tag_set; 159 160 /* other member variables */ 161 struct list_head list; 162 struct blk_mq_tag_set admin_tag_set; 163 struct sockaddr_storage addr; 164 struct sockaddr_storage src_addr; 165 struct nvme_ctrl ctrl; 166 167 struct work_struct err_work; 168 struct delayed_work connect_work; 169 struct nvme_tcp_request async_req; 170 u32 io_queues[HCTX_MAX_TYPES]; 171 }; 172 173 static LIST_HEAD(nvme_tcp_ctrl_list); 174 static DEFINE_MUTEX(nvme_tcp_ctrl_mutex); 175 static struct workqueue_struct *nvme_tcp_wq; 176 static const struct blk_mq_ops nvme_tcp_mq_ops; 177 static const struct blk_mq_ops nvme_tcp_admin_mq_ops; 178 static int nvme_tcp_try_send(struct nvme_tcp_queue *queue); 179 180 static inline struct nvme_tcp_ctrl *to_tcp_ctrl(struct nvme_ctrl *ctrl) 181 { 182 return container_of(ctrl, struct nvme_tcp_ctrl, ctrl); 183 } 184 185 static inline int nvme_tcp_queue_id(struct nvme_tcp_queue *queue) 186 { 187 return queue - queue->ctrl->queues; 188 } 189 190 static inline struct blk_mq_tags *nvme_tcp_tagset(struct nvme_tcp_queue *queue) 191 { 192 u32 queue_idx = nvme_tcp_queue_id(queue); 193 194 if (queue_idx == 0) 195 return queue->ctrl->admin_tag_set.tags[queue_idx]; 196 return queue->ctrl->tag_set.tags[queue_idx - 1]; 197 } 198 199 static inline u8 nvme_tcp_hdgst_len(struct nvme_tcp_queue *queue) 200 { 201 return queue->hdr_digest ? NVME_TCP_DIGEST_LENGTH : 0; 202 } 203 204 static inline u8 nvme_tcp_ddgst_len(struct nvme_tcp_queue *queue) 205 { 206 return queue->data_digest ? NVME_TCP_DIGEST_LENGTH : 0; 207 } 208 209 static inline size_t nvme_tcp_inline_data_size(struct nvme_tcp_queue *queue) 210 { 211 return queue->cmnd_capsule_len - sizeof(struct nvme_command); 212 } 213 214 static inline bool nvme_tcp_async_req(struct nvme_tcp_request *req) 215 { 216 return req == &req->queue->ctrl->async_req; 217 } 218 219 static inline bool nvme_tcp_has_inline_data(struct nvme_tcp_request *req) 220 { 221 struct request *rq; 222 223 if (unlikely(nvme_tcp_async_req(req))) 224 return false; /* async events don't have a request */ 225 226 rq = blk_mq_rq_from_pdu(req); 227 228 return rq_data_dir(rq) == WRITE && req->data_len && 229 req->data_len <= nvme_tcp_inline_data_size(req->queue); 230 } 231 232 static inline struct page *nvme_tcp_req_cur_page(struct nvme_tcp_request *req) 233 { 234 return req->iter.bvec->bv_page; 235 } 236 237 static inline size_t nvme_tcp_req_cur_offset(struct nvme_tcp_request *req) 238 { 239 return req->iter.bvec->bv_offset + req->iter.iov_offset; 240 } 241 242 static inline size_t nvme_tcp_req_cur_length(struct nvme_tcp_request *req) 243 { 244 return min_t(size_t, iov_iter_single_seg_count(&req->iter), 245 req->pdu_len - req->pdu_sent); 246 } 247 248 static inline size_t nvme_tcp_pdu_data_left(struct nvme_tcp_request *req) 249 { 250 return rq_data_dir(blk_mq_rq_from_pdu(req)) == WRITE ? 
			req->pdu_len - req->pdu_sent : 0;
}

static inline size_t nvme_tcp_pdu_last_send(struct nvme_tcp_request *req,
		int len)
{
	return nvme_tcp_pdu_data_left(req) <= len;
}

static void nvme_tcp_init_iter(struct nvme_tcp_request *req,
		unsigned int dir)
{
	struct request *rq = blk_mq_rq_from_pdu(req);
	struct bio_vec *vec;
	unsigned int size;
	int nr_bvec;
	size_t offset;

	if (rq->rq_flags & RQF_SPECIAL_PAYLOAD) {
		vec = &rq->special_vec;
		nr_bvec = 1;
		size = blk_rq_payload_bytes(rq);
		offset = 0;
	} else {
		struct bio *bio = req->curr_bio;
		struct bvec_iter bi;
		struct bio_vec bv;

		vec = __bvec_iter_bvec(bio->bi_io_vec, bio->bi_iter);
		nr_bvec = 0;
		bio_for_each_bvec(bv, bio, bi) {
			nr_bvec++;
		}
		size = bio->bi_iter.bi_size;
		offset = bio->bi_iter.bi_bvec_done;
	}

	iov_iter_bvec(&req->iter, dir, vec, nr_bvec, size);
	req->iter.iov_offset = offset;
}

static inline void nvme_tcp_advance_req(struct nvme_tcp_request *req,
		int len)
{
	req->data_sent += len;
	req->pdu_sent += len;
	iov_iter_advance(&req->iter, len);
	if (!iov_iter_count(&req->iter) &&
	    req->data_sent < req->data_len) {
		req->curr_bio = req->curr_bio->bi_next;
		nvme_tcp_init_iter(req, WRITE);
	}
}

static inline void nvme_tcp_send_all(struct nvme_tcp_queue *queue)
{
	int ret;

	/* drain the send queue as much as we can... */
	do {
		ret = nvme_tcp_try_send(queue);
	} while (ret > 0);
}

static inline bool nvme_tcp_queue_more(struct nvme_tcp_queue *queue)
{
	return !list_empty(&queue->send_list) ||
		!llist_empty(&queue->req_list) || queue->more_requests;
}

static inline void nvme_tcp_queue_request(struct nvme_tcp_request *req,
		bool sync, bool last)
{
	struct nvme_tcp_queue *queue = req->queue;
	bool empty;

	empty = llist_add(&req->lentry, &queue->req_list) &&
		list_empty(&queue->send_list) && !queue->request;

	/*
	 * If we are the first on the send_list, try to send directly;
	 * otherwise queue io_work. Also, only do that if we are on the
	 * same cpu, so we don't introduce contention.
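	 * While send_mutex is held here, more_requests tells
	 * nvme_tcp_queue_more() that further submissions are expected, so
	 * the data send path keeps using MSG_MORE rather than ending the
	 * TCP record early (see nvme_tcp_try_send_data()).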
334 */ 335 if (queue->io_cpu == raw_smp_processor_id() && 336 sync && empty && mutex_trylock(&queue->send_mutex)) { 337 queue->more_requests = !last; 338 nvme_tcp_send_all(queue); 339 queue->more_requests = false; 340 mutex_unlock(&queue->send_mutex); 341 } 342 343 if (last && nvme_tcp_queue_more(queue)) 344 queue_work_on(queue->io_cpu, nvme_tcp_wq, &queue->io_work); 345 } 346 347 static void nvme_tcp_process_req_list(struct nvme_tcp_queue *queue) 348 { 349 struct nvme_tcp_request *req; 350 struct llist_node *node; 351 352 for (node = llist_del_all(&queue->req_list); node; node = node->next) { 353 req = llist_entry(node, struct nvme_tcp_request, lentry); 354 list_add(&req->entry, &queue->send_list); 355 } 356 } 357 358 static inline struct nvme_tcp_request * 359 nvme_tcp_fetch_request(struct nvme_tcp_queue *queue) 360 { 361 struct nvme_tcp_request *req; 362 363 req = list_first_entry_or_null(&queue->send_list, 364 struct nvme_tcp_request, entry); 365 if (!req) { 366 nvme_tcp_process_req_list(queue); 367 req = list_first_entry_or_null(&queue->send_list, 368 struct nvme_tcp_request, entry); 369 if (unlikely(!req)) 370 return NULL; 371 } 372 373 list_del(&req->entry); 374 return req; 375 } 376 377 static inline void nvme_tcp_ddgst_final(struct ahash_request *hash, 378 __le32 *dgst) 379 { 380 ahash_request_set_crypt(hash, NULL, (u8 *)dgst, 0); 381 crypto_ahash_final(hash); 382 } 383 384 static inline void nvme_tcp_ddgst_update(struct ahash_request *hash, 385 struct page *page, off_t off, size_t len) 386 { 387 struct scatterlist sg; 388 389 sg_init_marker(&sg, 1); 390 sg_set_page(&sg, page, len, off); 391 ahash_request_set_crypt(hash, &sg, NULL, len); 392 crypto_ahash_update(hash); 393 } 394 395 static inline void nvme_tcp_hdgst(struct ahash_request *hash, 396 void *pdu, size_t len) 397 { 398 struct scatterlist sg; 399 400 sg_init_one(&sg, pdu, len); 401 ahash_request_set_crypt(hash, &sg, pdu + len, len); 402 crypto_ahash_digest(hash); 403 } 404 405 static int nvme_tcp_verify_hdgst(struct nvme_tcp_queue *queue, 406 void *pdu, size_t pdu_len) 407 { 408 struct nvme_tcp_hdr *hdr = pdu; 409 __le32 recv_digest; 410 __le32 exp_digest; 411 412 if (unlikely(!(hdr->flags & NVME_TCP_F_HDGST))) { 413 dev_err(queue->ctrl->ctrl.device, 414 "queue %d: header digest flag is cleared\n", 415 nvme_tcp_queue_id(queue)); 416 return -EPROTO; 417 } 418 419 recv_digest = *(__le32 *)(pdu + hdr->hlen); 420 nvme_tcp_hdgst(queue->rcv_hash, pdu, pdu_len); 421 exp_digest = *(__le32 *)(pdu + hdr->hlen); 422 if (recv_digest != exp_digest) { 423 dev_err(queue->ctrl->ctrl.device, 424 "header digest error: recv %#x expected %#x\n", 425 le32_to_cpu(recv_digest), le32_to_cpu(exp_digest)); 426 return -EIO; 427 } 428 429 return 0; 430 } 431 432 static int nvme_tcp_check_ddgst(struct nvme_tcp_queue *queue, void *pdu) 433 { 434 struct nvme_tcp_hdr *hdr = pdu; 435 u8 digest_len = nvme_tcp_hdgst_len(queue); 436 u32 len; 437 438 len = le32_to_cpu(hdr->plen) - hdr->hlen - 439 ((hdr->flags & NVME_TCP_F_HDGST) ? 
digest_len : 0); 440 441 if (unlikely(len && !(hdr->flags & NVME_TCP_F_DDGST))) { 442 dev_err(queue->ctrl->ctrl.device, 443 "queue %d: data digest flag is cleared\n", 444 nvme_tcp_queue_id(queue)); 445 return -EPROTO; 446 } 447 crypto_ahash_init(queue->rcv_hash); 448 449 return 0; 450 } 451 452 static void nvme_tcp_exit_request(struct blk_mq_tag_set *set, 453 struct request *rq, unsigned int hctx_idx) 454 { 455 struct nvme_tcp_request *req = blk_mq_rq_to_pdu(rq); 456 457 page_frag_free(req->pdu); 458 } 459 460 static int nvme_tcp_init_request(struct blk_mq_tag_set *set, 461 struct request *rq, unsigned int hctx_idx, 462 unsigned int numa_node) 463 { 464 struct nvme_tcp_ctrl *ctrl = set->driver_data; 465 struct nvme_tcp_request *req = blk_mq_rq_to_pdu(rq); 466 struct nvme_tcp_cmd_pdu *pdu; 467 int queue_idx = (set == &ctrl->tag_set) ? hctx_idx + 1 : 0; 468 struct nvme_tcp_queue *queue = &ctrl->queues[queue_idx]; 469 u8 hdgst = nvme_tcp_hdgst_len(queue); 470 471 req->pdu = page_frag_alloc(&queue->pf_cache, 472 sizeof(struct nvme_tcp_cmd_pdu) + hdgst, 473 GFP_KERNEL | __GFP_ZERO); 474 if (!req->pdu) 475 return -ENOMEM; 476 477 pdu = req->pdu; 478 req->queue = queue; 479 nvme_req(rq)->ctrl = &ctrl->ctrl; 480 nvme_req(rq)->cmd = &pdu->cmd; 481 482 return 0; 483 } 484 485 static int nvme_tcp_init_hctx(struct blk_mq_hw_ctx *hctx, void *data, 486 unsigned int hctx_idx) 487 { 488 struct nvme_tcp_ctrl *ctrl = data; 489 struct nvme_tcp_queue *queue = &ctrl->queues[hctx_idx + 1]; 490 491 hctx->driver_data = queue; 492 return 0; 493 } 494 495 static int nvme_tcp_init_admin_hctx(struct blk_mq_hw_ctx *hctx, void *data, 496 unsigned int hctx_idx) 497 { 498 struct nvme_tcp_ctrl *ctrl = data; 499 struct nvme_tcp_queue *queue = &ctrl->queues[0]; 500 501 hctx->driver_data = queue; 502 return 0; 503 } 504 505 static enum nvme_tcp_recv_state 506 nvme_tcp_recv_state(struct nvme_tcp_queue *queue) 507 { 508 return (queue->pdu_remaining) ? NVME_TCP_RECV_PDU : 509 (queue->ddgst_remaining) ? 
NVME_TCP_RECV_DDGST : 510 NVME_TCP_RECV_DATA; 511 } 512 513 static void nvme_tcp_init_recv_ctx(struct nvme_tcp_queue *queue) 514 { 515 queue->pdu_remaining = sizeof(struct nvme_tcp_rsp_pdu) + 516 nvme_tcp_hdgst_len(queue); 517 queue->pdu_offset = 0; 518 queue->data_remaining = -1; 519 queue->ddgst_remaining = 0; 520 } 521 522 static void nvme_tcp_error_recovery(struct nvme_ctrl *ctrl) 523 { 524 if (!nvme_change_ctrl_state(ctrl, NVME_CTRL_RESETTING)) 525 return; 526 527 dev_warn(ctrl->device, "starting error recovery\n"); 528 queue_work(nvme_reset_wq, &to_tcp_ctrl(ctrl)->err_work); 529 } 530 531 static int nvme_tcp_process_nvme_cqe(struct nvme_tcp_queue *queue, 532 struct nvme_completion *cqe) 533 { 534 struct nvme_tcp_request *req; 535 struct request *rq; 536 537 rq = nvme_find_rq(nvme_tcp_tagset(queue), cqe->command_id); 538 if (!rq) { 539 dev_err(queue->ctrl->ctrl.device, 540 "got bad cqe.command_id %#x on queue %d\n", 541 cqe->command_id, nvme_tcp_queue_id(queue)); 542 nvme_tcp_error_recovery(&queue->ctrl->ctrl); 543 return -EINVAL; 544 } 545 546 req = blk_mq_rq_to_pdu(rq); 547 if (req->status == cpu_to_le16(NVME_SC_SUCCESS)) 548 req->status = cqe->status; 549 550 if (!nvme_try_complete_req(rq, req->status, cqe->result)) 551 nvme_complete_rq(rq); 552 queue->nr_cqe++; 553 554 return 0; 555 } 556 557 static int nvme_tcp_handle_c2h_data(struct nvme_tcp_queue *queue, 558 struct nvme_tcp_data_pdu *pdu) 559 { 560 struct request *rq; 561 562 rq = nvme_find_rq(nvme_tcp_tagset(queue), pdu->command_id); 563 if (!rq) { 564 dev_err(queue->ctrl->ctrl.device, 565 "got bad c2hdata.command_id %#x on queue %d\n", 566 pdu->command_id, nvme_tcp_queue_id(queue)); 567 return -ENOENT; 568 } 569 570 if (!blk_rq_payload_bytes(rq)) { 571 dev_err(queue->ctrl->ctrl.device, 572 "queue %d tag %#x unexpected data\n", 573 nvme_tcp_queue_id(queue), rq->tag); 574 return -EIO; 575 } 576 577 queue->data_remaining = le32_to_cpu(pdu->data_length); 578 579 if (pdu->hdr.flags & NVME_TCP_F_DATA_SUCCESS && 580 unlikely(!(pdu->hdr.flags & NVME_TCP_F_DATA_LAST))) { 581 dev_err(queue->ctrl->ctrl.device, 582 "queue %d tag %#x SUCCESS set but not last PDU\n", 583 nvme_tcp_queue_id(queue), rq->tag); 584 nvme_tcp_error_recovery(&queue->ctrl->ctrl); 585 return -EPROTO; 586 } 587 588 return 0; 589 } 590 591 static int nvme_tcp_handle_comp(struct nvme_tcp_queue *queue, 592 struct nvme_tcp_rsp_pdu *pdu) 593 { 594 struct nvme_completion *cqe = &pdu->cqe; 595 int ret = 0; 596 597 /* 598 * AEN requests are special as they don't time out and can 599 * survive any kind of queue freeze and often don't respond to 600 * aborts. We don't even bother to allocate a struct request 601 * for them but rather special case them here. 
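	 * Such completions are identified by nvme_is_aen_req(), i.e. by the
	 * admin queue id together with a command_id outside the regular tag
	 * range, and are handed to nvme_complete_async_event() instead of
	 * the block layer.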
602 */ 603 if (unlikely(nvme_is_aen_req(nvme_tcp_queue_id(queue), 604 cqe->command_id))) 605 nvme_complete_async_event(&queue->ctrl->ctrl, cqe->status, 606 &cqe->result); 607 else 608 ret = nvme_tcp_process_nvme_cqe(queue, cqe); 609 610 return ret; 611 } 612 613 static int nvme_tcp_setup_h2c_data_pdu(struct nvme_tcp_request *req, 614 struct nvme_tcp_r2t_pdu *pdu) 615 { 616 struct nvme_tcp_data_pdu *data = req->pdu; 617 struct nvme_tcp_queue *queue = req->queue; 618 struct request *rq = blk_mq_rq_from_pdu(req); 619 u8 hdgst = nvme_tcp_hdgst_len(queue); 620 u8 ddgst = nvme_tcp_ddgst_len(queue); 621 622 req->pdu_len = le32_to_cpu(pdu->r2t_length); 623 req->pdu_sent = 0; 624 625 if (unlikely(!req->pdu_len)) { 626 dev_err(queue->ctrl->ctrl.device, 627 "req %d r2t len is %u, probably a bug...\n", 628 rq->tag, req->pdu_len); 629 return -EPROTO; 630 } 631 632 if (unlikely(req->data_sent + req->pdu_len > req->data_len)) { 633 dev_err(queue->ctrl->ctrl.device, 634 "req %d r2t len %u exceeded data len %u (%zu sent)\n", 635 rq->tag, req->pdu_len, req->data_len, 636 req->data_sent); 637 return -EPROTO; 638 } 639 640 if (unlikely(le32_to_cpu(pdu->r2t_offset) < req->data_sent)) { 641 dev_err(queue->ctrl->ctrl.device, 642 "req %d unexpected r2t offset %u (expected %zu)\n", 643 rq->tag, le32_to_cpu(pdu->r2t_offset), 644 req->data_sent); 645 return -EPROTO; 646 } 647 648 memset(data, 0, sizeof(*data)); 649 data->hdr.type = nvme_tcp_h2c_data; 650 data->hdr.flags = NVME_TCP_F_DATA_LAST; 651 if (queue->hdr_digest) 652 data->hdr.flags |= NVME_TCP_F_HDGST; 653 if (queue->data_digest) 654 data->hdr.flags |= NVME_TCP_F_DDGST; 655 data->hdr.hlen = sizeof(*data); 656 data->hdr.pdo = data->hdr.hlen + hdgst; 657 data->hdr.plen = 658 cpu_to_le32(data->hdr.hlen + hdgst + req->pdu_len + ddgst); 659 data->ttag = pdu->ttag; 660 data->command_id = nvme_cid(rq); 661 data->data_offset = pdu->r2t_offset; 662 data->data_length = cpu_to_le32(req->pdu_len); 663 return 0; 664 } 665 666 static int nvme_tcp_handle_r2t(struct nvme_tcp_queue *queue, 667 struct nvme_tcp_r2t_pdu *pdu) 668 { 669 struct nvme_tcp_request *req; 670 struct request *rq; 671 int ret; 672 673 rq = nvme_find_rq(nvme_tcp_tagset(queue), pdu->command_id); 674 if (!rq) { 675 dev_err(queue->ctrl->ctrl.device, 676 "got bad r2t.command_id %#x on queue %d\n", 677 pdu->command_id, nvme_tcp_queue_id(queue)); 678 return -ENOENT; 679 } 680 req = blk_mq_rq_to_pdu(rq); 681 682 ret = nvme_tcp_setup_h2c_data_pdu(req, pdu); 683 if (unlikely(ret)) 684 return ret; 685 686 req->state = NVME_TCP_SEND_H2C_PDU; 687 req->offset = 0; 688 689 nvme_tcp_queue_request(req, false, true); 690 691 return 0; 692 } 693 694 static int nvme_tcp_recv_pdu(struct nvme_tcp_queue *queue, struct sk_buff *skb, 695 unsigned int *offset, size_t *len) 696 { 697 struct nvme_tcp_hdr *hdr; 698 char *pdu = queue->pdu; 699 size_t rcv_len = min_t(size_t, *len, queue->pdu_remaining); 700 int ret; 701 702 ret = skb_copy_bits(skb, *offset, 703 &pdu[queue->pdu_offset], rcv_len); 704 if (unlikely(ret)) 705 return ret; 706 707 queue->pdu_remaining -= rcv_len; 708 queue->pdu_offset += rcv_len; 709 *offset += rcv_len; 710 *len -= rcv_len; 711 if (queue->pdu_remaining) 712 return 0; 713 714 hdr = queue->pdu; 715 if (queue->hdr_digest) { 716 ret = nvme_tcp_verify_hdgst(queue, queue->pdu, hdr->hlen); 717 if (unlikely(ret)) 718 return ret; 719 } 720 721 722 if (queue->data_digest) { 723 ret = nvme_tcp_check_ddgst(queue, queue->pdu); 724 if (unlikely(ret)) 725 return ret; 726 } 727 728 switch (hdr->type) { 729 case 
	nvme_tcp_c2h_data:
		return nvme_tcp_handle_c2h_data(queue, (void *)queue->pdu);
	case nvme_tcp_rsp:
		nvme_tcp_init_recv_ctx(queue);
		return nvme_tcp_handle_comp(queue, (void *)queue->pdu);
	case nvme_tcp_r2t:
		nvme_tcp_init_recv_ctx(queue);
		return nvme_tcp_handle_r2t(queue, (void *)queue->pdu);
	default:
		dev_err(queue->ctrl->ctrl.device,
			"unsupported pdu type (%d)\n", hdr->type);
		return -EINVAL;
	}
}

static inline void nvme_tcp_end_request(struct request *rq, u16 status)
{
	union nvme_result res = {};

	if (!nvme_try_complete_req(rq, cpu_to_le16(status << 1), res))
		nvme_complete_rq(rq);
}

static int nvme_tcp_recv_data(struct nvme_tcp_queue *queue, struct sk_buff *skb,
		unsigned int *offset, size_t *len)
{
	struct nvme_tcp_data_pdu *pdu = (void *)queue->pdu;
	struct request *rq =
		nvme_cid_to_rq(nvme_tcp_tagset(queue), pdu->command_id);
	struct nvme_tcp_request *req = blk_mq_rq_to_pdu(rq);

	while (true) {
		int recv_len, ret;

		recv_len = min_t(size_t, *len, queue->data_remaining);
		if (!recv_len)
			break;

		if (!iov_iter_count(&req->iter)) {
			req->curr_bio = req->curr_bio->bi_next;

			/*
			 * If we don't have any bios it means that the
			 * controller sent more data than we requested,
			 * hence error
			 */
			if (!req->curr_bio) {
				dev_err(queue->ctrl->ctrl.device,
					"queue %d no space in request %#x",
					nvme_tcp_queue_id(queue), rq->tag);
				nvme_tcp_init_recv_ctx(queue);
				return -EIO;
			}
			nvme_tcp_init_iter(req, READ);
		}

		/* we can read only from what is left in this bio */
		recv_len = min_t(size_t, recv_len,
				iov_iter_count(&req->iter));

		if (queue->data_digest)
			ret = skb_copy_and_hash_datagram_iter(skb, *offset,
				&req->iter, recv_len, queue->rcv_hash);
		else
			ret = skb_copy_datagram_iter(skb, *offset,
				&req->iter, recv_len);
		if (ret) {
			dev_err(queue->ctrl->ctrl.device,
				"queue %d failed to copy request %#x data",
				nvme_tcp_queue_id(queue), rq->tag);
			return ret;
		}

		*len -= recv_len;
		*offset += recv_len;
		queue->data_remaining -= recv_len;
	}

	if (!queue->data_remaining) {
		if (queue->data_digest) {
			nvme_tcp_ddgst_final(queue->rcv_hash, &queue->exp_ddgst);
			queue->ddgst_remaining = NVME_TCP_DIGEST_LENGTH;
		} else {
			if (pdu->hdr.flags & NVME_TCP_F_DATA_SUCCESS) {
				nvme_tcp_end_request(rq,
						le16_to_cpu(req->status));
				queue->nr_cqe++;
			}
			nvme_tcp_init_recv_ctx(queue);
		}
	}

	return 0;
}

static int nvme_tcp_recv_ddgst(struct nvme_tcp_queue *queue,
		struct sk_buff *skb, unsigned int *offset, size_t *len)
{
	struct nvme_tcp_data_pdu *pdu = (void *)queue->pdu;
	char *ddgst = (char *)&queue->recv_ddgst;
	size_t recv_len = min_t(size_t, *len, queue->ddgst_remaining);
	off_t off = NVME_TCP_DIGEST_LENGTH - queue->ddgst_remaining;
	int ret;

	ret = skb_copy_bits(skb, *offset, &ddgst[off], recv_len);
	if (unlikely(ret))
		return ret;

	queue->ddgst_remaining -= recv_len;
	*offset += recv_len;
	*len -= recv_len;
	if (queue->ddgst_remaining)
		return 0;

	if (queue->recv_ddgst != queue->exp_ddgst) {
		struct request *rq = nvme_cid_to_rq(nvme_tcp_tagset(queue),
				pdu->command_id);
		struct nvme_tcp_request *req = blk_mq_rq_to_pdu(rq);

		req->status = cpu_to_le16(NVME_SC_DATA_XFER_ERROR);

		dev_err(queue->ctrl->ctrl.device,
			"data digest
error: recv %#x expected %#x\n", 851 le32_to_cpu(queue->recv_ddgst), 852 le32_to_cpu(queue->exp_ddgst)); 853 } 854 855 if (pdu->hdr.flags & NVME_TCP_F_DATA_SUCCESS) { 856 struct request *rq = nvme_cid_to_rq(nvme_tcp_tagset(queue), 857 pdu->command_id); 858 struct nvme_tcp_request *req = blk_mq_rq_to_pdu(rq); 859 860 nvme_tcp_end_request(rq, le16_to_cpu(req->status)); 861 queue->nr_cqe++; 862 } 863 864 nvme_tcp_init_recv_ctx(queue); 865 return 0; 866 } 867 868 static int nvme_tcp_recv_skb(read_descriptor_t *desc, struct sk_buff *skb, 869 unsigned int offset, size_t len) 870 { 871 struct nvme_tcp_queue *queue = desc->arg.data; 872 size_t consumed = len; 873 int result; 874 875 while (len) { 876 switch (nvme_tcp_recv_state(queue)) { 877 case NVME_TCP_RECV_PDU: 878 result = nvme_tcp_recv_pdu(queue, skb, &offset, &len); 879 break; 880 case NVME_TCP_RECV_DATA: 881 result = nvme_tcp_recv_data(queue, skb, &offset, &len); 882 break; 883 case NVME_TCP_RECV_DDGST: 884 result = nvme_tcp_recv_ddgst(queue, skb, &offset, &len); 885 break; 886 default: 887 result = -EFAULT; 888 } 889 if (result) { 890 dev_err(queue->ctrl->ctrl.device, 891 "receive failed: %d\n", result); 892 queue->rd_enabled = false; 893 nvme_tcp_error_recovery(&queue->ctrl->ctrl); 894 return result; 895 } 896 } 897 898 return consumed; 899 } 900 901 static void nvme_tcp_data_ready(struct sock *sk) 902 { 903 struct nvme_tcp_queue *queue; 904 905 read_lock_bh(&sk->sk_callback_lock); 906 queue = sk->sk_user_data; 907 if (likely(queue && queue->rd_enabled) && 908 !test_bit(NVME_TCP_Q_POLLING, &queue->flags)) 909 queue_work_on(queue->io_cpu, nvme_tcp_wq, &queue->io_work); 910 read_unlock_bh(&sk->sk_callback_lock); 911 } 912 913 static void nvme_tcp_write_space(struct sock *sk) 914 { 915 struct nvme_tcp_queue *queue; 916 917 read_lock_bh(&sk->sk_callback_lock); 918 queue = sk->sk_user_data; 919 if (likely(queue && sk_stream_is_writeable(sk))) { 920 clear_bit(SOCK_NOSPACE, &sk->sk_socket->flags); 921 queue_work_on(queue->io_cpu, nvme_tcp_wq, &queue->io_work); 922 } 923 read_unlock_bh(&sk->sk_callback_lock); 924 } 925 926 static void nvme_tcp_state_change(struct sock *sk) 927 { 928 struct nvme_tcp_queue *queue; 929 930 read_lock_bh(&sk->sk_callback_lock); 931 queue = sk->sk_user_data; 932 if (!queue) 933 goto done; 934 935 switch (sk->sk_state) { 936 case TCP_CLOSE: 937 case TCP_CLOSE_WAIT: 938 case TCP_LAST_ACK: 939 case TCP_FIN_WAIT1: 940 case TCP_FIN_WAIT2: 941 nvme_tcp_error_recovery(&queue->ctrl->ctrl); 942 break; 943 default: 944 dev_info(queue->ctrl->ctrl.device, 945 "queue %d socket state %d\n", 946 nvme_tcp_queue_id(queue), sk->sk_state); 947 } 948 949 queue->state_change(sk); 950 done: 951 read_unlock_bh(&sk->sk_callback_lock); 952 } 953 954 static inline void nvme_tcp_done_send_req(struct nvme_tcp_queue *queue) 955 { 956 queue->request = NULL; 957 } 958 959 static void nvme_tcp_fail_request(struct nvme_tcp_request *req) 960 { 961 if (nvme_tcp_async_req(req)) { 962 union nvme_result res = {}; 963 964 nvme_complete_async_event(&req->queue->ctrl->ctrl, 965 cpu_to_le16(NVME_SC_HOST_PATH_ERROR), &res); 966 } else { 967 nvme_tcp_end_request(blk_mq_rq_from_pdu(req), 968 NVME_SC_HOST_PATH_ERROR); 969 } 970 } 971 972 static int nvme_tcp_try_send_data(struct nvme_tcp_request *req) 973 { 974 struct nvme_tcp_queue *queue = req->queue; 975 int req_data_len = req->data_len; 976 977 while (true) { 978 struct page *page = nvme_tcp_req_cur_page(req); 979 size_t offset = nvme_tcp_req_cur_offset(req); 980 size_t len = nvme_tcp_req_cur_length(req); 981 
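		/*
		 * Send the current fragment with kernel_sendpage() when the
		 * page is safe for sendpage, or fall back to
		 * sock_no_sendpage() otherwise. MSG_EOR is only used for the
		 * last fragment of the last PDU when no data digest follows
		 * and nothing else is queued; otherwise
		 * MSG_MORE/MSG_SENDPAGE_NOTLAST keep the TCP stack batching.
		 */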
bool last = nvme_tcp_pdu_last_send(req, len); 982 int req_data_sent = req->data_sent; 983 int ret, flags = MSG_DONTWAIT; 984 985 if (last && !queue->data_digest && !nvme_tcp_queue_more(queue)) 986 flags |= MSG_EOR; 987 else 988 flags |= MSG_MORE | MSG_SENDPAGE_NOTLAST; 989 990 if (sendpage_ok(page)) { 991 ret = kernel_sendpage(queue->sock, page, offset, len, 992 flags); 993 } else { 994 ret = sock_no_sendpage(queue->sock, page, offset, len, 995 flags); 996 } 997 if (ret <= 0) 998 return ret; 999 1000 if (queue->data_digest) 1001 nvme_tcp_ddgst_update(queue->snd_hash, page, 1002 offset, ret); 1003 1004 /* 1005 * update the request iterator except for the last payload send 1006 * in the request where we don't want to modify it as we may 1007 * compete with the RX path completing the request. 1008 */ 1009 if (req_data_sent + ret < req_data_len) 1010 nvme_tcp_advance_req(req, ret); 1011 1012 /* fully successful last send in current PDU */ 1013 if (last && ret == len) { 1014 if (queue->data_digest) { 1015 nvme_tcp_ddgst_final(queue->snd_hash, 1016 &req->ddgst); 1017 req->state = NVME_TCP_SEND_DDGST; 1018 req->offset = 0; 1019 } else { 1020 nvme_tcp_done_send_req(queue); 1021 } 1022 return 1; 1023 } 1024 } 1025 return -EAGAIN; 1026 } 1027 1028 static int nvme_tcp_try_send_cmd_pdu(struct nvme_tcp_request *req) 1029 { 1030 struct nvme_tcp_queue *queue = req->queue; 1031 struct nvme_tcp_cmd_pdu *pdu = req->pdu; 1032 bool inline_data = nvme_tcp_has_inline_data(req); 1033 u8 hdgst = nvme_tcp_hdgst_len(queue); 1034 int len = sizeof(*pdu) + hdgst - req->offset; 1035 int flags = MSG_DONTWAIT; 1036 int ret; 1037 1038 if (inline_data || nvme_tcp_queue_more(queue)) 1039 flags |= MSG_MORE | MSG_SENDPAGE_NOTLAST; 1040 else 1041 flags |= MSG_EOR; 1042 1043 if (queue->hdr_digest && !req->offset) 1044 nvme_tcp_hdgst(queue->snd_hash, pdu, sizeof(*pdu)); 1045 1046 ret = kernel_sendpage(queue->sock, virt_to_page(pdu), 1047 offset_in_page(pdu) + req->offset, len, flags); 1048 if (unlikely(ret <= 0)) 1049 return ret; 1050 1051 len -= ret; 1052 if (!len) { 1053 if (inline_data) { 1054 req->state = NVME_TCP_SEND_DATA; 1055 if (queue->data_digest) 1056 crypto_ahash_init(queue->snd_hash); 1057 } else { 1058 nvme_tcp_done_send_req(queue); 1059 } 1060 return 1; 1061 } 1062 req->offset += ret; 1063 1064 return -EAGAIN; 1065 } 1066 1067 static int nvme_tcp_try_send_data_pdu(struct nvme_tcp_request *req) 1068 { 1069 struct nvme_tcp_queue *queue = req->queue; 1070 struct nvme_tcp_data_pdu *pdu = req->pdu; 1071 u8 hdgst = nvme_tcp_hdgst_len(queue); 1072 int len = sizeof(*pdu) - req->offset + hdgst; 1073 int ret; 1074 1075 if (queue->hdr_digest && !req->offset) 1076 nvme_tcp_hdgst(queue->snd_hash, pdu, sizeof(*pdu)); 1077 1078 ret = kernel_sendpage(queue->sock, virt_to_page(pdu), 1079 offset_in_page(pdu) + req->offset, len, 1080 MSG_DONTWAIT | MSG_MORE | MSG_SENDPAGE_NOTLAST); 1081 if (unlikely(ret <= 0)) 1082 return ret; 1083 1084 len -= ret; 1085 if (!len) { 1086 req->state = NVME_TCP_SEND_DATA; 1087 if (queue->data_digest) 1088 crypto_ahash_init(queue->snd_hash); 1089 return 1; 1090 } 1091 req->offset += ret; 1092 1093 return -EAGAIN; 1094 } 1095 1096 static int nvme_tcp_try_send_ddgst(struct nvme_tcp_request *req) 1097 { 1098 struct nvme_tcp_queue *queue = req->queue; 1099 size_t offset = req->offset; 1100 int ret; 1101 struct msghdr msg = { .msg_flags = MSG_DONTWAIT }; 1102 struct kvec iov = { 1103 .iov_base = (u8 *)&req->ddgst + req->offset, 1104 .iov_len = NVME_TCP_DIGEST_LENGTH - req->offset 1105 }; 1106 1107 if 
(nvme_tcp_queue_more(queue)) 1108 msg.msg_flags |= MSG_MORE; 1109 else 1110 msg.msg_flags |= MSG_EOR; 1111 1112 ret = kernel_sendmsg(queue->sock, &msg, &iov, 1, iov.iov_len); 1113 if (unlikely(ret <= 0)) 1114 return ret; 1115 1116 if (offset + ret == NVME_TCP_DIGEST_LENGTH) { 1117 nvme_tcp_done_send_req(queue); 1118 return 1; 1119 } 1120 1121 req->offset += ret; 1122 return -EAGAIN; 1123 } 1124 1125 static int nvme_tcp_try_send(struct nvme_tcp_queue *queue) 1126 { 1127 struct nvme_tcp_request *req; 1128 int ret = 1; 1129 1130 if (!queue->request) { 1131 queue->request = nvme_tcp_fetch_request(queue); 1132 if (!queue->request) 1133 return 0; 1134 } 1135 req = queue->request; 1136 1137 if (req->state == NVME_TCP_SEND_CMD_PDU) { 1138 ret = nvme_tcp_try_send_cmd_pdu(req); 1139 if (ret <= 0) 1140 goto done; 1141 if (!nvme_tcp_has_inline_data(req)) 1142 return ret; 1143 } 1144 1145 if (req->state == NVME_TCP_SEND_H2C_PDU) { 1146 ret = nvme_tcp_try_send_data_pdu(req); 1147 if (ret <= 0) 1148 goto done; 1149 } 1150 1151 if (req->state == NVME_TCP_SEND_DATA) { 1152 ret = nvme_tcp_try_send_data(req); 1153 if (ret <= 0) 1154 goto done; 1155 } 1156 1157 if (req->state == NVME_TCP_SEND_DDGST) 1158 ret = nvme_tcp_try_send_ddgst(req); 1159 done: 1160 if (ret == -EAGAIN) { 1161 ret = 0; 1162 } else if (ret < 0) { 1163 dev_err(queue->ctrl->ctrl.device, 1164 "failed to send request %d\n", ret); 1165 if (ret != -EPIPE && ret != -ECONNRESET) 1166 nvme_tcp_fail_request(queue->request); 1167 nvme_tcp_done_send_req(queue); 1168 } 1169 return ret; 1170 } 1171 1172 static int nvme_tcp_try_recv(struct nvme_tcp_queue *queue) 1173 { 1174 struct socket *sock = queue->sock; 1175 struct sock *sk = sock->sk; 1176 read_descriptor_t rd_desc; 1177 int consumed; 1178 1179 rd_desc.arg.data = queue; 1180 rd_desc.count = 1; 1181 lock_sock(sk); 1182 queue->nr_cqe = 0; 1183 consumed = sock->ops->read_sock(sk, &rd_desc, nvme_tcp_recv_skb); 1184 release_sock(sk); 1185 return consumed; 1186 } 1187 1188 static void nvme_tcp_io_work(struct work_struct *w) 1189 { 1190 struct nvme_tcp_queue *queue = 1191 container_of(w, struct nvme_tcp_queue, io_work); 1192 unsigned long deadline = jiffies + msecs_to_jiffies(1); 1193 1194 do { 1195 bool pending = false; 1196 int result; 1197 1198 if (mutex_trylock(&queue->send_mutex)) { 1199 result = nvme_tcp_try_send(queue); 1200 mutex_unlock(&queue->send_mutex); 1201 if (result > 0) 1202 pending = true; 1203 else if (unlikely(result < 0)) 1204 break; 1205 } 1206 1207 result = nvme_tcp_try_recv(queue); 1208 if (result > 0) 1209 pending = true; 1210 else if (unlikely(result < 0)) 1211 return; 1212 1213 if (!pending) 1214 return; 1215 1216 } while (!time_after(jiffies, deadline)); /* quota is exhausted */ 1217 1218 queue_work_on(queue->io_cpu, nvme_tcp_wq, &queue->io_work); 1219 } 1220 1221 static void nvme_tcp_free_crypto(struct nvme_tcp_queue *queue) 1222 { 1223 struct crypto_ahash *tfm = crypto_ahash_reqtfm(queue->rcv_hash); 1224 1225 ahash_request_free(queue->rcv_hash); 1226 ahash_request_free(queue->snd_hash); 1227 crypto_free_ahash(tfm); 1228 } 1229 1230 static int nvme_tcp_alloc_crypto(struct nvme_tcp_queue *queue) 1231 { 1232 struct crypto_ahash *tfm; 1233 1234 tfm = crypto_alloc_ahash("crc32c", 0, CRYPTO_ALG_ASYNC); 1235 if (IS_ERR(tfm)) 1236 return PTR_ERR(tfm); 1237 1238 queue->snd_hash = ahash_request_alloc(tfm, GFP_KERNEL); 1239 if (!queue->snd_hash) 1240 goto free_tfm; 1241 ahash_request_set_callback(queue->snd_hash, 0, NULL, NULL); 1242 1243 queue->rcv_hash = ahash_request_alloc(tfm, 
GFP_KERNEL); 1244 if (!queue->rcv_hash) 1245 goto free_snd_hash; 1246 ahash_request_set_callback(queue->rcv_hash, 0, NULL, NULL); 1247 1248 return 0; 1249 free_snd_hash: 1250 ahash_request_free(queue->snd_hash); 1251 free_tfm: 1252 crypto_free_ahash(tfm); 1253 return -ENOMEM; 1254 } 1255 1256 static void nvme_tcp_free_async_req(struct nvme_tcp_ctrl *ctrl) 1257 { 1258 struct nvme_tcp_request *async = &ctrl->async_req; 1259 1260 page_frag_free(async->pdu); 1261 } 1262 1263 static int nvme_tcp_alloc_async_req(struct nvme_tcp_ctrl *ctrl) 1264 { 1265 struct nvme_tcp_queue *queue = &ctrl->queues[0]; 1266 struct nvme_tcp_request *async = &ctrl->async_req; 1267 u8 hdgst = nvme_tcp_hdgst_len(queue); 1268 1269 async->pdu = page_frag_alloc(&queue->pf_cache, 1270 sizeof(struct nvme_tcp_cmd_pdu) + hdgst, 1271 GFP_KERNEL | __GFP_ZERO); 1272 if (!async->pdu) 1273 return -ENOMEM; 1274 1275 async->queue = &ctrl->queues[0]; 1276 return 0; 1277 } 1278 1279 static void nvme_tcp_free_queue(struct nvme_ctrl *nctrl, int qid) 1280 { 1281 struct nvme_tcp_ctrl *ctrl = to_tcp_ctrl(nctrl); 1282 struct nvme_tcp_queue *queue = &ctrl->queues[qid]; 1283 1284 if (!test_and_clear_bit(NVME_TCP_Q_ALLOCATED, &queue->flags)) 1285 return; 1286 1287 if (queue->hdr_digest || queue->data_digest) 1288 nvme_tcp_free_crypto(queue); 1289 1290 sock_release(queue->sock); 1291 kfree(queue->pdu); 1292 mutex_destroy(&queue->send_mutex); 1293 mutex_destroy(&queue->queue_lock); 1294 } 1295 1296 static int nvme_tcp_init_connection(struct nvme_tcp_queue *queue) 1297 { 1298 struct nvme_tcp_icreq_pdu *icreq; 1299 struct nvme_tcp_icresp_pdu *icresp; 1300 struct msghdr msg = {}; 1301 struct kvec iov; 1302 bool ctrl_hdgst, ctrl_ddgst; 1303 int ret; 1304 1305 icreq = kzalloc(sizeof(*icreq), GFP_KERNEL); 1306 if (!icreq) 1307 return -ENOMEM; 1308 1309 icresp = kzalloc(sizeof(*icresp), GFP_KERNEL); 1310 if (!icresp) { 1311 ret = -ENOMEM; 1312 goto free_icreq; 1313 } 1314 1315 icreq->hdr.type = nvme_tcp_icreq; 1316 icreq->hdr.hlen = sizeof(*icreq); 1317 icreq->hdr.pdo = 0; 1318 icreq->hdr.plen = cpu_to_le32(icreq->hdr.hlen); 1319 icreq->pfv = cpu_to_le16(NVME_TCP_PFV_1_0); 1320 icreq->maxr2t = 0; /* single inflight r2t supported */ 1321 icreq->hpda = 0; /* no alignment constraint */ 1322 if (queue->hdr_digest) 1323 icreq->digest |= NVME_TCP_HDR_DIGEST_ENABLE; 1324 if (queue->data_digest) 1325 icreq->digest |= NVME_TCP_DATA_DIGEST_ENABLE; 1326 1327 iov.iov_base = icreq; 1328 iov.iov_len = sizeof(*icreq); 1329 ret = kernel_sendmsg(queue->sock, &msg, &iov, 1, iov.iov_len); 1330 if (ret < 0) 1331 goto free_icresp; 1332 1333 memset(&msg, 0, sizeof(msg)); 1334 iov.iov_base = icresp; 1335 iov.iov_len = sizeof(*icresp); 1336 ret = kernel_recvmsg(queue->sock, &msg, &iov, 1, 1337 iov.iov_len, msg.msg_flags); 1338 if (ret < 0) 1339 goto free_icresp; 1340 1341 ret = -EINVAL; 1342 if (icresp->hdr.type != nvme_tcp_icresp) { 1343 pr_err("queue %d: bad type returned %d\n", 1344 nvme_tcp_queue_id(queue), icresp->hdr.type); 1345 goto free_icresp; 1346 } 1347 1348 if (le32_to_cpu(icresp->hdr.plen) != sizeof(*icresp)) { 1349 pr_err("queue %d: bad pdu length returned %d\n", 1350 nvme_tcp_queue_id(queue), icresp->hdr.plen); 1351 goto free_icresp; 1352 } 1353 1354 if (icresp->pfv != NVME_TCP_PFV_1_0) { 1355 pr_err("queue %d: bad pfv returned %d\n", 1356 nvme_tcp_queue_id(queue), icresp->pfv); 1357 goto free_icresp; 1358 } 1359 1360 ctrl_ddgst = !!(icresp->digest & NVME_TCP_DATA_DIGEST_ENABLE); 1361 if ((queue->data_digest && !ctrl_ddgst) || 1362 (!queue->data_digest && 
ctrl_ddgst)) { 1363 pr_err("queue %d: data digest mismatch host: %s ctrl: %s\n", 1364 nvme_tcp_queue_id(queue), 1365 queue->data_digest ? "enabled" : "disabled", 1366 ctrl_ddgst ? "enabled" : "disabled"); 1367 goto free_icresp; 1368 } 1369 1370 ctrl_hdgst = !!(icresp->digest & NVME_TCP_HDR_DIGEST_ENABLE); 1371 if ((queue->hdr_digest && !ctrl_hdgst) || 1372 (!queue->hdr_digest && ctrl_hdgst)) { 1373 pr_err("queue %d: header digest mismatch host: %s ctrl: %s\n", 1374 nvme_tcp_queue_id(queue), 1375 queue->hdr_digest ? "enabled" : "disabled", 1376 ctrl_hdgst ? "enabled" : "disabled"); 1377 goto free_icresp; 1378 } 1379 1380 if (icresp->cpda != 0) { 1381 pr_err("queue %d: unsupported cpda returned %d\n", 1382 nvme_tcp_queue_id(queue), icresp->cpda); 1383 goto free_icresp; 1384 } 1385 1386 ret = 0; 1387 free_icresp: 1388 kfree(icresp); 1389 free_icreq: 1390 kfree(icreq); 1391 return ret; 1392 } 1393 1394 static bool nvme_tcp_admin_queue(struct nvme_tcp_queue *queue) 1395 { 1396 return nvme_tcp_queue_id(queue) == 0; 1397 } 1398 1399 static bool nvme_tcp_default_queue(struct nvme_tcp_queue *queue) 1400 { 1401 struct nvme_tcp_ctrl *ctrl = queue->ctrl; 1402 int qid = nvme_tcp_queue_id(queue); 1403 1404 return !nvme_tcp_admin_queue(queue) && 1405 qid < 1 + ctrl->io_queues[HCTX_TYPE_DEFAULT]; 1406 } 1407 1408 static bool nvme_tcp_read_queue(struct nvme_tcp_queue *queue) 1409 { 1410 struct nvme_tcp_ctrl *ctrl = queue->ctrl; 1411 int qid = nvme_tcp_queue_id(queue); 1412 1413 return !nvme_tcp_admin_queue(queue) && 1414 !nvme_tcp_default_queue(queue) && 1415 qid < 1 + ctrl->io_queues[HCTX_TYPE_DEFAULT] + 1416 ctrl->io_queues[HCTX_TYPE_READ]; 1417 } 1418 1419 static bool nvme_tcp_poll_queue(struct nvme_tcp_queue *queue) 1420 { 1421 struct nvme_tcp_ctrl *ctrl = queue->ctrl; 1422 int qid = nvme_tcp_queue_id(queue); 1423 1424 return !nvme_tcp_admin_queue(queue) && 1425 !nvme_tcp_default_queue(queue) && 1426 !nvme_tcp_read_queue(queue) && 1427 qid < 1 + ctrl->io_queues[HCTX_TYPE_DEFAULT] + 1428 ctrl->io_queues[HCTX_TYPE_READ] + 1429 ctrl->io_queues[HCTX_TYPE_POLL]; 1430 } 1431 1432 static void nvme_tcp_set_queue_io_cpu(struct nvme_tcp_queue *queue) 1433 { 1434 struct nvme_tcp_ctrl *ctrl = queue->ctrl; 1435 int qid = nvme_tcp_queue_id(queue); 1436 int n = 0; 1437 1438 if (nvme_tcp_default_queue(queue)) 1439 n = qid - 1; 1440 else if (nvme_tcp_read_queue(queue)) 1441 n = qid - ctrl->io_queues[HCTX_TYPE_DEFAULT] - 1; 1442 else if (nvme_tcp_poll_queue(queue)) 1443 n = qid - ctrl->io_queues[HCTX_TYPE_DEFAULT] - 1444 ctrl->io_queues[HCTX_TYPE_READ] - 1; 1445 queue->io_cpu = cpumask_next_wrap(n - 1, cpu_online_mask, -1, false); 1446 } 1447 1448 static int nvme_tcp_alloc_queue(struct nvme_ctrl *nctrl, 1449 int qid, size_t queue_size) 1450 { 1451 struct nvme_tcp_ctrl *ctrl = to_tcp_ctrl(nctrl); 1452 struct nvme_tcp_queue *queue = &ctrl->queues[qid]; 1453 int ret, rcv_pdu_size; 1454 1455 mutex_init(&queue->queue_lock); 1456 queue->ctrl = ctrl; 1457 init_llist_head(&queue->req_list); 1458 INIT_LIST_HEAD(&queue->send_list); 1459 mutex_init(&queue->send_mutex); 1460 INIT_WORK(&queue->io_work, nvme_tcp_io_work); 1461 queue->queue_size = queue_size; 1462 1463 if (qid > 0) 1464 queue->cmnd_capsule_len = nctrl->ioccsz * 16; 1465 else 1466 queue->cmnd_capsule_len = sizeof(struct nvme_command) + 1467 NVME_TCP_ADMIN_CCSZ; 1468 1469 ret = sock_create(ctrl->addr.ss_family, SOCK_STREAM, 1470 IPPROTO_TCP, &queue->sock); 1471 if (ret) { 1472 dev_err(nctrl->device, 1473 "failed to create socket: %d\n", ret); 1474 goto 
err_destroy_mutex; 1475 } 1476 1477 nvme_tcp_reclassify_socket(queue->sock); 1478 1479 /* Single syn retry */ 1480 tcp_sock_set_syncnt(queue->sock->sk, 1); 1481 1482 /* Set TCP no delay */ 1483 tcp_sock_set_nodelay(queue->sock->sk); 1484 1485 /* 1486 * Cleanup whatever is sitting in the TCP transmit queue on socket 1487 * close. This is done to prevent stale data from being sent should 1488 * the network connection be restored before TCP times out. 1489 */ 1490 sock_no_linger(queue->sock->sk); 1491 1492 if (so_priority > 0) 1493 sock_set_priority(queue->sock->sk, so_priority); 1494 1495 /* Set socket type of service */ 1496 if (nctrl->opts->tos >= 0) 1497 ip_sock_set_tos(queue->sock->sk, nctrl->opts->tos); 1498 1499 /* Set 10 seconds timeout for icresp recvmsg */ 1500 queue->sock->sk->sk_rcvtimeo = 10 * HZ; 1501 1502 queue->sock->sk->sk_allocation = GFP_ATOMIC; 1503 nvme_tcp_set_queue_io_cpu(queue); 1504 queue->request = NULL; 1505 queue->data_remaining = 0; 1506 queue->ddgst_remaining = 0; 1507 queue->pdu_remaining = 0; 1508 queue->pdu_offset = 0; 1509 sk_set_memalloc(queue->sock->sk); 1510 1511 if (nctrl->opts->mask & NVMF_OPT_HOST_TRADDR) { 1512 ret = kernel_bind(queue->sock, (struct sockaddr *)&ctrl->src_addr, 1513 sizeof(ctrl->src_addr)); 1514 if (ret) { 1515 dev_err(nctrl->device, 1516 "failed to bind queue %d socket %d\n", 1517 qid, ret); 1518 goto err_sock; 1519 } 1520 } 1521 1522 if (nctrl->opts->mask & NVMF_OPT_HOST_IFACE) { 1523 char *iface = nctrl->opts->host_iface; 1524 sockptr_t optval = KERNEL_SOCKPTR(iface); 1525 1526 ret = sock_setsockopt(queue->sock, SOL_SOCKET, SO_BINDTODEVICE, 1527 optval, strlen(iface)); 1528 if (ret) { 1529 dev_err(nctrl->device, 1530 "failed to bind to interface %s queue %d err %d\n", 1531 iface, qid, ret); 1532 goto err_sock; 1533 } 1534 } 1535 1536 queue->hdr_digest = nctrl->opts->hdr_digest; 1537 queue->data_digest = nctrl->opts->data_digest; 1538 if (queue->hdr_digest || queue->data_digest) { 1539 ret = nvme_tcp_alloc_crypto(queue); 1540 if (ret) { 1541 dev_err(nctrl->device, 1542 "failed to allocate queue %d crypto\n", qid); 1543 goto err_sock; 1544 } 1545 } 1546 1547 rcv_pdu_size = sizeof(struct nvme_tcp_rsp_pdu) + 1548 nvme_tcp_hdgst_len(queue); 1549 queue->pdu = kmalloc(rcv_pdu_size, GFP_KERNEL); 1550 if (!queue->pdu) { 1551 ret = -ENOMEM; 1552 goto err_crypto; 1553 } 1554 1555 dev_dbg(nctrl->device, "connecting queue %d\n", 1556 nvme_tcp_queue_id(queue)); 1557 1558 ret = kernel_connect(queue->sock, (struct sockaddr *)&ctrl->addr, 1559 sizeof(ctrl->addr), 0); 1560 if (ret) { 1561 dev_err(nctrl->device, 1562 "failed to connect socket: %d\n", ret); 1563 goto err_rcv_pdu; 1564 } 1565 1566 ret = nvme_tcp_init_connection(queue); 1567 if (ret) 1568 goto err_init_connect; 1569 1570 queue->rd_enabled = true; 1571 set_bit(NVME_TCP_Q_ALLOCATED, &queue->flags); 1572 nvme_tcp_init_recv_ctx(queue); 1573 1574 write_lock_bh(&queue->sock->sk->sk_callback_lock); 1575 queue->sock->sk->sk_user_data = queue; 1576 queue->state_change = queue->sock->sk->sk_state_change; 1577 queue->data_ready = queue->sock->sk->sk_data_ready; 1578 queue->write_space = queue->sock->sk->sk_write_space; 1579 queue->sock->sk->sk_data_ready = nvme_tcp_data_ready; 1580 queue->sock->sk->sk_state_change = nvme_tcp_state_change; 1581 queue->sock->sk->sk_write_space = nvme_tcp_write_space; 1582 #ifdef CONFIG_NET_RX_BUSY_POLL 1583 queue->sock->sk->sk_ll_usec = 1; 1584 #endif 1585 write_unlock_bh(&queue->sock->sk->sk_callback_lock); 1586 1587 return 0; 1588 1589 err_init_connect: 1590 
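	/* error unwind: release resources in reverse order of allocation */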
kernel_sock_shutdown(queue->sock, SHUT_RDWR); 1591 err_rcv_pdu: 1592 kfree(queue->pdu); 1593 err_crypto: 1594 if (queue->hdr_digest || queue->data_digest) 1595 nvme_tcp_free_crypto(queue); 1596 err_sock: 1597 sock_release(queue->sock); 1598 queue->sock = NULL; 1599 err_destroy_mutex: 1600 mutex_destroy(&queue->send_mutex); 1601 mutex_destroy(&queue->queue_lock); 1602 return ret; 1603 } 1604 1605 static void nvme_tcp_restore_sock_calls(struct nvme_tcp_queue *queue) 1606 { 1607 struct socket *sock = queue->sock; 1608 1609 write_lock_bh(&sock->sk->sk_callback_lock); 1610 sock->sk->sk_user_data = NULL; 1611 sock->sk->sk_data_ready = queue->data_ready; 1612 sock->sk->sk_state_change = queue->state_change; 1613 sock->sk->sk_write_space = queue->write_space; 1614 write_unlock_bh(&sock->sk->sk_callback_lock); 1615 } 1616 1617 static void __nvme_tcp_stop_queue(struct nvme_tcp_queue *queue) 1618 { 1619 kernel_sock_shutdown(queue->sock, SHUT_RDWR); 1620 nvme_tcp_restore_sock_calls(queue); 1621 cancel_work_sync(&queue->io_work); 1622 } 1623 1624 static void nvme_tcp_stop_queue(struct nvme_ctrl *nctrl, int qid) 1625 { 1626 struct nvme_tcp_ctrl *ctrl = to_tcp_ctrl(nctrl); 1627 struct nvme_tcp_queue *queue = &ctrl->queues[qid]; 1628 1629 mutex_lock(&queue->queue_lock); 1630 if (test_and_clear_bit(NVME_TCP_Q_LIVE, &queue->flags)) 1631 __nvme_tcp_stop_queue(queue); 1632 mutex_unlock(&queue->queue_lock); 1633 } 1634 1635 static int nvme_tcp_start_queue(struct nvme_ctrl *nctrl, int idx) 1636 { 1637 struct nvme_tcp_ctrl *ctrl = to_tcp_ctrl(nctrl); 1638 int ret; 1639 1640 if (idx) 1641 ret = nvmf_connect_io_queue(nctrl, idx); 1642 else 1643 ret = nvmf_connect_admin_queue(nctrl); 1644 1645 if (!ret) { 1646 set_bit(NVME_TCP_Q_LIVE, &ctrl->queues[idx].flags); 1647 } else { 1648 if (test_bit(NVME_TCP_Q_ALLOCATED, &ctrl->queues[idx].flags)) 1649 __nvme_tcp_stop_queue(&ctrl->queues[idx]); 1650 dev_err(nctrl->device, 1651 "failed to connect queue: %d ret=%d\n", idx, ret); 1652 } 1653 return ret; 1654 } 1655 1656 static struct blk_mq_tag_set *nvme_tcp_alloc_tagset(struct nvme_ctrl *nctrl, 1657 bool admin) 1658 { 1659 struct nvme_tcp_ctrl *ctrl = to_tcp_ctrl(nctrl); 1660 struct blk_mq_tag_set *set; 1661 int ret; 1662 1663 if (admin) { 1664 set = &ctrl->admin_tag_set; 1665 memset(set, 0, sizeof(*set)); 1666 set->ops = &nvme_tcp_admin_mq_ops; 1667 set->queue_depth = NVME_AQ_MQ_TAG_DEPTH; 1668 set->reserved_tags = NVMF_RESERVED_TAGS; 1669 set->numa_node = nctrl->numa_node; 1670 set->flags = BLK_MQ_F_BLOCKING; 1671 set->cmd_size = sizeof(struct nvme_tcp_request); 1672 set->driver_data = ctrl; 1673 set->nr_hw_queues = 1; 1674 set->timeout = NVME_ADMIN_TIMEOUT; 1675 } else { 1676 set = &ctrl->tag_set; 1677 memset(set, 0, sizeof(*set)); 1678 set->ops = &nvme_tcp_mq_ops; 1679 set->queue_depth = nctrl->sqsize + 1; 1680 set->reserved_tags = NVMF_RESERVED_TAGS; 1681 set->numa_node = nctrl->numa_node; 1682 set->flags = BLK_MQ_F_SHOULD_MERGE | BLK_MQ_F_BLOCKING; 1683 set->cmd_size = sizeof(struct nvme_tcp_request); 1684 set->driver_data = ctrl; 1685 set->nr_hw_queues = nctrl->queue_count - 1; 1686 set->timeout = NVME_IO_TIMEOUT; 1687 set->nr_maps = nctrl->opts->nr_poll_queues ? 
HCTX_MAX_TYPES : 2; 1688 } 1689 1690 ret = blk_mq_alloc_tag_set(set); 1691 if (ret) 1692 return ERR_PTR(ret); 1693 1694 return set; 1695 } 1696 1697 static void nvme_tcp_free_admin_queue(struct nvme_ctrl *ctrl) 1698 { 1699 if (to_tcp_ctrl(ctrl)->async_req.pdu) { 1700 cancel_work_sync(&ctrl->async_event_work); 1701 nvme_tcp_free_async_req(to_tcp_ctrl(ctrl)); 1702 to_tcp_ctrl(ctrl)->async_req.pdu = NULL; 1703 } 1704 1705 nvme_tcp_free_queue(ctrl, 0); 1706 } 1707 1708 static void nvme_tcp_free_io_queues(struct nvme_ctrl *ctrl) 1709 { 1710 int i; 1711 1712 for (i = 1; i < ctrl->queue_count; i++) 1713 nvme_tcp_free_queue(ctrl, i); 1714 } 1715 1716 static void nvme_tcp_stop_io_queues(struct nvme_ctrl *ctrl) 1717 { 1718 int i; 1719 1720 for (i = 1; i < ctrl->queue_count; i++) 1721 nvme_tcp_stop_queue(ctrl, i); 1722 } 1723 1724 static int nvme_tcp_start_io_queues(struct nvme_ctrl *ctrl) 1725 { 1726 int i, ret = 0; 1727 1728 for (i = 1; i < ctrl->queue_count; i++) { 1729 ret = nvme_tcp_start_queue(ctrl, i); 1730 if (ret) 1731 goto out_stop_queues; 1732 } 1733 1734 return 0; 1735 1736 out_stop_queues: 1737 for (i--; i >= 1; i--) 1738 nvme_tcp_stop_queue(ctrl, i); 1739 return ret; 1740 } 1741 1742 static int nvme_tcp_alloc_admin_queue(struct nvme_ctrl *ctrl) 1743 { 1744 int ret; 1745 1746 ret = nvme_tcp_alloc_queue(ctrl, 0, NVME_AQ_DEPTH); 1747 if (ret) 1748 return ret; 1749 1750 ret = nvme_tcp_alloc_async_req(to_tcp_ctrl(ctrl)); 1751 if (ret) 1752 goto out_free_queue; 1753 1754 return 0; 1755 1756 out_free_queue: 1757 nvme_tcp_free_queue(ctrl, 0); 1758 return ret; 1759 } 1760 1761 static int __nvme_tcp_alloc_io_queues(struct nvme_ctrl *ctrl) 1762 { 1763 int i, ret; 1764 1765 for (i = 1; i < ctrl->queue_count; i++) { 1766 ret = nvme_tcp_alloc_queue(ctrl, i, 1767 ctrl->sqsize + 1); 1768 if (ret) 1769 goto out_free_queues; 1770 } 1771 1772 return 0; 1773 1774 out_free_queues: 1775 for (i--; i >= 1; i--) 1776 nvme_tcp_free_queue(ctrl, i); 1777 1778 return ret; 1779 } 1780 1781 static unsigned int nvme_tcp_nr_io_queues(struct nvme_ctrl *ctrl) 1782 { 1783 unsigned int nr_io_queues; 1784 1785 nr_io_queues = min(ctrl->opts->nr_io_queues, num_online_cpus()); 1786 nr_io_queues += min(ctrl->opts->nr_write_queues, num_online_cpus()); 1787 nr_io_queues += min(ctrl->opts->nr_poll_queues, num_online_cpus()); 1788 1789 return nr_io_queues; 1790 } 1791 1792 static void nvme_tcp_set_io_queues(struct nvme_ctrl *nctrl, 1793 unsigned int nr_io_queues) 1794 { 1795 struct nvme_tcp_ctrl *ctrl = to_tcp_ctrl(nctrl); 1796 struct nvmf_ctrl_options *opts = nctrl->opts; 1797 1798 if (opts->nr_write_queues && opts->nr_io_queues < nr_io_queues) { 1799 /* 1800 * separate read/write queues 1801 * hand out dedicated default queues only after we have 1802 * sufficient read queues. 1803 */ 1804 ctrl->io_queues[HCTX_TYPE_READ] = opts->nr_io_queues; 1805 nr_io_queues -= ctrl->io_queues[HCTX_TYPE_READ]; 1806 ctrl->io_queues[HCTX_TYPE_DEFAULT] = 1807 min(opts->nr_write_queues, nr_io_queues); 1808 nr_io_queues -= ctrl->io_queues[HCTX_TYPE_DEFAULT]; 1809 } else { 1810 /* 1811 * shared read/write queues 1812 * either no write queues were requested, or we don't have 1813 * sufficient queue count to have dedicated default queues. 
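		 * Reads and writes then share the HCTX_TYPE_DEFAULT queues;
		 * no dedicated HCTX_TYPE_READ queues are handed out.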
1814 */ 1815 ctrl->io_queues[HCTX_TYPE_DEFAULT] = 1816 min(opts->nr_io_queues, nr_io_queues); 1817 nr_io_queues -= ctrl->io_queues[HCTX_TYPE_DEFAULT]; 1818 } 1819 1820 if (opts->nr_poll_queues && nr_io_queues) { 1821 /* map dedicated poll queues only if we have queues left */ 1822 ctrl->io_queues[HCTX_TYPE_POLL] = 1823 min(opts->nr_poll_queues, nr_io_queues); 1824 } 1825 } 1826 1827 static int nvme_tcp_alloc_io_queues(struct nvme_ctrl *ctrl) 1828 { 1829 unsigned int nr_io_queues; 1830 int ret; 1831 1832 nr_io_queues = nvme_tcp_nr_io_queues(ctrl); 1833 ret = nvme_set_queue_count(ctrl, &nr_io_queues); 1834 if (ret) 1835 return ret; 1836 1837 if (nr_io_queues == 0) { 1838 dev_err(ctrl->device, 1839 "unable to set any I/O queues\n"); 1840 return -ENOMEM; 1841 } 1842 1843 ctrl->queue_count = nr_io_queues + 1; 1844 dev_info(ctrl->device, 1845 "creating %d I/O queues.\n", nr_io_queues); 1846 1847 nvme_tcp_set_io_queues(ctrl, nr_io_queues); 1848 1849 return __nvme_tcp_alloc_io_queues(ctrl); 1850 } 1851 1852 static void nvme_tcp_destroy_io_queues(struct nvme_ctrl *ctrl, bool remove) 1853 { 1854 nvme_tcp_stop_io_queues(ctrl); 1855 if (remove) { 1856 blk_cleanup_queue(ctrl->connect_q); 1857 blk_mq_free_tag_set(ctrl->tagset); 1858 } 1859 nvme_tcp_free_io_queues(ctrl); 1860 } 1861 1862 static int nvme_tcp_configure_io_queues(struct nvme_ctrl *ctrl, bool new) 1863 { 1864 int ret; 1865 1866 ret = nvme_tcp_alloc_io_queues(ctrl); 1867 if (ret) 1868 return ret; 1869 1870 if (new) { 1871 ctrl->tagset = nvme_tcp_alloc_tagset(ctrl, false); 1872 if (IS_ERR(ctrl->tagset)) { 1873 ret = PTR_ERR(ctrl->tagset); 1874 goto out_free_io_queues; 1875 } 1876 1877 ctrl->connect_q = blk_mq_init_queue(ctrl->tagset); 1878 if (IS_ERR(ctrl->connect_q)) { 1879 ret = PTR_ERR(ctrl->connect_q); 1880 goto out_free_tag_set; 1881 } 1882 } 1883 1884 ret = nvme_tcp_start_io_queues(ctrl); 1885 if (ret) 1886 goto out_cleanup_connect_q; 1887 1888 if (!new) { 1889 nvme_start_queues(ctrl); 1890 if (!nvme_wait_freeze_timeout(ctrl, NVME_IO_TIMEOUT)) { 1891 /* 1892 * If we timed out waiting for freeze we are likely to 1893 * be stuck. Fail the controller initialization just 1894 * to be safe. 
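			 * The freeze was started in nvme_tcp_teardown_io_queues();
			 * it must complete before blk_mq_update_nr_hw_queues()
			 * below can safely change the queue mapping.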
1895 */ 1896 ret = -ENODEV; 1897 goto out_wait_freeze_timed_out; 1898 } 1899 blk_mq_update_nr_hw_queues(ctrl->tagset, 1900 ctrl->queue_count - 1); 1901 nvme_unfreeze(ctrl); 1902 } 1903 1904 return 0; 1905 1906 out_wait_freeze_timed_out: 1907 nvme_stop_queues(ctrl); 1908 nvme_sync_io_queues(ctrl); 1909 nvme_tcp_stop_io_queues(ctrl); 1910 out_cleanup_connect_q: 1911 nvme_cancel_tagset(ctrl); 1912 if (new) 1913 blk_cleanup_queue(ctrl->connect_q); 1914 out_free_tag_set: 1915 if (new) 1916 blk_mq_free_tag_set(ctrl->tagset); 1917 out_free_io_queues: 1918 nvme_tcp_free_io_queues(ctrl); 1919 return ret; 1920 } 1921 1922 static void nvme_tcp_destroy_admin_queue(struct nvme_ctrl *ctrl, bool remove) 1923 { 1924 nvme_tcp_stop_queue(ctrl, 0); 1925 if (remove) { 1926 blk_cleanup_queue(ctrl->admin_q); 1927 blk_cleanup_queue(ctrl->fabrics_q); 1928 blk_mq_free_tag_set(ctrl->admin_tagset); 1929 } 1930 nvme_tcp_free_admin_queue(ctrl); 1931 } 1932 1933 static int nvme_tcp_configure_admin_queue(struct nvme_ctrl *ctrl, bool new) 1934 { 1935 int error; 1936 1937 error = nvme_tcp_alloc_admin_queue(ctrl); 1938 if (error) 1939 return error; 1940 1941 if (new) { 1942 ctrl->admin_tagset = nvme_tcp_alloc_tagset(ctrl, true); 1943 if (IS_ERR(ctrl->admin_tagset)) { 1944 error = PTR_ERR(ctrl->admin_tagset); 1945 goto out_free_queue; 1946 } 1947 1948 ctrl->fabrics_q = blk_mq_init_queue(ctrl->admin_tagset); 1949 if (IS_ERR(ctrl->fabrics_q)) { 1950 error = PTR_ERR(ctrl->fabrics_q); 1951 goto out_free_tagset; 1952 } 1953 1954 ctrl->admin_q = blk_mq_init_queue(ctrl->admin_tagset); 1955 if (IS_ERR(ctrl->admin_q)) { 1956 error = PTR_ERR(ctrl->admin_q); 1957 goto out_cleanup_fabrics_q; 1958 } 1959 } 1960 1961 error = nvme_tcp_start_queue(ctrl, 0); 1962 if (error) 1963 goto out_cleanup_queue; 1964 1965 error = nvme_enable_ctrl(ctrl); 1966 if (error) 1967 goto out_stop_queue; 1968 1969 blk_mq_unquiesce_queue(ctrl->admin_q); 1970 1971 error = nvme_init_ctrl_finish(ctrl); 1972 if (error) 1973 goto out_quiesce_queue; 1974 1975 return 0; 1976 1977 out_quiesce_queue: 1978 blk_mq_quiesce_queue(ctrl->admin_q); 1979 blk_sync_queue(ctrl->admin_q); 1980 out_stop_queue: 1981 nvme_tcp_stop_queue(ctrl, 0); 1982 nvme_cancel_admin_tagset(ctrl); 1983 out_cleanup_queue: 1984 if (new) 1985 blk_cleanup_queue(ctrl->admin_q); 1986 out_cleanup_fabrics_q: 1987 if (new) 1988 blk_cleanup_queue(ctrl->fabrics_q); 1989 out_free_tagset: 1990 if (new) 1991 blk_mq_free_tag_set(ctrl->admin_tagset); 1992 out_free_queue: 1993 nvme_tcp_free_admin_queue(ctrl); 1994 return error; 1995 } 1996 1997 static void nvme_tcp_teardown_admin_queue(struct nvme_ctrl *ctrl, 1998 bool remove) 1999 { 2000 blk_mq_quiesce_queue(ctrl->admin_q); 2001 blk_sync_queue(ctrl->admin_q); 2002 nvme_tcp_stop_queue(ctrl, 0); 2003 nvme_cancel_admin_tagset(ctrl); 2004 if (remove) 2005 blk_mq_unquiesce_queue(ctrl->admin_q); 2006 nvme_tcp_destroy_admin_queue(ctrl, remove); 2007 } 2008 2009 static void nvme_tcp_teardown_io_queues(struct nvme_ctrl *ctrl, 2010 bool remove) 2011 { 2012 if (ctrl->queue_count <= 1) 2013 return; 2014 blk_mq_quiesce_queue(ctrl->admin_q); 2015 nvme_start_freeze(ctrl); 2016 nvme_stop_queues(ctrl); 2017 nvme_sync_io_queues(ctrl); 2018 nvme_tcp_stop_io_queues(ctrl); 2019 nvme_cancel_tagset(ctrl); 2020 if (remove) 2021 nvme_start_queues(ctrl); 2022 nvme_tcp_destroy_io_queues(ctrl, remove); 2023 } 2024 2025 static void nvme_tcp_reconnect_or_remove(struct nvme_ctrl *ctrl) 2026 { 2027 /* If we are resetting/deleting then do nothing */ 2028 if (ctrl->state != NVME_CTRL_CONNECTING) { 
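		/* seeing NEW or LIVE here would indicate a state machine bug */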
		WARN_ON_ONCE(ctrl->state == NVME_CTRL_NEW ||
			ctrl->state == NVME_CTRL_LIVE);
		return;
	}

	if (nvmf_should_reconnect(ctrl)) {
		dev_info(ctrl->device, "Reconnecting in %d seconds...\n",
			ctrl->opts->reconnect_delay);
		queue_delayed_work(nvme_wq, &to_tcp_ctrl(ctrl)->connect_work,
				ctrl->opts->reconnect_delay * HZ);
	} else {
		dev_info(ctrl->device, "Removing controller...\n");
		nvme_delete_ctrl(ctrl);
	}
}

static int nvme_tcp_setup_ctrl(struct nvme_ctrl *ctrl, bool new)
{
	struct nvmf_ctrl_options *opts = ctrl->opts;
	int ret;

	ret = nvme_tcp_configure_admin_queue(ctrl, new);
	if (ret)
		return ret;

	if (ctrl->icdoff) {
		ret = -EOPNOTSUPP;
		dev_err(ctrl->device, "icdoff is not supported!\n");
		goto destroy_admin;
	}

	if (!nvme_ctrl_sgl_supported(ctrl)) {
		ret = -EOPNOTSUPP;
		dev_err(ctrl->device, "Mandatory sgls are not supported!\n");
		goto destroy_admin;
	}

	if (opts->queue_size > ctrl->sqsize + 1)
		dev_warn(ctrl->device,
			"queue_size %zu > ctrl sqsize %u, clamping down\n",
			opts->queue_size, ctrl->sqsize + 1);

	if (ctrl->sqsize + 1 > ctrl->maxcmd) {
		dev_warn(ctrl->device,
			"sqsize %u > ctrl maxcmd %u, clamping down\n",
			ctrl->sqsize + 1, ctrl->maxcmd);
		ctrl->sqsize = ctrl->maxcmd - 1;
	}

	if (ctrl->queue_count > 1) {
		ret = nvme_tcp_configure_io_queues(ctrl, new);
		if (ret)
			goto destroy_admin;
	}

	if (!nvme_change_ctrl_state(ctrl, NVME_CTRL_LIVE)) {
		/*
		 * state change failure is ok if we started ctrl delete,
		 * unless we're during creation of a new controller to
		 * avoid races with teardown flow.
		 */
		WARN_ON_ONCE(ctrl->state != NVME_CTRL_DELETING &&
			     ctrl->state != NVME_CTRL_DELETING_NOIO);
		WARN_ON_ONCE(new);
		ret = -EINVAL;
		goto destroy_io;
	}

	nvme_start_ctrl(ctrl);
	return 0;

destroy_io:
	if (ctrl->queue_count > 1) {
		nvme_stop_queues(ctrl);
		nvme_sync_io_queues(ctrl);
		nvme_tcp_stop_io_queues(ctrl);
		nvme_cancel_tagset(ctrl);
		nvme_tcp_destroy_io_queues(ctrl, new);
	}
destroy_admin:
	blk_mq_quiesce_queue(ctrl->admin_q);
	blk_sync_queue(ctrl->admin_q);
	nvme_tcp_stop_queue(ctrl, 0);
	nvme_cancel_admin_tagset(ctrl);
	nvme_tcp_destroy_admin_queue(ctrl, new);
	return ret;
}

static void nvme_tcp_reconnect_ctrl_work(struct work_struct *work)
{
	struct nvme_tcp_ctrl *tcp_ctrl = container_of(to_delayed_work(work),
			struct nvme_tcp_ctrl, connect_work);
	struct nvme_ctrl *ctrl = &tcp_ctrl->ctrl;

	++ctrl->nr_reconnects;

	if (nvme_tcp_setup_ctrl(ctrl, false))
		goto requeue;

	dev_info(ctrl->device, "Successfully reconnected (%d attempt)\n",
			ctrl->nr_reconnects);

	ctrl->nr_reconnects = 0;

	return;

requeue:
	dev_info(ctrl->device, "Failed reconnect attempt %d\n",
			ctrl->nr_reconnects);
	nvme_tcp_reconnect_or_remove(ctrl);
}

static void nvme_tcp_error_recovery_work(struct work_struct *work)
{
	struct nvme_tcp_ctrl *tcp_ctrl = container_of(work,
				struct nvme_tcp_ctrl, err_work);
	struct nvme_ctrl *ctrl = &tcp_ctrl->ctrl;

	nvme_stop_keep_alive(ctrl);
	flush_work(&ctrl->async_event_work);
	nvme_tcp_teardown_io_queues(ctrl, false);
	/* unquiesce to fail fast pending requests */
	nvme_start_queues(ctrl);
	nvme_tcp_teardown_admin_queue(ctrl, false);
	blk_mq_unquiesce_queue(ctrl->admin_q);

	if (!nvme_change_ctrl_state(ctrl, NVME_CTRL_CONNECTING)) {
		/* state change failure is ok if we started ctrl delete */
		WARN_ON_ONCE(ctrl->state != NVME_CTRL_DELETING &&
			     ctrl->state != NVME_CTRL_DELETING_NOIO);
		return;
	}

	nvme_tcp_reconnect_or_remove(ctrl);
}

static void nvme_tcp_teardown_ctrl(struct nvme_ctrl *ctrl, bool shutdown)
{
	cancel_work_sync(&to_tcp_ctrl(ctrl)->err_work);
	cancel_delayed_work_sync(&to_tcp_ctrl(ctrl)->connect_work);

	nvme_tcp_teardown_io_queues(ctrl, shutdown);
	blk_mq_quiesce_queue(ctrl->admin_q);
	if (shutdown)
		nvme_shutdown_ctrl(ctrl);
	else
		nvme_disable_ctrl(ctrl);
	nvme_tcp_teardown_admin_queue(ctrl, shutdown);
}

static void nvme_tcp_delete_ctrl(struct nvme_ctrl *ctrl)
{
	nvme_tcp_teardown_ctrl(ctrl, true);
}

static void nvme_reset_ctrl_work(struct work_struct *work)
{
	struct nvme_ctrl *ctrl =
		container_of(work, struct nvme_ctrl, reset_work);

	nvme_stop_ctrl(ctrl);
	nvme_tcp_teardown_ctrl(ctrl, false);

	if (!nvme_change_ctrl_state(ctrl, NVME_CTRL_CONNECTING)) {
		/* state change failure is ok if we started ctrl delete */
		WARN_ON_ONCE(ctrl->state != NVME_CTRL_DELETING &&
			     ctrl->state != NVME_CTRL_DELETING_NOIO);
		return;
	}

	if (nvme_tcp_setup_ctrl(ctrl, false))
		goto out_fail;

	return;

out_fail:
	++ctrl->nr_reconnects;
	nvme_tcp_reconnect_or_remove(ctrl);
}

static void nvme_tcp_free_ctrl(struct nvme_ctrl *nctrl)
{
	struct nvme_tcp_ctrl *ctrl = to_tcp_ctrl(nctrl);

	if (list_empty(&ctrl->list))
		goto free_ctrl;

	mutex_lock(&nvme_tcp_ctrl_mutex);
	list_del(&ctrl->list);
	mutex_unlock(&nvme_tcp_ctrl_mutex);

	nvmf_free_options(nctrl->opts);
free_ctrl:
	kfree(ctrl->queues);
	kfree(ctrl);
}

static void nvme_tcp_set_sg_null(struct nvme_command *c)
{
	struct nvme_sgl_desc *sg = &c->common.dptr.sgl;

	sg->addr = 0;
	sg->length = 0;
	sg->type = (NVME_TRANSPORT_SGL_DATA_DESC << 4) |
			NVME_SGL_FMT_TRANSPORT_A;
}

static void nvme_tcp_set_sg_inline(struct nvme_tcp_queue *queue,
		struct nvme_command *c, u32 data_len)
{
	struct nvme_sgl_desc *sg = &c->common.dptr.sgl;

	sg->addr = cpu_to_le64(queue->ctrl->ctrl.icdoff);
	sg->length = cpu_to_le32(data_len);
	sg->type = (NVME_SGL_FMT_DATA_DESC << 4) | NVME_SGL_FMT_OFFSET;
}

static void nvme_tcp_set_sg_host_data(struct nvme_command *c,
		u32 data_len)
{
	struct nvme_sgl_desc *sg = &c->common.dptr.sgl;

	sg->addr = 0;
	sg->length = cpu_to_le32(data_len);
	sg->type = (NVME_TRANSPORT_SGL_DATA_DESC << 4) |
			NVME_SGL_FMT_TRANSPORT_A;
}

static void nvme_tcp_submit_async_event(struct nvme_ctrl *arg)
{
	struct nvme_tcp_ctrl *ctrl = to_tcp_ctrl(arg);
	struct nvme_tcp_queue *queue = &ctrl->queues[0];
	struct nvme_tcp_cmd_pdu *pdu = ctrl->async_req.pdu;
	struct nvme_command *cmd = &pdu->cmd;
	u8 hdgst = nvme_tcp_hdgst_len(queue);

	memset(pdu, 0, sizeof(*pdu));
	pdu->hdr.type = nvme_tcp_cmd;
	if (queue->hdr_digest)
		pdu->hdr.flags |= NVME_TCP_F_HDGST;
	pdu->hdr.hlen = sizeof(*pdu);
	pdu->hdr.plen = cpu_to_le32(pdu->hdr.hlen + hdgst);

	cmd->common.opcode = nvme_admin_async_event;
	cmd->common.command_id = NVME_AQ_BLK_MQ_DEPTH;
	cmd->common.flags |= NVME_CMD_SGL_METABUF;
	nvme_tcp_set_sg_null(cmd);

	ctrl->async_req.state = NVME_TCP_SEND_CMD_PDU;
	ctrl->async_req.offset = 0;
	ctrl->async_req.curr_bio = NULL;
	ctrl->async_req.data_len = 0;

	nvme_tcp_queue_request(&ctrl->async_req, true, true);
}

static void nvme_tcp_complete_timed_out(struct request *rq)
{
	struct nvme_tcp_request *req = blk_mq_rq_to_pdu(rq);
	struct nvme_ctrl *ctrl = &req->queue->ctrl->ctrl;

	nvme_tcp_stop_queue(ctrl, nvme_tcp_queue_id(req->queue));
	if (blk_mq_request_started(rq) && !blk_mq_request_completed(rq)) {
		nvme_req(rq)->status = NVME_SC_HOST_ABORTED_CMD;
		blk_mq_complete_request(rq);
	}
}

static enum blk_eh_timer_return
nvme_tcp_timeout(struct request *rq, bool reserved)
{
	struct nvme_tcp_request *req = blk_mq_rq_to_pdu(rq);
	struct nvme_ctrl *ctrl = &req->queue->ctrl->ctrl;
	struct nvme_tcp_cmd_pdu *pdu = req->pdu;

	dev_warn(ctrl->device,
		"queue %d: timeout request %#x type %d\n",
		nvme_tcp_queue_id(req->queue), rq->tag, pdu->hdr.type);

	if (ctrl->state != NVME_CTRL_LIVE) {
		/*
		 * If we are resetting, connecting or deleting we should
		 * complete immediately because we may block controller
		 * teardown or setup sequence
		 * - ctrl disable/shutdown fabrics requests
		 * - connect requests
		 * - initialization admin requests
		 * - I/O requests that entered after unquiescing and
		 *   the controller stopped responding
		 *
		 * All other requests should be cancelled by the error
		 * recovery work, so it's fine that we fail it here.
		 */
		nvme_tcp_complete_timed_out(rq);
		return BLK_EH_DONE;
	}

	/*
	 * LIVE state should trigger the normal error recovery which will
	 * handle completing this request.
	 */
	nvme_tcp_error_recovery(ctrl);
	return BLK_EH_RESET_TIMER;
}

static blk_status_t nvme_tcp_map_data(struct nvme_tcp_queue *queue,
			struct request *rq)
{
	struct nvme_tcp_request *req = blk_mq_rq_to_pdu(rq);
	struct nvme_tcp_cmd_pdu *pdu = req->pdu;
	struct nvme_command *c = &pdu->cmd;

	c->common.flags |= NVME_CMD_SGL_METABUF;

	if (!blk_rq_nr_phys_segments(rq))
		nvme_tcp_set_sg_null(c);
	else if (rq_data_dir(rq) == WRITE &&
	    req->data_len <= nvme_tcp_inline_data_size(queue))
		nvme_tcp_set_sg_inline(queue, c, req->data_len);
	else
		nvme_tcp_set_sg_host_data(c, req->data_len);

	return 0;
}

static blk_status_t nvme_tcp_setup_cmd_pdu(struct nvme_ns *ns,
		struct request *rq)
{
	struct nvme_tcp_request *req = blk_mq_rq_to_pdu(rq);
	struct nvme_tcp_cmd_pdu *pdu = req->pdu;
	struct nvme_tcp_queue *queue = req->queue;
	u8 hdgst = nvme_tcp_hdgst_len(queue), ddgst = 0;
	blk_status_t ret;

	ret = nvme_setup_cmd(ns, rq);
	if (ret)
		return ret;

	req->state = NVME_TCP_SEND_CMD_PDU;
	req->status = cpu_to_le16(NVME_SC_SUCCESS);
	req->offset = 0;
	req->data_sent = 0;
	req->pdu_len = 0;
	req->pdu_sent = 0;
	req->data_len = blk_rq_nr_phys_segments(rq) ?
				blk_rq_payload_bytes(rq) : 0;
	req->curr_bio = rq->bio;
	if (req->curr_bio && req->data_len)
		nvme_tcp_init_iter(req, rq_data_dir(rq));

	if (rq_data_dir(rq) == WRITE &&
	    req->data_len <= nvme_tcp_inline_data_size(queue))
		req->pdu_len = req->data_len;

	pdu->hdr.type = nvme_tcp_cmd;
	pdu->hdr.flags = 0;
	if (queue->hdr_digest)
		pdu->hdr.flags |= NVME_TCP_F_HDGST;
	if (queue->data_digest && req->pdu_len) {
		pdu->hdr.flags |= NVME_TCP_F_DDGST;
		ddgst = nvme_tcp_ddgst_len(queue);
	}
	pdu->hdr.hlen = sizeof(*pdu);
	pdu->hdr.pdo = req->pdu_len ? pdu->hdr.hlen + hdgst : 0;
	pdu->hdr.plen =
		cpu_to_le32(pdu->hdr.hlen + hdgst + req->pdu_len + ddgst);

	ret = nvme_tcp_map_data(queue, rq);
	if (unlikely(ret)) {
		nvme_cleanup_cmd(rq);
		dev_err(queue->ctrl->ctrl.device,
			"Failed to map data (%d)\n", ret);
		return ret;
	}

	return 0;
}

static void nvme_tcp_commit_rqs(struct blk_mq_hw_ctx *hctx)
{
	struct nvme_tcp_queue *queue = hctx->driver_data;

	if (!llist_empty(&queue->req_list))
		queue_work_on(queue->io_cpu, nvme_tcp_wq, &queue->io_work);
}

static blk_status_t nvme_tcp_queue_rq(struct blk_mq_hw_ctx *hctx,
		const struct blk_mq_queue_data *bd)
{
	struct nvme_ns *ns = hctx->queue->queuedata;
	struct nvme_tcp_queue *queue = hctx->driver_data;
	struct request *rq = bd->rq;
	struct nvme_tcp_request *req = blk_mq_rq_to_pdu(rq);
	bool queue_ready = test_bit(NVME_TCP_Q_LIVE, &queue->flags);
	blk_status_t ret;

	if (!nvme_check_ready(&queue->ctrl->ctrl, rq, queue_ready))
		return nvme_fail_nonready_command(&queue->ctrl->ctrl, rq);

	ret = nvme_tcp_setup_cmd_pdu(ns, rq);
	if (unlikely(ret))
		return ret;

	blk_mq_start_request(rq);

	nvme_tcp_queue_request(req, true, bd->last);

	return BLK_STS_OK;
}

static int nvme_tcp_map_queues(struct blk_mq_tag_set *set)
{
	struct nvme_tcp_ctrl *ctrl = set->driver_data;
	struct nvmf_ctrl_options *opts = ctrl->ctrl.opts;

	if (opts->nr_write_queues && ctrl->io_queues[HCTX_TYPE_READ]) {
		/* separate read/write queues */
		set->map[HCTX_TYPE_DEFAULT].nr_queues =
			ctrl->io_queues[HCTX_TYPE_DEFAULT];
		set->map[HCTX_TYPE_DEFAULT].queue_offset = 0;
		set->map[HCTX_TYPE_READ].nr_queues =
			ctrl->io_queues[HCTX_TYPE_READ];
		set->map[HCTX_TYPE_READ].queue_offset =
			ctrl->io_queues[HCTX_TYPE_DEFAULT];
	} else {
		/* shared read/write queues */
		set->map[HCTX_TYPE_DEFAULT].nr_queues =
			ctrl->io_queues[HCTX_TYPE_DEFAULT];
		set->map[HCTX_TYPE_DEFAULT].queue_offset = 0;
		set->map[HCTX_TYPE_READ].nr_queues =
			ctrl->io_queues[HCTX_TYPE_DEFAULT];
		set->map[HCTX_TYPE_READ].queue_offset = 0;
	}
	blk_mq_map_queues(&set->map[HCTX_TYPE_DEFAULT]);
	blk_mq_map_queues(&set->map[HCTX_TYPE_READ]);

	if (opts->nr_poll_queues && ctrl->io_queues[HCTX_TYPE_POLL]) {
		/* map dedicated poll queues only if we have queues left */
		set->map[HCTX_TYPE_POLL].nr_queues =
				ctrl->io_queues[HCTX_TYPE_POLL];
		set->map[HCTX_TYPE_POLL].queue_offset =
			ctrl->io_queues[HCTX_TYPE_DEFAULT] +
			ctrl->io_queues[HCTX_TYPE_READ];
		blk_mq_map_queues(&set->map[HCTX_TYPE_POLL]);
	}

	dev_info(ctrl->ctrl.device,
		"mapped %d/%d/%d default/read/poll queues.\n",
		ctrl->io_queues[HCTX_TYPE_DEFAULT],
		ctrl->io_queues[HCTX_TYPE_READ],
		ctrl->io_queues[HCTX_TYPE_POLL]);

	return 0;
}

static int nvme_tcp_poll(struct blk_mq_hw_ctx *hctx)
{
	struct nvme_tcp_queue *queue = hctx->driver_data;
	struct sock *sk = queue->sock->sk;

	if (!test_bit(NVME_TCP_Q_LIVE, &queue->flags))
		return 0;

	set_bit(NVME_TCP_Q_POLLING, &queue->flags);
	if (sk_can_busy_loop(sk) && skb_queue_empty_lockless(&sk->sk_receive_queue))
		sk_busy_loop(sk, true);
	nvme_tcp_try_recv(queue);
	clear_bit(NVME_TCP_Q_POLLING, &queue->flags);
	return queue->nr_cqe;
}

static const struct blk_mq_ops nvme_tcp_mq_ops = {
	.queue_rq	= nvme_tcp_queue_rq,
	.commit_rqs	= nvme_tcp_commit_rqs,
	.complete	= nvme_complete_rq,
	.init_request	= nvme_tcp_init_request,
	.exit_request	= nvme_tcp_exit_request,
	.init_hctx	= nvme_tcp_init_hctx,
	.timeout	= nvme_tcp_timeout,
	.map_queues	= nvme_tcp_map_queues,
	.poll		= nvme_tcp_poll,
};

static const struct blk_mq_ops nvme_tcp_admin_mq_ops = {
	.queue_rq	= nvme_tcp_queue_rq,
	.complete	= nvme_complete_rq,
	.init_request	= nvme_tcp_init_request,
	.exit_request	= nvme_tcp_exit_request,
	.init_hctx	= nvme_tcp_init_admin_hctx,
	.timeout	= nvme_tcp_timeout,
};

static const struct nvme_ctrl_ops nvme_tcp_ctrl_ops = {
	.name			= "tcp",
	.module			= THIS_MODULE,
	.flags			= NVME_F_FABRICS,
	.reg_read32		= nvmf_reg_read32,
	.reg_read64		= nvmf_reg_read64,
	.reg_write32		= nvmf_reg_write32,
	.free_ctrl		= nvme_tcp_free_ctrl,
	.submit_async_event	= nvme_tcp_submit_async_event,
	.delete_ctrl		= nvme_tcp_delete_ctrl,
	.get_address		= nvmf_get_address,
};

static bool
nvme_tcp_existing_controller(struct nvmf_ctrl_options *opts)
{
	struct nvme_tcp_ctrl *ctrl;
	bool found = false;

	mutex_lock(&nvme_tcp_ctrl_mutex);
	list_for_each_entry(ctrl, &nvme_tcp_ctrl_list, list) {
		found = nvmf_ip_options_match(&ctrl->ctrl, opts);
		if (found)
			break;
	}
	mutex_unlock(&nvme_tcp_ctrl_mutex);

	return found;
}

static struct nvme_ctrl *nvme_tcp_create_ctrl(struct device *dev,
		struct nvmf_ctrl_options *opts)
{
	struct nvme_tcp_ctrl *ctrl;
	int ret;

	ctrl = kzalloc(sizeof(*ctrl), GFP_KERNEL);
	if (!ctrl)
		return ERR_PTR(-ENOMEM);

	INIT_LIST_HEAD(&ctrl->list);
	ctrl->ctrl.opts = opts;
	ctrl->ctrl.queue_count = opts->nr_io_queues + opts->nr_write_queues +
				opts->nr_poll_queues + 1;
	ctrl->ctrl.sqsize = opts->queue_size - 1;
	ctrl->ctrl.kato = opts->kato;

	INIT_DELAYED_WORK(&ctrl->connect_work,
			nvme_tcp_reconnect_ctrl_work);
	INIT_WORK(&ctrl->err_work, nvme_tcp_error_recovery_work);
	INIT_WORK(&ctrl->ctrl.reset_work, nvme_reset_ctrl_work);

	if (!(opts->mask & NVMF_OPT_TRSVCID)) {
		opts->trsvcid =
			kstrdup(__stringify(NVME_TCP_DISC_PORT), GFP_KERNEL);
		if (!opts->trsvcid) {
			ret = -ENOMEM;
			goto out_free_ctrl;
		}
		opts->mask |= NVMF_OPT_TRSVCID;
	}

	ret = inet_pton_with_scope(&init_net, AF_UNSPEC,
			opts->traddr, opts->trsvcid, &ctrl->addr);
	if (ret) {
		pr_err("malformed address passed: %s:%s\n",
			opts->traddr, opts->trsvcid);
		goto out_free_ctrl;
	}

	if (opts->mask & NVMF_OPT_HOST_TRADDR) {
		ret = inet_pton_with_scope(&init_net, AF_UNSPEC,
			opts->host_traddr, NULL, &ctrl->src_addr);
		if (ret) {
			pr_err("malformed src address passed: %s\n",
			       opts->host_traddr);
			goto out_free_ctrl;
		}
	}

	if (opts->mask & NVMF_OPT_HOST_IFACE) {
		if (!__dev_get_by_name(&init_net, opts->host_iface)) {
			pr_err("invalid interface passed: %s\n",
			       opts->host_iface);
			ret = -ENODEV;
			goto out_free_ctrl;
		}
	}

	if (!opts->duplicate_connect && nvme_tcp_existing_controller(opts)) {
		ret = -EALREADY;
		goto out_free_ctrl;
	}

	ctrl->queues = kcalloc(ctrl->ctrl.queue_count, sizeof(*ctrl->queues),
				GFP_KERNEL);
	if (!ctrl->queues) {
		ret = -ENOMEM;
		goto out_free_ctrl;
	}

	ret = nvme_init_ctrl(&ctrl->ctrl, dev, &nvme_tcp_ctrl_ops, 0);
	if (ret)
		goto out_kfree_queues;

	if (!nvme_change_ctrl_state(&ctrl->ctrl, NVME_CTRL_CONNECTING)) {
		WARN_ON_ONCE(1);
		ret = -EINTR;
		goto out_uninit_ctrl;
	}

	ret = nvme_tcp_setup_ctrl(&ctrl->ctrl, true);
	if (ret)
		goto out_uninit_ctrl;

	dev_info(ctrl->ctrl.device, "new ctrl: NQN \"%s\", addr %pISp\n",
		ctrl->ctrl.opts->subsysnqn, &ctrl->addr);

	mutex_lock(&nvme_tcp_ctrl_mutex);
	list_add_tail(&ctrl->list, &nvme_tcp_ctrl_list);
	mutex_unlock(&nvme_tcp_ctrl_mutex);

	return &ctrl->ctrl;

out_uninit_ctrl:
	nvme_uninit_ctrl(&ctrl->ctrl);
	nvme_put_ctrl(&ctrl->ctrl);
	if (ret > 0)
		ret = -EIO;
	return ERR_PTR(ret);
out_kfree_queues:
	kfree(ctrl->queues);
out_free_ctrl:
	kfree(ctrl);
	return ERR_PTR(ret);
}

static struct nvmf_transport_ops nvme_tcp_transport = {
	.name		= "tcp",
	.module		= THIS_MODULE,
	.required_opts	= NVMF_OPT_TRADDR,
	.allowed_opts	= NVMF_OPT_TRSVCID | NVMF_OPT_RECONNECT_DELAY |
			  NVMF_OPT_HOST_TRADDR | NVMF_OPT_CTRL_LOSS_TMO |
			  NVMF_OPT_HDR_DIGEST | NVMF_OPT_DATA_DIGEST |
			  NVMF_OPT_NR_WRITE_QUEUES | NVMF_OPT_NR_POLL_QUEUES |
			  NVMF_OPT_TOS | NVMF_OPT_HOST_IFACE,
	.create_ctrl	= nvme_tcp_create_ctrl,
};

static int __init nvme_tcp_init_module(void)
{
	nvme_tcp_wq = alloc_workqueue("nvme_tcp_wq",
			WQ_MEM_RECLAIM | WQ_HIGHPRI, 0);
	if (!nvme_tcp_wq)
		return -ENOMEM;

	nvmf_register_transport(&nvme_tcp_transport);
	return 0;
}

static void __exit nvme_tcp_cleanup_module(void)
{
	struct nvme_tcp_ctrl *ctrl;

	nvmf_unregister_transport(&nvme_tcp_transport);

	mutex_lock(&nvme_tcp_ctrl_mutex);
	list_for_each_entry(ctrl, &nvme_tcp_ctrl_list, list)
		nvme_delete_ctrl(&ctrl->ctrl);
	mutex_unlock(&nvme_tcp_ctrl_mutex);
	flush_workqueue(nvme_delete_wq);

	destroy_workqueue(nvme_tcp_wq);
}

module_init(nvme_tcp_init_module);
module_exit(nvme_tcp_cleanup_module);

MODULE_LICENSE("GPL v2");