// SPDX-License-Identifier: GPL-2.0-or-later
/*
 * Network block device - make block devices work over TCP
 *
 * Note that you can not swap over this thing, yet. Seems to work but
 * deadlocks sometimes - you can not swap over TCP in general.
 *
 * Copyright 1997-2000, 2008 Pavel Machek <pavel@ucw.cz>
 * Parts copyright 2001 Steven Whitehouse <steve@chygwyn.com>
 *
 * (part of code stolen from loop.c)
 */

#include <linux/major.h>

#include <linux/blkdev.h>
#include <linux/module.h>
#include <linux/init.h>
#include <linux/sched.h>
#include <linux/sched/mm.h>
#include <linux/fs.h>
#include <linux/bio.h>
#include <linux/stat.h>
#include <linux/errno.h>
#include <linux/file.h>
#include <linux/ioctl.h>
#include <linux/mutex.h>
#include <linux/compiler.h>
#include <linux/completion.h>
#include <linux/err.h>
#include <linux/kernel.h>
#include <linux/slab.h>
#include <net/sock.h>
#include <linux/net.h>
#include <linux/kthread.h>
#include <linux/types.h>
#include <linux/debugfs.h>
#include <linux/blk-mq.h>

#include <linux/uaccess.h>
#include <asm/types.h>

#include <linux/nbd.h>
#include <linux/nbd-netlink.h>
#include <net/genetlink.h>

#define CREATE_TRACE_POINTS
#include <trace/events/nbd.h>

static DEFINE_IDR(nbd_index_idr);
static DEFINE_MUTEX(nbd_index_mutex);
static int nbd_total_devices = 0;

struct nbd_sock {
	struct socket *sock;
	struct mutex tx_lock;
	struct request *pending;
	int sent;
	bool dead;
	int fallback_index;
	int cookie;
};

struct recv_thread_args {
	struct work_struct work;
	struct nbd_device *nbd;
	int index;
};

struct link_dead_args {
	struct work_struct work;
	int index;
};

#define NBD_RT_TIMEDOUT			0
#define NBD_RT_DISCONNECT_REQUESTED	1
#define NBD_RT_DISCONNECTED		2
#define NBD_RT_HAS_PID_FILE		3
#define NBD_RT_HAS_CONFIG_REF		4
#define NBD_RT_BOUND			5
#define NBD_RT_DESTROY_ON_DISCONNECT	6
#define NBD_RT_DISCONNECT_ON_CLOSE	7

#define NBD_DESTROY_ON_DISCONNECT	0
#define NBD_DISCONNECT_REQUESTED	1

struct nbd_config {
	u32 flags;
	unsigned long runtime_flags;
	u64 dead_conn_timeout;

	struct nbd_sock **socks;
	int num_connections;
	atomic_t live_connections;
	wait_queue_head_t conn_wait;

	atomic_t recv_threads;
	wait_queue_head_t recv_wq;
	loff_t blksize;
	loff_t bytesize;
#if IS_ENABLED(CONFIG_DEBUG_FS)
	struct dentry *dbg_dir;
#endif
};

struct nbd_device {
	struct blk_mq_tag_set tag_set;

	int index;
	refcount_t config_refs;
	refcount_t refs;
	struct nbd_config *config;
	struct mutex config_lock;
	struct gendisk *disk;
	struct workqueue_struct *recv_workq;

	struct list_head list;
	struct task_struct *task_recv;
	struct task_struct *task_setup;

	struct completion *destroy_complete;
	unsigned long flags;
};

#define NBD_CMD_REQUEUED	1

struct nbd_cmd {
	struct nbd_device *nbd;
	struct mutex lock;
	int index;
	int cookie;
	int retries;
	blk_status_t status;
	unsigned long flags;
	u32 cmd_cookie;
};

#if IS_ENABLED(CONFIG_DEBUG_FS)
static struct dentry *nbd_dbg_dir;
#endif

#define nbd_name(nbd) ((nbd)->disk->disk_name)

#define NBD_MAGIC 0x68797548

#define NBD_DEF_BLKSIZE 1024

static unsigned int nbds_max = 16;
static int max_part = 16;
static int part_shift;
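
/*
 * Illustrative note (assumed usage, values made up): nbds_max and
 * max_part are module parameters, so the device count and per-device
 * partition support can be tuned at load time, e.g.
 *
 *	modprobe nbd nbds_max=4 max_part=8
 *
 * nbds_max bounds how many nbd%d devices are created at init time and
 * max_part (rounded to a power of two via part_shift) bounds the
 * number of partitions per device.
 */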

static int nbd_dev_dbg_init(struct nbd_device *nbd);
static void nbd_dev_dbg_close(struct nbd_device *nbd);
static void nbd_config_put(struct nbd_device *nbd);
static void nbd_connect_reply(struct genl_info *info, int index);
static int nbd_genl_status(struct sk_buff *skb, struct genl_info *info);
static void nbd_dead_link_work(struct work_struct *work);
static void nbd_disconnect_and_put(struct nbd_device *nbd);

static inline struct device *nbd_to_dev(struct nbd_device *nbd)
{
	return disk_to_dev(nbd->disk);
}

static void nbd_requeue_cmd(struct nbd_cmd *cmd)
{
	struct request *req = blk_mq_rq_from_pdu(cmd);

	if (!test_and_set_bit(NBD_CMD_REQUEUED, &cmd->flags))
		blk_mq_requeue_request(req, true);
}

#define NBD_COOKIE_BITS 32

static u64 nbd_cmd_handle(struct nbd_cmd *cmd)
{
	struct request *req = blk_mq_rq_from_pdu(cmd);
	u32 tag = blk_mq_unique_tag(req);
	u64 cookie = cmd->cmd_cookie;

	return (cookie << NBD_COOKIE_BITS) | tag;
}

static u32 nbd_handle_to_tag(u64 handle)
{
	return (u32)handle;
}

static u32 nbd_handle_to_cookie(u64 handle)
{
	return (u32)(handle >> NBD_COOKIE_BITS);
}

static const char *nbdcmd_to_ascii(int cmd)
{
	switch (cmd) {
	case NBD_CMD_READ: return "read";
	case NBD_CMD_WRITE: return "write";
	case NBD_CMD_DISC: return "disconnect";
	case NBD_CMD_FLUSH: return "flush";
	case NBD_CMD_TRIM: return "trim/discard";
	}
	return "invalid";
}

static ssize_t pid_show(struct device *dev,
			struct device_attribute *attr, char *buf)
{
	struct gendisk *disk = dev_to_disk(dev);
	struct nbd_device *nbd = (struct nbd_device *)disk->private_data;

	return sprintf(buf, "%d\n", task_pid_nr(nbd->task_recv));
}

static const struct device_attribute pid_attr = {
	.attr = { .name = "pid", .mode = 0444},
	.show = pid_show,
};
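
/*
 * For illustration (path assumes the usual sysfs layout): once a
 * server is attached, the pid of the receiving task is exposed via
 * the attribute above, e.g.
 *
 *	$ cat /sys/block/nbd0/pid
 *	1234
 */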

static void nbd_dev_remove(struct nbd_device *nbd)
{
	struct gendisk *disk = nbd->disk;
	struct request_queue *q;

	if (disk) {
		q = disk->queue;
		del_gendisk(disk);
		blk_cleanup_queue(q);
		blk_mq_free_tag_set(&nbd->tag_set);
		disk->private_data = NULL;
		put_disk(disk);
	}

	/*
	 * Do this last, just before the nbd is freed, to make sure that
	 * the disk and the related kobject have been totally removed and
	 * cannot be created a second time.
	 */
	if (test_bit(NBD_DESTROY_ON_DISCONNECT, &nbd->flags) && nbd->destroy_complete)
		complete(nbd->destroy_complete);

	kfree(nbd);
}

static void nbd_put(struct nbd_device *nbd)
{
	if (refcount_dec_and_mutex_lock(&nbd->refs,
					&nbd_index_mutex)) {
		idr_remove(&nbd_index_idr, nbd->index);
		nbd_dev_remove(nbd);
		mutex_unlock(&nbd_index_mutex);
	}
}

static int nbd_disconnected(struct nbd_config *config)
{
	return test_bit(NBD_RT_DISCONNECTED, &config->runtime_flags) ||
		test_bit(NBD_RT_DISCONNECT_REQUESTED, &config->runtime_flags);
}

static void nbd_mark_nsock_dead(struct nbd_device *nbd, struct nbd_sock *nsock,
				int notify)
{
	if (!nsock->dead && notify && !nbd_disconnected(nbd->config)) {
		struct link_dead_args *args;
		args = kmalloc(sizeof(struct link_dead_args), GFP_NOIO);
		if (args) {
			INIT_WORK(&args->work, nbd_dead_link_work);
			args->index = nbd->index;
			queue_work(system_wq, &args->work);
		}
	}
	if (!nsock->dead) {
		kernel_sock_shutdown(nsock->sock, SHUT_RDWR);
		if (atomic_dec_return(&nbd->config->live_connections) == 0) {
			if (test_and_clear_bit(NBD_RT_DISCONNECT_REQUESTED,
					       &nbd->config->runtime_flags)) {
				set_bit(NBD_RT_DISCONNECTED,
					&nbd->config->runtime_flags);
				dev_info(nbd_to_dev(nbd),
					 "Disconnected due to user request.\n");
			}
		}
	}
	nsock->dead = true;
	nsock->pending = NULL;
	nsock->sent = 0;
}

static void nbd_size_clear(struct nbd_device *nbd)
{
	if (nbd->config->bytesize) {
		set_capacity(nbd->disk, 0);
		kobject_uevent(&nbd_to_dev(nbd)->kobj, KOBJ_CHANGE);
	}
}

static void nbd_size_update(struct nbd_device *nbd)
{
	struct nbd_config *config = nbd->config;
	struct block_device *bdev = bdget_disk(nbd->disk, 0);

	if (config->flags & NBD_FLAG_SEND_TRIM) {
		nbd->disk->queue->limits.discard_granularity = config->blksize;
		nbd->disk->queue->limits.discard_alignment = config->blksize;
		blk_queue_max_discard_sectors(nbd->disk->queue, UINT_MAX);
	}
	blk_queue_logical_block_size(nbd->disk->queue, config->blksize);
	blk_queue_physical_block_size(nbd->disk->queue, config->blksize);
	set_capacity(nbd->disk, config->bytesize >> 9);
	if (bdev) {
		if (bdev->bd_disk) {
			bd_set_size(bdev, config->bytesize);
			set_blocksize(bdev, config->blksize);
		} else
			bdev->bd_invalidated = 1;
		bdput(bdev);
	}
	kobject_uevent(&nbd_to_dev(nbd)->kobj, KOBJ_CHANGE);
}

static void nbd_size_set(struct nbd_device *nbd, loff_t blocksize,
			 loff_t nr_blocks)
{
	struct nbd_config *config = nbd->config;
	config->blksize = blocksize;
	config->bytesize = blocksize * nr_blocks;
	if (nbd->task_recv != NULL)
		nbd_size_update(nbd);
}
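
/*
 * A worked example of the sizing math above (illustrative values):
 * blocksize = 4096 and nr_blocks = 262144 give bytesize = 1 GiB
 * (1073741824 bytes), which set_capacity() records as
 * bytesize >> 9 = 2097152 512-byte sectors.
 */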
"failed" : "done"); 339 340 blk_mq_end_request(req, cmd->status); 341 } 342 343 /* 344 * Forcibly shutdown the socket causing all listeners to error 345 */ 346 static void sock_shutdown(struct nbd_device *nbd) 347 { 348 struct nbd_config *config = nbd->config; 349 int i; 350 351 if (config->num_connections == 0) 352 return; 353 if (test_and_set_bit(NBD_RT_DISCONNECTED, &config->runtime_flags)) 354 return; 355 356 for (i = 0; i < config->num_connections; i++) { 357 struct nbd_sock *nsock = config->socks[i]; 358 mutex_lock(&nsock->tx_lock); 359 nbd_mark_nsock_dead(nbd, nsock, 0); 360 mutex_unlock(&nsock->tx_lock); 361 } 362 dev_warn(disk_to_dev(nbd->disk), "shutting down sockets\n"); 363 } 364 365 static u32 req_to_nbd_cmd_type(struct request *req) 366 { 367 switch (req_op(req)) { 368 case REQ_OP_DISCARD: 369 return NBD_CMD_TRIM; 370 case REQ_OP_FLUSH: 371 return NBD_CMD_FLUSH; 372 case REQ_OP_WRITE: 373 return NBD_CMD_WRITE; 374 case REQ_OP_READ: 375 return NBD_CMD_READ; 376 default: 377 return U32_MAX; 378 } 379 } 380 381 static enum blk_eh_timer_return nbd_xmit_timeout(struct request *req, 382 bool reserved) 383 { 384 struct nbd_cmd *cmd = blk_mq_rq_to_pdu(req); 385 struct nbd_device *nbd = cmd->nbd; 386 struct nbd_config *config; 387 388 if (!mutex_trylock(&cmd->lock)) 389 return BLK_EH_RESET_TIMER; 390 391 if (!refcount_inc_not_zero(&nbd->config_refs)) { 392 cmd->status = BLK_STS_TIMEOUT; 393 mutex_unlock(&cmd->lock); 394 goto done; 395 } 396 config = nbd->config; 397 398 if (config->num_connections > 1) { 399 dev_err_ratelimited(nbd_to_dev(nbd), 400 "Connection timed out, retrying (%d/%d alive)\n", 401 atomic_read(&config->live_connections), 402 config->num_connections); 403 /* 404 * Hooray we have more connections, requeue this IO, the submit 405 * path will put it on a real connection. 406 */ 407 if (config->socks && config->num_connections > 1) { 408 if (cmd->index < config->num_connections) { 409 struct nbd_sock *nsock = 410 config->socks[cmd->index]; 411 mutex_lock(&nsock->tx_lock); 412 /* We can have multiple outstanding requests, so 413 * we don't want to mark the nsock dead if we've 414 * already reconnected with a new socket, so 415 * only mark it dead if its the same socket we 416 * were sent out on. 417 */ 418 if (cmd->cookie == nsock->cookie) 419 nbd_mark_nsock_dead(nbd, nsock, 1); 420 mutex_unlock(&nsock->tx_lock); 421 } 422 mutex_unlock(&cmd->lock); 423 nbd_requeue_cmd(cmd); 424 nbd_config_put(nbd); 425 return BLK_EH_DONE; 426 } 427 } 428 429 if (!nbd->tag_set.timeout) { 430 /* 431 * Userspace sets timeout=0 to disable socket disconnection, 432 * so just warn and reset the timer. 433 */ 434 cmd->retries++; 435 dev_info(nbd_to_dev(nbd), "Possible stuck request %p: control (%s@%llu,%uB). Runtime %u seconds\n", 436 req, nbdcmd_to_ascii(req_to_nbd_cmd_type(req)), 437 (unsigned long long)blk_rq_pos(req) << 9, 438 blk_rq_bytes(req), (req->timeout / HZ) * cmd->retries); 439 440 mutex_unlock(&cmd->lock); 441 nbd_config_put(nbd); 442 return BLK_EH_RESET_TIMER; 443 } 444 445 dev_err_ratelimited(nbd_to_dev(nbd), "Connection timed out\n"); 446 set_bit(NBD_RT_TIMEDOUT, &config->runtime_flags); 447 cmd->status = BLK_STS_IOERR; 448 mutex_unlock(&cmd->lock); 449 sock_shutdown(nbd); 450 nbd_config_put(nbd); 451 done: 452 blk_mq_complete_request(req); 453 return BLK_EH_DONE; 454 } 455 456 /* 457 * Send or receive packet. 

/*
 * Send or receive packet.
 */
static int sock_xmit(struct nbd_device *nbd, int index, int send,
		     struct iov_iter *iter, int msg_flags, int *sent)
{
	struct nbd_config *config = nbd->config;
	struct socket *sock = config->socks[index]->sock;
	int result;
	struct msghdr msg;
	unsigned int noreclaim_flag;

	if (unlikely(!sock)) {
		dev_err_ratelimited(disk_to_dev(nbd->disk),
			"Attempted %s on closed socket in sock_xmit\n",
			(send ? "send" : "recv"));
		return -EINVAL;
	}

	msg.msg_iter = *iter;

	noreclaim_flag = memalloc_noreclaim_save();
	do {
		sock->sk->sk_allocation = GFP_NOIO | __GFP_MEMALLOC;
		msg.msg_name = NULL;
		msg.msg_namelen = 0;
		msg.msg_control = NULL;
		msg.msg_controllen = 0;
		msg.msg_flags = msg_flags | MSG_NOSIGNAL;

		if (send)
			result = sock_sendmsg(sock, &msg);
		else
			result = sock_recvmsg(sock, &msg, msg.msg_flags);

		if (result <= 0) {
			if (result == 0)
				result = -EPIPE; /* short read */
			break;
		}
		if (sent)
			*sent += result;
	} while (msg_data_left(&msg));

	memalloc_noreclaim_restore(noreclaim_flag);

	return result;
}

/*
 * Different settings for sk->sk_sndtimeo can result in different return values
 * if there is a signal pending when we enter sendmsg, because reasons?
 */
static inline int was_interrupted(int result)
{
	return result == -ERESTARTSYS || result == -EINTR;
}
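
/*
 * For reference: the control header built and sent by nbd_send_cmd()
 * below is the classic NBD request from <linux/nbd.h>, with all
 * fields in network (big-endian) byte order:
 *
 *	struct nbd_request {
 *		__be32 magic;		// NBD_REQUEST_MAGIC (0x25609513)
 *		__be32 type;		// command type and flags
 *		char   handle[8];	// echoed back verbatim in the reply
 *		__be64 from;		// byte offset into the export
 *		__be32 len;		// payload length in bytes
 *	};
 *
 * The handle carries the blk-mq tag plus the cmd_cookie (see
 * nbd_cmd_handle()), which is how replies are matched to requests.
 */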

/* always call with the tx_lock held */
static int nbd_send_cmd(struct nbd_device *nbd, struct nbd_cmd *cmd, int index)
{
	struct request *req = blk_mq_rq_from_pdu(cmd);
	struct nbd_config *config = nbd->config;
	struct nbd_sock *nsock = config->socks[index];
	int result;
	struct nbd_request request = {.magic = htonl(NBD_REQUEST_MAGIC)};
	struct kvec iov = {.iov_base = &request, .iov_len = sizeof(request)};
	struct iov_iter from;
	unsigned long size = blk_rq_bytes(req);
	struct bio *bio;
	u64 handle;
	u32 type;
	u32 nbd_cmd_flags = 0;
	int sent = nsock->sent, skip = 0;

	iov_iter_kvec(&from, WRITE, &iov, 1, sizeof(request));

	type = req_to_nbd_cmd_type(req);
	if (type == U32_MAX)
		return -EIO;

	if (rq_data_dir(req) == WRITE &&
	    (config->flags & NBD_FLAG_READ_ONLY)) {
		dev_err_ratelimited(disk_to_dev(nbd->disk),
				    "Write on read-only\n");
		return -EIO;
	}

	if (req->cmd_flags & REQ_FUA)
		nbd_cmd_flags |= NBD_CMD_FLAG_FUA;

	/* We did a partial send previously, and we at least sent the whole
	 * request struct, so just go and send the rest of the pages in the
	 * request.
	 */
	if (sent) {
		if (sent >= sizeof(request)) {
			skip = sent - sizeof(request);

			/* initialize handle for tracing purposes */
			handle = nbd_cmd_handle(cmd);

			goto send_pages;
		}
		iov_iter_advance(&from, sent);
	} else {
		cmd->cmd_cookie++;
	}
	cmd->index = index;
	cmd->cookie = nsock->cookie;
	cmd->retries = 0;
	request.type = htonl(type | nbd_cmd_flags);
	if (type != NBD_CMD_FLUSH) {
		request.from = cpu_to_be64((u64)blk_rq_pos(req) << 9);
		request.len = htonl(size);
	}
	handle = nbd_cmd_handle(cmd);
	memcpy(request.handle, &handle, sizeof(handle));

	trace_nbd_send_request(&request, nbd->index, blk_mq_rq_from_pdu(cmd));

	dev_dbg(nbd_to_dev(nbd), "request %p: sending control (%s@%llu,%uB)\n",
		req, nbdcmd_to_ascii(type),
		(unsigned long long)blk_rq_pos(req) << 9, blk_rq_bytes(req));
	result = sock_xmit(nbd, index, 1, &from,
			(type == NBD_CMD_WRITE) ? MSG_MORE : 0, &sent);
	trace_nbd_header_sent(req, handle);
	if (result <= 0) {
		if (was_interrupted(result)) {
			/* If we haven't sent anything we can just return BUSY,
			 * however if we have sent something we need to make
			 * sure we only allow this req to be sent until we are
			 * completely done.
			 */
			if (sent) {
				nsock->pending = req;
				nsock->sent = sent;
			}
			set_bit(NBD_CMD_REQUEUED, &cmd->flags);
			return BLK_STS_RESOURCE;
		}
		dev_err_ratelimited(disk_to_dev(nbd->disk),
				    "Send control failed (result %d)\n", result);
		return -EAGAIN;
	}
send_pages:
	if (type != NBD_CMD_WRITE)
		goto out;

	bio = req->bio;
	while (bio) {
		struct bio *next = bio->bi_next;
		struct bvec_iter iter;
		struct bio_vec bvec;

		bio_for_each_segment(bvec, bio, iter) {
			bool is_last = !next && bio_iter_last(bvec, iter);
			int flags = is_last ? 0 : MSG_MORE;

			dev_dbg(nbd_to_dev(nbd), "request %p: sending %d bytes data\n",
				req, bvec.bv_len);
			iov_iter_bvec(&from, WRITE, &bvec, 1, bvec.bv_len);
			if (skip) {
				if (skip >= iov_iter_count(&from)) {
					skip -= iov_iter_count(&from);
					continue;
				}
				iov_iter_advance(&from, skip);
				skip = 0;
			}
			result = sock_xmit(nbd, index, 1, &from, flags, &sent);
			if (result <= 0) {
				if (was_interrupted(result)) {
					/* We've already sent the header, we
					 * have no choice but to set pending and
					 * return BUSY.
					 */
					nsock->pending = req;
					nsock->sent = sent;
					set_bit(NBD_CMD_REQUEUED, &cmd->flags);
					return BLK_STS_RESOURCE;
				}
				dev_err(disk_to_dev(nbd->disk),
					"Send data failed (result %d)\n",
					result);
				return -EAGAIN;
			}
			/*
			 * The completion might already have come in,
			 * so break for the last one instead of letting
			 * the iterator do it. This prevents use-after-free
			 * of the bio.
			 */
			if (is_last)
				break;
		}
		bio = next;
	}
out:
	trace_nbd_payload_sent(req, handle);
	nsock->pending = NULL;
	nsock->sent = 0;
	return 0;
}

/* NULL returned = something went wrong, inform userspace */
static struct nbd_cmd *nbd_read_stat(struct nbd_device *nbd, int index)
{
	struct nbd_config *config = nbd->config;
	int result;
	struct nbd_reply reply;
	struct nbd_cmd *cmd;
	struct request *req = NULL;
	u64 handle;
	u16 hwq;
	u32 tag;
	struct kvec iov = {.iov_base = &reply, .iov_len = sizeof(reply)};
	struct iov_iter to;
	int ret = 0;

	reply.magic = 0;
	iov_iter_kvec(&to, READ, &iov, 1, sizeof(reply));
	result = sock_xmit(nbd, index, 0, &to, MSG_WAITALL, NULL);
	if (result <= 0) {
		if (!nbd_disconnected(config))
			dev_err(disk_to_dev(nbd->disk),
				"Receive control failed (result %d)\n", result);
		return ERR_PTR(result);
	}

	if (ntohl(reply.magic) != NBD_REPLY_MAGIC) {
		dev_err(disk_to_dev(nbd->disk), "Wrong magic (0x%lx)\n",
			(unsigned long)ntohl(reply.magic));
		return ERR_PTR(-EPROTO);
	}

	memcpy(&handle, reply.handle, sizeof(handle));
	tag = nbd_handle_to_tag(handle);
	hwq = blk_mq_unique_tag_to_hwq(tag);
	if (hwq < nbd->tag_set.nr_hw_queues)
		req = blk_mq_tag_to_rq(nbd->tag_set.tags[hwq],
				       blk_mq_unique_tag_to_tag(tag));
	if (!req || !blk_mq_request_started(req)) {
		dev_err(disk_to_dev(nbd->disk), "Unexpected reply (%d) %p\n",
			tag, req);
		return ERR_PTR(-ENOENT);
	}
	trace_nbd_header_received(req, handle);
	cmd = blk_mq_rq_to_pdu(req);

	mutex_lock(&cmd->lock);
	if (cmd->cmd_cookie != nbd_handle_to_cookie(handle)) {
		dev_err(disk_to_dev(nbd->disk), "Double reply on req %p, cmd_cookie %u, handle cookie %u\n",
			req, cmd->cmd_cookie, nbd_handle_to_cookie(handle));
		ret = -ENOENT;
		goto out;
	}
	if (cmd->status != BLK_STS_OK) {
		dev_err(disk_to_dev(nbd->disk), "Command already handled %p\n",
			req);
		ret = -ENOENT;
		goto out;
	}
	if (test_bit(NBD_CMD_REQUEUED, &cmd->flags)) {
		dev_err(disk_to_dev(nbd->disk), "Raced with timeout on req %p\n",
			req);
		ret = -ENOENT;
		goto out;
	}
	if (ntohl(reply.error)) {
		dev_err(disk_to_dev(nbd->disk), "Other side returned error (%d)\n",
			ntohl(reply.error));
		cmd->status = BLK_STS_IOERR;
		goto out;
	}

	dev_dbg(nbd_to_dev(nbd), "request %p: got reply\n", req);
	if (rq_data_dir(req) != WRITE) {
		struct req_iterator iter;
		struct bio_vec bvec;

		rq_for_each_segment(bvec, req, iter) {
			iov_iter_bvec(&to, READ, &bvec, 1, bvec.bv_len);
			result = sock_xmit(nbd, index, 0, &to, MSG_WAITALL, NULL);
			if (result <= 0) {
				dev_err(disk_to_dev(nbd->disk), "Receive data failed (result %d)\n",
					result);
				/*
				 * If we've disconnected or we only have 1
				 * connection then we need to make sure we
				 * complete this request, otherwise error out
				 * and let the timeout stuff handle resubmitting
				 * this request onto another connection.
				 */
				if (nbd_disconnected(config) ||
				    config->num_connections <= 1) {
					cmd->status = BLK_STS_IOERR;
					goto out;
				}
				ret = -EIO;
				goto out;
			}
			dev_dbg(nbd_to_dev(nbd), "request %p: got %d bytes data\n",
				req, bvec.bv_len);
		}
	}
out:
	trace_nbd_payload_received(req, handle);
	mutex_unlock(&cmd->lock);
	return ret ? ERR_PTR(ret) : cmd;
}
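
/*
 * For reference: the header parsed above is the classic NBD reply
 * from <linux/nbd.h> (big-endian on the wire):
 *
 *	struct nbd_reply {
 *		__be32 magic;		// NBD_REPLY_MAGIC (0x67446698)
 *		__be32 error;		// 0 on success
 *		char   handle[8];	// matches the request's handle
 *	};
 *
 * For reads, the payload immediately follows the reply header on the
 * same socket, which is why the bvec receive loop runs right after.
 */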

static void recv_work(struct work_struct *work)
{
	struct recv_thread_args *args = container_of(work,
						     struct recv_thread_args,
						     work);
	struct nbd_device *nbd = args->nbd;
	struct nbd_config *config = nbd->config;
	struct nbd_cmd *cmd;

	while (1) {
		cmd = nbd_read_stat(nbd, args->index);
		if (IS_ERR(cmd)) {
			struct nbd_sock *nsock = config->socks[args->index];

			mutex_lock(&nsock->tx_lock);
			nbd_mark_nsock_dead(nbd, nsock, 1);
			mutex_unlock(&nsock->tx_lock);
			break;
		}

		blk_mq_complete_request(blk_mq_rq_from_pdu(cmd));
	}
	atomic_dec(&config->recv_threads);
	wake_up(&config->recv_wq);
	nbd_config_put(nbd);
	kfree(args);
}

static bool nbd_clear_req(struct request *req, void *data, bool reserved)
{
	struct nbd_cmd *cmd = blk_mq_rq_to_pdu(req);

	mutex_lock(&cmd->lock);
	cmd->status = BLK_STS_IOERR;
	mutex_unlock(&cmd->lock);

	blk_mq_complete_request(req);
	return true;
}

static void nbd_clear_que(struct nbd_device *nbd)
{
	blk_mq_quiesce_queue(nbd->disk->queue);
	blk_mq_tagset_busy_iter(&nbd->tag_set, nbd_clear_req, NULL);
	blk_mq_unquiesce_queue(nbd->disk->queue);
	dev_dbg(disk_to_dev(nbd->disk), "queue cleared\n");
}

static int find_fallback(struct nbd_device *nbd, int index)
{
	struct nbd_config *config = nbd->config;
	int new_index = -1;
	struct nbd_sock *nsock = config->socks[index];
	int fallback = nsock->fallback_index;

	if (test_bit(NBD_RT_DISCONNECTED, &config->runtime_flags))
		return new_index;

	if (config->num_connections <= 1) {
		dev_err_ratelimited(disk_to_dev(nbd->disk),
				    "Attempted send on invalid socket\n");
		return new_index;
	}

	if (fallback >= 0 && fallback < config->num_connections &&
	    !config->socks[fallback]->dead)
		return fallback;

	if (nsock->fallback_index < 0 ||
	    nsock->fallback_index >= config->num_connections ||
	    config->socks[nsock->fallback_index]->dead) {
		int i;
		for (i = 0; i < config->num_connections; i++) {
			if (i == index)
				continue;
			if (!config->socks[i]->dead) {
				new_index = i;
				break;
			}
		}
		nsock->fallback_index = new_index;
		if (new_index < 0) {
			dev_err_ratelimited(disk_to_dev(nbd->disk),
					    "Dead connection, failed to find a fallback\n");
			return new_index;
		}
	}
	new_index = nsock->fallback_index;
	return new_index;
}

static int wait_for_reconnect(struct nbd_device *nbd)
{
	struct nbd_config *config = nbd->config;
	if (!config->dead_conn_timeout)
		return 0;
	if (test_bit(NBD_RT_DISCONNECTED, &config->runtime_flags))
		return 0;
	return wait_event_timeout(config->conn_wait,
				  atomic_read(&config->live_connections) > 0,
				  config->dead_conn_timeout) > 0;
}

static int nbd_handle_cmd(struct nbd_cmd *cmd, int index)
{
	struct request *req = blk_mq_rq_from_pdu(cmd);
	struct nbd_device *nbd = cmd->nbd;
	struct nbd_config *config;
	struct nbd_sock *nsock;
	int ret;

	if (!refcount_inc_not_zero(&nbd->config_refs)) {
		dev_err_ratelimited(disk_to_dev(nbd->disk),
				    "Socks array is empty\n");
		blk_mq_start_request(req);
		return -EINVAL;
	}
	config = nbd->config;

	if (index >= config->num_connections) {
		dev_err_ratelimited(disk_to_dev(nbd->disk),
				    "Attempted send on invalid socket\n");
		nbd_config_put(nbd);
		blk_mq_start_request(req);
		return -EINVAL;
	}
	cmd->status = BLK_STS_OK;
again:
	nsock = config->socks[index];
	mutex_lock(&nsock->tx_lock);
	if (nsock->dead) {
		int old_index = index;
		index = find_fallback(nbd, index);
		mutex_unlock(&nsock->tx_lock);
		if (index < 0) {
			if (wait_for_reconnect(nbd)) {
				index = old_index;
				goto again;
			}
			/* All the sockets should already be down at this point,
			 * we just want to make sure that DISCONNECTED is set so
			 * any requests that come in that were queued waiting
			 * for the reconnect timer don't trigger the timer again
			 * and instead just error out.
			 */
			sock_shutdown(nbd);
			nbd_config_put(nbd);
			blk_mq_start_request(req);
			return -EIO;
		}
		goto again;
	}

	/* Handle the case that we have a pending request that was partially
	 * transmitted that _has_ to be serviced first. We need to call requeue
	 * here so that it gets put _after_ the request that is already on the
	 * dispatch list.
	 */
	blk_mq_start_request(req);
	if (unlikely(nsock->pending && nsock->pending != req)) {
		nbd_requeue_cmd(cmd);
		ret = 0;
		goto out;
	}
	/*
	 * Some failures are related to the link going down, so anything that
	 * returns EAGAIN can be retried on a different socket.
	 */
	ret = nbd_send_cmd(nbd, cmd, index);
	if (ret == -EAGAIN) {
		dev_err_ratelimited(disk_to_dev(nbd->disk),
				    "Request send failed, requeueing\n");
		nbd_mark_nsock_dead(nbd, nsock, 1);
		nbd_requeue_cmd(cmd);
		ret = 0;
	}
out:
	mutex_unlock(&nsock->tx_lock);
	nbd_config_put(nbd);
	return ret;
}
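
/*
 * Note: each blk-mq hardware queue maps 1:1 onto a configured socket
 * (nbd_start_device() sets nr_hw_queues = num_connections), so the
 * hctx->queue_num passed below doubles as the starting socket index
 * for nbd_handle_cmd(); find_fallback() only kicks in when that
 * socket is dead.
 */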

static blk_status_t nbd_queue_rq(struct blk_mq_hw_ctx *hctx,
				 const struct blk_mq_queue_data *bd)
{
	struct nbd_cmd *cmd = blk_mq_rq_to_pdu(bd->rq);
	int ret;

	/*
	 * Since we look at the bio's to send the request over the network we
	 * need to make sure the completion work doesn't mark this request done
	 * before we are done doing our send. This keeps us from dereferencing
	 * freed data if we have particularly fast completions (ie we get the
	 * completion before we exit sock_xmit on the last bvec) or in the case
	 * that the server is misbehaving (or there was an error) before we're
	 * done sending everything over the wire.
	 */
	mutex_lock(&cmd->lock);
	clear_bit(NBD_CMD_REQUEUED, &cmd->flags);

	/* We can be called directly from the user space process, which means we
	 * could possibly have signals pending so our sendmsg will fail. In
	 * this case we need to return that we are busy, otherwise error out as
	 * appropriate.
	 */
	ret = nbd_handle_cmd(cmd, hctx->queue_num);
	if (ret < 0)
		ret = BLK_STS_IOERR;
	else if (!ret)
		ret = BLK_STS_OK;
	mutex_unlock(&cmd->lock);

	return ret;
}

static struct socket *nbd_get_socket(struct nbd_device *nbd, unsigned long fd,
				     int *err)
{
	struct socket *sock;

	*err = 0;
	sock = sockfd_lookup(fd, err);
	if (!sock)
		return NULL;

	if (sock->ops->shutdown == sock_no_shutdown) {
		dev_err(disk_to_dev(nbd->disk), "Unsupported socket: shutdown callout must be supported.\n");
		*err = -EINVAL;
		return NULL;
	}

	return sock;
}

static int nbd_add_socket(struct nbd_device *nbd, unsigned long arg,
			  bool netlink)
{
	struct nbd_config *config = nbd->config;
	struct socket *sock;
	struct nbd_sock **socks;
	struct nbd_sock *nsock;
	int err;

	sock = nbd_get_socket(nbd, arg, &err);
	if (!sock)
		return err;

	if (!netlink && !nbd->task_setup &&
	    !test_bit(NBD_RT_BOUND, &config->runtime_flags))
		nbd->task_setup = current;

	if (!netlink &&
	    (nbd->task_setup != current ||
	     test_bit(NBD_RT_BOUND, &config->runtime_flags))) {
		dev_err(disk_to_dev(nbd->disk),
			"Device being setup by another task");
		sockfd_put(sock);
		return -EBUSY;
	}

	socks = krealloc(config->socks, (config->num_connections + 1) *
			 sizeof(struct nbd_sock *), GFP_KERNEL);
	if (!socks) {
		sockfd_put(sock);
		return -ENOMEM;
	}
	nsock = kzalloc(sizeof(struct nbd_sock), GFP_KERNEL);
	if (!nsock) {
		sockfd_put(sock);
		return -ENOMEM;
	}

	config->socks = socks;

	nsock->fallback_index = -1;
	nsock->dead = false;
	mutex_init(&nsock->tx_lock);
	nsock->sock = sock;
	nsock->pending = NULL;
	nsock->sent = 0;
	nsock->cookie = 0;
	socks[config->num_connections++] = nsock;
	atomic_inc(&config->live_connections);

	return 0;
}

static int nbd_reconnect_socket(struct nbd_device *nbd, unsigned long arg)
{
	struct nbd_config *config = nbd->config;
	struct socket *sock, *old;
	struct recv_thread_args *args;
	int i;
	int err;

	sock = nbd_get_socket(nbd, arg, &err);
	if (!sock)
		return err;

	args = kzalloc(sizeof(*args), GFP_KERNEL);
	if (!args) {
		sockfd_put(sock);
		return -ENOMEM;
	}

	for (i = 0; i < config->num_connections; i++) {
		struct nbd_sock *nsock = config->socks[i];

		if (!nsock->dead)
			continue;

		mutex_lock(&nsock->tx_lock);
		if (!nsock->dead) {
			mutex_unlock(&nsock->tx_lock);
			continue;
		}
		sk_set_memalloc(sock->sk);
		if (nbd->tag_set.timeout)
			sock->sk->sk_sndtimeo = nbd->tag_set.timeout;
		atomic_inc(&config->recv_threads);
		refcount_inc(&nbd->config_refs);
		old = nsock->sock;
		nsock->fallback_index = -1;
		nsock->sock = sock;
		nsock->dead = false;
		INIT_WORK(&args->work, recv_work);
		args->index = i;
		args->nbd = nbd;
		nsock->cookie++;
		mutex_unlock(&nsock->tx_lock);
		sockfd_put(old);

		clear_bit(NBD_RT_DISCONNECTED, &config->runtime_flags);

		/* We take the tx_mutex in an error path in the recv_work, so we
		 * need to queue_work outside of the tx_mutex.
		 */
		queue_work(nbd->recv_workq, &args->work);

		atomic_inc(&config->live_connections);
		wake_up(&config->conn_wait);
		return 0;
	}
	sockfd_put(sock);
	kfree(args);
	return -ENOSPC;
}

static void nbd_bdev_reset(struct block_device *bdev)
{
	if (bdev->bd_openers > 1)
		return;
	bd_set_size(bdev, 0);
}

static void nbd_parse_flags(struct nbd_device *nbd)
{
	struct nbd_config *config = nbd->config;
	if (config->flags & NBD_FLAG_READ_ONLY)
		set_disk_ro(nbd->disk, true);
	else
		set_disk_ro(nbd->disk, false);
	if (config->flags & NBD_FLAG_SEND_TRIM)
		blk_queue_flag_set(QUEUE_FLAG_DISCARD, nbd->disk->queue);
	if (config->flags & NBD_FLAG_SEND_FLUSH) {
		if (config->flags & NBD_FLAG_SEND_FUA)
			blk_queue_write_cache(nbd->disk->queue, true, true);
		else
			blk_queue_write_cache(nbd->disk->queue, true, false);
	}
	else
		blk_queue_write_cache(nbd->disk->queue, false, false);
}

static void send_disconnects(struct nbd_device *nbd)
{
	struct nbd_config *config = nbd->config;
	struct nbd_request request = {
		.magic = htonl(NBD_REQUEST_MAGIC),
		.type = htonl(NBD_CMD_DISC),
	};
	struct kvec iov = {.iov_base = &request, .iov_len = sizeof(request)};
	struct iov_iter from;
	int i, ret;

	for (i = 0; i < config->num_connections; i++) {
		struct nbd_sock *nsock = config->socks[i];

		iov_iter_kvec(&from, WRITE, &iov, 1, sizeof(request));
		mutex_lock(&nsock->tx_lock);
		ret = sock_xmit(nbd, i, 1, &from, 0, NULL);
		if (ret <= 0)
			dev_err(disk_to_dev(nbd->disk),
				"Send disconnect failed %d\n", ret);
		mutex_unlock(&nsock->tx_lock);
	}
}

static int nbd_disconnect(struct nbd_device *nbd)
{
	struct nbd_config *config = nbd->config;

	dev_info(disk_to_dev(nbd->disk), "NBD_DISCONNECT\n");
	set_bit(NBD_RT_DISCONNECT_REQUESTED, &config->runtime_flags);
	set_bit(NBD_DISCONNECT_REQUESTED, &nbd->flags);
	send_disconnects(nbd);
	return 0;
}

static void nbd_clear_sock(struct nbd_device *nbd)
{
	sock_shutdown(nbd);
	nbd_clear_que(nbd);
	nbd->task_setup = NULL;
}

static void nbd_config_put(struct nbd_device *nbd)
{
	if (refcount_dec_and_mutex_lock(&nbd->config_refs,
					&nbd->config_lock)) {
		struct nbd_config *config = nbd->config;
		nbd_dev_dbg_close(nbd);
		nbd_size_clear(nbd);
		if (test_and_clear_bit(NBD_RT_HAS_PID_FILE,
				       &config->runtime_flags))
			device_remove_file(disk_to_dev(nbd->disk), &pid_attr);
		nbd->task_recv = NULL;
		nbd_clear_sock(nbd);
		if (config->num_connections) {
			int i;
			for (i = 0; i < config->num_connections; i++) {
				sockfd_put(config->socks[i]->sock);
				kfree(config->socks[i]);
			}
			kfree(config->socks);
		}
		kfree(nbd->config);
		nbd->config = NULL;

		if (nbd->recv_workq)
			destroy_workqueue(nbd->recv_workq);
		nbd->recv_workq = NULL;

		nbd->tag_set.timeout = 0;
		nbd->disk->queue->limits.discard_granularity = 0;
		nbd->disk->queue->limits.discard_alignment = 0;
		blk_queue_max_discard_sectors(nbd->disk->queue, UINT_MAX);
		blk_queue_flag_clear(QUEUE_FLAG_DISCARD, nbd->disk->queue);

		mutex_unlock(&nbd->config_lock);
		nbd_put(nbd);
		module_put(THIS_MODULE);
	}
}

static int nbd_start_device(struct nbd_device *nbd)
{
	struct nbd_config *config = nbd->config;
	int num_connections = config->num_connections;
	int error = 0, i;

	if (nbd->task_recv)
		return -EBUSY;
	if (!config->socks)
		return -EINVAL;
	if (num_connections > 1 &&
	    !(config->flags & NBD_FLAG_CAN_MULTI_CONN)) {
		dev_err(disk_to_dev(nbd->disk), "server does not support multiple connections per device.\n");
		return -EINVAL;
	}

	nbd->recv_workq = alloc_workqueue("knbd%d-recv",
					  WQ_MEM_RECLAIM | WQ_HIGHPRI |
					  WQ_UNBOUND, 0, nbd->index);
	if (!nbd->recv_workq) {
		dev_err(disk_to_dev(nbd->disk), "Could not allocate knbd recv work queue.\n");
		return -ENOMEM;
	}

	blk_mq_update_nr_hw_queues(&nbd->tag_set, config->num_connections);
	nbd->task_recv = current;

	nbd_parse_flags(nbd);

	error = device_create_file(disk_to_dev(nbd->disk), &pid_attr);
	if (error) {
		dev_err(disk_to_dev(nbd->disk), "device_create_file failed!\n");
		return error;
	}
	set_bit(NBD_RT_HAS_PID_FILE, &config->runtime_flags);

	nbd_dev_dbg_init(nbd);
	for (i = 0; i < num_connections; i++) {
		struct recv_thread_args *args;

		args = kzalloc(sizeof(*args), GFP_KERNEL);
		if (!args) {
			sock_shutdown(nbd);
			return -ENOMEM;
		}
		sk_set_memalloc(config->socks[i]->sock->sk);
		if (nbd->tag_set.timeout)
			config->socks[i]->sock->sk->sk_sndtimeo =
				nbd->tag_set.timeout;
		atomic_inc(&config->recv_threads);
		refcount_inc(&nbd->config_refs);
		INIT_WORK(&args->work, recv_work);
		args->nbd = nbd;
		args->index = i;
		queue_work(nbd->recv_workq, &args->work);
	}
	nbd_size_update(nbd);
	return error;
}

static int nbd_start_device_ioctl(struct nbd_device *nbd, struct block_device *bdev)
{
	struct nbd_config *config = nbd->config;
	int ret;

	ret = nbd_start_device(nbd);
	if (ret)
		return ret;

	if (max_part)
		bdev->bd_invalidated = 1;
	mutex_unlock(&nbd->config_lock);
	ret = wait_event_interruptible(config->recv_wq,
				       atomic_read(&config->recv_threads) == 0);
	if (ret) {
		sock_shutdown(nbd);
		flush_workqueue(nbd->recv_workq);
	}
	mutex_lock(&nbd->config_lock);
	nbd_bdev_reset(bdev);
	/* user requested, ignore socket errors */
	if (test_bit(NBD_RT_DISCONNECT_REQUESTED, &config->runtime_flags))
		ret = 0;
	if (test_bit(NBD_RT_TIMEDOUT, &config->runtime_flags))
		ret = -ETIMEDOUT;
	return ret;
}

static void nbd_clear_sock_ioctl(struct nbd_device *nbd,
				 struct block_device *bdev)
{
	sock_shutdown(nbd);
	__invalidate_device(bdev, true);
	nbd_bdev_reset(bdev);
	if (test_and_clear_bit(NBD_RT_HAS_CONFIG_REF,
			       &nbd->config->runtime_flags))
		nbd_config_put(nbd);
}

static bool nbd_is_valid_blksize(unsigned long blksize)
{
	if (!blksize || !is_power_of_2(blksize) || blksize < 512 ||
	    blksize > PAGE_SIZE)
		return false;
	return true;
}
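
/*
 * Example of what nbd_is_valid_blksize() accepts: powers of two
 * between 512 and PAGE_SIZE, so on a system with 4 KiB pages the
 * valid settings are exactly 512, 1024, 2048 and 4096 bytes.
 */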

static void nbd_set_cmd_timeout(struct nbd_device *nbd, u64 timeout)
{
	nbd->tag_set.timeout = timeout * HZ;
	if (timeout)
		blk_queue_rq_timeout(nbd->disk->queue, timeout * HZ);
}

/* Must be called with config_lock held */
static int __nbd_ioctl(struct block_device *bdev, struct nbd_device *nbd,
		       unsigned int cmd, unsigned long arg)
{
	struct nbd_config *config = nbd->config;

	switch (cmd) {
	case NBD_DISCONNECT:
		return nbd_disconnect(nbd);
	case NBD_CLEAR_SOCK:
		nbd_clear_sock_ioctl(nbd, bdev);
		return 0;
	case NBD_SET_SOCK:
		return nbd_add_socket(nbd, arg, false);
	case NBD_SET_BLKSIZE:
		if (!arg)
			arg = NBD_DEF_BLKSIZE;
		if (!nbd_is_valid_blksize(arg))
			return -EINVAL;
		nbd_size_set(nbd, arg,
			     div_s64(config->bytesize, arg));
		return 0;
	case NBD_SET_SIZE:
		nbd_size_set(nbd, config->blksize,
			     div_s64(arg, config->blksize));
		return 0;
	case NBD_SET_SIZE_BLOCKS:
		nbd_size_set(nbd, config->blksize, arg);
		return 0;
	case NBD_SET_TIMEOUT:
		nbd_set_cmd_timeout(nbd, arg);
		return 0;

	case NBD_SET_FLAGS:
		config->flags = arg;
		return 0;
	case NBD_DO_IT:
		return nbd_start_device_ioctl(nbd, bdev);
	case NBD_CLEAR_QUE:
		/*
		 * This is for compatibility only. The queue is always cleared
		 * by NBD_DO_IT or NBD_CLEAR_SOCK.
		 */
		return 0;
	case NBD_PRINT_DEBUG:
		/*
		 * For compatibility only, we no longer keep a list of
		 * outstanding requests.
		 */
		return 0;
	}
	return -ENOTTY;
}
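
/*
 * A minimal sketch of the legacy userspace handshake driven by the
 * ioctls above (illustrative only; error handling omitted, "sock" is
 * an already-connected socket fd, and the sizes are made-up values):
 *
 *	int nbd = open("/dev/nbd0", O_RDWR);
 *
 *	ioctl(nbd, NBD_SET_SOCK, sock);
 *	ioctl(nbd, NBD_SET_BLKSIZE, 4096UL);
 *	ioctl(nbd, NBD_SET_SIZE_BLOCKS, 262144UL);	// 1 GiB
 *	ioctl(nbd, NBD_DO_IT);	// blocks until disconnect or timeout
 */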

static int nbd_ioctl(struct block_device *bdev, fmode_t mode,
		     unsigned int cmd, unsigned long arg)
{
	struct nbd_device *nbd = bdev->bd_disk->private_data;
	struct nbd_config *config = nbd->config;
	int error = -EINVAL;

	if (!capable(CAP_SYS_ADMIN))
		return -EPERM;

	/* The block layer will pass back some non-nbd ioctls in case we have
	 * special handling for them, but we don't so just return an error.
	 */
	if (_IOC_TYPE(cmd) != 0xab)
		return -EINVAL;

	mutex_lock(&nbd->config_lock);

	/* Don't allow ioctl operations on a nbd device that was created with
	 * netlink, unless it's DISCONNECT or CLEAR_SOCK, which are fine.
	 */
	if (!test_bit(NBD_RT_BOUND, &config->runtime_flags) ||
	    (cmd == NBD_DISCONNECT || cmd == NBD_CLEAR_SOCK))
		error = __nbd_ioctl(bdev, nbd, cmd, arg);
	else
		dev_err(nbd_to_dev(nbd), "Cannot use ioctl interface on a netlink controlled device.\n");
	mutex_unlock(&nbd->config_lock);
	return error;
}

static struct nbd_config *nbd_alloc_config(void)
{
	struct nbd_config *config;

	config = kzalloc(sizeof(struct nbd_config), GFP_NOFS);
	if (!config)
		return NULL;
	atomic_set(&config->recv_threads, 0);
	init_waitqueue_head(&config->recv_wq);
	init_waitqueue_head(&config->conn_wait);
	config->blksize = NBD_DEF_BLKSIZE;
	atomic_set(&config->live_connections, 0);
	try_module_get(THIS_MODULE);
	return config;
}

static int nbd_open(struct block_device *bdev, fmode_t mode)
{
	struct nbd_device *nbd;
	int ret = 0;

	mutex_lock(&nbd_index_mutex);
	nbd = bdev->bd_disk->private_data;
	if (!nbd) {
		ret = -ENXIO;
		goto out;
	}
	if (!refcount_inc_not_zero(&nbd->refs)) {
		ret = -ENXIO;
		goto out;
	}
	if (!refcount_inc_not_zero(&nbd->config_refs)) {
		struct nbd_config *config;

		mutex_lock(&nbd->config_lock);
		if (refcount_inc_not_zero(&nbd->config_refs)) {
			mutex_unlock(&nbd->config_lock);
			goto out;
		}
		config = nbd->config = nbd_alloc_config();
		if (!config) {
			ret = -ENOMEM;
			mutex_unlock(&nbd->config_lock);
			goto out;
		}
		refcount_set(&nbd->config_refs, 1);
		refcount_inc(&nbd->refs);
		mutex_unlock(&nbd->config_lock);
		bdev->bd_invalidated = 1;
	} else if (nbd_disconnected(nbd->config)) {
		bdev->bd_invalidated = 1;
	}
out:
	mutex_unlock(&nbd_index_mutex);
	return ret;
}

static void nbd_release(struct gendisk *disk, fmode_t mode)
{
	struct nbd_device *nbd = disk->private_data;
	struct block_device *bdev = bdget_disk(disk, 0);

	if (test_bit(NBD_RT_DISCONNECT_ON_CLOSE, &nbd->config->runtime_flags) &&
	    bdev->bd_openers == 0)
		nbd_disconnect_and_put(nbd);

	nbd_config_put(nbd);
	nbd_put(nbd);
}

static const struct block_device_operations nbd_fops =
{
	.owner =	THIS_MODULE,
	.open =		nbd_open,
	.release =	nbd_release,
	.ioctl =	nbd_ioctl,
	.compat_ioctl =	nbd_ioctl,
};
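
/*
 * With CONFIG_DEBUG_FS the code below exposes per-device state under
 * /sys/kernel/debug/nbd/<disk_name>/ (assuming debugfs is mounted at
 * the usual location), e.g.:
 *
 *	/sys/kernel/debug/nbd/nbd0/tasks	receiver pid
 *	/sys/kernel/debug/nbd/nbd0/size_bytes	current bytesize
 *	/sys/kernel/debug/nbd/nbd0/timeout	request timeout (jiffies)
 *	/sys/kernel/debug/nbd/nbd0/blocksize	logical block size
 *	/sys/kernel/debug/nbd/nbd0/flags	decoded server flags
 */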
seq_puts(s, "NBD_FLAG_SEND_FLUSH\n"); 1539 if (flags & NBD_FLAG_SEND_FUA) 1540 seq_puts(s, "NBD_FLAG_SEND_FUA\n"); 1541 if (flags & NBD_FLAG_SEND_TRIM) 1542 seq_puts(s, "NBD_FLAG_SEND_TRIM\n"); 1543 1544 return 0; 1545 } 1546 1547 static int nbd_dbg_flags_open(struct inode *inode, struct file *file) 1548 { 1549 return single_open(file, nbd_dbg_flags_show, inode->i_private); 1550 } 1551 1552 static const struct file_operations nbd_dbg_flags_ops = { 1553 .open = nbd_dbg_flags_open, 1554 .read = seq_read, 1555 .llseek = seq_lseek, 1556 .release = single_release, 1557 }; 1558 1559 static int nbd_dev_dbg_init(struct nbd_device *nbd) 1560 { 1561 struct dentry *dir; 1562 struct nbd_config *config = nbd->config; 1563 1564 if (!nbd_dbg_dir) 1565 return -EIO; 1566 1567 dir = debugfs_create_dir(nbd_name(nbd), nbd_dbg_dir); 1568 if (!dir) { 1569 dev_err(nbd_to_dev(nbd), "Failed to create debugfs dir for '%s'\n", 1570 nbd_name(nbd)); 1571 return -EIO; 1572 } 1573 config->dbg_dir = dir; 1574 1575 debugfs_create_file("tasks", 0444, dir, nbd, &nbd_dbg_tasks_ops); 1576 debugfs_create_u64("size_bytes", 0444, dir, &config->bytesize); 1577 debugfs_create_u32("timeout", 0444, dir, &nbd->tag_set.timeout); 1578 debugfs_create_u64("blocksize", 0444, dir, &config->blksize); 1579 debugfs_create_file("flags", 0444, dir, nbd, &nbd_dbg_flags_ops); 1580 1581 return 0; 1582 } 1583 1584 static void nbd_dev_dbg_close(struct nbd_device *nbd) 1585 { 1586 debugfs_remove_recursive(nbd->config->dbg_dir); 1587 } 1588 1589 static int nbd_dbg_init(void) 1590 { 1591 struct dentry *dbg_dir; 1592 1593 dbg_dir = debugfs_create_dir("nbd", NULL); 1594 if (!dbg_dir) 1595 return -EIO; 1596 1597 nbd_dbg_dir = dbg_dir; 1598 1599 return 0; 1600 } 1601 1602 static void nbd_dbg_close(void) 1603 { 1604 debugfs_remove_recursive(nbd_dbg_dir); 1605 } 1606 1607 #else /* IS_ENABLED(CONFIG_DEBUG_FS) */ 1608 1609 static int nbd_dev_dbg_init(struct nbd_device *nbd) 1610 { 1611 return 0; 1612 } 1613 1614 static void nbd_dev_dbg_close(struct nbd_device *nbd) 1615 { 1616 } 1617 1618 static int nbd_dbg_init(void) 1619 { 1620 return 0; 1621 } 1622 1623 static void nbd_dbg_close(void) 1624 { 1625 } 1626 1627 #endif 1628 1629 static int nbd_init_request(struct blk_mq_tag_set *set, struct request *rq, 1630 unsigned int hctx_idx, unsigned int numa_node) 1631 { 1632 struct nbd_cmd *cmd = blk_mq_rq_to_pdu(rq); 1633 cmd->nbd = set->driver_data; 1634 cmd->flags = 0; 1635 mutex_init(&cmd->lock); 1636 return 0; 1637 } 1638 1639 static const struct blk_mq_ops nbd_mq_ops = { 1640 .queue_rq = nbd_queue_rq, 1641 .complete = nbd_complete_rq, 1642 .init_request = nbd_init_request, 1643 .timeout = nbd_xmit_timeout, 1644 }; 1645 1646 static int nbd_dev_add(int index) 1647 { 1648 struct nbd_device *nbd; 1649 struct gendisk *disk; 1650 struct request_queue *q; 1651 int err = -ENOMEM; 1652 1653 nbd = kzalloc(sizeof(struct nbd_device), GFP_KERNEL); 1654 if (!nbd) 1655 goto out; 1656 1657 disk = alloc_disk(1 << part_shift); 1658 if (!disk) 1659 goto out_free_nbd; 1660 1661 if (index >= 0) { 1662 err = idr_alloc(&nbd_index_idr, nbd, index, index + 1, 1663 GFP_KERNEL); 1664 if (err == -ENOSPC) 1665 err = -EEXIST; 1666 } else { 1667 err = idr_alloc(&nbd_index_idr, nbd, 0, 0, GFP_KERNEL); 1668 if (err >= 0) 1669 index = err; 1670 } 1671 if (err < 0) 1672 goto out_free_disk; 1673 1674 nbd->index = index; 1675 nbd->disk = disk; 1676 nbd->tag_set.ops = &nbd_mq_ops; 1677 nbd->tag_set.nr_hw_queues = 1; 1678 nbd->tag_set.queue_depth = 128; 1679 nbd->tag_set.numa_node = NUMA_NO_NODE; 1680 

static int nbd_dev_add(int index)
{
	struct nbd_device *nbd;
	struct gendisk *disk;
	struct request_queue *q;
	int err = -ENOMEM;

	nbd = kzalloc(sizeof(struct nbd_device), GFP_KERNEL);
	if (!nbd)
		goto out;

	disk = alloc_disk(1 << part_shift);
	if (!disk)
		goto out_free_nbd;

	if (index >= 0) {
		err = idr_alloc(&nbd_index_idr, nbd, index, index + 1,
				GFP_KERNEL);
		if (err == -ENOSPC)
			err = -EEXIST;
	} else {
		err = idr_alloc(&nbd_index_idr, nbd, 0, 0, GFP_KERNEL);
		if (err >= 0)
			index = err;
	}
	if (err < 0)
		goto out_free_disk;

	nbd->index = index;
	nbd->disk = disk;
	nbd->tag_set.ops = &nbd_mq_ops;
	nbd->tag_set.nr_hw_queues = 1;
	nbd->tag_set.queue_depth = 128;
	nbd->tag_set.numa_node = NUMA_NO_NODE;
	nbd->tag_set.cmd_size = sizeof(struct nbd_cmd);
	nbd->tag_set.flags = BLK_MQ_F_SHOULD_MERGE |
		BLK_MQ_F_BLOCKING;
	nbd->tag_set.driver_data = nbd;
	nbd->destroy_complete = NULL;

	err = blk_mq_alloc_tag_set(&nbd->tag_set);
	if (err)
		goto out_free_idr;

	q = blk_mq_init_queue(&nbd->tag_set);
	if (IS_ERR(q)) {
		err = PTR_ERR(q);
		goto out_free_tags;
	}
	disk->queue = q;

	/*
	 * Tell the block layer that we are not a rotational device
	 */
	blk_queue_flag_set(QUEUE_FLAG_NONROT, disk->queue);
	blk_queue_flag_clear(QUEUE_FLAG_ADD_RANDOM, disk->queue);
	disk->queue->limits.discard_granularity = 0;
	disk->queue->limits.discard_alignment = 0;
	blk_queue_max_discard_sectors(disk->queue, 0);
	blk_queue_max_segment_size(disk->queue, UINT_MAX);
	blk_queue_max_segments(disk->queue, USHRT_MAX);
	blk_queue_max_hw_sectors(disk->queue, 65536);
	disk->queue->limits.max_sectors = 256;

	mutex_init(&nbd->config_lock);
	refcount_set(&nbd->config_refs, 0);
	refcount_set(&nbd->refs, 1);
	INIT_LIST_HEAD(&nbd->list);
	disk->major = NBD_MAJOR;
	disk->first_minor = index << part_shift;
	disk->fops = &nbd_fops;
	disk->private_data = nbd;
	sprintf(disk->disk_name, "nbd%d", index);
	add_disk(disk);
	nbd_total_devices++;
	return index;

out_free_tags:
	blk_mq_free_tag_set(&nbd->tag_set);
out_free_idr:
	idr_remove(&nbd_index_idr, index);
out_free_disk:
	put_disk(disk);
out_free_nbd:
	kfree(nbd);
out:
	return err;
}

static int find_free_cb(int id, void *ptr, void *data)
{
	struct nbd_device *nbd = ptr;
	struct nbd_device **found = data;

	if (!refcount_read(&nbd->config_refs)) {
		*found = nbd;
		return 1;
	}
	return 0;
}

/* Netlink interface. */
static const struct nla_policy nbd_attr_policy[NBD_ATTR_MAX + 1] = {
	[NBD_ATTR_INDEX]		=	{ .type = NLA_U32 },
	[NBD_ATTR_SIZE_BYTES]		=	{ .type = NLA_U64 },
	[NBD_ATTR_BLOCK_SIZE_BYTES]	=	{ .type = NLA_U64 },
	[NBD_ATTR_TIMEOUT]		=	{ .type = NLA_U64 },
	[NBD_ATTR_SERVER_FLAGS]		=	{ .type = NLA_U64 },
	[NBD_ATTR_CLIENT_FLAGS]		=	{ .type = NLA_U64 },
	[NBD_ATTR_SOCKETS]		=	{ .type = NLA_NESTED},
	[NBD_ATTR_DEAD_CONN_TIMEOUT]	=	{ .type = NLA_U64 },
	[NBD_ATTR_DEVICE_LIST]		=	{ .type = NLA_NESTED},
};

static const struct nla_policy nbd_sock_policy[NBD_SOCK_MAX + 1] = {
	[NBD_SOCK_FD]			=	{ .type = NLA_U32 },
};
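
/*
 * Sketch of the attribute layout the connect/reconfigure parsers
 * below expect for the socket list:
 *
 *	NBD_ATTR_SOCKETS
 *	  NBD_SOCK_ITEM
 *	    NBD_SOCK_FD	(u32 file descriptor)
 *	  NBD_SOCK_ITEM
 *	    NBD_SOCK_FD
 *	  ...
 */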

/* We don't use this right now since we don't parse the incoming list, but we
 * still want it here so userspace knows what to expect.
 */
static const struct nla_policy __attribute__((unused))
nbd_device_policy[NBD_DEVICE_ATTR_MAX + 1] = {
	[NBD_DEVICE_INDEX]		=	{ .type = NLA_U32 },
	[NBD_DEVICE_CONNECTED]		=	{ .type = NLA_U8 },
};

static int nbd_genl_size_set(struct genl_info *info, struct nbd_device *nbd)
{
	struct nbd_config *config = nbd->config;
	u64 bsize = config->blksize;
	u64 bytes = config->bytesize;

	if (info->attrs[NBD_ATTR_SIZE_BYTES])
		bytes = nla_get_u64(info->attrs[NBD_ATTR_SIZE_BYTES]);

	if (info->attrs[NBD_ATTR_BLOCK_SIZE_BYTES]) {
		bsize = nla_get_u64(info->attrs[NBD_ATTR_BLOCK_SIZE_BYTES]);
		if (!bsize)
			bsize = NBD_DEF_BLKSIZE;
		if (!nbd_is_valid_blksize(bsize)) {
			printk(KERN_ERR "Invalid block size %llu\n", bsize);
			return -EINVAL;
		}
	}

	if (bytes != config->bytesize || bsize != config->blksize)
		nbd_size_set(nbd, bsize, div64_u64(bytes, bsize));
	return 0;
}
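
/*
 * Worked example of the conversion above (illustrative values):
 * NBD_ATTR_SIZE_BYTES = 1073741824 with NBD_ATTR_BLOCK_SIZE_BYTES =
 * 4096 yields div64_u64(1073741824, 4096) = 262144 blocks.
 */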

static int nbd_genl_connect(struct sk_buff *skb, struct genl_info *info)
{
	DECLARE_COMPLETION_ONSTACK(destroy_complete);
	struct nbd_device *nbd = NULL;
	struct nbd_config *config;
	int index = -1;
	int ret;
	bool put_dev = false;

	if (!netlink_capable(skb, CAP_SYS_ADMIN))
		return -EPERM;

	if (info->attrs[NBD_ATTR_INDEX])
		index = nla_get_u32(info->attrs[NBD_ATTR_INDEX]);
	if (!info->attrs[NBD_ATTR_SOCKETS]) {
		printk(KERN_ERR "nbd: must specify at least one socket\n");
		return -EINVAL;
	}
	if (!info->attrs[NBD_ATTR_SIZE_BYTES]) {
		printk(KERN_ERR "nbd: must specify a size in bytes for the device\n");
		return -EINVAL;
	}
again:
	mutex_lock(&nbd_index_mutex);
	if (index == -1) {
		ret = idr_for_each(&nbd_index_idr, &find_free_cb, &nbd);
		if (ret == 0) {
			int new_index;
			new_index = nbd_dev_add(-1);
			if (new_index < 0) {
				mutex_unlock(&nbd_index_mutex);
				printk(KERN_ERR "nbd: failed to add new device\n");
				return new_index;
			}
			nbd = idr_find(&nbd_index_idr, new_index);
		}
	} else {
		nbd = idr_find(&nbd_index_idr, index);
		if (!nbd) {
			ret = nbd_dev_add(index);
			if (ret < 0) {
				mutex_unlock(&nbd_index_mutex);
				printk(KERN_ERR "nbd: failed to add new device\n");
				return ret;
			}
			nbd = idr_find(&nbd_index_idr, index);
		}
	}
	if (!nbd) {
		printk(KERN_ERR "nbd: couldn't find device at index %d\n",
		       index);
		mutex_unlock(&nbd_index_mutex);
		return -EINVAL;
	}

	if (test_bit(NBD_DESTROY_ON_DISCONNECT, &nbd->flags) &&
	    test_bit(NBD_DISCONNECT_REQUESTED, &nbd->flags)) {
		nbd->destroy_complete = &destroy_complete;
		mutex_unlock(&nbd_index_mutex);

		/* Wait until the nbd stuff is totally destroyed */
		wait_for_completion(&destroy_complete);
		goto again;
	}

	if (!refcount_inc_not_zero(&nbd->refs)) {
		mutex_unlock(&nbd_index_mutex);
		if (index == -1)
			goto again;
		printk(KERN_ERR "nbd: device at index %d is going down\n",
		       index);
		return -EINVAL;
	}
	mutex_unlock(&nbd_index_mutex);

	mutex_lock(&nbd->config_lock);
	if (refcount_read(&nbd->config_refs)) {
		mutex_unlock(&nbd->config_lock);
		nbd_put(nbd);
		if (index == -1)
			goto again;
		printk(KERN_ERR "nbd: nbd%d already in use\n", index);
		return -EBUSY;
	}
	if (WARN_ON(nbd->config)) {
		mutex_unlock(&nbd->config_lock);
		nbd_put(nbd);
		return -EINVAL;
	}
	config = nbd->config = nbd_alloc_config();
	if (!nbd->config) {
		mutex_unlock(&nbd->config_lock);
		nbd_put(nbd);
		printk(KERN_ERR "nbd: couldn't allocate config\n");
		return -ENOMEM;
	}
	refcount_set(&nbd->config_refs, 1);
	set_bit(NBD_RT_BOUND, &config->runtime_flags);

	ret = nbd_genl_size_set(info, nbd);
	if (ret)
		goto out;

	if (info->attrs[NBD_ATTR_TIMEOUT])
		nbd_set_cmd_timeout(nbd,
				    nla_get_u64(info->attrs[NBD_ATTR_TIMEOUT]));
	if (info->attrs[NBD_ATTR_DEAD_CONN_TIMEOUT]) {
		config->dead_conn_timeout =
			nla_get_u64(info->attrs[NBD_ATTR_DEAD_CONN_TIMEOUT]);
		config->dead_conn_timeout *= HZ;
	}
	if (info->attrs[NBD_ATTR_SERVER_FLAGS])
		config->flags =
			nla_get_u64(info->attrs[NBD_ATTR_SERVER_FLAGS]);
	if (info->attrs[NBD_ATTR_CLIENT_FLAGS]) {
		u64 flags = nla_get_u64(info->attrs[NBD_ATTR_CLIENT_FLAGS]);
		if (flags & NBD_CFLAG_DESTROY_ON_DISCONNECT) {
			set_bit(NBD_RT_DESTROY_ON_DISCONNECT,
				&config->runtime_flags);
			set_bit(NBD_DESTROY_ON_DISCONNECT, &nbd->flags);
			put_dev = true;
		} else {
			clear_bit(NBD_DESTROY_ON_DISCONNECT, &nbd->flags);
		}
		if (flags & NBD_CFLAG_DISCONNECT_ON_CLOSE) {
			set_bit(NBD_RT_DISCONNECT_ON_CLOSE,
				&config->runtime_flags);
		}
	}

	if (info->attrs[NBD_ATTR_SOCKETS]) {
		struct nlattr *attr;
		int rem, fd;

		nla_for_each_nested(attr, info->attrs[NBD_ATTR_SOCKETS],
				    rem) {
			struct nlattr *socks[NBD_SOCK_MAX+1];

			if (nla_type(attr) != NBD_SOCK_ITEM) {
				printk(KERN_ERR "nbd: socks must be embedded in a SOCK_ITEM attr\n");
				ret = -EINVAL;
				goto out;
			}
			ret = nla_parse_nested_deprecated(socks, NBD_SOCK_MAX,
							  attr,
							  nbd_sock_policy,
							  info->extack);
			if (ret != 0) {
				printk(KERN_ERR "nbd: error processing sock list\n");
				ret = -EINVAL;
				goto out;
			}
			if (!socks[NBD_SOCK_FD])
				continue;
			fd = (int)nla_get_u32(socks[NBD_SOCK_FD]);
			ret = nbd_add_socket(nbd, fd, true);
			if (ret)
				goto out;
		}
	}
	ret = nbd_start_device(nbd);
out:
	mutex_unlock(&nbd->config_lock);
	if (!ret) {
		set_bit(NBD_RT_HAS_CONFIG_REF, &config->runtime_flags);
		refcount_inc(&nbd->config_refs);
		nbd_connect_reply(info, nbd->index);
	}
	nbd_config_put(nbd);
	if (put_dev)
		nbd_put(nbd);
	return ret;
}
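
/*
 * A rough userspace sketch of driving NBD_CMD_CONNECT, assuming
 * libnl-3 is used (illustrative only; error handling and cleanup
 * omitted, "sock_fd" is a connected socket and "size" the device
 * size in bytes):
 *
 *	struct nl_sock *s = nl_socket_alloc();
 *	genl_connect(s);
 *	int fam = genl_ctrl_resolve(s, NBD_GENL_FAMILY_NAME);
 *
 *	struct nl_msg *msg = nlmsg_alloc();
 *	genlmsg_put(msg, NL_AUTO_PORT, NL_AUTO_SEQ, fam, 0, 0,
 *		    NBD_CMD_CONNECT, NBD_GENL_VERSION);
 *	nla_put_u64(msg, NBD_ATTR_SIZE_BYTES, size);
 *
 *	struct nlattr *socks = nla_nest_start(msg, NBD_ATTR_SOCKETS);
 *	struct nlattr *item = nla_nest_start(msg, NBD_SOCK_ITEM);
 *	nla_put_u32(msg, NBD_SOCK_FD, sock_fd);
 *	nla_nest_end(msg, item);
 *	nla_nest_end(msg, socks);
 *
 *	nl_send_auto(s, msg);
 */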

static void nbd_disconnect_and_put(struct nbd_device *nbd)
{
	mutex_lock(&nbd->config_lock);
	nbd_disconnect(nbd);
	nbd_clear_sock(nbd);
	mutex_unlock(&nbd->config_lock);
	/*
	 * Make sure recv thread has finished, so it does not drop the last
	 * config ref and try to destroy the workqueue from inside the work
	 * queue.
	 */
	flush_workqueue(nbd->recv_workq);
	if (test_and_clear_bit(NBD_RT_HAS_CONFIG_REF,
			       &nbd->config->runtime_flags))
		nbd_config_put(nbd);
}

static int nbd_genl_disconnect(struct sk_buff *skb, struct genl_info *info)
{
	struct nbd_device *nbd;
	int index;

	if (!netlink_capable(skb, CAP_SYS_ADMIN))
		return -EPERM;

	if (!info->attrs[NBD_ATTR_INDEX]) {
		printk(KERN_ERR "nbd: must specify an index to disconnect\n");
		return -EINVAL;
	}
	index = nla_get_u32(info->attrs[NBD_ATTR_INDEX]);
	mutex_lock(&nbd_index_mutex);
	nbd = idr_find(&nbd_index_idr, index);
	if (!nbd) {
		mutex_unlock(&nbd_index_mutex);
		printk(KERN_ERR "nbd: couldn't find device at index %d\n",
		       index);
		return -EINVAL;
	}
	if (!refcount_inc_not_zero(&nbd->refs)) {
		mutex_unlock(&nbd_index_mutex);
		printk(KERN_ERR "nbd: device at index %d is going down\n",
		       index);
		return -EINVAL;
	}
	mutex_unlock(&nbd_index_mutex);
	if (!refcount_inc_not_zero(&nbd->config_refs)) {
		nbd_put(nbd);
		return 0;
	}
	nbd_disconnect_and_put(nbd);
	nbd_config_put(nbd);
	nbd_put(nbd);
	return 0;
}
		if (flags & NBD_CFLAG_DISCONNECT_ON_CLOSE) {
			set_bit(NBD_RT_DISCONNECT_ON_CLOSE,
				&config->runtime_flags);
		} else {
			clear_bit(NBD_RT_DISCONNECT_ON_CLOSE,
				  &config->runtime_flags);
		}
	}

	if (info->attrs[NBD_ATTR_SOCKETS]) {
		struct nlattr *attr;
		int rem, fd;

		nla_for_each_nested(attr, info->attrs[NBD_ATTR_SOCKETS],
				    rem) {
			struct nlattr *socks[NBD_SOCK_MAX+1];

			if (nla_type(attr) != NBD_SOCK_ITEM) {
				printk(KERN_ERR "nbd: socks must be embedded in a SOCK_ITEM attr\n");
				ret = -EINVAL;
				goto out;
			}
			ret = nla_parse_nested_deprecated(socks, NBD_SOCK_MAX,
							  attr,
							  nbd_sock_policy,
							  info->extack);
			if (ret != 0) {
				printk(KERN_ERR "nbd: error processing sock list\n");
				ret = -EINVAL;
				goto out;
			}
			if (!socks[NBD_SOCK_FD])
				continue;
			fd = (int)nla_get_u32(socks[NBD_SOCK_FD]);
			ret = nbd_reconnect_socket(nbd, fd);
			if (ret) {
				if (ret == -ENOSPC)
					ret = 0;
				goto out;
			}
			dev_info(nbd_to_dev(nbd), "reconnected socket\n");
		}
	}
out:
	mutex_unlock(&nbd->config_lock);
	nbd_config_put(nbd);
	nbd_put(nbd);
	if (put_dev)
		nbd_put(nbd);
	return ret;
}

static const struct genl_ops nbd_connect_genl_ops[] = {
	{
		.cmd		= NBD_CMD_CONNECT,
		.validate	= GENL_DONT_VALIDATE_STRICT | GENL_DONT_VALIDATE_DUMP,
		.doit		= nbd_genl_connect,
	},
	{
		.cmd		= NBD_CMD_DISCONNECT,
		.validate	= GENL_DONT_VALIDATE_STRICT | GENL_DONT_VALIDATE_DUMP,
		.doit		= nbd_genl_disconnect,
	},
	{
		.cmd		= NBD_CMD_RECONFIGURE,
		.validate	= GENL_DONT_VALIDATE_STRICT | GENL_DONT_VALIDATE_DUMP,
		.doit		= nbd_genl_reconfigure,
	},
	{
		.cmd		= NBD_CMD_STATUS,
		.validate	= GENL_DONT_VALIDATE_STRICT | GENL_DONT_VALIDATE_DUMP,
		.doit		= nbd_genl_status,
	},
};

static const struct genl_multicast_group nbd_mcast_grps[] = {
	{ .name = NBD_GENL_MCAST_GROUP_NAME, },
};

static struct genl_family nbd_genl_family __ro_after_init = {
	.hdrsize	= 0,
	.name		= NBD_GENL_FAMILY_NAME,
	.version	= NBD_GENL_VERSION,
	.module		= THIS_MODULE,
	.ops		= nbd_connect_genl_ops,
	.n_ops		= ARRAY_SIZE(nbd_connect_genl_ops),
	.maxattr	= NBD_ATTR_MAX,
	.policy		= nbd_attr_policy,
	.mcgrps		= nbd_mcast_grps,
	.n_mcgrps	= ARRAY_SIZE(nbd_mcast_grps),
};
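
/*
 * Userspace reaches these ops by resolving the family name.  As a rough
 * sketch (not part of the driver; assuming the usual libnl-genl-3 API
 * and eliding all error handling), querying NBD_CMD_STATUS for device
 * index 0 could look like:
 *
 *	#include <netlink/genl/genl.h>
 *	#include <netlink/genl/ctrl.h>
 *	#include <linux/nbd-netlink.h>
 *
 *	struct nl_sock *sk = nl_socket_alloc();
 *	genl_connect(sk);
 *	int family = genl_ctrl_resolve(sk, NBD_GENL_FAMILY_NAME);
 *	struct nl_msg *msg = nlmsg_alloc();
 *	genlmsg_put(msg, NL_AUTO_PORT, NL_AUTO_SEQ, family, 0, 0,
 *		    NBD_CMD_STATUS, NBD_GENL_VERSION);
 *	nla_put_u32(msg, NBD_ATTR_INDEX, 0);
 *	nl_send_auto(sk, msg);
 *	nl_recvmsgs_default(sk);	// reply carries NBD_ATTR_DEVICE_LIST
 */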

static int populate_nbd_status(struct nbd_device *nbd, struct sk_buff *reply)
{
	struct nlattr *dev_opt;
	u8 connected = 0;
	int ret;

	/* This is a little racy, but for status it's ok.  The reason we
	 * don't take a ref here is that we can't take one in the
	 * index == -1 case, since we would have to do the put under
	 * nbd_index_mutex, which could deadlock if we are configured to
	 * remove ourselves once we're disconnected.
	 */
	if (refcount_read(&nbd->config_refs))
		connected = 1;
	dev_opt = nla_nest_start_noflag(reply, NBD_DEVICE_ITEM);
	if (!dev_opt)
		return -EMSGSIZE;
	ret = nla_put_u32(reply, NBD_DEVICE_INDEX, nbd->index);
	if (ret)
		return -EMSGSIZE;
	ret = nla_put_u8(reply, NBD_DEVICE_CONNECTED,
			 connected);
	if (ret)
		return -EMSGSIZE;
	nla_nest_end(reply, dev_opt);
	return 0;
}

static int status_cb(int id, void *ptr, void *data)
{
	struct nbd_device *nbd = ptr;

	return populate_nbd_status(nbd, (struct sk_buff *)data);
}

static int nbd_genl_status(struct sk_buff *skb, struct genl_info *info)
{
	struct nlattr *dev_list;
	struct sk_buff *reply;
	void *reply_head;
	size_t msg_size;
	int index = -1;
	int ret = -ENOMEM;

	if (info->attrs[NBD_ATTR_INDEX])
		index = nla_get_u32(info->attrs[NBD_ATTR_INDEX]);

	mutex_lock(&nbd_index_mutex);

	msg_size = nla_total_size(nla_attr_size(sizeof(u32)) +
				  nla_attr_size(sizeof(u8)));
	msg_size *= (index == -1) ? nbd_total_devices : 1;

	reply = genlmsg_new(msg_size, GFP_KERNEL);
	if (!reply)
		goto out;
	reply_head = genlmsg_put_reply(reply, info, &nbd_genl_family, 0,
				       NBD_CMD_STATUS);
	if (!reply_head) {
		nlmsg_free(reply);
		goto out;
	}

	dev_list = nla_nest_start_noflag(reply, NBD_ATTR_DEVICE_LIST);
	if (index == -1) {
		ret = idr_for_each(&nbd_index_idr, &status_cb, reply);
		if (ret) {
			nlmsg_free(reply);
			goto out;
		}
	} else {
		struct nbd_device *nbd;

		nbd = idr_find(&nbd_index_idr, index);
		if (nbd) {
			ret = populate_nbd_status(nbd, reply);
			if (ret) {
				nlmsg_free(reply);
				goto out;
			}
		}
	}
	nla_nest_end(reply, dev_list);
	genlmsg_end(reply, reply_head);
	ret = genlmsg_reply(reply, info);
out:
	mutex_unlock(&nbd_index_mutex);
	return ret;
}

static void nbd_connect_reply(struct genl_info *info, int index)
{
	struct sk_buff *skb;
	void *msg_head;
	int ret;

	skb = genlmsg_new(nla_total_size(sizeof(u32)), GFP_KERNEL);
	if (!skb)
		return;
	msg_head = genlmsg_put_reply(skb, info, &nbd_genl_family, 0,
				     NBD_CMD_CONNECT);
	if (!msg_head) {
		nlmsg_free(skb);
		return;
	}
	ret = nla_put_u32(skb, NBD_ATTR_INDEX, index);
	if (ret) {
		nlmsg_free(skb);
		return;
	}
	genlmsg_end(skb, msg_head);
	genlmsg_reply(skb, info);
}

static void nbd_mcast_index(int index)
{
	struct sk_buff *skb;
	void *msg_head;
	int ret;

	skb = genlmsg_new(nla_total_size(sizeof(u32)), GFP_KERNEL);
	if (!skb)
		return;
	msg_head = genlmsg_put(skb, 0, 0, &nbd_genl_family, 0,
			       NBD_CMD_LINK_DEAD);
	if (!msg_head) {
		nlmsg_free(skb);
		return;
	}
	ret = nla_put_u32(skb, NBD_ATTR_INDEX, index);
	if (ret) {
		nlmsg_free(skb);
		return;
	}
	genlmsg_end(skb, msg_head);
	genlmsg_multicast(&nbd_genl_family, skb, 0, 0, GFP_KERNEL);
}

static void nbd_dead_link_work(struct work_struct *work)
{
	struct link_dead_args *args = container_of(work, struct link_dead_args,
						   work);

	nbd_mcast_index(args->index);
	kfree(args);
}
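
/*
 * The NBD_CMD_LINK_DEAD multicast above is how userspace learns that a
 * connection went down.  A minimal listener sketch (again assuming the
 * usual libnl-genl-3 API, error handling elided) might be:
 *
 *	struct nl_sock *sk = nl_socket_alloc();
 *	genl_connect(sk);
 *	int grp = genl_ctrl_resolve_grp(sk, NBD_GENL_FAMILY_NAME,
 *					NBD_GENL_MCAST_GROUP_NAME);
 *	nl_socket_add_membership(sk, grp);
 *	nl_socket_disable_seq_check(sk);
 *	nl_recvmsgs_default(sk);	// each event carries NBD_ATTR_INDEX
 */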

static int __init nbd_init(void)
{
	int i;

	/* The request header has a fixed on-the-wire size. */
	BUILD_BUG_ON(sizeof(struct nbd_request) != 28);

	if (max_part < 0) {
		printk(KERN_ERR "nbd: max_part must be >= 0\n");
		return -EINVAL;
	}

	part_shift = 0;
	if (max_part > 0) {
		part_shift = fls(max_part);

		/*
		 * Adjust max_part according to part_shift as it is exported
		 * to user space, so that users can know the maximum number
		 * of partitions the kernel is able to manage per device.
		 *
		 * Note that -1 is required because partition 0 is reserved
		 * for the whole disk.  For example, max_part=16 gives
		 * part_shift = fls(16) = 5 and max_part = 2^5 - 1 = 31.
		 */
		max_part = (1UL << part_shift) - 1;
	}

	if ((1UL << part_shift) > DISK_MAX_PARTS)
		return -EINVAL;

	if (nbds_max > 1UL << (MINORBITS - part_shift))
		return -EINVAL;

	if (register_blkdev(NBD_MAJOR, "nbd"))
		return -EIO;

	if (genl_register_family(&nbd_genl_family)) {
		unregister_blkdev(NBD_MAJOR, "nbd");
		return -EINVAL;
	}
	nbd_dbg_init();

	mutex_lock(&nbd_index_mutex);
	for (i = 0; i < nbds_max; i++)
		nbd_dev_add(i);
	mutex_unlock(&nbd_index_mutex);
	return 0;
}

static int nbd_exit_cb(int id, void *ptr, void *data)
{
	struct list_head *list = (struct list_head *)data;
	struct nbd_device *nbd = ptr;

	list_add_tail(&nbd->list, list);
	return 0;
}

static void __exit nbd_cleanup(void)
{
	struct nbd_device *nbd;
	LIST_HEAD(del_list);

	nbd_dbg_close();

	mutex_lock(&nbd_index_mutex);
	idr_for_each(&nbd_index_idr, &nbd_exit_cb, &del_list);
	mutex_unlock(&nbd_index_mutex);

	while (!list_empty(&del_list)) {
		nbd = list_first_entry(&del_list, struct nbd_device, list);
		list_del_init(&nbd->list);
		if (refcount_read(&nbd->refs) != 1)
			printk(KERN_ERR "nbd: possibly leaking a device\n");
		nbd_put(nbd);
	}

	idr_destroy(&nbd_index_idr);
	genl_unregister_family(&nbd_genl_family);
	unregister_blkdev(NBD_MAJOR, "nbd");
}

module_init(nbd_init);
module_exit(nbd_cleanup);

MODULE_DESCRIPTION("Network Block Device");
MODULE_LICENSE("GPL");

module_param(nbds_max, int, 0444);
MODULE_PARM_DESC(nbds_max, "number of network block devices to initialize (default: 16)");
module_param(max_part, int, 0444);
MODULE_PARM_DESC(max_part, "number of partitions per device (default: 16)");
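
/*
 * Illustrative usage only (not driver code): with the parameters above,
 * a typical session with the stock nbd-client tool might look like
 *
 *	# modprobe nbd nbds_max=4 max_part=8
 *	# nbd-client my-server 10809 /dev/nbd0
 *	# mount /dev/nbd0 /mnt
 *
 * where "my-server" and the port are placeholders for a running NBD
 * server.  Recent nbd-client versions can drive this module through
 * the netlink interface registered above.
 */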