// SPDX-License-Identifier: GPL-2.0-or-later
/*
 * Network block device - make block devices work over TCP
 *
 * Note that you can not swap over this thing, yet. Seems to work but
 * deadlocks sometimes - you can not swap over TCP in general.
 *
 * Copyright 1997-2000, 2008 Pavel Machek <pavel@ucw.cz>
 * Parts copyright 2001 Steven Whitehouse <steve@chygwyn.com>
 *
 * (part of code stolen from loop.c)
 */

#include <linux/major.h>

#include <linux/blkdev.h>
#include <linux/module.h>
#include <linux/init.h>
#include <linux/sched.h>
#include <linux/sched/mm.h>
#include <linux/fs.h>
#include <linux/bio.h>
#include <linux/stat.h>
#include <linux/errno.h>
#include <linux/file.h>
#include <linux/ioctl.h>
#include <linux/mutex.h>
#include <linux/compiler.h>
#include <linux/completion.h>
#include <linux/err.h>
#include <linux/kernel.h>
#include <linux/slab.h>
#include <net/sock.h>
#include <linux/net.h>
#include <linux/kthread.h>
#include <linux/types.h>
#include <linux/debugfs.h>
#include <linux/blk-mq.h>

#include <linux/uaccess.h>
#include <asm/types.h>

#include <linux/nbd.h>
#include <linux/nbd-netlink.h>
#include <net/genetlink.h>

#define CREATE_TRACE_POINTS
#include <trace/events/nbd.h>

static DEFINE_IDR(nbd_index_idr);
static DEFINE_MUTEX(nbd_index_mutex);
static int nbd_total_devices = 0;

struct nbd_sock {
	struct socket *sock;
	struct mutex tx_lock;
	struct request *pending;
	int sent;
	bool dead;
	int fallback_index;
	int cookie;
};

struct recv_thread_args {
	struct work_struct work;
	struct nbd_device *nbd;
	int index;
};

struct link_dead_args {
	struct work_struct work;
	int index;
};

#define NBD_RT_TIMEDOUT			0
#define NBD_RT_DISCONNECT_REQUESTED	1
#define NBD_RT_DISCONNECTED		2
#define NBD_RT_HAS_PID_FILE		3
#define NBD_RT_HAS_CONFIG_REF		4
#define NBD_RT_BOUND			5
#define NBD_RT_DESTROY_ON_DISCONNECT	6
#define NBD_RT_DISCONNECT_ON_CLOSE	7

#define NBD_DESTROY_ON_DISCONNECT	0
#define NBD_DISCONNECT_REQUESTED	1

struct nbd_config {
	u32 flags;
	unsigned long runtime_flags;
	u64 dead_conn_timeout;

	struct nbd_sock **socks;
	int num_connections;
	atomic_t live_connections;
	wait_queue_head_t conn_wait;

	atomic_t recv_threads;
	wait_queue_head_t recv_wq;
	loff_t blksize;
	loff_t bytesize;
#if IS_ENABLED(CONFIG_DEBUG_FS)
	struct dentry *dbg_dir;
#endif
};

struct nbd_device {
	struct blk_mq_tag_set tag_set;

	int index;
	refcount_t config_refs;
	refcount_t refs;
	struct nbd_config *config;
	struct mutex config_lock;
	struct gendisk *disk;
	struct workqueue_struct *recv_workq;

	struct list_head list;
	struct task_struct *task_recv;
	struct task_struct *task_setup;

	struct completion *destroy_complete;
	unsigned long flags;
};

#define NBD_CMD_REQUEUED	1

struct nbd_cmd {
	struct nbd_device *nbd;
	struct mutex lock;
	int index;
	int cookie;
	int retries;
	blk_status_t status;
	unsigned long flags;
	u32 cmd_cookie;
};

#if IS_ENABLED(CONFIG_DEBUG_FS)
static struct dentry *nbd_dbg_dir;
#endif

#define nbd_name(nbd) ((nbd)->disk->disk_name)

#define NBD_MAGIC 0x68797548

#define NBD_DEF_BLKSIZE 1024

static unsigned int nbds_max = 16;
static int max_part = 16;
static int part_shift;
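
/*
 * Both nbds_max and max_part are module parameters, e.g.:
 *
 *	modprobe nbd nbds_max=4 max_part=8
 *
 * part_shift is derived from max_part at init time and determines how
 * many minor numbers (and thus partitions) each device reserves.
 */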

static int nbd_dev_dbg_init(struct nbd_device *nbd);
static void nbd_dev_dbg_close(struct nbd_device *nbd);
static void nbd_config_put(struct nbd_device *nbd);
static void nbd_connect_reply(struct genl_info *info, int index);
static int nbd_genl_status(struct sk_buff *skb, struct genl_info *info);
static void nbd_dead_link_work(struct work_struct *work);
static void nbd_disconnect_and_put(struct nbd_device *nbd);

static inline struct device *nbd_to_dev(struct nbd_device *nbd)
{
	return disk_to_dev(nbd->disk);
}

static void nbd_requeue_cmd(struct nbd_cmd *cmd)
{
	struct request *req = blk_mq_rq_from_pdu(cmd);

	if (!test_and_set_bit(NBD_CMD_REQUEUED, &cmd->flags))
		blk_mq_requeue_request(req, true);
}

#define NBD_COOKIE_BITS 32

static u64 nbd_cmd_handle(struct nbd_cmd *cmd)
{
	struct request *req = blk_mq_rq_from_pdu(cmd);
	u32 tag = blk_mq_unique_tag(req);
	u64 cookie = cmd->cmd_cookie;

	return (cookie << NBD_COOKIE_BITS) | tag;
}

static u32 nbd_handle_to_tag(u64 handle)
{
	return (u32)handle;
}

static u32 nbd_handle_to_cookie(u64 handle)
{
	return (u32)(handle >> NBD_COOKIE_BITS);
}

static const char *nbdcmd_to_ascii(int cmd)
{
	switch (cmd) {
	case NBD_CMD_READ: return "read";
	case NBD_CMD_WRITE: return "write";
	case NBD_CMD_DISC: return "disconnect";
	case NBD_CMD_FLUSH: return "flush";
	case NBD_CMD_TRIM: return "trim/discard";
	}
	return "invalid";
}

static ssize_t pid_show(struct device *dev,
			struct device_attribute *attr, char *buf)
{
	struct gendisk *disk = dev_to_disk(dev);
	struct nbd_device *nbd = (struct nbd_device *)disk->private_data;

	return sprintf(buf, "%d\n", task_pid_nr(nbd->task_recv));
}

static const struct device_attribute pid_attr = {
	.attr = { .name = "pid", .mode = 0444},
	.show = pid_show,
};

static void nbd_dev_remove(struct nbd_device *nbd)
{
	struct gendisk *disk = nbd->disk;
	struct request_queue *q;

	if (disk) {
		q = disk->queue;
		del_gendisk(disk);
		blk_cleanup_queue(q);
		blk_mq_free_tag_set(&nbd->tag_set);
		disk->private_data = NULL;
		put_disk(disk);
	}

	/*
	 * Do this last, just before the nbd is freed, to make sure that
	 * the disk and the related kobject are fully removed, avoiding a
	 * duplicate creation of the same one.
	 */
	if (test_bit(NBD_DESTROY_ON_DISCONNECT, &nbd->flags) && nbd->destroy_complete)
		complete(nbd->destroy_complete);

	kfree(nbd);
}

static void nbd_put(struct nbd_device *nbd)
{
	if (refcount_dec_and_mutex_lock(&nbd->refs,
					&nbd_index_mutex)) {
		idr_remove(&nbd_index_idr, nbd->index);
		nbd_dev_remove(nbd);
		mutex_unlock(&nbd_index_mutex);
	}
}

static int nbd_disconnected(struct nbd_config *config)
{
	return test_bit(NBD_RT_DISCONNECTED, &config->runtime_flags) ||
		test_bit(NBD_RT_DISCONNECT_REQUESTED, &config->runtime_flags);
}

static void nbd_mark_nsock_dead(struct nbd_device *nbd, struct nbd_sock *nsock,
				int notify)
{
	if (!nsock->dead && notify && !nbd_disconnected(nbd->config)) {
		struct link_dead_args *args;
		args = kmalloc(sizeof(struct link_dead_args), GFP_NOIO);
		if (args) {
			INIT_WORK(&args->work, nbd_dead_link_work);
			args->index = nbd->index;
			queue_work(system_wq, &args->work);
		}
	}
	if (!nsock->dead) {
		kernel_sock_shutdown(nsock->sock, SHUT_RDWR);
		if (atomic_dec_return(&nbd->config->live_connections) == 0) {
			if (test_and_clear_bit(NBD_RT_DISCONNECT_REQUESTED,
					       &nbd->config->runtime_flags)) {
				set_bit(NBD_RT_DISCONNECTED,
					&nbd->config->runtime_flags);
				dev_info(nbd_to_dev(nbd),
					"Disconnected due to user request.\n");
			}
		}
	}
	nsock->dead = true;
	nsock->pending = NULL;
	nsock->sent = 0;
}

static void nbd_size_clear(struct nbd_device *nbd)
{
	if (nbd->config->bytesize) {
		set_capacity(nbd->disk, 0);
		kobject_uevent(&nbd_to_dev(nbd)->kobj, KOBJ_CHANGE);
	}
}

static void nbd_size_update(struct nbd_device *nbd)
{
	struct nbd_config *config = nbd->config;
	struct block_device *bdev = bdget_disk(nbd->disk, 0);

	if (config->flags & NBD_FLAG_SEND_TRIM) {
		nbd->disk->queue->limits.discard_granularity = config->blksize;
		nbd->disk->queue->limits.discard_alignment = config->blksize;
		blk_queue_max_discard_sectors(nbd->disk->queue, UINT_MAX);
	}
	blk_queue_logical_block_size(nbd->disk->queue, config->blksize);
	blk_queue_physical_block_size(nbd->disk->queue, config->blksize);
	set_capacity(nbd->disk, config->bytesize >> 9);
	if (bdev) {
		if (bdev->bd_disk) {
			bd_set_size(bdev, config->bytesize);
			set_blocksize(bdev, config->blksize);
		} else
			bdev->bd_invalidated = 1;
		bdput(bdev);
	}
	kobject_uevent(&nbd_to_dev(nbd)->kobj, KOBJ_CHANGE);
}

static void nbd_size_set(struct nbd_device *nbd, loff_t blocksize,
			 loff_t nr_blocks)
{
	struct nbd_config *config = nbd->config;
	config->blksize = blocksize;
	config->bytesize = blocksize * nr_blocks;
	if (nbd->task_recv != NULL)
		nbd_size_update(nbd);
}

static void nbd_complete_rq(struct request *req)
{
	struct nbd_cmd *cmd = blk_mq_rq_to_pdu(req);

	dev_dbg(nbd_to_dev(cmd->nbd), "request %p: %s\n", req,
		cmd->status ?
"failed" : "done"); 339 340 blk_mq_end_request(req, cmd->status); 341 } 342 343 /* 344 * Forcibly shutdown the socket causing all listeners to error 345 */ 346 static void sock_shutdown(struct nbd_device *nbd) 347 { 348 struct nbd_config *config = nbd->config; 349 int i; 350 351 if (config->num_connections == 0) 352 return; 353 if (test_and_set_bit(NBD_RT_DISCONNECTED, &config->runtime_flags)) 354 return; 355 356 for (i = 0; i < config->num_connections; i++) { 357 struct nbd_sock *nsock = config->socks[i]; 358 mutex_lock(&nsock->tx_lock); 359 nbd_mark_nsock_dead(nbd, nsock, 0); 360 mutex_unlock(&nsock->tx_lock); 361 } 362 dev_warn(disk_to_dev(nbd->disk), "shutting down sockets\n"); 363 } 364 365 static u32 req_to_nbd_cmd_type(struct request *req) 366 { 367 switch (req_op(req)) { 368 case REQ_OP_DISCARD: 369 return NBD_CMD_TRIM; 370 case REQ_OP_FLUSH: 371 return NBD_CMD_FLUSH; 372 case REQ_OP_WRITE: 373 return NBD_CMD_WRITE; 374 case REQ_OP_READ: 375 return NBD_CMD_READ; 376 default: 377 return U32_MAX; 378 } 379 } 380 381 static enum blk_eh_timer_return nbd_xmit_timeout(struct request *req, 382 bool reserved) 383 { 384 struct nbd_cmd *cmd = blk_mq_rq_to_pdu(req); 385 struct nbd_device *nbd = cmd->nbd; 386 struct nbd_config *config; 387 388 if (!mutex_trylock(&cmd->lock)) 389 return BLK_EH_RESET_TIMER; 390 391 if (!refcount_inc_not_zero(&nbd->config_refs)) { 392 cmd->status = BLK_STS_TIMEOUT; 393 mutex_unlock(&cmd->lock); 394 goto done; 395 } 396 config = nbd->config; 397 398 if (config->num_connections > 1) { 399 dev_err_ratelimited(nbd_to_dev(nbd), 400 "Connection timed out, retrying (%d/%d alive)\n", 401 atomic_read(&config->live_connections), 402 config->num_connections); 403 /* 404 * Hooray we have more connections, requeue this IO, the submit 405 * path will put it on a real connection. 406 */ 407 if (config->socks && config->num_connections > 1) { 408 if (cmd->index < config->num_connections) { 409 struct nbd_sock *nsock = 410 config->socks[cmd->index]; 411 mutex_lock(&nsock->tx_lock); 412 /* We can have multiple outstanding requests, so 413 * we don't want to mark the nsock dead if we've 414 * already reconnected with a new socket, so 415 * only mark it dead if its the same socket we 416 * were sent out on. 417 */ 418 if (cmd->cookie == nsock->cookie) 419 nbd_mark_nsock_dead(nbd, nsock, 1); 420 mutex_unlock(&nsock->tx_lock); 421 } 422 mutex_unlock(&cmd->lock); 423 nbd_requeue_cmd(cmd); 424 nbd_config_put(nbd); 425 return BLK_EH_DONE; 426 } 427 } 428 429 if (!nbd->tag_set.timeout) { 430 /* 431 * Userspace sets timeout=0 to disable socket disconnection, 432 * so just warn and reset the timer. 433 */ 434 cmd->retries++; 435 dev_info(nbd_to_dev(nbd), "Possible stuck request %p: control (%s@%llu,%uB). Runtime %u seconds\n", 436 req, nbdcmd_to_ascii(req_to_nbd_cmd_type(req)), 437 (unsigned long long)blk_rq_pos(req) << 9, 438 blk_rq_bytes(req), (req->timeout / HZ) * cmd->retries); 439 440 mutex_unlock(&cmd->lock); 441 nbd_config_put(nbd); 442 return BLK_EH_RESET_TIMER; 443 } 444 445 dev_err_ratelimited(nbd_to_dev(nbd), "Connection timed out\n"); 446 set_bit(NBD_RT_TIMEDOUT, &config->runtime_flags); 447 cmd->status = BLK_STS_IOERR; 448 mutex_unlock(&cmd->lock); 449 sock_shutdown(nbd); 450 nbd_config_put(nbd); 451 done: 452 blk_mq_complete_request(req); 453 return BLK_EH_DONE; 454 } 455 456 /* 457 * Send or receive packet. 
 */
static int sock_xmit(struct nbd_device *nbd, int index, int send,
		     struct iov_iter *iter, int msg_flags, int *sent)
{
	struct nbd_config *config = nbd->config;
	struct socket *sock = config->socks[index]->sock;
	int result;
	struct msghdr msg;
	unsigned int noreclaim_flag;

	if (unlikely(!sock)) {
		dev_err_ratelimited(disk_to_dev(nbd->disk),
				    "Attempted %s on closed socket in sock_xmit\n",
				    (send ? "send" : "recv"));
		return -EINVAL;
	}

	msg.msg_iter = *iter;

	noreclaim_flag = memalloc_noreclaim_save();
	do {
		sock->sk->sk_allocation = GFP_NOIO | __GFP_MEMALLOC;
		msg.msg_name = NULL;
		msg.msg_namelen = 0;
		msg.msg_control = NULL;
		msg.msg_controllen = 0;
		msg.msg_flags = msg_flags | MSG_NOSIGNAL;

		if (send)
			result = sock_sendmsg(sock, &msg);
		else
			result = sock_recvmsg(sock, &msg, msg.msg_flags);

		if (result <= 0) {
			if (result == 0)
				result = -EPIPE; /* short read */
			break;
		}
		if (sent)
			*sent += result;
	} while (msg_data_left(&msg));

	memalloc_noreclaim_restore(noreclaim_flag);

	return result;
}

/*
 * Different settings for sk->sk_sndtimeo can result in different return values
 * if there is a signal pending when we enter sendmsg, because reasons?
 */
static inline int was_interrupted(int result)
{
	return result == -ERESTARTSYS || result == -EINTR;
}

/* always call with the tx_lock held */
static int nbd_send_cmd(struct nbd_device *nbd, struct nbd_cmd *cmd, int index)
{
	struct request *req = blk_mq_rq_from_pdu(cmd);
	struct nbd_config *config = nbd->config;
	struct nbd_sock *nsock = config->socks[index];
	int result;
	struct nbd_request request = {.magic = htonl(NBD_REQUEST_MAGIC)};
	struct kvec iov = {.iov_base = &request, .iov_len = sizeof(request)};
	struct iov_iter from;
	unsigned long size = blk_rq_bytes(req);
	struct bio *bio;
	u64 handle;
	u32 type;
	u32 nbd_cmd_flags = 0;
	int sent = nsock->sent, skip = 0;

	iov_iter_kvec(&from, WRITE, &iov, 1, sizeof(request));

	type = req_to_nbd_cmd_type(req);
	if (type == U32_MAX)
		return -EIO;

	if (rq_data_dir(req) == WRITE &&
	    (config->flags & NBD_FLAG_READ_ONLY)) {
		dev_err_ratelimited(disk_to_dev(nbd->disk),
				    "Write on read-only\n");
		return -EIO;
	}

	if (req->cmd_flags & REQ_FUA)
		nbd_cmd_flags |= NBD_CMD_FLAG_FUA;

	/* We did a partial send previously, and we at least sent the whole
	 * request struct, so just go and send the rest of the pages in the
	 * request.
	 */
	if (sent) {
		if (sent >= sizeof(request)) {
			skip = sent - sizeof(request);

			/* initialize handle for tracing purposes */
			handle = nbd_cmd_handle(cmd);

			goto send_pages;
		}
		iov_iter_advance(&from, sent);
	} else {
		cmd->cmd_cookie++;
	}
	cmd->index = index;
	cmd->cookie = nsock->cookie;
	cmd->retries = 0;
	request.type = htonl(type | nbd_cmd_flags);
	if (type != NBD_CMD_FLUSH) {
		request.from = cpu_to_be64((u64)blk_rq_pos(req) << 9);
		request.len = htonl(size);
	}
	handle = nbd_cmd_handle(cmd);
	memcpy(request.handle, &handle, sizeof(handle));

	trace_nbd_send_request(&request, nbd->index, blk_mq_rq_from_pdu(cmd));

	dev_dbg(nbd_to_dev(nbd), "request %p: sending control (%s@%llu,%uB)\n",
		req, nbdcmd_to_ascii(type),
		(unsigned long long)blk_rq_pos(req) << 9, blk_rq_bytes(req));
	result = sock_xmit(nbd, index, 1, &from,
			(type == NBD_CMD_WRITE) ? MSG_MORE : 0, &sent);
	trace_nbd_header_sent(req, handle);
	if (result <= 0) {
		if (was_interrupted(result)) {
			/* If we haven't sent anything we can just return BUSY,
			 * however if we have sent something we need to make
			 * sure we only allow this req to be sent until we are
			 * completely done.
			 */
			if (sent) {
				nsock->pending = req;
				nsock->sent = sent;
			}
			set_bit(NBD_CMD_REQUEUED, &cmd->flags);
			return BLK_STS_RESOURCE;
		}
		dev_err_ratelimited(disk_to_dev(nbd->disk),
			"Send control failed (result %d)\n", result);
		return -EAGAIN;
	}
send_pages:
	if (type != NBD_CMD_WRITE)
		goto out;

	bio = req->bio;
	while (bio) {
		struct bio *next = bio->bi_next;
		struct bvec_iter iter;
		struct bio_vec bvec;

		bio_for_each_segment(bvec, bio, iter) {
			bool is_last = !next && bio_iter_last(bvec, iter);
			int flags = is_last ? 0 : MSG_MORE;

			dev_dbg(nbd_to_dev(nbd), "request %p: sending %d bytes data\n",
				req, bvec.bv_len);
			iov_iter_bvec(&from, WRITE, &bvec, 1, bvec.bv_len);
			if (skip) {
				if (skip >= iov_iter_count(&from)) {
					skip -= iov_iter_count(&from);
					continue;
				}
				iov_iter_advance(&from, skip);
				skip = 0;
			}
			result = sock_xmit(nbd, index, 1, &from, flags, &sent);
			if (result <= 0) {
				if (was_interrupted(result)) {
					/* We've already sent the header, we
					 * have no choice but to set pending and
					 * return BUSY.
					 */
					nsock->pending = req;
					nsock->sent = sent;
					set_bit(NBD_CMD_REQUEUED, &cmd->flags);
					return BLK_STS_RESOURCE;
				}
				dev_err(disk_to_dev(nbd->disk),
					"Send data failed (result %d)\n",
					result);
				return -EAGAIN;
			}
			/*
			 * The completion might already have come in,
			 * so break for the last one instead of letting
			 * the iterator do it. This prevents use-after-free
			 * of the bio.
			 */
			if (is_last)
				break;
		}
		bio = next;
	}
out:
	trace_nbd_payload_sent(req, handle);
	nsock->pending = NULL;
	nsock->sent = 0;
	return 0;
}

/* NULL returned = something went wrong, inform userspace */
static struct nbd_cmd *nbd_read_stat(struct nbd_device *nbd, int index)
{
	struct nbd_config *config = nbd->config;
	int result;
	struct nbd_reply reply;
	struct nbd_cmd *cmd;
	struct request *req = NULL;
	u64 handle;
	u16 hwq;
	u32 tag;
	struct kvec iov = {.iov_base = &reply, .iov_len = sizeof(reply)};
	struct iov_iter to;
	int ret = 0;

	reply.magic = 0;
	iov_iter_kvec(&to, READ, &iov, 1, sizeof(reply));
	result = sock_xmit(nbd, index, 0, &to, MSG_WAITALL, NULL);
	if (result <= 0) {
		if (!nbd_disconnected(config))
			dev_err(disk_to_dev(nbd->disk),
				"Receive control failed (result %d)\n", result);
		return ERR_PTR(result);
	}

	if (ntohl(reply.magic) != NBD_REPLY_MAGIC) {
		dev_err(disk_to_dev(nbd->disk), "Wrong magic (0x%lx)\n",
				(unsigned long)ntohl(reply.magic));
		return ERR_PTR(-EPROTO);
	}

	memcpy(&handle, reply.handle, sizeof(handle));
	tag = nbd_handle_to_tag(handle);
	hwq = blk_mq_unique_tag_to_hwq(tag);
	if (hwq < nbd->tag_set.nr_hw_queues)
		req = blk_mq_tag_to_rq(nbd->tag_set.tags[hwq],
				       blk_mq_unique_tag_to_tag(tag));
	if (!req || !blk_mq_request_started(req)) {
		dev_err(disk_to_dev(nbd->disk), "Unexpected reply (%d) %p\n",
			tag, req);
		return ERR_PTR(-ENOENT);
	}
	trace_nbd_header_received(req, handle);
	cmd = blk_mq_rq_to_pdu(req);

	mutex_lock(&cmd->lock);
	if (cmd->cmd_cookie != nbd_handle_to_cookie(handle)) {
		dev_err(disk_to_dev(nbd->disk), "Double reply on req %p, cmd_cookie %u, handle cookie %u\n",
			req, cmd->cmd_cookie, nbd_handle_to_cookie(handle));
		ret = -ENOENT;
		goto out;
	}
	if (cmd->status != BLK_STS_OK) {
		dev_err(disk_to_dev(nbd->disk), "Command already handled %p\n",
			req);
		ret = -ENOENT;
		goto out;
	}
	if (test_bit(NBD_CMD_REQUEUED, &cmd->flags)) {
		dev_err(disk_to_dev(nbd->disk), "Raced with timeout on req %p\n",
			req);
		ret = -ENOENT;
		goto out;
	}
	if (ntohl(reply.error)) {
		dev_err(disk_to_dev(nbd->disk), "Other side returned error (%d)\n",
			ntohl(reply.error));
		cmd->status = BLK_STS_IOERR;
		goto out;
	}

	dev_dbg(nbd_to_dev(nbd), "request %p: got reply\n", req);
	if (rq_data_dir(req) != WRITE) {
		struct req_iterator iter;
		struct bio_vec bvec;

		rq_for_each_segment(bvec, req, iter) {
			iov_iter_bvec(&to, READ, &bvec, 1, bvec.bv_len);
			result = sock_xmit(nbd, index, 0, &to, MSG_WAITALL, NULL);
			if (result <= 0) {
				dev_err(disk_to_dev(nbd->disk), "Receive data failed (result %d)\n",
					result);
				/*
				 * If we've disconnected or we only have 1
				 * connection then we need to make sure we
				 * complete this request, otherwise error out
				 * and let the timeout stuff handle resubmitting
				 * this request onto another connection.
				 */
				if (nbd_disconnected(config) ||
				    config->num_connections <= 1) {
					cmd->status = BLK_STS_IOERR;
					goto out;
				}
				ret = -EIO;
				goto out;
			}
			dev_dbg(nbd_to_dev(nbd), "request %p: got %d bytes data\n",
				req, bvec.bv_len);
		}
	}
out:
	trace_nbd_payload_received(req, handle);
	mutex_unlock(&cmd->lock);
	return ret ?
		ERR_PTR(ret) : cmd;
}

static void recv_work(struct work_struct *work)
{
	struct recv_thread_args *args = container_of(work,
						     struct recv_thread_args,
						     work);
	struct nbd_device *nbd = args->nbd;
	struct nbd_config *config = nbd->config;
	struct nbd_cmd *cmd;

	while (1) {
		cmd = nbd_read_stat(nbd, args->index);
		if (IS_ERR(cmd)) {
			struct nbd_sock *nsock = config->socks[args->index];

			mutex_lock(&nsock->tx_lock);
			nbd_mark_nsock_dead(nbd, nsock, 1);
			mutex_unlock(&nsock->tx_lock);
			break;
		}

		blk_mq_complete_request(blk_mq_rq_from_pdu(cmd));
	}
	atomic_dec(&config->recv_threads);
	wake_up(&config->recv_wq);
	nbd_config_put(nbd);
	kfree(args);
}

static bool nbd_clear_req(struct request *req, void *data, bool reserved)
{
	struct nbd_cmd *cmd = blk_mq_rq_to_pdu(req);

	mutex_lock(&cmd->lock);
	cmd->status = BLK_STS_IOERR;
	mutex_unlock(&cmd->lock);

	blk_mq_complete_request(req);
	return true;
}

static void nbd_clear_que(struct nbd_device *nbd)
{
	blk_mq_quiesce_queue(nbd->disk->queue);
	blk_mq_tagset_busy_iter(&nbd->tag_set, nbd_clear_req, NULL);
	blk_mq_unquiesce_queue(nbd->disk->queue);
	dev_dbg(disk_to_dev(nbd->disk), "queue cleared\n");
}

static int find_fallback(struct nbd_device *nbd, int index)
{
	struct nbd_config *config = nbd->config;
	int new_index = -1;
	struct nbd_sock *nsock = config->socks[index];
	int fallback = nsock->fallback_index;

	if (test_bit(NBD_RT_DISCONNECTED, &config->runtime_flags))
		return new_index;

	if (config->num_connections <= 1) {
		dev_err_ratelimited(disk_to_dev(nbd->disk),
				    "Attempted send on invalid socket\n");
		return new_index;
	}

	if (fallback >= 0 && fallback < config->num_connections &&
	    !config->socks[fallback]->dead)
		return fallback;

	if (nsock->fallback_index < 0 ||
	    nsock->fallback_index >= config->num_connections ||
	    config->socks[nsock->fallback_index]->dead) {
		int i;
		for (i = 0; i < config->num_connections; i++) {
			if (i == index)
				continue;
			if (!config->socks[i]->dead) {
				new_index = i;
				break;
			}
		}
		nsock->fallback_index = new_index;
		if (new_index < 0) {
			dev_err_ratelimited(disk_to_dev(nbd->disk),
					    "Dead connection, failed to find a fallback\n");
			return new_index;
		}
	}
	new_index = nsock->fallback_index;
	return new_index;
}

static int wait_for_reconnect(struct nbd_device *nbd)
{
	struct nbd_config *config = nbd->config;
	if (!config->dead_conn_timeout)
		return 0;
	if (test_bit(NBD_RT_DISCONNECTED, &config->runtime_flags))
		return 0;
	return wait_event_timeout(config->conn_wait,
				  atomic_read(&config->live_connections) > 0,
				  config->dead_conn_timeout) > 0;
}

static int nbd_handle_cmd(struct nbd_cmd *cmd, int index)
{
	struct request *req = blk_mq_rq_from_pdu(cmd);
	struct nbd_device *nbd = cmd->nbd;
	struct nbd_config *config;
	struct nbd_sock *nsock;
	int ret;

	if (!refcount_inc_not_zero(&nbd->config_refs)) {
		dev_err_ratelimited(disk_to_dev(nbd->disk),
				    "Socks array is empty\n");
		blk_mq_start_request(req);
		return -EINVAL;
	}
	config = nbd->config;

	if (index >= config->num_connections) {
		dev_err_ratelimited(disk_to_dev(nbd->disk),
				    "Attempted send on invalid socket\n");
		nbd_config_put(nbd);
		blk_mq_start_request(req);
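		/*
		 * The request must be in the started state before the block
		 * layer can complete it, so these error paths start it even
		 * though it is about to be failed.
		 */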
		return -EINVAL;
	}
	cmd->status = BLK_STS_OK;
again:
	nsock = config->socks[index];
	mutex_lock(&nsock->tx_lock);
	if (nsock->dead) {
		int old_index = index;
		index = find_fallback(nbd, index);
		mutex_unlock(&nsock->tx_lock);
		if (index < 0) {
			if (wait_for_reconnect(nbd)) {
				index = old_index;
				goto again;
			}
			/* All the sockets should already be down at this point,
			 * we just want to make sure that DISCONNECTED is set so
			 * any requests that come in that were queued waiting
			 * for the reconnect timer don't trigger the timer again
			 * and instead just error out.
			 */
			sock_shutdown(nbd);
			nbd_config_put(nbd);
			blk_mq_start_request(req);
			return -EIO;
		}
		goto again;
	}

	/* Handle the case that we have a pending request that was partially
	 * transmitted that _has_ to be serviced first. We need to call requeue
	 * here so that it gets put _after_ the request that is already on the
	 * dispatch list.
	 */
	blk_mq_start_request(req);
	if (unlikely(nsock->pending && nsock->pending != req)) {
		nbd_requeue_cmd(cmd);
		ret = 0;
		goto out;
	}
	/*
	 * Some failures are related to the link going down, so anything that
	 * returns EAGAIN can be retried on a different socket.
	 */
	ret = nbd_send_cmd(nbd, cmd, index);
	if (ret == -EAGAIN) {
		dev_err_ratelimited(disk_to_dev(nbd->disk),
				    "Request send failed, requeueing\n");
		nbd_mark_nsock_dead(nbd, nsock, 1);
		nbd_requeue_cmd(cmd);
		ret = 0;
	}
out:
	mutex_unlock(&nsock->tx_lock);
	nbd_config_put(nbd);
	return ret;
}

static blk_status_t nbd_queue_rq(struct blk_mq_hw_ctx *hctx,
			const struct blk_mq_queue_data *bd)
{
	struct nbd_cmd *cmd = blk_mq_rq_to_pdu(bd->rq);
	int ret;

	/*
	 * Since we look at the bio's to send the request over the network we
	 * need to make sure the completion work doesn't mark this request done
	 * before we are done doing our send. This keeps us from dereferencing
	 * freed data if we have particularly fast completions (ie we get the
	 * completion before we exit sock_xmit on the last bvec) or in the case
	 * that the server is misbehaving (or there was an error) before we're
	 * done sending everything over the wire.
	 */
	mutex_lock(&cmd->lock);
	clear_bit(NBD_CMD_REQUEUED, &cmd->flags);

	/* We can be called directly from the user space process, which means we
	 * could possibly have signals pending so our sendmsg will fail. In
	 * this case we need to return that we are busy, otherwise error out as
	 * appropriate.
	 */
	ret = nbd_handle_cmd(cmd, hctx->queue_num);
	if (ret < 0)
		ret = BLK_STS_IOERR;
	else if (!ret)
		ret = BLK_STS_OK;
	mutex_unlock(&cmd->lock);

	return ret;
}

static struct socket *nbd_get_socket(struct nbd_device *nbd, unsigned long fd,
				     int *err)
{
	struct socket *sock;

	*err = 0;
	sock = sockfd_lookup(fd, err);
	if (!sock)
		return NULL;

	if (sock->ops->shutdown == sock_no_shutdown) {
		dev_err(disk_to_dev(nbd->disk), "Unsupported socket: shutdown callout must be supported.\n");
		*err = -EINVAL;
		sockfd_put(sock);
		return NULL;
	}

	return sock;
}

static int nbd_add_socket(struct nbd_device *nbd, unsigned long arg,
			  bool netlink)
{
	struct nbd_config *config = nbd->config;
	struct socket *sock;
	struct nbd_sock **socks;
	struct nbd_sock *nsock;
	int err;

	sock = nbd_get_socket(nbd, arg, &err);
	if (!sock)
		return err;

	if (!netlink && !nbd->task_setup &&
	    !test_bit(NBD_RT_BOUND, &config->runtime_flags))
		nbd->task_setup = current;

	if (!netlink &&
	    (nbd->task_setup != current ||
	     test_bit(NBD_RT_BOUND, &config->runtime_flags))) {
		dev_err(disk_to_dev(nbd->disk),
			"Device being setup by another task");
		sockfd_put(sock);
		return -EBUSY;
	}

	socks = krealloc(config->socks, (config->num_connections + 1) *
			 sizeof(struct nbd_sock *), GFP_KERNEL);
	if (!socks) {
		sockfd_put(sock);
		return -ENOMEM;
	}

	config->socks = socks;

	nsock = kzalloc(sizeof(struct nbd_sock), GFP_KERNEL);
	if (!nsock) {
		sockfd_put(sock);
		return -ENOMEM;
	}

	nsock->fallback_index = -1;
	nsock->dead = false;
	mutex_init(&nsock->tx_lock);
	nsock->sock = sock;
	nsock->pending = NULL;
	nsock->sent = 0;
	nsock->cookie = 0;
	socks[config->num_connections++] = nsock;
	atomic_inc(&config->live_connections);

	return 0;
}

static int nbd_reconnect_socket(struct nbd_device *nbd, unsigned long arg)
{
	struct nbd_config *config = nbd->config;
	struct socket *sock, *old;
	struct recv_thread_args *args;
	int i;
	int err;

	sock = nbd_get_socket(nbd, arg, &err);
	if (!sock)
		return err;

	args = kzalloc(sizeof(*args), GFP_KERNEL);
	if (!args) {
		sockfd_put(sock);
		return -ENOMEM;
	}

	for (i = 0; i < config->num_connections; i++) {
		struct nbd_sock *nsock = config->socks[i];

		if (!nsock->dead)
			continue;

		mutex_lock(&nsock->tx_lock);
		if (!nsock->dead) {
			mutex_unlock(&nsock->tx_lock);
			continue;
		}
		sk_set_memalloc(sock->sk);
		if (nbd->tag_set.timeout)
			sock->sk->sk_sndtimeo = nbd->tag_set.timeout;
		atomic_inc(&config->recv_threads);
		refcount_inc(&nbd->config_refs);
		old = nsock->sock;
		nsock->fallback_index = -1;
		nsock->sock = sock;
		nsock->dead = false;
		INIT_WORK(&args->work, recv_work);
		args->index = i;
		args->nbd = nbd;
		nsock->cookie++;
		mutex_unlock(&nsock->tx_lock);
		sockfd_put(old);

		clear_bit(NBD_RT_DISCONNECTED, &config->runtime_flags);

		/* We take the tx_mutex in an error path in the recv_work, so we
		 * need to queue_work outside of the tx_mutex.
		 */
		queue_work(nbd->recv_workq, &args->work);

		atomic_inc(&config->live_connections);
		wake_up(&config->conn_wait);
		return 0;
	}
	sockfd_put(sock);
	kfree(args);
	return -ENOSPC;
}

static void nbd_bdev_reset(struct block_device *bdev)
{
	if (bdev->bd_openers > 1)
		return;
	bd_set_size(bdev, 0);
}

static void nbd_parse_flags(struct nbd_device *nbd)
{
	struct nbd_config *config = nbd->config;
	if (config->flags & NBD_FLAG_READ_ONLY)
		set_disk_ro(nbd->disk, true);
	else
		set_disk_ro(nbd->disk, false);
	if (config->flags & NBD_FLAG_SEND_TRIM)
		blk_queue_flag_set(QUEUE_FLAG_DISCARD, nbd->disk->queue);
	if (config->flags & NBD_FLAG_SEND_FLUSH) {
		if (config->flags & NBD_FLAG_SEND_FUA)
			blk_queue_write_cache(nbd->disk->queue, true, true);
		else
			blk_queue_write_cache(nbd->disk->queue, true, false);
	}
	else
		blk_queue_write_cache(nbd->disk->queue, false, false);
}

static void send_disconnects(struct nbd_device *nbd)
{
	struct nbd_config *config = nbd->config;
	struct nbd_request request = {
		.magic = htonl(NBD_REQUEST_MAGIC),
		.type = htonl(NBD_CMD_DISC),
	};
	struct kvec iov = {.iov_base = &request, .iov_len = sizeof(request)};
	struct iov_iter from;
	int i, ret;

	for (i = 0; i < config->num_connections; i++) {
		struct nbd_sock *nsock = config->socks[i];

		iov_iter_kvec(&from, WRITE, &iov, 1, sizeof(request));
		mutex_lock(&nsock->tx_lock);
		ret = sock_xmit(nbd, i, 1, &from, 0, NULL);
		if (ret <= 0)
			dev_err(disk_to_dev(nbd->disk),
				"Send disconnect failed %d\n", ret);
		mutex_unlock(&nsock->tx_lock);
	}
}

static int nbd_disconnect(struct nbd_device *nbd)
{
	struct nbd_config *config = nbd->config;

	dev_info(disk_to_dev(nbd->disk), "NBD_DISCONNECT\n");
	set_bit(NBD_RT_DISCONNECT_REQUESTED, &config->runtime_flags);
	set_bit(NBD_DISCONNECT_REQUESTED, &nbd->flags);
	send_disconnects(nbd);
	return 0;
}

static void nbd_clear_sock(struct nbd_device *nbd)
{
	sock_shutdown(nbd);
	nbd_clear_que(nbd);
	nbd->task_setup = NULL;
}

static void nbd_config_put(struct nbd_device *nbd)
{
	if (refcount_dec_and_mutex_lock(&nbd->config_refs,
					&nbd->config_lock)) {
		struct nbd_config *config = nbd->config;
		nbd_dev_dbg_close(nbd);
		nbd_size_clear(nbd);
		if (test_and_clear_bit(NBD_RT_HAS_PID_FILE,
				       &config->runtime_flags))
			device_remove_file(disk_to_dev(nbd->disk), &pid_attr);
		nbd->task_recv = NULL;
		nbd_clear_sock(nbd);
		if (config->num_connections) {
			int i;
			for (i = 0; i < config->num_connections; i++) {
				sockfd_put(config->socks[i]->sock);
				kfree(config->socks[i]);
			}
			kfree(config->socks);
		}
		kfree(nbd->config);
		nbd->config = NULL;

		if (nbd->recv_workq)
			destroy_workqueue(nbd->recv_workq);
		nbd->recv_workq = NULL;

		nbd->tag_set.timeout = 0;
		nbd->disk->queue->limits.discard_granularity = 0;
		nbd->disk->queue->limits.discard_alignment = 0;
		blk_queue_max_discard_sectors(nbd->disk->queue, UINT_MAX);
		blk_queue_flag_clear(QUEUE_FLAG_DISCARD, nbd->disk->queue);

		mutex_unlock(&nbd->config_lock);
		nbd_put(nbd);
		module_put(THIS_MODULE);
	}
}

static int nbd_start_device(struct nbd_device *nbd)
{
	struct nbd_config *config = nbd->config;
	int num_connections = config->num_connections;
	int error = 0, i;

	if (nbd->task_recv)
		return -EBUSY;
	if (!config->socks)
		return -EINVAL;
	if (num_connections > 1 &&
	    !(config->flags & NBD_FLAG_CAN_MULTI_CONN)) {
		dev_err(disk_to_dev(nbd->disk), "server does not support multiple connections per device.\n");
		return -EINVAL;
	}

	nbd->recv_workq = alloc_workqueue("knbd%d-recv",
					  WQ_MEM_RECLAIM | WQ_HIGHPRI |
					  WQ_UNBOUND, 0, nbd->index);
	if (!nbd->recv_workq) {
		dev_err(disk_to_dev(nbd->disk), "Could not allocate knbd recv work queue.\n");
		return -ENOMEM;
	}

	blk_mq_update_nr_hw_queues(&nbd->tag_set, config->num_connections);
	nbd->task_recv = current;

	nbd_parse_flags(nbd);

	error = device_create_file(disk_to_dev(nbd->disk), &pid_attr);
	if (error) {
		dev_err(disk_to_dev(nbd->disk), "device_create_file failed!\n");
		return error;
	}
	set_bit(NBD_RT_HAS_PID_FILE, &config->runtime_flags);

	nbd_dev_dbg_init(nbd);
	for (i = 0; i < num_connections; i++) {
		struct recv_thread_args *args;

		args = kzalloc(sizeof(*args), GFP_KERNEL);
		if (!args) {
			sock_shutdown(nbd);
			/*
			 * If the first n (0 < n < num_connections) kzallocs
			 * succeeded but allocation n + 1 failed, we still
			 * have n recv threads running. Flush the workqueue
			 * here to keep those threads from dropping the last
			 * config ref and trying to destroy the workqueue
			 * from inside the workqueue.
			 */
			if (i)
				flush_workqueue(nbd->recv_workq);
			return -ENOMEM;
		}
		sk_set_memalloc(config->socks[i]->sock->sk);
		if (nbd->tag_set.timeout)
			config->socks[i]->sock->sk->sk_sndtimeo =
				nbd->tag_set.timeout;
		atomic_inc(&config->recv_threads);
		refcount_inc(&nbd->config_refs);
		INIT_WORK(&args->work, recv_work);
		args->nbd = nbd;
		args->index = i;
		queue_work(nbd->recv_workq, &args->work);
	}
	nbd_size_update(nbd);
	return error;
}

static int nbd_start_device_ioctl(struct nbd_device *nbd, struct block_device *bdev)
{
	struct nbd_config *config = nbd->config;
	int ret;

	ret = nbd_start_device(nbd);
	if (ret)
		return ret;

	if (max_part)
		bdev->bd_invalidated = 1;
	mutex_unlock(&nbd->config_lock);
	ret = wait_event_interruptible(config->recv_wq,
				       atomic_read(&config->recv_threads) == 0);
	if (ret)
		sock_shutdown(nbd);
	flush_workqueue(nbd->recv_workq);

	mutex_lock(&nbd->config_lock);
	nbd_bdev_reset(bdev);
	/* user requested, ignore socket errors */
	if (test_bit(NBD_RT_DISCONNECT_REQUESTED, &config->runtime_flags))
		ret = 0;
	if (test_bit(NBD_RT_TIMEDOUT, &config->runtime_flags))
		ret = -ETIMEDOUT;
	return ret;
}

static void nbd_clear_sock_ioctl(struct nbd_device *nbd,
				 struct block_device *bdev)
{
	sock_shutdown(nbd);
	__invalidate_device(bdev, true);
	nbd_bdev_reset(bdev);
	if (test_and_clear_bit(NBD_RT_HAS_CONFIG_REF,
			       &nbd->config->runtime_flags))
		nbd_config_put(nbd);
}

static bool nbd_is_valid_blksize(unsigned long blksize)
{
	if (!blksize || !is_power_of_2(blksize) || blksize < 512 ||
	    blksize > PAGE_SIZE)
		return false;
	return true;
}

static void nbd_set_cmd_timeout(struct nbd_device *nbd, u64 timeout)
{
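	/* The timeout is given in seconds; the block layer wants jiffies. */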
	nbd->tag_set.timeout = timeout * HZ;
	if (timeout)
		blk_queue_rq_timeout(nbd->disk->queue, timeout * HZ);
}

/* Must be called with config_lock held */
static int __nbd_ioctl(struct block_device *bdev, struct nbd_device *nbd,
		       unsigned int cmd, unsigned long arg)
{
	struct nbd_config *config = nbd->config;

	switch (cmd) {
	case NBD_DISCONNECT:
		return nbd_disconnect(nbd);
	case NBD_CLEAR_SOCK:
		nbd_clear_sock_ioctl(nbd, bdev);
		return 0;
	case NBD_SET_SOCK:
		return nbd_add_socket(nbd, arg, false);
	case NBD_SET_BLKSIZE:
		if (!arg)
			arg = NBD_DEF_BLKSIZE;
		if (!nbd_is_valid_blksize(arg))
			return -EINVAL;
		nbd_size_set(nbd, arg,
			     div_s64(config->bytesize, arg));
		return 0;
	case NBD_SET_SIZE:
		nbd_size_set(nbd, config->blksize,
			     div_s64(arg, config->blksize));
		return 0;
	case NBD_SET_SIZE_BLOCKS:
		nbd_size_set(nbd, config->blksize, arg);
		return 0;
	case NBD_SET_TIMEOUT:
		nbd_set_cmd_timeout(nbd, arg);
		return 0;

	case NBD_SET_FLAGS:
		config->flags = arg;
		return 0;
	case NBD_DO_IT:
		return nbd_start_device_ioctl(nbd, bdev);
	case NBD_CLEAR_QUE:
		/*
		 * This is for compatibility only. The queue is always cleared
		 * by NBD_DO_IT or NBD_CLEAR_SOCK.
		 */
		return 0;
	case NBD_PRINT_DEBUG:
		/*
		 * For compatibility only, we no longer keep a list of
		 * outstanding requests.
		 */
		return 0;
	}
	return -ENOTTY;
}

static int nbd_ioctl(struct block_device *bdev, fmode_t mode,
		     unsigned int cmd, unsigned long arg)
{
	struct nbd_device *nbd = bdev->bd_disk->private_data;
	struct nbd_config *config = nbd->config;
	int error = -EINVAL;

	if (!capable(CAP_SYS_ADMIN))
		return -EPERM;

	/* The block layer will pass back some non-nbd ioctls in case we have
	 * special handling for them, but we don't so just return an error.
	 */
	if (_IOC_TYPE(cmd) != 0xab)
		return -EINVAL;

	mutex_lock(&nbd->config_lock);

	/* Don't allow ioctl operations on a nbd device that was created with
	 * netlink, unless it's DISCONNECT or CLEAR_SOCK, which are fine.
	 */
	if (!test_bit(NBD_RT_BOUND, &config->runtime_flags) ||
	    (cmd == NBD_DISCONNECT || cmd == NBD_CLEAR_SOCK))
		error = __nbd_ioctl(bdev, nbd, cmd, arg);
	else
		dev_err(nbd_to_dev(nbd), "Cannot use ioctl interface on a netlink controlled device.\n");
	mutex_unlock(&nbd->config_lock);
	return error;
}

static struct nbd_config *nbd_alloc_config(void)
{
	struct nbd_config *config;

	config = kzalloc(sizeof(struct nbd_config), GFP_NOFS);
	if (!config)
		return NULL;
	atomic_set(&config->recv_threads, 0);
	init_waitqueue_head(&config->recv_wq);
	init_waitqueue_head(&config->conn_wait);
	config->blksize = NBD_DEF_BLKSIZE;
	atomic_set(&config->live_connections, 0);
	try_module_get(THIS_MODULE);
	return config;
}

static int nbd_open(struct block_device *bdev, fmode_t mode)
{
	struct nbd_device *nbd;
	int ret = 0;

	mutex_lock(&nbd_index_mutex);
	nbd = bdev->bd_disk->private_data;
	if (!nbd) {
		ret = -ENXIO;
		goto out;
	}
	if (!refcount_inc_not_zero(&nbd->refs)) {
		ret = -ENXIO;
		goto out;
	}
	if (!refcount_inc_not_zero(&nbd->config_refs)) {
		struct nbd_config *config;

		mutex_lock(&nbd->config_lock);
		if (refcount_inc_not_zero(&nbd->config_refs)) {
			mutex_unlock(&nbd->config_lock);
			goto out;
		}
		config = nbd->config = nbd_alloc_config();
		if (!config) {
			ret = -ENOMEM;
			mutex_unlock(&nbd->config_lock);
			goto out;
		}
		refcount_set(&nbd->config_refs, 1);
		refcount_inc(&nbd->refs);
		mutex_unlock(&nbd->config_lock);
		bdev->bd_invalidated = 1;
	} else if (nbd_disconnected(nbd->config)) {
		bdev->bd_invalidated = 1;
	}
out:
	mutex_unlock(&nbd_index_mutex);
	return ret;
}

static void nbd_release(struct gendisk *disk, fmode_t mode)
{
	struct nbd_device *nbd = disk->private_data;
	struct block_device *bdev = bdget_disk(disk, 0);

	if (test_bit(NBD_RT_DISCONNECT_ON_CLOSE, &nbd->config->runtime_flags) &&
			bdev->bd_openers == 0)
		nbd_disconnect_and_put(nbd);

	nbd_config_put(nbd);
	nbd_put(nbd);
}

static const struct block_device_operations nbd_fops =
{
	.owner =	THIS_MODULE,
	.open =		nbd_open,
	.release =	nbd_release,
	.ioctl =	nbd_ioctl,
	.compat_ioctl =	nbd_ioctl,
};

#if IS_ENABLED(CONFIG_DEBUG_FS)

static int nbd_dbg_tasks_show(struct seq_file *s, void *unused)
{
	struct nbd_device *nbd = s->private;

	if (nbd->task_recv)
		seq_printf(s, "recv: %d\n", task_pid_nr(nbd->task_recv));

	return 0;
}

static int nbd_dbg_tasks_open(struct inode *inode, struct file *file)
{
	return single_open(file, nbd_dbg_tasks_show, inode->i_private);
}

static const struct file_operations nbd_dbg_tasks_ops = {
	.open = nbd_dbg_tasks_open,
	.read = seq_read,
	.llseek = seq_lseek,
	.release = single_release,
};

static int nbd_dbg_flags_show(struct seq_file *s, void *unused)
{
	struct nbd_device *nbd = s->private;
	u32 flags = nbd->config->flags;

	seq_printf(s, "Hex: 0x%08x\n\n", flags);

	seq_puts(s, "Known flags:\n");

	if (flags & NBD_FLAG_HAS_FLAGS)
		seq_puts(s, "NBD_FLAG_HAS_FLAGS\n");
	if (flags & NBD_FLAG_READ_ONLY)
		seq_puts(s, "NBD_FLAG_READ_ONLY\n");
	if (flags & NBD_FLAG_SEND_FLUSH)
		seq_puts(s, "NBD_FLAG_SEND_FLUSH\n");
	if (flags & NBD_FLAG_SEND_FUA)
		seq_puts(s, "NBD_FLAG_SEND_FUA\n");
	if (flags & NBD_FLAG_SEND_TRIM)
		seq_puts(s, "NBD_FLAG_SEND_TRIM\n");

	return 0;
}

static int nbd_dbg_flags_open(struct inode *inode, struct file *file)
{
	return single_open(file, nbd_dbg_flags_show, inode->i_private);
}

static const struct file_operations nbd_dbg_flags_ops = {
	.open = nbd_dbg_flags_open,
	.read = seq_read,
	.llseek = seq_lseek,
	.release = single_release,
};

static int nbd_dev_dbg_init(struct nbd_device *nbd)
{
	struct dentry *dir;
	struct nbd_config *config = nbd->config;

	if (!nbd_dbg_dir)
		return -EIO;

	dir = debugfs_create_dir(nbd_name(nbd), nbd_dbg_dir);
	if (!dir) {
		dev_err(nbd_to_dev(nbd), "Failed to create debugfs dir for '%s'\n",
			nbd_name(nbd));
		return -EIO;
	}
	config->dbg_dir = dir;

	debugfs_create_file("tasks", 0444, dir, nbd, &nbd_dbg_tasks_ops);
	debugfs_create_u64("size_bytes", 0444, dir, &config->bytesize);
	debugfs_create_u32("timeout", 0444, dir, &nbd->tag_set.timeout);
	debugfs_create_u64("blocksize", 0444, dir, &config->blksize);
	debugfs_create_file("flags", 0444, dir, nbd, &nbd_dbg_flags_ops);

	return 0;
}

static void nbd_dev_dbg_close(struct nbd_device *nbd)
{
	debugfs_remove_recursive(nbd->config->dbg_dir);
}

static int nbd_dbg_init(void)
{
	struct dentry *dbg_dir;

	dbg_dir = debugfs_create_dir("nbd", NULL);
	if (!dbg_dir)
		return -EIO;

	nbd_dbg_dir = dbg_dir;

	return 0;
}

static void nbd_dbg_close(void)
{
	debugfs_remove_recursive(nbd_dbg_dir);
}

#else  /* IS_ENABLED(CONFIG_DEBUG_FS) */

static int nbd_dev_dbg_init(struct nbd_device *nbd)
{
	return 0;
}

static void nbd_dev_dbg_close(struct nbd_device *nbd)
{
}

static int nbd_dbg_init(void)
{
	return 0;
}

static void nbd_dbg_close(void)
{
}

#endif

static int nbd_init_request(struct blk_mq_tag_set *set, struct request *rq,
			    unsigned int hctx_idx, unsigned int numa_node)
{
	struct nbd_cmd *cmd = blk_mq_rq_to_pdu(rq);
	cmd->nbd = set->driver_data;
	cmd->flags = 0;
	mutex_init(&cmd->lock);
	return 0;
}

static const struct blk_mq_ops nbd_mq_ops = {
	.queue_rq	= nbd_queue_rq,
	.complete	= nbd_complete_rq,
	.init_request	= nbd_init_request,
	.timeout	= nbd_xmit_timeout,
};

static int nbd_dev_add(int index)
{
	struct nbd_device *nbd;
	struct gendisk *disk;
	struct request_queue *q;
	int err = -ENOMEM;

	nbd = kzalloc(sizeof(struct nbd_device), GFP_KERNEL);
	if (!nbd)
		goto out;

	disk = alloc_disk(1 << part_shift);
	if (!disk)
		goto out_free_nbd;

	if (index >= 0) {
		err = idr_alloc(&nbd_index_idr, nbd, index, index + 1,
				GFP_KERNEL);
		if (err == -ENOSPC)
			err = -EEXIST;
	} else {
		err = idr_alloc(&nbd_index_idr, nbd, 0, 0, GFP_KERNEL);
		if (err >= 0)
			index = err;
	}
	if (err < 0)
		goto out_free_disk;

	nbd->index = index;
	nbd->disk = disk;
	nbd->tag_set.ops = &nbd_mq_ops;
	nbd->tag_set.nr_hw_queues = 1;
	nbd->tag_set.queue_depth = 128;
	nbd->tag_set.numa_node = NUMA_NO_NODE;
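	/*
	 * cmd_size makes blk-mq allocate a struct nbd_cmd alongside each
	 * request; see the blk_mq_rq_to_pdu()/blk_mq_rq_from_pdu()
	 * conversions used throughout this driver.
	 */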
	nbd->tag_set.cmd_size = sizeof(struct nbd_cmd);
	nbd->tag_set.flags = BLK_MQ_F_SHOULD_MERGE |
		BLK_MQ_F_BLOCKING;
	nbd->tag_set.driver_data = nbd;
	nbd->destroy_complete = NULL;

	err = blk_mq_alloc_tag_set(&nbd->tag_set);
	if (err)
		goto out_free_idr;

	q = blk_mq_init_queue(&nbd->tag_set);
	if (IS_ERR(q)) {
		err = PTR_ERR(q);
		goto out_free_tags;
	}
	disk->queue = q;

	/*
	 * Tell the block layer that we are not a rotational device
	 */
	blk_queue_flag_set(QUEUE_FLAG_NONROT, disk->queue);
	blk_queue_flag_clear(QUEUE_FLAG_ADD_RANDOM, disk->queue);
	disk->queue->limits.discard_granularity = 0;
	disk->queue->limits.discard_alignment = 0;
	blk_queue_max_discard_sectors(disk->queue, 0);
	blk_queue_max_segment_size(disk->queue, UINT_MAX);
	blk_queue_max_segments(disk->queue, USHRT_MAX);
	blk_queue_max_hw_sectors(disk->queue, 65536);
	disk->queue->limits.max_sectors = 256;

	mutex_init(&nbd->config_lock);
	refcount_set(&nbd->config_refs, 0);
	refcount_set(&nbd->refs, 1);
	INIT_LIST_HEAD(&nbd->list);
	disk->major = NBD_MAJOR;
	disk->first_minor = index << part_shift;
	disk->fops = &nbd_fops;
	disk->private_data = nbd;
	sprintf(disk->disk_name, "nbd%d", index);
	add_disk(disk);
	nbd_total_devices++;
	return index;

out_free_tags:
	blk_mq_free_tag_set(&nbd->tag_set);
out_free_idr:
	idr_remove(&nbd_index_idr, index);
out_free_disk:
	put_disk(disk);
out_free_nbd:
	kfree(nbd);
out:
	return err;
}

static int find_free_cb(int id, void *ptr, void *data)
{
	struct nbd_device *nbd = ptr;
	struct nbd_device **found = data;

	if (!refcount_read(&nbd->config_refs)) {
		*found = nbd;
		return 1;
	}
	return 0;
}

/* Netlink interface. */
static const struct nla_policy nbd_attr_policy[NBD_ATTR_MAX + 1] = {
	[NBD_ATTR_INDEX]		=	{ .type = NLA_U32 },
	[NBD_ATTR_SIZE_BYTES]		=	{ .type = NLA_U64 },
	[NBD_ATTR_BLOCK_SIZE_BYTES]	=	{ .type = NLA_U64 },
	[NBD_ATTR_TIMEOUT]		=	{ .type = NLA_U64 },
	[NBD_ATTR_SERVER_FLAGS]		=	{ .type = NLA_U64 },
	[NBD_ATTR_CLIENT_FLAGS]		=	{ .type = NLA_U64 },
	[NBD_ATTR_SOCKETS]		=	{ .type = NLA_NESTED},
	[NBD_ATTR_DEAD_CONN_TIMEOUT]	=	{ .type = NLA_U64 },
	[NBD_ATTR_DEVICE_LIST]		=	{ .type = NLA_NESTED},
};

static const struct nla_policy nbd_sock_policy[NBD_SOCK_MAX + 1] = {
	[NBD_SOCK_FD]			=	{ .type = NLA_U32 },
};

/* We don't use this right now since we don't parse the incoming list, but we
 * still want it here so userspace knows what to expect.
 */
static const struct nla_policy __attribute__((unused))
nbd_device_policy[NBD_DEVICE_ATTR_MAX + 1] = {
	[NBD_DEVICE_INDEX]		=	{ .type = NLA_U32 },
	[NBD_DEVICE_CONNECTED]		=	{ .type = NLA_U8 },
};

static int nbd_genl_size_set(struct genl_info *info, struct nbd_device *nbd)
{
	struct nbd_config *config = nbd->config;
	u64 bsize = config->blksize;
	u64 bytes = config->bytesize;

	if (info->attrs[NBD_ATTR_SIZE_BYTES])
		bytes = nla_get_u64(info->attrs[NBD_ATTR_SIZE_BYTES]);

	if (info->attrs[NBD_ATTR_BLOCK_SIZE_BYTES]) {
		bsize = nla_get_u64(info->attrs[NBD_ATTR_BLOCK_SIZE_BYTES]);
		if (!bsize)
			bsize = NBD_DEF_BLKSIZE;
		if (!nbd_is_valid_blksize(bsize)) {
			printk(KERN_ERR "Invalid block size %llu\n", bsize);
			return -EINVAL;
		}
	}

	if (bytes != config->bytesize || bsize != config->blksize)
		nbd_size_set(nbd, bsize, div64_u64(bytes, bsize));
	return 0;
}

static int nbd_genl_connect(struct sk_buff *skb, struct genl_info *info)
{
	DECLARE_COMPLETION_ONSTACK(destroy_complete);
	struct nbd_device *nbd = NULL;
	struct nbd_config *config;
	int index = -1;
	int ret;
	bool put_dev = false;

	if (!netlink_capable(skb, CAP_SYS_ADMIN))
		return -EPERM;

	if (info->attrs[NBD_ATTR_INDEX])
		index = nla_get_u32(info->attrs[NBD_ATTR_INDEX]);
	if (!info->attrs[NBD_ATTR_SOCKETS]) {
		printk(KERN_ERR "nbd: must specify at least one socket\n");
		return -EINVAL;
	}
	if (!info->attrs[NBD_ATTR_SIZE_BYTES]) {
		printk(KERN_ERR "nbd: must specify a size in bytes for the device\n");
		return -EINVAL;
	}
again:
	mutex_lock(&nbd_index_mutex);
	if (index == -1) {
		ret = idr_for_each(&nbd_index_idr, &find_free_cb, &nbd);
		if (ret == 0) {
			int new_index;
			new_index = nbd_dev_add(-1);
			if (new_index < 0) {
				mutex_unlock(&nbd_index_mutex);
				printk(KERN_ERR "nbd: failed to add new device\n");
				return new_index;
			}
			nbd = idr_find(&nbd_index_idr, new_index);
		}
	} else {
		nbd = idr_find(&nbd_index_idr, index);
		if (!nbd) {
			ret = nbd_dev_add(index);
			if (ret < 0) {
				mutex_unlock(&nbd_index_mutex);
				printk(KERN_ERR "nbd: failed to add new device\n");
				return ret;
			}
			nbd = idr_find(&nbd_index_idr, index);
		}
	}
	if (!nbd) {
		printk(KERN_ERR "nbd: couldn't find device at index %d\n",
		       index);
		mutex_unlock(&nbd_index_mutex);
		return -EINVAL;
	}

	if (test_bit(NBD_DESTROY_ON_DISCONNECT, &nbd->flags) &&
	    test_bit(NBD_DISCONNECT_REQUESTED, &nbd->flags)) {
		nbd->destroy_complete = &destroy_complete;
		mutex_unlock(&nbd_index_mutex);

		/* Wait until the nbd device is completely destroyed */
		wait_for_completion(&destroy_complete);
		goto again;
	}

	if (!refcount_inc_not_zero(&nbd->refs)) {
		mutex_unlock(&nbd_index_mutex);
		if (index == -1)
			goto again;
		printk(KERN_ERR "nbd: device at index %d is going down\n",
		       index);
		return -EINVAL;
	}
	mutex_unlock(&nbd_index_mutex);

	mutex_lock(&nbd->config_lock);
	if (refcount_read(&nbd->config_refs)) {
		mutex_unlock(&nbd->config_lock);
		nbd_put(nbd);
		if (index == -1)
			goto again;
		printk(KERN_ERR "nbd: nbd%d already in use\n", index);
		return -EBUSY;
	}
	if (WARN_ON(nbd->config)) {
		mutex_unlock(&nbd->config_lock);
		nbd_put(nbd);
		return -EINVAL;
	}
	config = nbd->config = nbd_alloc_config();
	if (!nbd->config) {
		mutex_unlock(&nbd->config_lock);
		nbd_put(nbd);
		printk(KERN_ERR "nbd: couldn't allocate config\n");
		return -ENOMEM;
	}
	refcount_set(&nbd->config_refs, 1);
	set_bit(NBD_RT_BOUND, &config->runtime_flags);

	ret = nbd_genl_size_set(info, nbd);
	if (ret)
		goto out;

	if (info->attrs[NBD_ATTR_TIMEOUT])
		nbd_set_cmd_timeout(nbd,
				    nla_get_u64(info->attrs[NBD_ATTR_TIMEOUT]));
	if (info->attrs[NBD_ATTR_DEAD_CONN_TIMEOUT]) {
		config->dead_conn_timeout =
			nla_get_u64(info->attrs[NBD_ATTR_DEAD_CONN_TIMEOUT]);
		config->dead_conn_timeout *= HZ;
	}
	if (info->attrs[NBD_ATTR_SERVER_FLAGS])
		config->flags =
			nla_get_u64(info->attrs[NBD_ATTR_SERVER_FLAGS]);
	if (info->attrs[NBD_ATTR_CLIENT_FLAGS]) {
		u64 flags = nla_get_u64(info->attrs[NBD_ATTR_CLIENT_FLAGS]);
		if (flags & NBD_CFLAG_DESTROY_ON_DISCONNECT) {
			set_bit(NBD_RT_DESTROY_ON_DISCONNECT,
				&config->runtime_flags);
			set_bit(NBD_DESTROY_ON_DISCONNECT, &nbd->flags);
			put_dev = true;
		} else {
			clear_bit(NBD_DESTROY_ON_DISCONNECT, &nbd->flags);
		}
		if (flags & NBD_CFLAG_DISCONNECT_ON_CLOSE) {
			set_bit(NBD_RT_DISCONNECT_ON_CLOSE,
				&config->runtime_flags);
		}
	}

	if (info->attrs[NBD_ATTR_SOCKETS]) {
		struct nlattr *attr;
		int rem, fd;

		nla_for_each_nested(attr, info->attrs[NBD_ATTR_SOCKETS],
				    rem) {
			struct nlattr *socks[NBD_SOCK_MAX+1];

			if (nla_type(attr) != NBD_SOCK_ITEM) {
				printk(KERN_ERR "nbd: socks must be embedded in a SOCK_ITEM attr\n");
				ret = -EINVAL;
				goto out;
			}
			ret = nla_parse_nested_deprecated(socks, NBD_SOCK_MAX,
							  attr,
							  nbd_sock_policy,
							  info->extack);
			if (ret != 0) {
				printk(KERN_ERR "nbd: error processing sock list\n");
				ret = -EINVAL;
				goto out;
			}
			if (!socks[NBD_SOCK_FD])
				continue;
			fd = (int)nla_get_u32(socks[NBD_SOCK_FD]);
			ret = nbd_add_socket(nbd, fd, true);
			if (ret)
				goto out;
		}
	}
	ret = nbd_start_device(nbd);
out:
	mutex_unlock(&nbd->config_lock);
	if (!ret) {
		set_bit(NBD_RT_HAS_CONFIG_REF, &config->runtime_flags);
		refcount_inc(&nbd->config_refs);
		nbd_connect_reply(info, nbd->index);
	}
	nbd_config_put(nbd);
	if (put_dev)
		nbd_put(nbd);
	return ret;
}

static void nbd_disconnect_and_put(struct nbd_device *nbd)
{
	mutex_lock(&nbd->config_lock);
	nbd_disconnect(nbd);
	nbd_clear_sock(nbd);
	mutex_unlock(&nbd->config_lock);
	/*
	 * Make sure recv thread has finished, so it does not drop the last
	 * config ref and try to destroy the workqueue from inside the work
	 * queue.
	 */
	flush_workqueue(nbd->recv_workq);
	if (test_and_clear_bit(NBD_RT_HAS_CONFIG_REF,
			       &nbd->config->runtime_flags))
		nbd_config_put(nbd);
}

static int nbd_genl_disconnect(struct sk_buff *skb, struct genl_info *info)
{
	struct nbd_device *nbd;
	int index;

	if (!netlink_capable(skb, CAP_SYS_ADMIN))
		return -EPERM;

	if (!info->attrs[NBD_ATTR_INDEX]) {
		printk(KERN_ERR "nbd: must specify an index to disconnect\n");
		return -EINVAL;
	}
	index = nla_get_u32(info->attrs[NBD_ATTR_INDEX]);
	mutex_lock(&nbd_index_mutex);
	nbd = idr_find(&nbd_index_idr, index);
	if (!nbd) {
		mutex_unlock(&nbd_index_mutex);
		printk(KERN_ERR "nbd: couldn't find device at index %d\n",
		       index);
		return -EINVAL;
	}
	if (!refcount_inc_not_zero(&nbd->refs)) {
		mutex_unlock(&nbd_index_mutex);
		printk(KERN_ERR "nbd: device at index %d is going down\n",
		       index);
		return -EINVAL;
	}
	mutex_unlock(&nbd_index_mutex);
	if (!refcount_inc_not_zero(&nbd->config_refs)) {
		nbd_put(nbd);
		return 0;
	}
	nbd_disconnect_and_put(nbd);
	nbd_config_put(nbd);
	nbd_put(nbd);
	return 0;
}

static int nbd_genl_reconfigure(struct sk_buff *skb, struct genl_info *info)
{
	struct nbd_device *nbd = NULL;
	struct nbd_config *config;
	int index;
	int ret = 0;
	bool put_dev = false;

	if (!netlink_capable(skb, CAP_SYS_ADMIN))
		return -EPERM;

	if (!info->attrs[NBD_ATTR_INDEX]) {
		printk(KERN_ERR "nbd: must specify a device to reconfigure\n");
		return -EINVAL;
	}
	index = nla_get_u32(info->attrs[NBD_ATTR_INDEX]);
	mutex_lock(&nbd_index_mutex);
	nbd = idr_find(&nbd_index_idr, index);
	if (!nbd) {
		mutex_unlock(&nbd_index_mutex);
		printk(KERN_ERR "nbd: couldn't find a device at index %d\n",
		       index);
		return -EINVAL;
	}
	if (!refcount_inc_not_zero(&nbd->refs)) {
		mutex_unlock(&nbd_index_mutex);
		printk(KERN_ERR "nbd: device at index %d is going down\n",
		       index);
		return -EINVAL;
	}
	mutex_unlock(&nbd_index_mutex);

	if (!refcount_inc_not_zero(&nbd->config_refs)) {
		dev_err(nbd_to_dev(nbd),
			"not configured, cannot reconfigure\n");
		nbd_put(nbd);
		return -EINVAL;
	}

	mutex_lock(&nbd->config_lock);
	config = nbd->config;
	if (!test_bit(NBD_RT_BOUND, &config->runtime_flags) ||
	    !nbd->task_recv) {
		dev_err(nbd_to_dev(nbd),
			"not configured, cannot reconfigure\n");
		ret = -EINVAL;
		goto out;
	}

	ret = nbd_genl_size_set(info, nbd);
	if (ret)
		goto out;

	if (info->attrs[NBD_ATTR_TIMEOUT])
		nbd_set_cmd_timeout(nbd,
				    nla_get_u64(info->attrs[NBD_ATTR_TIMEOUT]));
	if (info->attrs[NBD_ATTR_DEAD_CONN_TIMEOUT]) {
		config->dead_conn_timeout =
			nla_get_u64(info->attrs[NBD_ATTR_DEAD_CONN_TIMEOUT]);
		config->dead_conn_timeout *= HZ;
	}
	if (info->attrs[NBD_ATTR_CLIENT_FLAGS]) {
		u64 flags = nla_get_u64(info->attrs[NBD_ATTR_CLIENT_FLAGS]);
		if (flags & NBD_CFLAG_DESTROY_ON_DISCONNECT) {
			if (!test_and_set_bit(NBD_RT_DESTROY_ON_DISCONNECT,
					      &config->runtime_flags))
				put_dev = true;
			set_bit(NBD_DESTROY_ON_DISCONNECT, &nbd->flags);
		} else {
			if (test_and_clear_bit(NBD_RT_DESTROY_ON_DISCONNECT,
					       &config->runtime_flags))
				refcount_inc(&nbd->refs);
			clear_bit(NBD_DESTROY_ON_DISCONNECT, &nbd->flags);
		}

		if (flags & NBD_CFLAG_DISCONNECT_ON_CLOSE) {
			set_bit(NBD_RT_DISCONNECT_ON_CLOSE,
				&config->runtime_flags);
		} else {
			clear_bit(NBD_RT_DISCONNECT_ON_CLOSE,
				  &config->runtime_flags);
		}
	}

	if (info->attrs[NBD_ATTR_SOCKETS]) {
		struct nlattr *attr;
		int rem, fd;

		nla_for_each_nested(attr, info->attrs[NBD_ATTR_SOCKETS],
				    rem) {
			struct nlattr *socks[NBD_SOCK_MAX+1];

			if (nla_type(attr) != NBD_SOCK_ITEM) {
				printk(KERN_ERR "nbd: socks must be embedded in a SOCK_ITEM attr\n");
				ret = -EINVAL;
				goto out;
			}
			ret = nla_parse_nested_deprecated(socks, NBD_SOCK_MAX,
							  attr,
							  nbd_sock_policy,
							  info->extack);
			if (ret != 0) {
				printk(KERN_ERR "nbd: error processing sock list\n");
				ret = -EINVAL;
				goto out;
			}
			if (!socks[NBD_SOCK_FD])
				continue;
			fd = (int)nla_get_u32(socks[NBD_SOCK_FD]);
			ret = nbd_reconnect_socket(nbd, fd);
			if (ret) {
				/*
				 * -ENOSPC just means no dead link needed
				 * this socket; that is not an error.
				 */
				if (ret == -ENOSPC)
					ret = 0;
				goto out;
			}
			dev_info(nbd_to_dev(nbd), "reconnected socket\n");
		}
	}
out:
	mutex_unlock(&nbd->config_lock);
	nbd_config_put(nbd);
	nbd_put(nbd);
	if (put_dev)
		nbd_put(nbd);
	return ret;
}

static const struct genl_ops nbd_connect_genl_ops[] = {
	{
		.cmd	= NBD_CMD_CONNECT,
		.validate = GENL_DONT_VALIDATE_STRICT | GENL_DONT_VALIDATE_DUMP,
		.doit	= nbd_genl_connect,
	},
	{
		.cmd	= NBD_CMD_DISCONNECT,
		.validate = GENL_DONT_VALIDATE_STRICT | GENL_DONT_VALIDATE_DUMP,
		.doit	= nbd_genl_disconnect,
	},
	{
		.cmd	= NBD_CMD_RECONFIGURE,
		.validate = GENL_DONT_VALIDATE_STRICT | GENL_DONT_VALIDATE_DUMP,
		.doit	= nbd_genl_reconfigure,
	},
	{
		.cmd	= NBD_CMD_STATUS,
		.validate = GENL_DONT_VALIDATE_STRICT | GENL_DONT_VALIDATE_DUMP,
		.doit	= nbd_genl_status,
	},
};

static const struct genl_multicast_group nbd_mcast_grps[] = {
	{ .name = NBD_GENL_MCAST_GROUP_NAME, },
};

static struct genl_family nbd_genl_family __ro_after_init = {
	.hdrsize	= 0,
	.name		= NBD_GENL_FAMILY_NAME,
	.version	= NBD_GENL_VERSION,
	.module		= THIS_MODULE,
	.ops		= nbd_connect_genl_ops,
	.n_ops		= ARRAY_SIZE(nbd_connect_genl_ops),
	.maxattr	= NBD_ATTR_MAX,
	.policy		= nbd_attr_policy,
	.mcgrps		= nbd_mcast_grps,
	.n_mcgrps	= ARRAY_SIZE(nbd_mcast_grps),
};

static int populate_nbd_status(struct nbd_device *nbd, struct sk_buff *reply)
{
	struct nlattr *dev_opt;
	u8 connected = 0;
	int ret;

	/* This is a little racy, but for status it's ok.  The
	 * reason we don't take a ref here is because we can't
	 * take a ref in the index == -1 case as we would need
	 * to put under the nbd_index_mutex, which could
	 * deadlock if we are configured to remove ourselves
	 * once we're disconnected.
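	 * (That removal is the NBD_RT_DESTROY_ON_DISCONNECT case,
	 * where the final nbd_put() must take nbd_index_mutex itself.)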
	 */
	if (refcount_read(&nbd->config_refs))
		connected = 1;
	dev_opt = nla_nest_start_noflag(reply, NBD_DEVICE_ITEM);
	if (!dev_opt)
		return -EMSGSIZE;
	ret = nla_put_u32(reply, NBD_DEVICE_INDEX, nbd->index);
	if (ret)
		return -EMSGSIZE;
	ret = nla_put_u8(reply, NBD_DEVICE_CONNECTED,
			 connected);
	if (ret)
		return -EMSGSIZE;
	nla_nest_end(reply, dev_opt);
	return 0;
}

static int status_cb(int id, void *ptr, void *data)
{
	struct nbd_device *nbd = ptr;

	return populate_nbd_status(nbd, (struct sk_buff *)data);
}

static int nbd_genl_status(struct sk_buff *skb, struct genl_info *info)
{
	struct nlattr *dev_list;
	struct sk_buff *reply;
	void *reply_head;
	size_t msg_size;
	int index = -1;
	int ret = -ENOMEM;

	if (info->attrs[NBD_ATTR_INDEX])
		index = nla_get_u32(info->attrs[NBD_ATTR_INDEX]);

	mutex_lock(&nbd_index_mutex);

	msg_size = nla_total_size(nla_attr_size(sizeof(u32)) +
				  nla_attr_size(sizeof(u8)));
	msg_size *= (index == -1) ? nbd_total_devices : 1;

	reply = genlmsg_new(msg_size, GFP_KERNEL);
	if (!reply)
		goto out;
	reply_head = genlmsg_put_reply(reply, info, &nbd_genl_family, 0,
				       NBD_CMD_STATUS);
	if (!reply_head) {
		nlmsg_free(reply);
		goto out;
	}

	dev_list = nla_nest_start_noflag(reply, NBD_ATTR_DEVICE_LIST);
	if (index == -1) {
		ret = idr_for_each(&nbd_index_idr, &status_cb, reply);
		if (ret) {
			nlmsg_free(reply);
			goto out;
		}
	} else {
		struct nbd_device *nbd;

		nbd = idr_find(&nbd_index_idr, index);
		if (nbd) {
			ret = populate_nbd_status(nbd, reply);
			if (ret) {
				nlmsg_free(reply);
				goto out;
			}
		}
	}
	nla_nest_end(reply, dev_list);
	genlmsg_end(reply, reply_head);
	ret = genlmsg_reply(reply, info);
out:
	mutex_unlock(&nbd_index_mutex);
	return ret;
}

static void nbd_connect_reply(struct genl_info *info, int index)
{
	struct sk_buff *skb;
	void *msg_head;
	int ret;

	skb = genlmsg_new(nla_total_size(sizeof(u32)), GFP_KERNEL);
	if (!skb)
		return;
	msg_head = genlmsg_put_reply(skb, info, &nbd_genl_family, 0,
				     NBD_CMD_CONNECT);
	if (!msg_head) {
		nlmsg_free(skb);
		return;
	}
	ret = nla_put_u32(skb, NBD_ATTR_INDEX, index);
	if (ret) {
		nlmsg_free(skb);
		return;
	}
	genlmsg_end(skb, msg_head);
	genlmsg_reply(skb, info);
}

static void nbd_mcast_index(int index)
{
	struct sk_buff *skb;
	void *msg_head;
	int ret;

	skb = genlmsg_new(nla_total_size(sizeof(u32)), GFP_KERNEL);
	if (!skb)
		return;
	msg_head = genlmsg_put(skb, 0, 0, &nbd_genl_family, 0,
			       NBD_CMD_LINK_DEAD);
	if (!msg_head) {
		nlmsg_free(skb);
		return;
	}
	ret = nla_put_u32(skb, NBD_ATTR_INDEX, index);
	if (ret) {
		nlmsg_free(skb);
		return;
	}
	genlmsg_end(skb, msg_head);
	genlmsg_multicast(&nbd_genl_family, skb, 0, 0, GFP_KERNEL);
}

static void nbd_dead_link_work(struct work_struct *work)
{
	struct link_dead_args *args = container_of(work, struct link_dead_args,
						   work);
	nbd_mcast_index(args->index);
	kfree(args);
}

static int __init nbd_init(void)
{
	int i;

	BUILD_BUG_ON(sizeof(struct nbd_request) != 28);

	if (max_part < 0) {
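		/* A negative partition cap makes no sense; reject it. */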
printk(KERN_ERR "nbd: max_part must be >= 0\n"); 2358 return -EINVAL; 2359 } 2360 2361 part_shift = 0; 2362 if (max_part > 0) { 2363 part_shift = fls(max_part); 2364 2365 /* 2366 * Adjust max_part according to part_shift as it is exported 2367 * to user space so that user can know the max number of 2368 * partition kernel should be able to manage. 2369 * 2370 * Note that -1 is required because partition 0 is reserved 2371 * for the whole disk. 2372 */ 2373 max_part = (1UL << part_shift) - 1; 2374 } 2375 2376 if ((1UL << part_shift) > DISK_MAX_PARTS) 2377 return -EINVAL; 2378 2379 if (nbds_max > 1UL << (MINORBITS - part_shift)) 2380 return -EINVAL; 2381 2382 if (register_blkdev(NBD_MAJOR, "nbd")) 2383 return -EIO; 2384 2385 if (genl_register_family(&nbd_genl_family)) { 2386 unregister_blkdev(NBD_MAJOR, "nbd"); 2387 return -EINVAL; 2388 } 2389 nbd_dbg_init(); 2390 2391 mutex_lock(&nbd_index_mutex); 2392 for (i = 0; i < nbds_max; i++) 2393 nbd_dev_add(i); 2394 mutex_unlock(&nbd_index_mutex); 2395 return 0; 2396 } 2397 2398 static int nbd_exit_cb(int id, void *ptr, void *data) 2399 { 2400 struct list_head *list = (struct list_head *)data; 2401 struct nbd_device *nbd = ptr; 2402 2403 list_add_tail(&nbd->list, list); 2404 return 0; 2405 } 2406 2407 static void __exit nbd_cleanup(void) 2408 { 2409 struct nbd_device *nbd; 2410 LIST_HEAD(del_list); 2411 2412 nbd_dbg_close(); 2413 2414 mutex_lock(&nbd_index_mutex); 2415 idr_for_each(&nbd_index_idr, &nbd_exit_cb, &del_list); 2416 mutex_unlock(&nbd_index_mutex); 2417 2418 while (!list_empty(&del_list)) { 2419 nbd = list_first_entry(&del_list, struct nbd_device, list); 2420 list_del_init(&nbd->list); 2421 if (refcount_read(&nbd->refs) != 1) 2422 printk(KERN_ERR "nbd: possibly leaking a device\n"); 2423 nbd_put(nbd); 2424 } 2425 2426 idr_destroy(&nbd_index_idr); 2427 genl_unregister_family(&nbd_genl_family); 2428 unregister_blkdev(NBD_MAJOR, "nbd"); 2429 } 2430 2431 module_init(nbd_init); 2432 module_exit(nbd_cleanup); 2433 2434 MODULE_DESCRIPTION("Network Block Device"); 2435 MODULE_LICENSE("GPL"); 2436 2437 module_param(nbds_max, int, 0444); 2438 MODULE_PARM_DESC(nbds_max, "number of network block devices to initialize (default: 16)"); 2439 module_param(max_part, int, 0444); 2440 MODULE_PARM_DESC(max_part, "number of partitions per device (default: 16)"); 2441
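
/*
 * A minimal, stand-alone sketch of the part_shift arithmetic used in
 * nbd_init() above, worked for the default max_part=16.  This block is
 * illustrative only and is compiled out; demo_fls() is a hypothetical
 * userspace stand-in for the kernel's fls(), open-coded here because
 * the kernel helper is not available outside the kernel.
 */
#if 0
#include <stdio.h>

/* Userspace stand-in for fls(): 1-based index of the highest set bit. */
static int demo_fls(unsigned int x)
{
	int r = 0;

	while (x) {
		x >>= 1;
		r++;
	}
	return r;
}

int main(void)
{
	unsigned int max_part = 16;
	int part_shift = demo_fls(max_part);	/* fls(16) == 5 */

	/*
	 * Partition 0 is reserved for the whole disk, hence the -1:
	 * with part_shift == 5 there are 31 usable partition minors,
	 * and each device consumes 1 << 5 == 32 minor numbers.
	 */
	printf("part_shift=%d, max_part=%lu, minors per device=%lu\n",
	       part_shift, (1UL << part_shift) - 1, 1UL << part_shift);
	return 0;
}
#endif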