1 // SPDX-License-Identifier: GPL-2.0-or-later 2 /* 3 * Network block device - make block devices work over TCP 4 * 5 * Note that you can not swap over this thing, yet. Seems to work but 6 * deadlocks sometimes - you can not swap over TCP in general. 7 * 8 * Copyright 1997-2000, 2008 Pavel Machek <pavel@ucw.cz> 9 * Parts copyright 2001 Steven Whitehouse <steve@chygwyn.com> 10 * 11 * (part of code stolen from loop.c) 12 */ 13 14 #define pr_fmt(fmt) "nbd: " fmt 15 16 #include <linux/major.h> 17 18 #include <linux/blkdev.h> 19 #include <linux/module.h> 20 #include <linux/init.h> 21 #include <linux/sched.h> 22 #include <linux/sched/mm.h> 23 #include <linux/fs.h> 24 #include <linux/bio.h> 25 #include <linux/stat.h> 26 #include <linux/errno.h> 27 #include <linux/file.h> 28 #include <linux/ioctl.h> 29 #include <linux/mutex.h> 30 #include <linux/compiler.h> 31 #include <linux/completion.h> 32 #include <linux/err.h> 33 #include <linux/kernel.h> 34 #include <linux/slab.h> 35 #include <net/sock.h> 36 #include <linux/net.h> 37 #include <linux/kthread.h> 38 #include <linux/types.h> 39 #include <linux/debugfs.h> 40 #include <linux/blk-mq.h> 41 42 #include <linux/uaccess.h> 43 #include <asm/types.h> 44 45 #include <linux/nbd.h> 46 #include <linux/nbd-netlink.h> 47 #include <net/genetlink.h> 48 49 #define CREATE_TRACE_POINTS 50 #include <trace/events/nbd.h> 51 52 static DEFINE_IDR(nbd_index_idr); 53 static DEFINE_MUTEX(nbd_index_mutex); 54 static struct workqueue_struct *nbd_del_wq; 55 static int nbd_total_devices = 0; 56 57 struct nbd_sock { 58 struct socket *sock; 59 struct mutex tx_lock; 60 struct request *pending; 61 int sent; 62 bool dead; 63 int fallback_index; 64 int cookie; 65 }; 66 67 struct recv_thread_args { 68 struct work_struct work; 69 struct nbd_device *nbd; 70 int index; 71 }; 72 73 struct link_dead_args { 74 struct work_struct work; 75 int index; 76 }; 77 78 #define NBD_RT_TIMEDOUT 0 79 #define NBD_RT_DISCONNECT_REQUESTED 1 80 #define NBD_RT_DISCONNECTED 2 81 #define NBD_RT_HAS_PID_FILE 3 82 #define NBD_RT_HAS_CONFIG_REF 4 83 #define NBD_RT_BOUND 5 84 #define NBD_RT_DISCONNECT_ON_CLOSE 6 85 #define NBD_RT_HAS_BACKEND_FILE 7 86 87 #define NBD_DESTROY_ON_DISCONNECT 0 88 #define NBD_DISCONNECT_REQUESTED 1 89 90 struct nbd_config { 91 u32 flags; 92 unsigned long runtime_flags; 93 u64 dead_conn_timeout; 94 95 struct nbd_sock **socks; 96 int num_connections; 97 atomic_t live_connections; 98 wait_queue_head_t conn_wait; 99 100 atomic_t recv_threads; 101 wait_queue_head_t recv_wq; 102 unsigned int blksize_bits; 103 loff_t bytesize; 104 #if IS_ENABLED(CONFIG_DEBUG_FS) 105 struct dentry *dbg_dir; 106 #endif 107 }; 108 109 static inline unsigned int nbd_blksize(struct nbd_config *config) 110 { 111 return 1u << config->blksize_bits; 112 } 113 114 struct nbd_device { 115 struct blk_mq_tag_set tag_set; 116 117 int index; 118 refcount_t config_refs; 119 refcount_t refs; 120 struct nbd_config *config; 121 struct mutex config_lock; 122 struct gendisk *disk; 123 struct workqueue_struct *recv_workq; 124 struct work_struct remove_work; 125 126 struct list_head list; 127 struct task_struct *task_setup; 128 129 unsigned long flags; 130 pid_t pid; /* pid of nbd-client, if attached */ 131 132 char *backend; 133 }; 134 135 #define NBD_CMD_REQUEUED 1 136 /* 137 * This flag will be set if nbd_queue_rq() succeed, and will be checked and 138 * cleared in completion. Both setting and clearing of the flag are protected 139 * by cmd->lock. 140 */ 141 #define NBD_CMD_INFLIGHT 2 142 143 struct nbd_cmd { 144 struct nbd_device *nbd; 145 struct mutex lock; 146 int index; 147 int cookie; 148 int retries; 149 blk_status_t status; 150 unsigned long flags; 151 u32 cmd_cookie; 152 }; 153 154 #if IS_ENABLED(CONFIG_DEBUG_FS) 155 static struct dentry *nbd_dbg_dir; 156 #endif 157 158 #define nbd_name(nbd) ((nbd)->disk->disk_name) 159 160 #define NBD_DEF_BLKSIZE_BITS 10 161 162 static unsigned int nbds_max = 16; 163 static int max_part = 16; 164 static int part_shift; 165 166 static int nbd_dev_dbg_init(struct nbd_device *nbd); 167 static void nbd_dev_dbg_close(struct nbd_device *nbd); 168 static void nbd_config_put(struct nbd_device *nbd); 169 static void nbd_connect_reply(struct genl_info *info, int index); 170 static int nbd_genl_status(struct sk_buff *skb, struct genl_info *info); 171 static void nbd_dead_link_work(struct work_struct *work); 172 static void nbd_disconnect_and_put(struct nbd_device *nbd); 173 174 static inline struct device *nbd_to_dev(struct nbd_device *nbd) 175 { 176 return disk_to_dev(nbd->disk); 177 } 178 179 static void nbd_requeue_cmd(struct nbd_cmd *cmd) 180 { 181 struct request *req = blk_mq_rq_from_pdu(cmd); 182 183 if (!test_and_set_bit(NBD_CMD_REQUEUED, &cmd->flags)) 184 blk_mq_requeue_request(req, true); 185 } 186 187 #define NBD_COOKIE_BITS 32 188 189 static u64 nbd_cmd_handle(struct nbd_cmd *cmd) 190 { 191 struct request *req = blk_mq_rq_from_pdu(cmd); 192 u32 tag = blk_mq_unique_tag(req); 193 u64 cookie = cmd->cmd_cookie; 194 195 return (cookie << NBD_COOKIE_BITS) | tag; 196 } 197 198 static u32 nbd_handle_to_tag(u64 handle) 199 { 200 return (u32)handle; 201 } 202 203 static u32 nbd_handle_to_cookie(u64 handle) 204 { 205 return (u32)(handle >> NBD_COOKIE_BITS); 206 } 207 208 static const char *nbdcmd_to_ascii(int cmd) 209 { 210 switch (cmd) { 211 case NBD_CMD_READ: return "read"; 212 case NBD_CMD_WRITE: return "write"; 213 case NBD_CMD_DISC: return "disconnect"; 214 case NBD_CMD_FLUSH: return "flush"; 215 case NBD_CMD_TRIM: return "trim/discard"; 216 } 217 return "invalid"; 218 } 219 220 static ssize_t pid_show(struct device *dev, 221 struct device_attribute *attr, char *buf) 222 { 223 struct gendisk *disk = dev_to_disk(dev); 224 struct nbd_device *nbd = (struct nbd_device *)disk->private_data; 225 226 return sprintf(buf, "%d\n", nbd->pid); 227 } 228 229 static const struct device_attribute pid_attr = { 230 .attr = { .name = "pid", .mode = 0444}, 231 .show = pid_show, 232 }; 233 234 static ssize_t backend_show(struct device *dev, 235 struct device_attribute *attr, char *buf) 236 { 237 struct gendisk *disk = dev_to_disk(dev); 238 struct nbd_device *nbd = (struct nbd_device *)disk->private_data; 239 240 return sprintf(buf, "%s\n", nbd->backend ?: ""); 241 } 242 243 static const struct device_attribute backend_attr = { 244 .attr = { .name = "backend", .mode = 0444}, 245 .show = backend_show, 246 }; 247 248 static void nbd_dev_remove(struct nbd_device *nbd) 249 { 250 struct gendisk *disk = nbd->disk; 251 252 del_gendisk(disk); 253 blk_mq_free_tag_set(&nbd->tag_set); 254 255 /* 256 * Remove from idr after del_gendisk() completes, so if the same ID is 257 * reused, the following add_disk() will succeed. 258 */ 259 mutex_lock(&nbd_index_mutex); 260 idr_remove(&nbd_index_idr, nbd->index); 261 mutex_unlock(&nbd_index_mutex); 262 destroy_workqueue(nbd->recv_workq); 263 put_disk(disk); 264 } 265 266 static void nbd_dev_remove_work(struct work_struct *work) 267 { 268 nbd_dev_remove(container_of(work, struct nbd_device, remove_work)); 269 } 270 271 static void nbd_put(struct nbd_device *nbd) 272 { 273 if (!refcount_dec_and_test(&nbd->refs)) 274 return; 275 276 /* Call del_gendisk() asynchrounously to prevent deadlock */ 277 if (test_bit(NBD_DESTROY_ON_DISCONNECT, &nbd->flags)) 278 queue_work(nbd_del_wq, &nbd->remove_work); 279 else 280 nbd_dev_remove(nbd); 281 } 282 283 static int nbd_disconnected(struct nbd_config *config) 284 { 285 return test_bit(NBD_RT_DISCONNECTED, &config->runtime_flags) || 286 test_bit(NBD_RT_DISCONNECT_REQUESTED, &config->runtime_flags); 287 } 288 289 static void nbd_mark_nsock_dead(struct nbd_device *nbd, struct nbd_sock *nsock, 290 int notify) 291 { 292 if (!nsock->dead && notify && !nbd_disconnected(nbd->config)) { 293 struct link_dead_args *args; 294 args = kmalloc(sizeof(struct link_dead_args), GFP_NOIO); 295 if (args) { 296 INIT_WORK(&args->work, nbd_dead_link_work); 297 args->index = nbd->index; 298 queue_work(system_wq, &args->work); 299 } 300 } 301 if (!nsock->dead) { 302 kernel_sock_shutdown(nsock->sock, SHUT_RDWR); 303 if (atomic_dec_return(&nbd->config->live_connections) == 0) { 304 if (test_and_clear_bit(NBD_RT_DISCONNECT_REQUESTED, 305 &nbd->config->runtime_flags)) { 306 set_bit(NBD_RT_DISCONNECTED, 307 &nbd->config->runtime_flags); 308 dev_info(nbd_to_dev(nbd), 309 "Disconnected due to user request.\n"); 310 } 311 } 312 } 313 nsock->dead = true; 314 nsock->pending = NULL; 315 nsock->sent = 0; 316 } 317 318 static int nbd_set_size(struct nbd_device *nbd, loff_t bytesize, 319 loff_t blksize) 320 { 321 if (!blksize) 322 blksize = 1u << NBD_DEF_BLKSIZE_BITS; 323 324 if (blk_validate_block_size(blksize)) 325 return -EINVAL; 326 327 if (bytesize < 0) 328 return -EINVAL; 329 330 nbd->config->bytesize = bytesize; 331 nbd->config->blksize_bits = __ffs(blksize); 332 333 if (!nbd->pid) 334 return 0; 335 336 if (nbd->config->flags & NBD_FLAG_SEND_TRIM) { 337 nbd->disk->queue->limits.discard_granularity = blksize; 338 blk_queue_max_discard_sectors(nbd->disk->queue, UINT_MAX); 339 } 340 blk_queue_logical_block_size(nbd->disk->queue, blksize); 341 blk_queue_physical_block_size(nbd->disk->queue, blksize); 342 343 if (max_part) 344 set_bit(GD_NEED_PART_SCAN, &nbd->disk->state); 345 if (!set_capacity_and_notify(nbd->disk, bytesize >> 9)) 346 kobject_uevent(&nbd_to_dev(nbd)->kobj, KOBJ_CHANGE); 347 return 0; 348 } 349 350 static void nbd_complete_rq(struct request *req) 351 { 352 struct nbd_cmd *cmd = blk_mq_rq_to_pdu(req); 353 354 dev_dbg(nbd_to_dev(cmd->nbd), "request %p: %s\n", req, 355 cmd->status ? "failed" : "done"); 356 357 blk_mq_end_request(req, cmd->status); 358 } 359 360 /* 361 * Forcibly shutdown the socket causing all listeners to error 362 */ 363 static void sock_shutdown(struct nbd_device *nbd) 364 { 365 struct nbd_config *config = nbd->config; 366 int i; 367 368 if (config->num_connections == 0) 369 return; 370 if (test_and_set_bit(NBD_RT_DISCONNECTED, &config->runtime_flags)) 371 return; 372 373 for (i = 0; i < config->num_connections; i++) { 374 struct nbd_sock *nsock = config->socks[i]; 375 mutex_lock(&nsock->tx_lock); 376 nbd_mark_nsock_dead(nbd, nsock, 0); 377 mutex_unlock(&nsock->tx_lock); 378 } 379 dev_warn(disk_to_dev(nbd->disk), "shutting down sockets\n"); 380 } 381 382 static u32 req_to_nbd_cmd_type(struct request *req) 383 { 384 switch (req_op(req)) { 385 case REQ_OP_DISCARD: 386 return NBD_CMD_TRIM; 387 case REQ_OP_FLUSH: 388 return NBD_CMD_FLUSH; 389 case REQ_OP_WRITE: 390 return NBD_CMD_WRITE; 391 case REQ_OP_READ: 392 return NBD_CMD_READ; 393 default: 394 return U32_MAX; 395 } 396 } 397 398 static struct nbd_config *nbd_get_config_unlocked(struct nbd_device *nbd) 399 { 400 if (refcount_inc_not_zero(&nbd->config_refs)) { 401 /* 402 * Add smp_mb__after_atomic to ensure that reading nbd->config_refs 403 * and reading nbd->config is ordered. The pair is the barrier in 404 * nbd_alloc_and_init_config(), avoid nbd->config_refs is set 405 * before nbd->config. 406 */ 407 smp_mb__after_atomic(); 408 return nbd->config; 409 } 410 411 return NULL; 412 } 413 414 static enum blk_eh_timer_return nbd_xmit_timeout(struct request *req) 415 { 416 struct nbd_cmd *cmd = blk_mq_rq_to_pdu(req); 417 struct nbd_device *nbd = cmd->nbd; 418 struct nbd_config *config; 419 420 if (!mutex_trylock(&cmd->lock)) 421 return BLK_EH_RESET_TIMER; 422 423 if (!test_bit(NBD_CMD_INFLIGHT, &cmd->flags)) { 424 mutex_unlock(&cmd->lock); 425 return BLK_EH_DONE; 426 } 427 428 config = nbd_get_config_unlocked(nbd); 429 if (!config) { 430 cmd->status = BLK_STS_TIMEOUT; 431 __clear_bit(NBD_CMD_INFLIGHT, &cmd->flags); 432 mutex_unlock(&cmd->lock); 433 goto done; 434 } 435 436 if (config->num_connections > 1 || 437 (config->num_connections == 1 && nbd->tag_set.timeout)) { 438 dev_err_ratelimited(nbd_to_dev(nbd), 439 "Connection timed out, retrying (%d/%d alive)\n", 440 atomic_read(&config->live_connections), 441 config->num_connections); 442 /* 443 * Hooray we have more connections, requeue this IO, the submit 444 * path will put it on a real connection. Or if only one 445 * connection is configured, the submit path will wait util 446 * a new connection is reconfigured or util dead timeout. 447 */ 448 if (config->socks) { 449 if (cmd->index < config->num_connections) { 450 struct nbd_sock *nsock = 451 config->socks[cmd->index]; 452 mutex_lock(&nsock->tx_lock); 453 /* We can have multiple outstanding requests, so 454 * we don't want to mark the nsock dead if we've 455 * already reconnected with a new socket, so 456 * only mark it dead if its the same socket we 457 * were sent out on. 458 */ 459 if (cmd->cookie == nsock->cookie) 460 nbd_mark_nsock_dead(nbd, nsock, 1); 461 mutex_unlock(&nsock->tx_lock); 462 } 463 mutex_unlock(&cmd->lock); 464 nbd_requeue_cmd(cmd); 465 nbd_config_put(nbd); 466 return BLK_EH_DONE; 467 } 468 } 469 470 if (!nbd->tag_set.timeout) { 471 /* 472 * Userspace sets timeout=0 to disable socket disconnection, 473 * so just warn and reset the timer. 474 */ 475 struct nbd_sock *nsock = config->socks[cmd->index]; 476 cmd->retries++; 477 dev_info(nbd_to_dev(nbd), "Possible stuck request %p: control (%s@%llu,%uB). Runtime %u seconds\n", 478 req, nbdcmd_to_ascii(req_to_nbd_cmd_type(req)), 479 (unsigned long long)blk_rq_pos(req) << 9, 480 blk_rq_bytes(req), (req->timeout / HZ) * cmd->retries); 481 482 mutex_lock(&nsock->tx_lock); 483 if (cmd->cookie != nsock->cookie) { 484 nbd_requeue_cmd(cmd); 485 mutex_unlock(&nsock->tx_lock); 486 mutex_unlock(&cmd->lock); 487 nbd_config_put(nbd); 488 return BLK_EH_DONE; 489 } 490 mutex_unlock(&nsock->tx_lock); 491 mutex_unlock(&cmd->lock); 492 nbd_config_put(nbd); 493 return BLK_EH_RESET_TIMER; 494 } 495 496 dev_err_ratelimited(nbd_to_dev(nbd), "Connection timed out\n"); 497 set_bit(NBD_RT_TIMEDOUT, &config->runtime_flags); 498 cmd->status = BLK_STS_IOERR; 499 __clear_bit(NBD_CMD_INFLIGHT, &cmd->flags); 500 mutex_unlock(&cmd->lock); 501 sock_shutdown(nbd); 502 nbd_config_put(nbd); 503 done: 504 blk_mq_complete_request(req); 505 return BLK_EH_DONE; 506 } 507 508 /* 509 * Send or receive packet. Return a positive value on success and 510 * negtive value on failue, and never return 0. 511 */ 512 static int sock_xmit(struct nbd_device *nbd, int index, int send, 513 struct iov_iter *iter, int msg_flags, int *sent) 514 { 515 struct nbd_config *config = nbd->config; 516 struct socket *sock = config->socks[index]->sock; 517 int result; 518 struct msghdr msg; 519 unsigned int noreclaim_flag; 520 521 if (unlikely(!sock)) { 522 dev_err_ratelimited(disk_to_dev(nbd->disk), 523 "Attempted %s on closed socket in sock_xmit\n", 524 (send ? "send" : "recv")); 525 return -EINVAL; 526 } 527 528 msg.msg_iter = *iter; 529 530 noreclaim_flag = memalloc_noreclaim_save(); 531 do { 532 sock->sk->sk_allocation = GFP_NOIO | __GFP_MEMALLOC; 533 sock->sk->sk_use_task_frag = false; 534 msg.msg_name = NULL; 535 msg.msg_namelen = 0; 536 msg.msg_control = NULL; 537 msg.msg_controllen = 0; 538 msg.msg_flags = msg_flags | MSG_NOSIGNAL; 539 540 if (send) 541 result = sock_sendmsg(sock, &msg); 542 else 543 result = sock_recvmsg(sock, &msg, msg.msg_flags); 544 545 if (result <= 0) { 546 if (result == 0) 547 result = -EPIPE; /* short read */ 548 break; 549 } 550 if (sent) 551 *sent += result; 552 } while (msg_data_left(&msg)); 553 554 memalloc_noreclaim_restore(noreclaim_flag); 555 556 return result; 557 } 558 559 /* 560 * Different settings for sk->sk_sndtimeo can result in different return values 561 * if there is a signal pending when we enter sendmsg, because reasons? 562 */ 563 static inline int was_interrupted(int result) 564 { 565 return result == -ERESTARTSYS || result == -EINTR; 566 } 567 568 /* always call with the tx_lock held */ 569 static int nbd_send_cmd(struct nbd_device *nbd, struct nbd_cmd *cmd, int index) 570 { 571 struct request *req = blk_mq_rq_from_pdu(cmd); 572 struct nbd_config *config = nbd->config; 573 struct nbd_sock *nsock = config->socks[index]; 574 int result; 575 struct nbd_request request = {.magic = htonl(NBD_REQUEST_MAGIC)}; 576 struct kvec iov = {.iov_base = &request, .iov_len = sizeof(request)}; 577 struct iov_iter from; 578 unsigned long size = blk_rq_bytes(req); 579 struct bio *bio; 580 u64 handle; 581 u32 type; 582 u32 nbd_cmd_flags = 0; 583 int sent = nsock->sent, skip = 0; 584 585 iov_iter_kvec(&from, ITER_SOURCE, &iov, 1, sizeof(request)); 586 587 type = req_to_nbd_cmd_type(req); 588 if (type == U32_MAX) 589 return -EIO; 590 591 if (rq_data_dir(req) == WRITE && 592 (config->flags & NBD_FLAG_READ_ONLY)) { 593 dev_err_ratelimited(disk_to_dev(nbd->disk), 594 "Write on read-only\n"); 595 return -EIO; 596 } 597 598 if (req->cmd_flags & REQ_FUA) 599 nbd_cmd_flags |= NBD_CMD_FLAG_FUA; 600 601 /* We did a partial send previously, and we at least sent the whole 602 * request struct, so just go and send the rest of the pages in the 603 * request. 604 */ 605 if (sent) { 606 if (sent >= sizeof(request)) { 607 skip = sent - sizeof(request); 608 609 /* initialize handle for tracing purposes */ 610 handle = nbd_cmd_handle(cmd); 611 612 goto send_pages; 613 } 614 iov_iter_advance(&from, sent); 615 } else { 616 cmd->cmd_cookie++; 617 } 618 cmd->index = index; 619 cmd->cookie = nsock->cookie; 620 cmd->retries = 0; 621 request.type = htonl(type | nbd_cmd_flags); 622 if (type != NBD_CMD_FLUSH) { 623 request.from = cpu_to_be64((u64)blk_rq_pos(req) << 9); 624 request.len = htonl(size); 625 } 626 handle = nbd_cmd_handle(cmd); 627 request.cookie = cpu_to_be64(handle); 628 629 trace_nbd_send_request(&request, nbd->index, blk_mq_rq_from_pdu(cmd)); 630 631 dev_dbg(nbd_to_dev(nbd), "request %p: sending control (%s@%llu,%uB)\n", 632 req, nbdcmd_to_ascii(type), 633 (unsigned long long)blk_rq_pos(req) << 9, blk_rq_bytes(req)); 634 result = sock_xmit(nbd, index, 1, &from, 635 (type == NBD_CMD_WRITE) ? MSG_MORE : 0, &sent); 636 trace_nbd_header_sent(req, handle); 637 if (result < 0) { 638 if (was_interrupted(result)) { 639 /* If we haven't sent anything we can just return BUSY, 640 * however if we have sent something we need to make 641 * sure we only allow this req to be sent until we are 642 * completely done. 643 */ 644 if (sent) { 645 nsock->pending = req; 646 nsock->sent = sent; 647 } 648 set_bit(NBD_CMD_REQUEUED, &cmd->flags); 649 return BLK_STS_RESOURCE; 650 } 651 dev_err_ratelimited(disk_to_dev(nbd->disk), 652 "Send control failed (result %d)\n", result); 653 return -EAGAIN; 654 } 655 send_pages: 656 if (type != NBD_CMD_WRITE) 657 goto out; 658 659 bio = req->bio; 660 while (bio) { 661 struct bio *next = bio->bi_next; 662 struct bvec_iter iter; 663 struct bio_vec bvec; 664 665 bio_for_each_segment(bvec, bio, iter) { 666 bool is_last = !next && bio_iter_last(bvec, iter); 667 int flags = is_last ? 0 : MSG_MORE; 668 669 dev_dbg(nbd_to_dev(nbd), "request %p: sending %d bytes data\n", 670 req, bvec.bv_len); 671 iov_iter_bvec(&from, ITER_SOURCE, &bvec, 1, bvec.bv_len); 672 if (skip) { 673 if (skip >= iov_iter_count(&from)) { 674 skip -= iov_iter_count(&from); 675 continue; 676 } 677 iov_iter_advance(&from, skip); 678 skip = 0; 679 } 680 result = sock_xmit(nbd, index, 1, &from, flags, &sent); 681 if (result < 0) { 682 if (was_interrupted(result)) { 683 /* We've already sent the header, we 684 * have no choice but to set pending and 685 * return BUSY. 686 */ 687 nsock->pending = req; 688 nsock->sent = sent; 689 set_bit(NBD_CMD_REQUEUED, &cmd->flags); 690 return BLK_STS_RESOURCE; 691 } 692 dev_err(disk_to_dev(nbd->disk), 693 "Send data failed (result %d)\n", 694 result); 695 return -EAGAIN; 696 } 697 /* 698 * The completion might already have come in, 699 * so break for the last one instead of letting 700 * the iterator do it. This prevents use-after-free 701 * of the bio. 702 */ 703 if (is_last) 704 break; 705 } 706 bio = next; 707 } 708 out: 709 trace_nbd_payload_sent(req, handle); 710 nsock->pending = NULL; 711 nsock->sent = 0; 712 return 0; 713 } 714 715 static int nbd_read_reply(struct nbd_device *nbd, int index, 716 struct nbd_reply *reply) 717 { 718 struct kvec iov = {.iov_base = reply, .iov_len = sizeof(*reply)}; 719 struct iov_iter to; 720 int result; 721 722 reply->magic = 0; 723 iov_iter_kvec(&to, ITER_DEST, &iov, 1, sizeof(*reply)); 724 result = sock_xmit(nbd, index, 0, &to, MSG_WAITALL, NULL); 725 if (result < 0) { 726 if (!nbd_disconnected(nbd->config)) 727 dev_err(disk_to_dev(nbd->disk), 728 "Receive control failed (result %d)\n", result); 729 return result; 730 } 731 732 if (ntohl(reply->magic) != NBD_REPLY_MAGIC) { 733 dev_err(disk_to_dev(nbd->disk), "Wrong magic (0x%lx)\n", 734 (unsigned long)ntohl(reply->magic)); 735 return -EPROTO; 736 } 737 738 return 0; 739 } 740 741 /* NULL returned = something went wrong, inform userspace */ 742 static struct nbd_cmd *nbd_handle_reply(struct nbd_device *nbd, int index, 743 struct nbd_reply *reply) 744 { 745 int result; 746 struct nbd_cmd *cmd; 747 struct request *req = NULL; 748 u64 handle; 749 u16 hwq; 750 u32 tag; 751 int ret = 0; 752 753 handle = be64_to_cpu(reply->cookie); 754 tag = nbd_handle_to_tag(handle); 755 hwq = blk_mq_unique_tag_to_hwq(tag); 756 if (hwq < nbd->tag_set.nr_hw_queues) 757 req = blk_mq_tag_to_rq(nbd->tag_set.tags[hwq], 758 blk_mq_unique_tag_to_tag(tag)); 759 if (!req || !blk_mq_request_started(req)) { 760 dev_err(disk_to_dev(nbd->disk), "Unexpected reply (%d) %p\n", 761 tag, req); 762 return ERR_PTR(-ENOENT); 763 } 764 trace_nbd_header_received(req, handle); 765 cmd = blk_mq_rq_to_pdu(req); 766 767 mutex_lock(&cmd->lock); 768 if (!test_bit(NBD_CMD_INFLIGHT, &cmd->flags)) { 769 dev_err(disk_to_dev(nbd->disk), "Suspicious reply %d (status %u flags %lu)", 770 tag, cmd->status, cmd->flags); 771 ret = -ENOENT; 772 goto out; 773 } 774 if (cmd->index != index) { 775 dev_err(disk_to_dev(nbd->disk), "Unexpected reply %d from different sock %d (expected %d)", 776 tag, index, cmd->index); 777 ret = -ENOENT; 778 goto out; 779 } 780 if (cmd->cmd_cookie != nbd_handle_to_cookie(handle)) { 781 dev_err(disk_to_dev(nbd->disk), "Double reply on req %p, cmd_cookie %u, handle cookie %u\n", 782 req, cmd->cmd_cookie, nbd_handle_to_cookie(handle)); 783 ret = -ENOENT; 784 goto out; 785 } 786 if (cmd->status != BLK_STS_OK) { 787 dev_err(disk_to_dev(nbd->disk), "Command already handled %p\n", 788 req); 789 ret = -ENOENT; 790 goto out; 791 } 792 if (test_bit(NBD_CMD_REQUEUED, &cmd->flags)) { 793 dev_err(disk_to_dev(nbd->disk), "Raced with timeout on req %p\n", 794 req); 795 ret = -ENOENT; 796 goto out; 797 } 798 if (ntohl(reply->error)) { 799 dev_err(disk_to_dev(nbd->disk), "Other side returned error (%d)\n", 800 ntohl(reply->error)); 801 cmd->status = BLK_STS_IOERR; 802 goto out; 803 } 804 805 dev_dbg(nbd_to_dev(nbd), "request %p: got reply\n", req); 806 if (rq_data_dir(req) != WRITE) { 807 struct req_iterator iter; 808 struct bio_vec bvec; 809 struct iov_iter to; 810 811 rq_for_each_segment(bvec, req, iter) { 812 iov_iter_bvec(&to, ITER_DEST, &bvec, 1, bvec.bv_len); 813 result = sock_xmit(nbd, index, 0, &to, MSG_WAITALL, NULL); 814 if (result < 0) { 815 dev_err(disk_to_dev(nbd->disk), "Receive data failed (result %d)\n", 816 result); 817 /* 818 * If we've disconnected, we need to make sure we 819 * complete this request, otherwise error out 820 * and let the timeout stuff handle resubmitting 821 * this request onto another connection. 822 */ 823 if (nbd_disconnected(nbd->config)) { 824 cmd->status = BLK_STS_IOERR; 825 goto out; 826 } 827 ret = -EIO; 828 goto out; 829 } 830 dev_dbg(nbd_to_dev(nbd), "request %p: got %d bytes data\n", 831 req, bvec.bv_len); 832 } 833 } 834 out: 835 trace_nbd_payload_received(req, handle); 836 mutex_unlock(&cmd->lock); 837 return ret ? ERR_PTR(ret) : cmd; 838 } 839 840 static void recv_work(struct work_struct *work) 841 { 842 struct recv_thread_args *args = container_of(work, 843 struct recv_thread_args, 844 work); 845 struct nbd_device *nbd = args->nbd; 846 struct nbd_config *config = nbd->config; 847 struct request_queue *q = nbd->disk->queue; 848 struct nbd_sock *nsock; 849 struct nbd_cmd *cmd; 850 struct request *rq; 851 852 while (1) { 853 struct nbd_reply reply; 854 855 if (nbd_read_reply(nbd, args->index, &reply)) 856 break; 857 858 /* 859 * Grab .q_usage_counter so request pool won't go away, then no 860 * request use-after-free is possible during nbd_handle_reply(). 861 * If queue is frozen, there won't be any inflight requests, we 862 * needn't to handle the incoming garbage message. 863 */ 864 if (!percpu_ref_tryget(&q->q_usage_counter)) { 865 dev_err(disk_to_dev(nbd->disk), "%s: no io inflight\n", 866 __func__); 867 break; 868 } 869 870 cmd = nbd_handle_reply(nbd, args->index, &reply); 871 if (IS_ERR(cmd)) { 872 percpu_ref_put(&q->q_usage_counter); 873 break; 874 } 875 876 rq = blk_mq_rq_from_pdu(cmd); 877 if (likely(!blk_should_fake_timeout(rq->q))) { 878 bool complete; 879 880 mutex_lock(&cmd->lock); 881 complete = __test_and_clear_bit(NBD_CMD_INFLIGHT, 882 &cmd->flags); 883 mutex_unlock(&cmd->lock); 884 if (complete) 885 blk_mq_complete_request(rq); 886 } 887 percpu_ref_put(&q->q_usage_counter); 888 } 889 890 nsock = config->socks[args->index]; 891 mutex_lock(&nsock->tx_lock); 892 nbd_mark_nsock_dead(nbd, nsock, 1); 893 mutex_unlock(&nsock->tx_lock); 894 895 nbd_config_put(nbd); 896 atomic_dec(&config->recv_threads); 897 wake_up(&config->recv_wq); 898 kfree(args); 899 } 900 901 static bool nbd_clear_req(struct request *req, void *data) 902 { 903 struct nbd_cmd *cmd = blk_mq_rq_to_pdu(req); 904 905 /* don't abort one completed request */ 906 if (blk_mq_request_completed(req)) 907 return true; 908 909 mutex_lock(&cmd->lock); 910 if (!__test_and_clear_bit(NBD_CMD_INFLIGHT, &cmd->flags)) { 911 mutex_unlock(&cmd->lock); 912 return true; 913 } 914 cmd->status = BLK_STS_IOERR; 915 mutex_unlock(&cmd->lock); 916 917 blk_mq_complete_request(req); 918 return true; 919 } 920 921 static void nbd_clear_que(struct nbd_device *nbd) 922 { 923 blk_mq_quiesce_queue(nbd->disk->queue); 924 blk_mq_tagset_busy_iter(&nbd->tag_set, nbd_clear_req, NULL); 925 blk_mq_unquiesce_queue(nbd->disk->queue); 926 dev_dbg(disk_to_dev(nbd->disk), "queue cleared\n"); 927 } 928 929 static int find_fallback(struct nbd_device *nbd, int index) 930 { 931 struct nbd_config *config = nbd->config; 932 int new_index = -1; 933 struct nbd_sock *nsock = config->socks[index]; 934 int fallback = nsock->fallback_index; 935 936 if (test_bit(NBD_RT_DISCONNECTED, &config->runtime_flags)) 937 return new_index; 938 939 if (config->num_connections <= 1) { 940 dev_err_ratelimited(disk_to_dev(nbd->disk), 941 "Dead connection, failed to find a fallback\n"); 942 return new_index; 943 } 944 945 if (fallback >= 0 && fallback < config->num_connections && 946 !config->socks[fallback]->dead) 947 return fallback; 948 949 if (nsock->fallback_index < 0 || 950 nsock->fallback_index >= config->num_connections || 951 config->socks[nsock->fallback_index]->dead) { 952 int i; 953 for (i = 0; i < config->num_connections; i++) { 954 if (i == index) 955 continue; 956 if (!config->socks[i]->dead) { 957 new_index = i; 958 break; 959 } 960 } 961 nsock->fallback_index = new_index; 962 if (new_index < 0) { 963 dev_err_ratelimited(disk_to_dev(nbd->disk), 964 "Dead connection, failed to find a fallback\n"); 965 return new_index; 966 } 967 } 968 new_index = nsock->fallback_index; 969 return new_index; 970 } 971 972 static int wait_for_reconnect(struct nbd_device *nbd) 973 { 974 struct nbd_config *config = nbd->config; 975 if (!config->dead_conn_timeout) 976 return 0; 977 978 if (!wait_event_timeout(config->conn_wait, 979 test_bit(NBD_RT_DISCONNECTED, 980 &config->runtime_flags) || 981 atomic_read(&config->live_connections) > 0, 982 config->dead_conn_timeout)) 983 return 0; 984 985 return !test_bit(NBD_RT_DISCONNECTED, &config->runtime_flags); 986 } 987 988 static int nbd_handle_cmd(struct nbd_cmd *cmd, int index) 989 { 990 struct request *req = blk_mq_rq_from_pdu(cmd); 991 struct nbd_device *nbd = cmd->nbd; 992 struct nbd_config *config; 993 struct nbd_sock *nsock; 994 int ret; 995 996 config = nbd_get_config_unlocked(nbd); 997 if (!config) { 998 dev_err_ratelimited(disk_to_dev(nbd->disk), 999 "Socks array is empty\n"); 1000 return -EINVAL; 1001 } 1002 1003 if (index >= config->num_connections) { 1004 dev_err_ratelimited(disk_to_dev(nbd->disk), 1005 "Attempted send on invalid socket\n"); 1006 nbd_config_put(nbd); 1007 return -EINVAL; 1008 } 1009 cmd->status = BLK_STS_OK; 1010 again: 1011 nsock = config->socks[index]; 1012 mutex_lock(&nsock->tx_lock); 1013 if (nsock->dead) { 1014 int old_index = index; 1015 index = find_fallback(nbd, index); 1016 mutex_unlock(&nsock->tx_lock); 1017 if (index < 0) { 1018 if (wait_for_reconnect(nbd)) { 1019 index = old_index; 1020 goto again; 1021 } 1022 /* All the sockets should already be down at this point, 1023 * we just want to make sure that DISCONNECTED is set so 1024 * any requests that come in that were queue'ed waiting 1025 * for the reconnect timer don't trigger the timer again 1026 * and instead just error out. 1027 */ 1028 sock_shutdown(nbd); 1029 nbd_config_put(nbd); 1030 return -EIO; 1031 } 1032 goto again; 1033 } 1034 1035 /* Handle the case that we have a pending request that was partially 1036 * transmitted that _has_ to be serviced first. We need to call requeue 1037 * here so that it gets put _after_ the request that is already on the 1038 * dispatch list. 1039 */ 1040 blk_mq_start_request(req); 1041 if (unlikely(nsock->pending && nsock->pending != req)) { 1042 nbd_requeue_cmd(cmd); 1043 ret = 0; 1044 goto out; 1045 } 1046 /* 1047 * Some failures are related to the link going down, so anything that 1048 * returns EAGAIN can be retried on a different socket. 1049 */ 1050 ret = nbd_send_cmd(nbd, cmd, index); 1051 /* 1052 * Access to this flag is protected by cmd->lock, thus it's safe to set 1053 * the flag after nbd_send_cmd() succeed to send request to server. 1054 */ 1055 if (!ret) 1056 __set_bit(NBD_CMD_INFLIGHT, &cmd->flags); 1057 else if (ret == -EAGAIN) { 1058 dev_err_ratelimited(disk_to_dev(nbd->disk), 1059 "Request send failed, requeueing\n"); 1060 nbd_mark_nsock_dead(nbd, nsock, 1); 1061 nbd_requeue_cmd(cmd); 1062 ret = 0; 1063 } 1064 out: 1065 mutex_unlock(&nsock->tx_lock); 1066 nbd_config_put(nbd); 1067 return ret; 1068 } 1069 1070 static blk_status_t nbd_queue_rq(struct blk_mq_hw_ctx *hctx, 1071 const struct blk_mq_queue_data *bd) 1072 { 1073 struct nbd_cmd *cmd = blk_mq_rq_to_pdu(bd->rq); 1074 int ret; 1075 1076 /* 1077 * Since we look at the bio's to send the request over the network we 1078 * need to make sure the completion work doesn't mark this request done 1079 * before we are done doing our send. This keeps us from dereferencing 1080 * freed data if we have particularly fast completions (ie we get the 1081 * completion before we exit sock_xmit on the last bvec) or in the case 1082 * that the server is misbehaving (or there was an error) before we're 1083 * done sending everything over the wire. 1084 */ 1085 mutex_lock(&cmd->lock); 1086 clear_bit(NBD_CMD_REQUEUED, &cmd->flags); 1087 1088 /* We can be called directly from the user space process, which means we 1089 * could possibly have signals pending so our sendmsg will fail. In 1090 * this case we need to return that we are busy, otherwise error out as 1091 * appropriate. 1092 */ 1093 ret = nbd_handle_cmd(cmd, hctx->queue_num); 1094 if (ret < 0) 1095 ret = BLK_STS_IOERR; 1096 else if (!ret) 1097 ret = BLK_STS_OK; 1098 mutex_unlock(&cmd->lock); 1099 1100 return ret; 1101 } 1102 1103 static struct socket *nbd_get_socket(struct nbd_device *nbd, unsigned long fd, 1104 int *err) 1105 { 1106 struct socket *sock; 1107 1108 *err = 0; 1109 sock = sockfd_lookup(fd, err); 1110 if (!sock) 1111 return NULL; 1112 1113 if (sock->ops->shutdown == sock_no_shutdown) { 1114 dev_err(disk_to_dev(nbd->disk), "Unsupported socket: shutdown callout must be supported.\n"); 1115 *err = -EINVAL; 1116 sockfd_put(sock); 1117 return NULL; 1118 } 1119 1120 return sock; 1121 } 1122 1123 static int nbd_add_socket(struct nbd_device *nbd, unsigned long arg, 1124 bool netlink) 1125 { 1126 struct nbd_config *config = nbd->config; 1127 struct socket *sock; 1128 struct nbd_sock **socks; 1129 struct nbd_sock *nsock; 1130 int err; 1131 1132 /* Arg will be cast to int, check it to avoid overflow */ 1133 if (arg > INT_MAX) 1134 return -EINVAL; 1135 sock = nbd_get_socket(nbd, arg, &err); 1136 if (!sock) 1137 return err; 1138 1139 /* 1140 * We need to make sure we don't get any errant requests while we're 1141 * reallocating the ->socks array. 1142 */ 1143 blk_mq_freeze_queue(nbd->disk->queue); 1144 1145 if (!netlink && !nbd->task_setup && 1146 !test_bit(NBD_RT_BOUND, &config->runtime_flags)) 1147 nbd->task_setup = current; 1148 1149 if (!netlink && 1150 (nbd->task_setup != current || 1151 test_bit(NBD_RT_BOUND, &config->runtime_flags))) { 1152 dev_err(disk_to_dev(nbd->disk), 1153 "Device being setup by another task"); 1154 err = -EBUSY; 1155 goto put_socket; 1156 } 1157 1158 nsock = kzalloc(sizeof(*nsock), GFP_KERNEL); 1159 if (!nsock) { 1160 err = -ENOMEM; 1161 goto put_socket; 1162 } 1163 1164 socks = krealloc(config->socks, (config->num_connections + 1) * 1165 sizeof(struct nbd_sock *), GFP_KERNEL); 1166 if (!socks) { 1167 kfree(nsock); 1168 err = -ENOMEM; 1169 goto put_socket; 1170 } 1171 1172 config->socks = socks; 1173 1174 nsock->fallback_index = -1; 1175 nsock->dead = false; 1176 mutex_init(&nsock->tx_lock); 1177 nsock->sock = sock; 1178 nsock->pending = NULL; 1179 nsock->sent = 0; 1180 nsock->cookie = 0; 1181 socks[config->num_connections++] = nsock; 1182 atomic_inc(&config->live_connections); 1183 blk_mq_unfreeze_queue(nbd->disk->queue); 1184 1185 return 0; 1186 1187 put_socket: 1188 blk_mq_unfreeze_queue(nbd->disk->queue); 1189 sockfd_put(sock); 1190 return err; 1191 } 1192 1193 static int nbd_reconnect_socket(struct nbd_device *nbd, unsigned long arg) 1194 { 1195 struct nbd_config *config = nbd->config; 1196 struct socket *sock, *old; 1197 struct recv_thread_args *args; 1198 int i; 1199 int err; 1200 1201 sock = nbd_get_socket(nbd, arg, &err); 1202 if (!sock) 1203 return err; 1204 1205 args = kzalloc(sizeof(*args), GFP_KERNEL); 1206 if (!args) { 1207 sockfd_put(sock); 1208 return -ENOMEM; 1209 } 1210 1211 for (i = 0; i < config->num_connections; i++) { 1212 struct nbd_sock *nsock = config->socks[i]; 1213 1214 if (!nsock->dead) 1215 continue; 1216 1217 mutex_lock(&nsock->tx_lock); 1218 if (!nsock->dead) { 1219 mutex_unlock(&nsock->tx_lock); 1220 continue; 1221 } 1222 sk_set_memalloc(sock->sk); 1223 if (nbd->tag_set.timeout) 1224 sock->sk->sk_sndtimeo = nbd->tag_set.timeout; 1225 atomic_inc(&config->recv_threads); 1226 refcount_inc(&nbd->config_refs); 1227 old = nsock->sock; 1228 nsock->fallback_index = -1; 1229 nsock->sock = sock; 1230 nsock->dead = false; 1231 INIT_WORK(&args->work, recv_work); 1232 args->index = i; 1233 args->nbd = nbd; 1234 nsock->cookie++; 1235 mutex_unlock(&nsock->tx_lock); 1236 sockfd_put(old); 1237 1238 clear_bit(NBD_RT_DISCONNECTED, &config->runtime_flags); 1239 1240 /* We take the tx_mutex in an error path in the recv_work, so we 1241 * need to queue_work outside of the tx_mutex. 1242 */ 1243 queue_work(nbd->recv_workq, &args->work); 1244 1245 atomic_inc(&config->live_connections); 1246 wake_up(&config->conn_wait); 1247 return 0; 1248 } 1249 sockfd_put(sock); 1250 kfree(args); 1251 return -ENOSPC; 1252 } 1253 1254 static void nbd_bdev_reset(struct nbd_device *nbd) 1255 { 1256 if (disk_openers(nbd->disk) > 1) 1257 return; 1258 set_capacity(nbd->disk, 0); 1259 } 1260 1261 static void nbd_parse_flags(struct nbd_device *nbd) 1262 { 1263 struct nbd_config *config = nbd->config; 1264 if (config->flags & NBD_FLAG_READ_ONLY) 1265 set_disk_ro(nbd->disk, true); 1266 else 1267 set_disk_ro(nbd->disk, false); 1268 if (config->flags & NBD_FLAG_SEND_FLUSH) { 1269 if (config->flags & NBD_FLAG_SEND_FUA) 1270 blk_queue_write_cache(nbd->disk->queue, true, true); 1271 else 1272 blk_queue_write_cache(nbd->disk->queue, true, false); 1273 } 1274 else 1275 blk_queue_write_cache(nbd->disk->queue, false, false); 1276 } 1277 1278 static void send_disconnects(struct nbd_device *nbd) 1279 { 1280 struct nbd_config *config = nbd->config; 1281 struct nbd_request request = { 1282 .magic = htonl(NBD_REQUEST_MAGIC), 1283 .type = htonl(NBD_CMD_DISC), 1284 }; 1285 struct kvec iov = {.iov_base = &request, .iov_len = sizeof(request)}; 1286 struct iov_iter from; 1287 int i, ret; 1288 1289 for (i = 0; i < config->num_connections; i++) { 1290 struct nbd_sock *nsock = config->socks[i]; 1291 1292 iov_iter_kvec(&from, ITER_SOURCE, &iov, 1, sizeof(request)); 1293 mutex_lock(&nsock->tx_lock); 1294 ret = sock_xmit(nbd, i, 1, &from, 0, NULL); 1295 if (ret < 0) 1296 dev_err(disk_to_dev(nbd->disk), 1297 "Send disconnect failed %d\n", ret); 1298 mutex_unlock(&nsock->tx_lock); 1299 } 1300 } 1301 1302 static int nbd_disconnect(struct nbd_device *nbd) 1303 { 1304 struct nbd_config *config = nbd->config; 1305 1306 dev_info(disk_to_dev(nbd->disk), "NBD_DISCONNECT\n"); 1307 set_bit(NBD_RT_DISCONNECT_REQUESTED, &config->runtime_flags); 1308 set_bit(NBD_DISCONNECT_REQUESTED, &nbd->flags); 1309 send_disconnects(nbd); 1310 return 0; 1311 } 1312 1313 static void nbd_clear_sock(struct nbd_device *nbd) 1314 { 1315 sock_shutdown(nbd); 1316 nbd_clear_que(nbd); 1317 nbd->task_setup = NULL; 1318 } 1319 1320 static void nbd_config_put(struct nbd_device *nbd) 1321 { 1322 if (refcount_dec_and_mutex_lock(&nbd->config_refs, 1323 &nbd->config_lock)) { 1324 struct nbd_config *config = nbd->config; 1325 nbd_dev_dbg_close(nbd); 1326 invalidate_disk(nbd->disk); 1327 if (nbd->config->bytesize) 1328 kobject_uevent(&nbd_to_dev(nbd)->kobj, KOBJ_CHANGE); 1329 if (test_and_clear_bit(NBD_RT_HAS_PID_FILE, 1330 &config->runtime_flags)) 1331 device_remove_file(disk_to_dev(nbd->disk), &pid_attr); 1332 nbd->pid = 0; 1333 if (test_and_clear_bit(NBD_RT_HAS_BACKEND_FILE, 1334 &config->runtime_flags)) { 1335 device_remove_file(disk_to_dev(nbd->disk), &backend_attr); 1336 kfree(nbd->backend); 1337 nbd->backend = NULL; 1338 } 1339 nbd_clear_sock(nbd); 1340 if (config->num_connections) { 1341 int i; 1342 for (i = 0; i < config->num_connections; i++) { 1343 sockfd_put(config->socks[i]->sock); 1344 kfree(config->socks[i]); 1345 } 1346 kfree(config->socks); 1347 } 1348 kfree(nbd->config); 1349 nbd->config = NULL; 1350 1351 nbd->tag_set.timeout = 0; 1352 nbd->disk->queue->limits.discard_granularity = 0; 1353 blk_queue_max_discard_sectors(nbd->disk->queue, 0); 1354 1355 mutex_unlock(&nbd->config_lock); 1356 nbd_put(nbd); 1357 module_put(THIS_MODULE); 1358 } 1359 } 1360 1361 static int nbd_start_device(struct nbd_device *nbd) 1362 { 1363 struct nbd_config *config = nbd->config; 1364 int num_connections = config->num_connections; 1365 int error = 0, i; 1366 1367 if (nbd->pid) 1368 return -EBUSY; 1369 if (!config->socks) 1370 return -EINVAL; 1371 if (num_connections > 1 && 1372 !(config->flags & NBD_FLAG_CAN_MULTI_CONN)) { 1373 dev_err(disk_to_dev(nbd->disk), "server does not support multiple connections per device.\n"); 1374 return -EINVAL; 1375 } 1376 1377 blk_mq_update_nr_hw_queues(&nbd->tag_set, config->num_connections); 1378 nbd->pid = task_pid_nr(current); 1379 1380 nbd_parse_flags(nbd); 1381 1382 error = device_create_file(disk_to_dev(nbd->disk), &pid_attr); 1383 if (error) { 1384 dev_err(disk_to_dev(nbd->disk), "device_create_file failed for pid!\n"); 1385 return error; 1386 } 1387 set_bit(NBD_RT_HAS_PID_FILE, &config->runtime_flags); 1388 1389 nbd_dev_dbg_init(nbd); 1390 for (i = 0; i < num_connections; i++) { 1391 struct recv_thread_args *args; 1392 1393 args = kzalloc(sizeof(*args), GFP_KERNEL); 1394 if (!args) { 1395 sock_shutdown(nbd); 1396 /* 1397 * If num_connections is m (2 < m), 1398 * and NO.1 ~ NO.n(1 < n < m) kzallocs are successful. 1399 * But NO.(n + 1) failed. We still have n recv threads. 1400 * So, add flush_workqueue here to prevent recv threads 1401 * dropping the last config_refs and trying to destroy 1402 * the workqueue from inside the workqueue. 1403 */ 1404 if (i) 1405 flush_workqueue(nbd->recv_workq); 1406 return -ENOMEM; 1407 } 1408 sk_set_memalloc(config->socks[i]->sock->sk); 1409 if (nbd->tag_set.timeout) 1410 config->socks[i]->sock->sk->sk_sndtimeo = 1411 nbd->tag_set.timeout; 1412 atomic_inc(&config->recv_threads); 1413 refcount_inc(&nbd->config_refs); 1414 INIT_WORK(&args->work, recv_work); 1415 args->nbd = nbd; 1416 args->index = i; 1417 queue_work(nbd->recv_workq, &args->work); 1418 } 1419 return nbd_set_size(nbd, config->bytesize, nbd_blksize(config)); 1420 } 1421 1422 static int nbd_start_device_ioctl(struct nbd_device *nbd) 1423 { 1424 struct nbd_config *config = nbd->config; 1425 int ret; 1426 1427 ret = nbd_start_device(nbd); 1428 if (ret) 1429 return ret; 1430 1431 if (max_part) 1432 set_bit(GD_NEED_PART_SCAN, &nbd->disk->state); 1433 mutex_unlock(&nbd->config_lock); 1434 ret = wait_event_interruptible(config->recv_wq, 1435 atomic_read(&config->recv_threads) == 0); 1436 if (ret) { 1437 sock_shutdown(nbd); 1438 nbd_clear_que(nbd); 1439 } 1440 1441 flush_workqueue(nbd->recv_workq); 1442 mutex_lock(&nbd->config_lock); 1443 nbd_bdev_reset(nbd); 1444 /* user requested, ignore socket errors */ 1445 if (test_bit(NBD_RT_DISCONNECT_REQUESTED, &config->runtime_flags)) 1446 ret = 0; 1447 if (test_bit(NBD_RT_TIMEDOUT, &config->runtime_flags)) 1448 ret = -ETIMEDOUT; 1449 return ret; 1450 } 1451 1452 static void nbd_clear_sock_ioctl(struct nbd_device *nbd) 1453 { 1454 nbd_clear_sock(nbd); 1455 disk_force_media_change(nbd->disk); 1456 nbd_bdev_reset(nbd); 1457 if (test_and_clear_bit(NBD_RT_HAS_CONFIG_REF, 1458 &nbd->config->runtime_flags)) 1459 nbd_config_put(nbd); 1460 } 1461 1462 static void nbd_set_cmd_timeout(struct nbd_device *nbd, u64 timeout) 1463 { 1464 nbd->tag_set.timeout = timeout * HZ; 1465 if (timeout) 1466 blk_queue_rq_timeout(nbd->disk->queue, timeout * HZ); 1467 else 1468 blk_queue_rq_timeout(nbd->disk->queue, 30 * HZ); 1469 } 1470 1471 /* Must be called with config_lock held */ 1472 static int __nbd_ioctl(struct block_device *bdev, struct nbd_device *nbd, 1473 unsigned int cmd, unsigned long arg) 1474 { 1475 struct nbd_config *config = nbd->config; 1476 loff_t bytesize; 1477 1478 switch (cmd) { 1479 case NBD_DISCONNECT: 1480 return nbd_disconnect(nbd); 1481 case NBD_CLEAR_SOCK: 1482 nbd_clear_sock_ioctl(nbd); 1483 return 0; 1484 case NBD_SET_SOCK: 1485 return nbd_add_socket(nbd, arg, false); 1486 case NBD_SET_BLKSIZE: 1487 return nbd_set_size(nbd, config->bytesize, arg); 1488 case NBD_SET_SIZE: 1489 return nbd_set_size(nbd, arg, nbd_blksize(config)); 1490 case NBD_SET_SIZE_BLOCKS: 1491 if (check_shl_overflow(arg, config->blksize_bits, &bytesize)) 1492 return -EINVAL; 1493 return nbd_set_size(nbd, bytesize, nbd_blksize(config)); 1494 case NBD_SET_TIMEOUT: 1495 nbd_set_cmd_timeout(nbd, arg); 1496 return 0; 1497 1498 case NBD_SET_FLAGS: 1499 config->flags = arg; 1500 return 0; 1501 case NBD_DO_IT: 1502 return nbd_start_device_ioctl(nbd); 1503 case NBD_CLEAR_QUE: 1504 /* 1505 * This is for compatibility only. The queue is always cleared 1506 * by NBD_DO_IT or NBD_CLEAR_SOCK. 1507 */ 1508 return 0; 1509 case NBD_PRINT_DEBUG: 1510 /* 1511 * For compatibility only, we no longer keep a list of 1512 * outstanding requests. 1513 */ 1514 return 0; 1515 } 1516 return -ENOTTY; 1517 } 1518 1519 static int nbd_ioctl(struct block_device *bdev, blk_mode_t mode, 1520 unsigned int cmd, unsigned long arg) 1521 { 1522 struct nbd_device *nbd = bdev->bd_disk->private_data; 1523 struct nbd_config *config = nbd->config; 1524 int error = -EINVAL; 1525 1526 if (!capable(CAP_SYS_ADMIN)) 1527 return -EPERM; 1528 1529 /* The block layer will pass back some non-nbd ioctls in case we have 1530 * special handling for them, but we don't so just return an error. 1531 */ 1532 if (_IOC_TYPE(cmd) != 0xab) 1533 return -EINVAL; 1534 1535 mutex_lock(&nbd->config_lock); 1536 1537 /* Don't allow ioctl operations on a nbd device that was created with 1538 * netlink, unless it's DISCONNECT or CLEAR_SOCK, which are fine. 1539 */ 1540 if (!test_bit(NBD_RT_BOUND, &config->runtime_flags) || 1541 (cmd == NBD_DISCONNECT || cmd == NBD_CLEAR_SOCK)) 1542 error = __nbd_ioctl(bdev, nbd, cmd, arg); 1543 else 1544 dev_err(nbd_to_dev(nbd), "Cannot use ioctl interface on a netlink controlled device.\n"); 1545 mutex_unlock(&nbd->config_lock); 1546 return error; 1547 } 1548 1549 static int nbd_alloc_and_init_config(struct nbd_device *nbd) 1550 { 1551 struct nbd_config *config; 1552 1553 if (WARN_ON(nbd->config)) 1554 return -EINVAL; 1555 1556 if (!try_module_get(THIS_MODULE)) 1557 return -ENODEV; 1558 1559 config = kzalloc(sizeof(struct nbd_config), GFP_NOFS); 1560 if (!config) { 1561 module_put(THIS_MODULE); 1562 return -ENOMEM; 1563 } 1564 1565 atomic_set(&config->recv_threads, 0); 1566 init_waitqueue_head(&config->recv_wq); 1567 init_waitqueue_head(&config->conn_wait); 1568 config->blksize_bits = NBD_DEF_BLKSIZE_BITS; 1569 atomic_set(&config->live_connections, 0); 1570 1571 nbd->config = config; 1572 /* 1573 * Order refcount_set(&nbd->config_refs, 1) and nbd->config assignment, 1574 * its pair is the barrier in nbd_get_config_unlocked(). 1575 * So nbd_get_config_unlocked() won't see nbd->config as null after 1576 * refcount_inc_not_zero() succeed. 1577 */ 1578 smp_mb__before_atomic(); 1579 refcount_set(&nbd->config_refs, 1); 1580 1581 return 0; 1582 } 1583 1584 static int nbd_open(struct gendisk *disk, blk_mode_t mode) 1585 { 1586 struct nbd_device *nbd; 1587 struct nbd_config *config; 1588 int ret = 0; 1589 1590 mutex_lock(&nbd_index_mutex); 1591 nbd = disk->private_data; 1592 if (!nbd) { 1593 ret = -ENXIO; 1594 goto out; 1595 } 1596 if (!refcount_inc_not_zero(&nbd->refs)) { 1597 ret = -ENXIO; 1598 goto out; 1599 } 1600 1601 config = nbd_get_config_unlocked(nbd); 1602 if (!config) { 1603 mutex_lock(&nbd->config_lock); 1604 if (refcount_inc_not_zero(&nbd->config_refs)) { 1605 mutex_unlock(&nbd->config_lock); 1606 goto out; 1607 } 1608 ret = nbd_alloc_and_init_config(nbd); 1609 if (ret) { 1610 mutex_unlock(&nbd->config_lock); 1611 goto out; 1612 } 1613 1614 refcount_inc(&nbd->refs); 1615 mutex_unlock(&nbd->config_lock); 1616 if (max_part) 1617 set_bit(GD_NEED_PART_SCAN, &disk->state); 1618 } else if (nbd_disconnected(config)) { 1619 if (max_part) 1620 set_bit(GD_NEED_PART_SCAN, &disk->state); 1621 } 1622 out: 1623 mutex_unlock(&nbd_index_mutex); 1624 return ret; 1625 } 1626 1627 static void nbd_release(struct gendisk *disk) 1628 { 1629 struct nbd_device *nbd = disk->private_data; 1630 1631 if (test_bit(NBD_RT_DISCONNECT_ON_CLOSE, &nbd->config->runtime_flags) && 1632 disk_openers(disk) == 0) 1633 nbd_disconnect_and_put(nbd); 1634 1635 nbd_config_put(nbd); 1636 nbd_put(nbd); 1637 } 1638 1639 static void nbd_free_disk(struct gendisk *disk) 1640 { 1641 struct nbd_device *nbd = disk->private_data; 1642 1643 kfree(nbd); 1644 } 1645 1646 static const struct block_device_operations nbd_fops = 1647 { 1648 .owner = THIS_MODULE, 1649 .open = nbd_open, 1650 .release = nbd_release, 1651 .ioctl = nbd_ioctl, 1652 .compat_ioctl = nbd_ioctl, 1653 .free_disk = nbd_free_disk, 1654 }; 1655 1656 #if IS_ENABLED(CONFIG_DEBUG_FS) 1657 1658 static int nbd_dbg_tasks_show(struct seq_file *s, void *unused) 1659 { 1660 struct nbd_device *nbd = s->private; 1661 1662 if (nbd->pid) 1663 seq_printf(s, "recv: %d\n", nbd->pid); 1664 1665 return 0; 1666 } 1667 1668 DEFINE_SHOW_ATTRIBUTE(nbd_dbg_tasks); 1669 1670 static int nbd_dbg_flags_show(struct seq_file *s, void *unused) 1671 { 1672 struct nbd_device *nbd = s->private; 1673 u32 flags = nbd->config->flags; 1674 1675 seq_printf(s, "Hex: 0x%08x\n\n", flags); 1676 1677 seq_puts(s, "Known flags:\n"); 1678 1679 if (flags & NBD_FLAG_HAS_FLAGS) 1680 seq_puts(s, "NBD_FLAG_HAS_FLAGS\n"); 1681 if (flags & NBD_FLAG_READ_ONLY) 1682 seq_puts(s, "NBD_FLAG_READ_ONLY\n"); 1683 if (flags & NBD_FLAG_SEND_FLUSH) 1684 seq_puts(s, "NBD_FLAG_SEND_FLUSH\n"); 1685 if (flags & NBD_FLAG_SEND_FUA) 1686 seq_puts(s, "NBD_FLAG_SEND_FUA\n"); 1687 if (flags & NBD_FLAG_SEND_TRIM) 1688 seq_puts(s, "NBD_FLAG_SEND_TRIM\n"); 1689 1690 return 0; 1691 } 1692 1693 DEFINE_SHOW_ATTRIBUTE(nbd_dbg_flags); 1694 1695 static int nbd_dev_dbg_init(struct nbd_device *nbd) 1696 { 1697 struct dentry *dir; 1698 struct nbd_config *config = nbd->config; 1699 1700 if (!nbd_dbg_dir) 1701 return -EIO; 1702 1703 dir = debugfs_create_dir(nbd_name(nbd), nbd_dbg_dir); 1704 if (IS_ERR(dir)) { 1705 dev_err(nbd_to_dev(nbd), "Failed to create debugfs dir for '%s'\n", 1706 nbd_name(nbd)); 1707 return -EIO; 1708 } 1709 config->dbg_dir = dir; 1710 1711 debugfs_create_file("tasks", 0444, dir, nbd, &nbd_dbg_tasks_fops); 1712 debugfs_create_u64("size_bytes", 0444, dir, &config->bytesize); 1713 debugfs_create_u32("timeout", 0444, dir, &nbd->tag_set.timeout); 1714 debugfs_create_u32("blocksize_bits", 0444, dir, &config->blksize_bits); 1715 debugfs_create_file("flags", 0444, dir, nbd, &nbd_dbg_flags_fops); 1716 1717 return 0; 1718 } 1719 1720 static void nbd_dev_dbg_close(struct nbd_device *nbd) 1721 { 1722 debugfs_remove_recursive(nbd->config->dbg_dir); 1723 } 1724 1725 static int nbd_dbg_init(void) 1726 { 1727 struct dentry *dbg_dir; 1728 1729 dbg_dir = debugfs_create_dir("nbd", NULL); 1730 if (IS_ERR(dbg_dir)) 1731 return -EIO; 1732 1733 nbd_dbg_dir = dbg_dir; 1734 1735 return 0; 1736 } 1737 1738 static void nbd_dbg_close(void) 1739 { 1740 debugfs_remove_recursive(nbd_dbg_dir); 1741 } 1742 1743 #else /* IS_ENABLED(CONFIG_DEBUG_FS) */ 1744 1745 static int nbd_dev_dbg_init(struct nbd_device *nbd) 1746 { 1747 return 0; 1748 } 1749 1750 static void nbd_dev_dbg_close(struct nbd_device *nbd) 1751 { 1752 } 1753 1754 static int nbd_dbg_init(void) 1755 { 1756 return 0; 1757 } 1758 1759 static void nbd_dbg_close(void) 1760 { 1761 } 1762 1763 #endif 1764 1765 static int nbd_init_request(struct blk_mq_tag_set *set, struct request *rq, 1766 unsigned int hctx_idx, unsigned int numa_node) 1767 { 1768 struct nbd_cmd *cmd = blk_mq_rq_to_pdu(rq); 1769 cmd->nbd = set->driver_data; 1770 cmd->flags = 0; 1771 mutex_init(&cmd->lock); 1772 return 0; 1773 } 1774 1775 static const struct blk_mq_ops nbd_mq_ops = { 1776 .queue_rq = nbd_queue_rq, 1777 .complete = nbd_complete_rq, 1778 .init_request = nbd_init_request, 1779 .timeout = nbd_xmit_timeout, 1780 }; 1781 1782 static struct nbd_device *nbd_dev_add(int index, unsigned int refs) 1783 { 1784 struct nbd_device *nbd; 1785 struct gendisk *disk; 1786 int err = -ENOMEM; 1787 1788 nbd = kzalloc(sizeof(struct nbd_device), GFP_KERNEL); 1789 if (!nbd) 1790 goto out; 1791 1792 nbd->tag_set.ops = &nbd_mq_ops; 1793 nbd->tag_set.nr_hw_queues = 1; 1794 nbd->tag_set.queue_depth = 128; 1795 nbd->tag_set.numa_node = NUMA_NO_NODE; 1796 nbd->tag_set.cmd_size = sizeof(struct nbd_cmd); 1797 nbd->tag_set.flags = BLK_MQ_F_SHOULD_MERGE | 1798 BLK_MQ_F_BLOCKING; 1799 nbd->tag_set.driver_data = nbd; 1800 INIT_WORK(&nbd->remove_work, nbd_dev_remove_work); 1801 nbd->backend = NULL; 1802 1803 err = blk_mq_alloc_tag_set(&nbd->tag_set); 1804 if (err) 1805 goto out_free_nbd; 1806 1807 mutex_lock(&nbd_index_mutex); 1808 if (index >= 0) { 1809 err = idr_alloc(&nbd_index_idr, nbd, index, index + 1, 1810 GFP_KERNEL); 1811 if (err == -ENOSPC) 1812 err = -EEXIST; 1813 } else { 1814 err = idr_alloc(&nbd_index_idr, nbd, 0, 1815 (MINORMASK >> part_shift) + 1, GFP_KERNEL); 1816 if (err >= 0) 1817 index = err; 1818 } 1819 nbd->index = index; 1820 mutex_unlock(&nbd_index_mutex); 1821 if (err < 0) 1822 goto out_free_tags; 1823 1824 disk = blk_mq_alloc_disk(&nbd->tag_set, NULL); 1825 if (IS_ERR(disk)) { 1826 err = PTR_ERR(disk); 1827 goto out_free_idr; 1828 } 1829 nbd->disk = disk; 1830 1831 nbd->recv_workq = alloc_workqueue("nbd%d-recv", 1832 WQ_MEM_RECLAIM | WQ_HIGHPRI | 1833 WQ_UNBOUND, 0, nbd->index); 1834 if (!nbd->recv_workq) { 1835 dev_err(disk_to_dev(nbd->disk), "Could not allocate knbd recv work queue.\n"); 1836 err = -ENOMEM; 1837 goto out_err_disk; 1838 } 1839 1840 /* 1841 * Tell the block layer that we are not a rotational device 1842 */ 1843 blk_queue_flag_set(QUEUE_FLAG_NONROT, disk->queue); 1844 disk->queue->limits.discard_granularity = 0; 1845 blk_queue_max_discard_sectors(disk->queue, 0); 1846 blk_queue_max_segment_size(disk->queue, UINT_MAX); 1847 blk_queue_max_segments(disk->queue, USHRT_MAX); 1848 blk_queue_max_hw_sectors(disk->queue, 65536); 1849 disk->queue->limits.max_sectors = 256; 1850 1851 mutex_init(&nbd->config_lock); 1852 refcount_set(&nbd->config_refs, 0); 1853 /* 1854 * Start out with a zero references to keep other threads from using 1855 * this device until it is fully initialized. 1856 */ 1857 refcount_set(&nbd->refs, 0); 1858 INIT_LIST_HEAD(&nbd->list); 1859 disk->major = NBD_MAJOR; 1860 disk->first_minor = index << part_shift; 1861 disk->minors = 1 << part_shift; 1862 disk->fops = &nbd_fops; 1863 disk->private_data = nbd; 1864 sprintf(disk->disk_name, "nbd%d", index); 1865 err = add_disk(disk); 1866 if (err) 1867 goto out_free_work; 1868 1869 /* 1870 * Now publish the device. 1871 */ 1872 refcount_set(&nbd->refs, refs); 1873 nbd_total_devices++; 1874 return nbd; 1875 1876 out_free_work: 1877 destroy_workqueue(nbd->recv_workq); 1878 out_err_disk: 1879 put_disk(disk); 1880 out_free_idr: 1881 mutex_lock(&nbd_index_mutex); 1882 idr_remove(&nbd_index_idr, index); 1883 mutex_unlock(&nbd_index_mutex); 1884 out_free_tags: 1885 blk_mq_free_tag_set(&nbd->tag_set); 1886 out_free_nbd: 1887 kfree(nbd); 1888 out: 1889 return ERR_PTR(err); 1890 } 1891 1892 static struct nbd_device *nbd_find_get_unused(void) 1893 { 1894 struct nbd_device *nbd; 1895 int id; 1896 1897 lockdep_assert_held(&nbd_index_mutex); 1898 1899 idr_for_each_entry(&nbd_index_idr, nbd, id) { 1900 if (refcount_read(&nbd->config_refs) || 1901 test_bit(NBD_DESTROY_ON_DISCONNECT, &nbd->flags)) 1902 continue; 1903 if (refcount_inc_not_zero(&nbd->refs)) 1904 return nbd; 1905 } 1906 1907 return NULL; 1908 } 1909 1910 /* Netlink interface. */ 1911 static const struct nla_policy nbd_attr_policy[NBD_ATTR_MAX + 1] = { 1912 [NBD_ATTR_INDEX] = { .type = NLA_U32 }, 1913 [NBD_ATTR_SIZE_BYTES] = { .type = NLA_U64 }, 1914 [NBD_ATTR_BLOCK_SIZE_BYTES] = { .type = NLA_U64 }, 1915 [NBD_ATTR_TIMEOUT] = { .type = NLA_U64 }, 1916 [NBD_ATTR_SERVER_FLAGS] = { .type = NLA_U64 }, 1917 [NBD_ATTR_CLIENT_FLAGS] = { .type = NLA_U64 }, 1918 [NBD_ATTR_SOCKETS] = { .type = NLA_NESTED}, 1919 [NBD_ATTR_DEAD_CONN_TIMEOUT] = { .type = NLA_U64 }, 1920 [NBD_ATTR_DEVICE_LIST] = { .type = NLA_NESTED}, 1921 [NBD_ATTR_BACKEND_IDENTIFIER] = { .type = NLA_STRING}, 1922 }; 1923 1924 static const struct nla_policy nbd_sock_policy[NBD_SOCK_MAX + 1] = { 1925 [NBD_SOCK_FD] = { .type = NLA_U32 }, 1926 }; 1927 1928 /* We don't use this right now since we don't parse the incoming list, but we 1929 * still want it here so userspace knows what to expect. 1930 */ 1931 static const struct nla_policy __attribute__((unused)) 1932 nbd_device_policy[NBD_DEVICE_ATTR_MAX + 1] = { 1933 [NBD_DEVICE_INDEX] = { .type = NLA_U32 }, 1934 [NBD_DEVICE_CONNECTED] = { .type = NLA_U8 }, 1935 }; 1936 1937 static int nbd_genl_size_set(struct genl_info *info, struct nbd_device *nbd) 1938 { 1939 struct nbd_config *config = nbd->config; 1940 u64 bsize = nbd_blksize(config); 1941 u64 bytes = config->bytesize; 1942 1943 if (info->attrs[NBD_ATTR_SIZE_BYTES]) 1944 bytes = nla_get_u64(info->attrs[NBD_ATTR_SIZE_BYTES]); 1945 1946 if (info->attrs[NBD_ATTR_BLOCK_SIZE_BYTES]) 1947 bsize = nla_get_u64(info->attrs[NBD_ATTR_BLOCK_SIZE_BYTES]); 1948 1949 if (bytes != config->bytesize || bsize != nbd_blksize(config)) 1950 return nbd_set_size(nbd, bytes, bsize); 1951 return 0; 1952 } 1953 1954 static int nbd_genl_connect(struct sk_buff *skb, struct genl_info *info) 1955 { 1956 struct nbd_device *nbd; 1957 struct nbd_config *config; 1958 int index = -1; 1959 int ret; 1960 bool put_dev = false; 1961 1962 if (!netlink_capable(skb, CAP_SYS_ADMIN)) 1963 return -EPERM; 1964 1965 if (info->attrs[NBD_ATTR_INDEX]) { 1966 index = nla_get_u32(info->attrs[NBD_ATTR_INDEX]); 1967 1968 /* 1969 * Too big first_minor can cause duplicate creation of 1970 * sysfs files/links, since index << part_shift might overflow, or 1971 * MKDEV() expect that the max bits of first_minor is 20. 1972 */ 1973 if (index < 0 || index > MINORMASK >> part_shift) { 1974 pr_err("illegal input index %d\n", index); 1975 return -EINVAL; 1976 } 1977 } 1978 if (GENL_REQ_ATTR_CHECK(info, NBD_ATTR_SOCKETS)) { 1979 pr_err("must specify at least one socket\n"); 1980 return -EINVAL; 1981 } 1982 if (GENL_REQ_ATTR_CHECK(info, NBD_ATTR_SIZE_BYTES)) { 1983 pr_err("must specify a size in bytes for the device\n"); 1984 return -EINVAL; 1985 } 1986 again: 1987 mutex_lock(&nbd_index_mutex); 1988 if (index == -1) { 1989 nbd = nbd_find_get_unused(); 1990 } else { 1991 nbd = idr_find(&nbd_index_idr, index); 1992 if (nbd) { 1993 if ((test_bit(NBD_DESTROY_ON_DISCONNECT, &nbd->flags) && 1994 test_bit(NBD_DISCONNECT_REQUESTED, &nbd->flags)) || 1995 !refcount_inc_not_zero(&nbd->refs)) { 1996 mutex_unlock(&nbd_index_mutex); 1997 pr_err("device at index %d is going down\n", 1998 index); 1999 return -EINVAL; 2000 } 2001 } 2002 } 2003 mutex_unlock(&nbd_index_mutex); 2004 2005 if (!nbd) { 2006 nbd = nbd_dev_add(index, 2); 2007 if (IS_ERR(nbd)) { 2008 pr_err("failed to add new device\n"); 2009 return PTR_ERR(nbd); 2010 } 2011 } 2012 2013 mutex_lock(&nbd->config_lock); 2014 if (refcount_read(&nbd->config_refs)) { 2015 mutex_unlock(&nbd->config_lock); 2016 nbd_put(nbd); 2017 if (index == -1) 2018 goto again; 2019 pr_err("nbd%d already in use\n", index); 2020 return -EBUSY; 2021 } 2022 2023 ret = nbd_alloc_and_init_config(nbd); 2024 if (ret) { 2025 mutex_unlock(&nbd->config_lock); 2026 nbd_put(nbd); 2027 pr_err("couldn't allocate config\n"); 2028 return ret; 2029 } 2030 2031 config = nbd->config; 2032 set_bit(NBD_RT_BOUND, &config->runtime_flags); 2033 ret = nbd_genl_size_set(info, nbd); 2034 if (ret) 2035 goto out; 2036 2037 if (info->attrs[NBD_ATTR_TIMEOUT]) 2038 nbd_set_cmd_timeout(nbd, 2039 nla_get_u64(info->attrs[NBD_ATTR_TIMEOUT])); 2040 if (info->attrs[NBD_ATTR_DEAD_CONN_TIMEOUT]) { 2041 config->dead_conn_timeout = 2042 nla_get_u64(info->attrs[NBD_ATTR_DEAD_CONN_TIMEOUT]); 2043 config->dead_conn_timeout *= HZ; 2044 } 2045 if (info->attrs[NBD_ATTR_SERVER_FLAGS]) 2046 config->flags = 2047 nla_get_u64(info->attrs[NBD_ATTR_SERVER_FLAGS]); 2048 if (info->attrs[NBD_ATTR_CLIENT_FLAGS]) { 2049 u64 flags = nla_get_u64(info->attrs[NBD_ATTR_CLIENT_FLAGS]); 2050 if (flags & NBD_CFLAG_DESTROY_ON_DISCONNECT) { 2051 /* 2052 * We have 1 ref to keep the device around, and then 1 2053 * ref for our current operation here, which will be 2054 * inherited by the config. If we already have 2055 * DESTROY_ON_DISCONNECT set then we know we don't have 2056 * that extra ref already held so we don't need the 2057 * put_dev. 2058 */ 2059 if (!test_and_set_bit(NBD_DESTROY_ON_DISCONNECT, 2060 &nbd->flags)) 2061 put_dev = true; 2062 } else { 2063 if (test_and_clear_bit(NBD_DESTROY_ON_DISCONNECT, 2064 &nbd->flags)) 2065 refcount_inc(&nbd->refs); 2066 } 2067 if (flags & NBD_CFLAG_DISCONNECT_ON_CLOSE) { 2068 set_bit(NBD_RT_DISCONNECT_ON_CLOSE, 2069 &config->runtime_flags); 2070 } 2071 } 2072 2073 if (info->attrs[NBD_ATTR_SOCKETS]) { 2074 struct nlattr *attr; 2075 int rem, fd; 2076 2077 nla_for_each_nested(attr, info->attrs[NBD_ATTR_SOCKETS], 2078 rem) { 2079 struct nlattr *socks[NBD_SOCK_MAX+1]; 2080 2081 if (nla_type(attr) != NBD_SOCK_ITEM) { 2082 pr_err("socks must be embedded in a SOCK_ITEM attr\n"); 2083 ret = -EINVAL; 2084 goto out; 2085 } 2086 ret = nla_parse_nested_deprecated(socks, NBD_SOCK_MAX, 2087 attr, 2088 nbd_sock_policy, 2089 info->extack); 2090 if (ret != 0) { 2091 pr_err("error processing sock list\n"); 2092 ret = -EINVAL; 2093 goto out; 2094 } 2095 if (!socks[NBD_SOCK_FD]) 2096 continue; 2097 fd = (int)nla_get_u32(socks[NBD_SOCK_FD]); 2098 ret = nbd_add_socket(nbd, fd, true); 2099 if (ret) 2100 goto out; 2101 } 2102 } 2103 ret = nbd_start_device(nbd); 2104 if (ret) 2105 goto out; 2106 if (info->attrs[NBD_ATTR_BACKEND_IDENTIFIER]) { 2107 nbd->backend = nla_strdup(info->attrs[NBD_ATTR_BACKEND_IDENTIFIER], 2108 GFP_KERNEL); 2109 if (!nbd->backend) { 2110 ret = -ENOMEM; 2111 goto out; 2112 } 2113 } 2114 ret = device_create_file(disk_to_dev(nbd->disk), &backend_attr); 2115 if (ret) { 2116 dev_err(disk_to_dev(nbd->disk), 2117 "device_create_file failed for backend!\n"); 2118 goto out; 2119 } 2120 set_bit(NBD_RT_HAS_BACKEND_FILE, &config->runtime_flags); 2121 out: 2122 mutex_unlock(&nbd->config_lock); 2123 if (!ret) { 2124 set_bit(NBD_RT_HAS_CONFIG_REF, &config->runtime_flags); 2125 refcount_inc(&nbd->config_refs); 2126 nbd_connect_reply(info, nbd->index); 2127 } 2128 nbd_config_put(nbd); 2129 if (put_dev) 2130 nbd_put(nbd); 2131 return ret; 2132 } 2133 2134 static void nbd_disconnect_and_put(struct nbd_device *nbd) 2135 { 2136 mutex_lock(&nbd->config_lock); 2137 nbd_disconnect(nbd); 2138 sock_shutdown(nbd); 2139 wake_up(&nbd->config->conn_wait); 2140 /* 2141 * Make sure recv thread has finished, we can safely call nbd_clear_que() 2142 * to cancel the inflight I/Os. 2143 */ 2144 flush_workqueue(nbd->recv_workq); 2145 nbd_clear_que(nbd); 2146 nbd->task_setup = NULL; 2147 mutex_unlock(&nbd->config_lock); 2148 2149 if (test_and_clear_bit(NBD_RT_HAS_CONFIG_REF, 2150 &nbd->config->runtime_flags)) 2151 nbd_config_put(nbd); 2152 } 2153 2154 static int nbd_genl_disconnect(struct sk_buff *skb, struct genl_info *info) 2155 { 2156 struct nbd_device *nbd; 2157 int index; 2158 2159 if (!netlink_capable(skb, CAP_SYS_ADMIN)) 2160 return -EPERM; 2161 2162 if (GENL_REQ_ATTR_CHECK(info, NBD_ATTR_INDEX)) { 2163 pr_err("must specify an index to disconnect\n"); 2164 return -EINVAL; 2165 } 2166 index = nla_get_u32(info->attrs[NBD_ATTR_INDEX]); 2167 mutex_lock(&nbd_index_mutex); 2168 nbd = idr_find(&nbd_index_idr, index); 2169 if (!nbd) { 2170 mutex_unlock(&nbd_index_mutex); 2171 pr_err("couldn't find device at index %d\n", index); 2172 return -EINVAL; 2173 } 2174 if (!refcount_inc_not_zero(&nbd->refs)) { 2175 mutex_unlock(&nbd_index_mutex); 2176 pr_err("device at index %d is going down\n", index); 2177 return -EINVAL; 2178 } 2179 mutex_unlock(&nbd_index_mutex); 2180 if (!refcount_inc_not_zero(&nbd->config_refs)) 2181 goto put_nbd; 2182 nbd_disconnect_and_put(nbd); 2183 nbd_config_put(nbd); 2184 put_nbd: 2185 nbd_put(nbd); 2186 return 0; 2187 } 2188 2189 static int nbd_genl_reconfigure(struct sk_buff *skb, struct genl_info *info) 2190 { 2191 struct nbd_device *nbd = NULL; 2192 struct nbd_config *config; 2193 int index; 2194 int ret = 0; 2195 bool put_dev = false; 2196 2197 if (!netlink_capable(skb, CAP_SYS_ADMIN)) 2198 return -EPERM; 2199 2200 if (GENL_REQ_ATTR_CHECK(info, NBD_ATTR_INDEX)) { 2201 pr_err("must specify a device to reconfigure\n"); 2202 return -EINVAL; 2203 } 2204 index = nla_get_u32(info->attrs[NBD_ATTR_INDEX]); 2205 mutex_lock(&nbd_index_mutex); 2206 nbd = idr_find(&nbd_index_idr, index); 2207 if (!nbd) { 2208 mutex_unlock(&nbd_index_mutex); 2209 pr_err("couldn't find a device at index %d\n", index); 2210 return -EINVAL; 2211 } 2212 if (nbd->backend) { 2213 if (info->attrs[NBD_ATTR_BACKEND_IDENTIFIER]) { 2214 if (nla_strcmp(info->attrs[NBD_ATTR_BACKEND_IDENTIFIER], 2215 nbd->backend)) { 2216 mutex_unlock(&nbd_index_mutex); 2217 dev_err(nbd_to_dev(nbd), 2218 "backend image doesn't match with %s\n", 2219 nbd->backend); 2220 return -EINVAL; 2221 } 2222 } else { 2223 mutex_unlock(&nbd_index_mutex); 2224 dev_err(nbd_to_dev(nbd), "must specify backend\n"); 2225 return -EINVAL; 2226 } 2227 } 2228 if (!refcount_inc_not_zero(&nbd->refs)) { 2229 mutex_unlock(&nbd_index_mutex); 2230 pr_err("device at index %d is going down\n", index); 2231 return -EINVAL; 2232 } 2233 mutex_unlock(&nbd_index_mutex); 2234 2235 config = nbd_get_config_unlocked(nbd); 2236 if (!config) { 2237 dev_err(nbd_to_dev(nbd), 2238 "not configured, cannot reconfigure\n"); 2239 nbd_put(nbd); 2240 return -EINVAL; 2241 } 2242 2243 mutex_lock(&nbd->config_lock); 2244 if (!test_bit(NBD_RT_BOUND, &config->runtime_flags) || 2245 !nbd->pid) { 2246 dev_err(nbd_to_dev(nbd), 2247 "not configured, cannot reconfigure\n"); 2248 ret = -EINVAL; 2249 goto out; 2250 } 2251 2252 ret = nbd_genl_size_set(info, nbd); 2253 if (ret) 2254 goto out; 2255 2256 if (info->attrs[NBD_ATTR_TIMEOUT]) 2257 nbd_set_cmd_timeout(nbd, 2258 nla_get_u64(info->attrs[NBD_ATTR_TIMEOUT])); 2259 if (info->attrs[NBD_ATTR_DEAD_CONN_TIMEOUT]) { 2260 config->dead_conn_timeout = 2261 nla_get_u64(info->attrs[NBD_ATTR_DEAD_CONN_TIMEOUT]); 2262 config->dead_conn_timeout *= HZ; 2263 } 2264 if (info->attrs[NBD_ATTR_CLIENT_FLAGS]) { 2265 u64 flags = nla_get_u64(info->attrs[NBD_ATTR_CLIENT_FLAGS]); 2266 if (flags & NBD_CFLAG_DESTROY_ON_DISCONNECT) { 2267 if (!test_and_set_bit(NBD_DESTROY_ON_DISCONNECT, 2268 &nbd->flags)) 2269 put_dev = true; 2270 } else { 2271 if (test_and_clear_bit(NBD_DESTROY_ON_DISCONNECT, 2272 &nbd->flags)) 2273 refcount_inc(&nbd->refs); 2274 } 2275 2276 if (flags & NBD_CFLAG_DISCONNECT_ON_CLOSE) { 2277 set_bit(NBD_RT_DISCONNECT_ON_CLOSE, 2278 &config->runtime_flags); 2279 } else { 2280 clear_bit(NBD_RT_DISCONNECT_ON_CLOSE, 2281 &config->runtime_flags); 2282 } 2283 } 2284 2285 if (info->attrs[NBD_ATTR_SOCKETS]) { 2286 struct nlattr *attr; 2287 int rem, fd; 2288 2289 nla_for_each_nested(attr, info->attrs[NBD_ATTR_SOCKETS], 2290 rem) { 2291 struct nlattr *socks[NBD_SOCK_MAX+1]; 2292 2293 if (nla_type(attr) != NBD_SOCK_ITEM) { 2294 pr_err("socks must be embedded in a SOCK_ITEM attr\n"); 2295 ret = -EINVAL; 2296 goto out; 2297 } 2298 ret = nla_parse_nested_deprecated(socks, NBD_SOCK_MAX, 2299 attr, 2300 nbd_sock_policy, 2301 info->extack); 2302 if (ret != 0) { 2303 pr_err("error processing sock list\n"); 2304 ret = -EINVAL; 2305 goto out; 2306 } 2307 if (!socks[NBD_SOCK_FD]) 2308 continue; 2309 fd = (int)nla_get_u32(socks[NBD_SOCK_FD]); 2310 ret = nbd_reconnect_socket(nbd, fd); 2311 if (ret) { 2312 if (ret == -ENOSPC) 2313 ret = 0; 2314 goto out; 2315 } 2316 dev_info(nbd_to_dev(nbd), "reconnected socket\n"); 2317 } 2318 } 2319 out: 2320 mutex_unlock(&nbd->config_lock); 2321 nbd_config_put(nbd); 2322 nbd_put(nbd); 2323 if (put_dev) 2324 nbd_put(nbd); 2325 return ret; 2326 } 2327 2328 static const struct genl_small_ops nbd_connect_genl_ops[] = { 2329 { 2330 .cmd = NBD_CMD_CONNECT, 2331 .validate = GENL_DONT_VALIDATE_STRICT | GENL_DONT_VALIDATE_DUMP, 2332 .doit = nbd_genl_connect, 2333 }, 2334 { 2335 .cmd = NBD_CMD_DISCONNECT, 2336 .validate = GENL_DONT_VALIDATE_STRICT | GENL_DONT_VALIDATE_DUMP, 2337 .doit = nbd_genl_disconnect, 2338 }, 2339 { 2340 .cmd = NBD_CMD_RECONFIGURE, 2341 .validate = GENL_DONT_VALIDATE_STRICT | GENL_DONT_VALIDATE_DUMP, 2342 .doit = nbd_genl_reconfigure, 2343 }, 2344 { 2345 .cmd = NBD_CMD_STATUS, 2346 .validate = GENL_DONT_VALIDATE_STRICT | GENL_DONT_VALIDATE_DUMP, 2347 .doit = nbd_genl_status, 2348 }, 2349 }; 2350 2351 static const struct genl_multicast_group nbd_mcast_grps[] = { 2352 { .name = NBD_GENL_MCAST_GROUP_NAME, }, 2353 }; 2354 2355 static struct genl_family nbd_genl_family __ro_after_init = { 2356 .hdrsize = 0, 2357 .name = NBD_GENL_FAMILY_NAME, 2358 .version = NBD_GENL_VERSION, 2359 .module = THIS_MODULE, 2360 .small_ops = nbd_connect_genl_ops, 2361 .n_small_ops = ARRAY_SIZE(nbd_connect_genl_ops), 2362 .resv_start_op = NBD_CMD_STATUS + 1, 2363 .maxattr = NBD_ATTR_MAX, 2364 .netnsok = 1, 2365 .policy = nbd_attr_policy, 2366 .mcgrps = nbd_mcast_grps, 2367 .n_mcgrps = ARRAY_SIZE(nbd_mcast_grps), 2368 }; 2369 MODULE_ALIAS_GENL_FAMILY(NBD_GENL_FAMILY_NAME); 2370 2371 static int populate_nbd_status(struct nbd_device *nbd, struct sk_buff *reply) 2372 { 2373 struct nlattr *dev_opt; 2374 u8 connected = 0; 2375 int ret; 2376 2377 /* This is a little racey, but for status it's ok. The 2378 * reason we don't take a ref here is because we can't 2379 * take a ref in the index == -1 case as we would need 2380 * to put under the nbd_index_mutex, which could 2381 * deadlock if we are configured to remove ourselves 2382 * once we're disconnected. 2383 */ 2384 if (refcount_read(&nbd->config_refs)) 2385 connected = 1; 2386 dev_opt = nla_nest_start_noflag(reply, NBD_DEVICE_ITEM); 2387 if (!dev_opt) 2388 return -EMSGSIZE; 2389 ret = nla_put_u32(reply, NBD_DEVICE_INDEX, nbd->index); 2390 if (ret) 2391 return -EMSGSIZE; 2392 ret = nla_put_u8(reply, NBD_DEVICE_CONNECTED, 2393 connected); 2394 if (ret) 2395 return -EMSGSIZE; 2396 nla_nest_end(reply, dev_opt); 2397 return 0; 2398 } 2399 2400 static int status_cb(int id, void *ptr, void *data) 2401 { 2402 struct nbd_device *nbd = ptr; 2403 return populate_nbd_status(nbd, (struct sk_buff *)data); 2404 } 2405 2406 static int nbd_genl_status(struct sk_buff *skb, struct genl_info *info) 2407 { 2408 struct nlattr *dev_list; 2409 struct sk_buff *reply; 2410 void *reply_head; 2411 size_t msg_size; 2412 int index = -1; 2413 int ret = -ENOMEM; 2414 2415 if (info->attrs[NBD_ATTR_INDEX]) 2416 index = nla_get_u32(info->attrs[NBD_ATTR_INDEX]); 2417 2418 mutex_lock(&nbd_index_mutex); 2419 2420 msg_size = nla_total_size(nla_attr_size(sizeof(u32)) + 2421 nla_attr_size(sizeof(u8))); 2422 msg_size *= (index == -1) ? nbd_total_devices : 1; 2423 2424 reply = genlmsg_new(msg_size, GFP_KERNEL); 2425 if (!reply) 2426 goto out; 2427 reply_head = genlmsg_put_reply(reply, info, &nbd_genl_family, 0, 2428 NBD_CMD_STATUS); 2429 if (!reply_head) { 2430 nlmsg_free(reply); 2431 goto out; 2432 } 2433 2434 dev_list = nla_nest_start_noflag(reply, NBD_ATTR_DEVICE_LIST); 2435 if (index == -1) { 2436 ret = idr_for_each(&nbd_index_idr, &status_cb, reply); 2437 if (ret) { 2438 nlmsg_free(reply); 2439 goto out; 2440 } 2441 } else { 2442 struct nbd_device *nbd; 2443 nbd = idr_find(&nbd_index_idr, index); 2444 if (nbd) { 2445 ret = populate_nbd_status(nbd, reply); 2446 if (ret) { 2447 nlmsg_free(reply); 2448 goto out; 2449 } 2450 } 2451 } 2452 nla_nest_end(reply, dev_list); 2453 genlmsg_end(reply, reply_head); 2454 ret = genlmsg_reply(reply, info); 2455 out: 2456 mutex_unlock(&nbd_index_mutex); 2457 return ret; 2458 } 2459 2460 static void nbd_connect_reply(struct genl_info *info, int index) 2461 { 2462 struct sk_buff *skb; 2463 void *msg_head; 2464 int ret; 2465 2466 skb = genlmsg_new(nla_total_size(sizeof(u32)), GFP_KERNEL); 2467 if (!skb) 2468 return; 2469 msg_head = genlmsg_put_reply(skb, info, &nbd_genl_family, 0, 2470 NBD_CMD_CONNECT); 2471 if (!msg_head) { 2472 nlmsg_free(skb); 2473 return; 2474 } 2475 ret = nla_put_u32(skb, NBD_ATTR_INDEX, index); 2476 if (ret) { 2477 nlmsg_free(skb); 2478 return; 2479 } 2480 genlmsg_end(skb, msg_head); 2481 genlmsg_reply(skb, info); 2482 } 2483 2484 static void nbd_mcast_index(int index) 2485 { 2486 struct sk_buff *skb; 2487 void *msg_head; 2488 int ret; 2489 2490 skb = genlmsg_new(nla_total_size(sizeof(u32)), GFP_KERNEL); 2491 if (!skb) 2492 return; 2493 msg_head = genlmsg_put(skb, 0, 0, &nbd_genl_family, 0, 2494 NBD_CMD_LINK_DEAD); 2495 if (!msg_head) { 2496 nlmsg_free(skb); 2497 return; 2498 } 2499 ret = nla_put_u32(skb, NBD_ATTR_INDEX, index); 2500 if (ret) { 2501 nlmsg_free(skb); 2502 return; 2503 } 2504 genlmsg_end(skb, msg_head); 2505 genlmsg_multicast(&nbd_genl_family, skb, 0, 0, GFP_KERNEL); 2506 } 2507 2508 static void nbd_dead_link_work(struct work_struct *work) 2509 { 2510 struct link_dead_args *args = container_of(work, struct link_dead_args, 2511 work); 2512 nbd_mcast_index(args->index); 2513 kfree(args); 2514 } 2515 2516 static int __init nbd_init(void) 2517 { 2518 int i; 2519 2520 BUILD_BUG_ON(sizeof(struct nbd_request) != 28); 2521 2522 if (max_part < 0) { 2523 pr_err("max_part must be >= 0\n"); 2524 return -EINVAL; 2525 } 2526 2527 part_shift = 0; 2528 if (max_part > 0) { 2529 part_shift = fls(max_part); 2530 2531 /* 2532 * Adjust max_part according to part_shift as it is exported 2533 * to user space so that user can know the max number of 2534 * partition kernel should be able to manage. 2535 * 2536 * Note that -1 is required because partition 0 is reserved 2537 * for the whole disk. 2538 */ 2539 max_part = (1UL << part_shift) - 1; 2540 } 2541 2542 if ((1UL << part_shift) > DISK_MAX_PARTS) 2543 return -EINVAL; 2544 2545 if (nbds_max > 1UL << (MINORBITS - part_shift)) 2546 return -EINVAL; 2547 2548 if (register_blkdev(NBD_MAJOR, "nbd")) 2549 return -EIO; 2550 2551 nbd_del_wq = alloc_workqueue("nbd-del", WQ_UNBOUND, 0); 2552 if (!nbd_del_wq) { 2553 unregister_blkdev(NBD_MAJOR, "nbd"); 2554 return -ENOMEM; 2555 } 2556 2557 if (genl_register_family(&nbd_genl_family)) { 2558 destroy_workqueue(nbd_del_wq); 2559 unregister_blkdev(NBD_MAJOR, "nbd"); 2560 return -EINVAL; 2561 } 2562 nbd_dbg_init(); 2563 2564 for (i = 0; i < nbds_max; i++) 2565 nbd_dev_add(i, 1); 2566 return 0; 2567 } 2568 2569 static int nbd_exit_cb(int id, void *ptr, void *data) 2570 { 2571 struct list_head *list = (struct list_head *)data; 2572 struct nbd_device *nbd = ptr; 2573 2574 /* Skip nbd that is being removed asynchronously */ 2575 if (refcount_read(&nbd->refs)) 2576 list_add_tail(&nbd->list, list); 2577 2578 return 0; 2579 } 2580 2581 static void __exit nbd_cleanup(void) 2582 { 2583 struct nbd_device *nbd; 2584 LIST_HEAD(del_list); 2585 2586 /* 2587 * Unregister netlink interface prior to waiting 2588 * for the completion of netlink commands. 2589 */ 2590 genl_unregister_family(&nbd_genl_family); 2591 2592 nbd_dbg_close(); 2593 2594 mutex_lock(&nbd_index_mutex); 2595 idr_for_each(&nbd_index_idr, &nbd_exit_cb, &del_list); 2596 mutex_unlock(&nbd_index_mutex); 2597 2598 while (!list_empty(&del_list)) { 2599 nbd = list_first_entry(&del_list, struct nbd_device, list); 2600 list_del_init(&nbd->list); 2601 if (refcount_read(&nbd->config_refs)) 2602 pr_err("possibly leaking nbd_config (ref %d)\n", 2603 refcount_read(&nbd->config_refs)); 2604 if (refcount_read(&nbd->refs) != 1) 2605 pr_err("possibly leaking a device\n"); 2606 nbd_put(nbd); 2607 } 2608 2609 /* Also wait for nbd_dev_remove_work() completes */ 2610 destroy_workqueue(nbd_del_wq); 2611 2612 idr_destroy(&nbd_index_idr); 2613 unregister_blkdev(NBD_MAJOR, "nbd"); 2614 } 2615 2616 module_init(nbd_init); 2617 module_exit(nbd_cleanup); 2618 2619 MODULE_DESCRIPTION("Network Block Device"); 2620 MODULE_LICENSE("GPL"); 2621 2622 module_param(nbds_max, int, 0444); 2623 MODULE_PARM_DESC(nbds_max, "number of network block devices to initialize (default: 16)"); 2624 module_param(max_part, int, 0444); 2625 MODULE_PARM_DESC(max_part, "number of partitions per device (default: 16)"); 2626