/*
 * Network block device - make block devices work over TCP
 *
 * Note that you can not swap over this thing, yet. Seems to work but
 * deadlocks sometimes - you can not swap over TCP in general.
 *
 * Copyright 1997-2000, 2008 Pavel Machek <pavel@ucw.cz>
 * Parts copyright 2001 Steven Whitehouse <steve@chygwyn.com>
 *
 * This file is released under GPLv2 or later.
 *
 * (part of code stolen from loop.c)
 */

#include <linux/major.h>

#include <linux/blkdev.h>
#include <linux/module.h>
#include <linux/init.h>
#include <linux/sched.h>
#include <linux/fs.h>
#include <linux/bio.h>
#include <linux/stat.h>
#include <linux/errno.h>
#include <linux/file.h>
#include <linux/ioctl.h>
#include <linux/mutex.h>
#include <linux/compiler.h>
#include <linux/err.h>
#include <linux/kernel.h>
#include <linux/slab.h>
#include <net/sock.h>
#include <linux/net.h>
#include <linux/kthread.h>
#include <linux/types.h>
#include <linux/debugfs.h>
#include <linux/blk-mq.h>

#include <linux/uaccess.h>
#include <asm/types.h>

#include <linux/nbd.h>

static DEFINE_IDR(nbd_index_idr);
static DEFINE_MUTEX(nbd_index_mutex);

struct nbd_sock {
	struct socket *sock;
	struct mutex tx_lock;
};

#define NBD_TIMEDOUT			0
#define NBD_DISCONNECT_REQUESTED	1
#define NBD_DISCONNECTED		2
#define NBD_RUNNING			3

struct nbd_device {
	u32 flags;
	unsigned long runtime_flags;
	struct nbd_sock **socks;
	int magic;

	struct blk_mq_tag_set tag_set;

	struct mutex config_lock;
	struct gendisk *disk;
	int num_connections;
	atomic_t recv_threads;
	wait_queue_head_t recv_wq;
	loff_t blksize;
	loff_t bytesize;

	struct task_struct *task_recv;
	struct task_struct *task_setup;

#if IS_ENABLED(CONFIG_DEBUG_FS)
	struct dentry *dbg_dir;
#endif
};

struct nbd_cmd {
	struct nbd_device *nbd;
	struct completion send_complete;
};

#if IS_ENABLED(CONFIG_DEBUG_FS)
static struct dentry *nbd_dbg_dir;
#endif

#define nbd_name(nbd) ((nbd)->disk->disk_name)

#define NBD_MAGIC 0x68797548

static unsigned int nbds_max = 16;
static int max_part;
static struct workqueue_struct *recv_workqueue;
static int part_shift;

static int nbd_dev_dbg_init(struct nbd_device *nbd);
static void nbd_dev_dbg_close(struct nbd_device *nbd);


static inline struct device *nbd_to_dev(struct nbd_device *nbd)
{
	return disk_to_dev(nbd->disk);
}

static bool nbd_is_connected(struct nbd_device *nbd)
{
	return !!nbd->task_recv;
}

static const char *nbdcmd_to_ascii(int cmd)
{
	switch (cmd) {
	case NBD_CMD_READ: return "read";
	case NBD_CMD_WRITE: return "write";
	case NBD_CMD_DISC: return "disconnect";
	case NBD_CMD_FLUSH: return "flush";
	case NBD_CMD_TRIM: return "trim/discard";
	}
	return "invalid";
}

static int nbd_size_clear(struct nbd_device *nbd, struct block_device *bdev)
{
	bd_set_size(bdev, 0);
	set_capacity(nbd->disk, 0);
	kobject_uevent(&nbd_to_dev(nbd)->kobj, KOBJ_CHANGE);

	return 0;
}

static void nbd_size_update(struct nbd_device *nbd, struct block_device *bdev)
{
	blk_queue_logical_block_size(nbd->disk->queue, nbd->blksize);
	blk_queue_physical_block_size(nbd->disk->queue, nbd->blksize);
	bd_set_size(bdev, nbd->bytesize);
	set_capacity(nbd->disk, nbd->bytesize >> 9);
	kobject_uevent(&nbd_to_dev(nbd)->kobj, KOBJ_CHANGE);
}
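
/*
 * Size bookkeeping: nbd->bytesize is always blksize * nr_blocks, while the
 * gendisk capacity is kept in 512-byte sectors (hence the >> 9 above).
 * For example, NBD_SET_BLKSIZE 4096 followed by NBD_SET_SIZE_BLOCKS 262144
 * describes a 1 GiB export.
 */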
static void nbd_size_set(struct nbd_device *nbd, struct block_device *bdev,
			 loff_t blocksize, loff_t nr_blocks)
{
	nbd->blksize = blocksize;
	nbd->bytesize = blocksize * nr_blocks;
	if (nbd_is_connected(nbd))
		nbd_size_update(nbd, bdev);
}

static void nbd_end_request(struct nbd_cmd *cmd)
{
	struct nbd_device *nbd = cmd->nbd;
	struct request *req = blk_mq_rq_from_pdu(cmd);
	int error = req->errors ? -EIO : 0;

	dev_dbg(nbd_to_dev(nbd), "request %p: %s\n", cmd,
		error ? "failed" : "done");

	blk_mq_complete_request(req, error);
}

/*
 * Forcibly shut down the socket, causing all listeners to error.
 */
static void sock_shutdown(struct nbd_device *nbd)
{
	int i;

	if (nbd->num_connections == 0)
		return;
	if (test_and_set_bit(NBD_DISCONNECTED, &nbd->runtime_flags))
		return;

	for (i = 0; i < nbd->num_connections; i++) {
		struct nbd_sock *nsock = nbd->socks[i];
		mutex_lock(&nsock->tx_lock);
		kernel_sock_shutdown(nsock->sock, SHUT_RDWR);
		mutex_unlock(&nsock->tx_lock);
	}
	dev_warn(disk_to_dev(nbd->disk), "shutting down sockets\n");
}

static enum blk_eh_timer_return nbd_xmit_timeout(struct request *req,
						 bool reserved)
{
	struct nbd_cmd *cmd = blk_mq_rq_to_pdu(req);
	struct nbd_device *nbd = cmd->nbd;

	dev_err(nbd_to_dev(nbd), "Connection timed out, shutting down connection\n");
	set_bit(NBD_TIMEDOUT, &nbd->runtime_flags);
	req->errors++;

	mutex_lock(&nbd->config_lock);
	sock_shutdown(nbd);
	mutex_unlock(&nbd->config_lock);
	return BLK_EH_HANDLED;
}

/*
 * Send or receive packet.
 */
static int sock_xmit(struct nbd_device *nbd, int index, int send, void *buf,
		     int size, int msg_flags)
{
	struct socket *sock = nbd->socks[index]->sock;
	int result;
	struct msghdr msg;
	struct kvec iov;
	unsigned long pflags = current->flags;

	if (unlikely(!sock)) {
		dev_err_ratelimited(disk_to_dev(nbd->disk),
			"Attempted %s on closed socket in sock_xmit\n",
			(send ? "send" : "recv"));
		return -EINVAL;
	}

	current->flags |= PF_MEMALLOC;
	do {
		sock->sk->sk_allocation = GFP_NOIO | __GFP_MEMALLOC;
		iov.iov_base = buf;
		iov.iov_len = size;
		msg.msg_name = NULL;
		msg.msg_namelen = 0;
		msg.msg_control = NULL;
		msg.msg_controllen = 0;
		msg.msg_flags = msg_flags | MSG_NOSIGNAL;

		if (send)
			result = kernel_sendmsg(sock, &msg, &iov, 1, size);
		else
			result = kernel_recvmsg(sock, &msg, &iov, 1, size,
						msg.msg_flags);

		if (result <= 0) {
			if (result == 0)
				result = -EPIPE; /* short read */
			break;
		}
		size -= result;
		buf += result;
	} while (size > 0);

	tsk_restore_flags(current, pflags, PF_MEMALLOC);

	return result;
}

static inline int sock_send_bvec(struct nbd_device *nbd, int index,
				 struct bio_vec *bvec, int flags)
{
	int result;
	void *kaddr = kmap(bvec->bv_page);
	result = sock_xmit(nbd, index, 1, kaddr + bvec->bv_offset,
			   bvec->bv_len, flags);
	kunmap(bvec->bv_page);
	return result;
}
/* always call with the tx_lock held */
static int nbd_send_cmd(struct nbd_device *nbd, struct nbd_cmd *cmd, int index)
{
	struct request *req = blk_mq_rq_from_pdu(cmd);
	int result;
	struct nbd_request request;
	unsigned long size = blk_rq_bytes(req);
	struct bio *bio;
	u32 type;
	u32 tag = blk_mq_unique_tag(req);

	switch (req_op(req)) {
	case REQ_OP_DISCARD:
		type = NBD_CMD_TRIM;
		break;
	case REQ_OP_FLUSH:
		type = NBD_CMD_FLUSH;
		break;
	case REQ_OP_WRITE:
		type = NBD_CMD_WRITE;
		break;
	case REQ_OP_READ:
		type = NBD_CMD_READ;
		break;
	default:
		return -EIO;
	}

	if (rq_data_dir(req) == WRITE &&
	    (nbd->flags & NBD_FLAG_READ_ONLY)) {
		dev_err_ratelimited(disk_to_dev(nbd->disk),
				    "Write on read-only\n");
		return -EIO;
	}

	memset(&request, 0, sizeof(request));
	request.magic = htonl(NBD_REQUEST_MAGIC);
	request.type = htonl(type);
	if (type != NBD_CMD_FLUSH) {
		request.from = cpu_to_be64((u64)blk_rq_pos(req) << 9);
		request.len = htonl(size);
	}
	memcpy(request.handle, &tag, sizeof(tag));

	dev_dbg(nbd_to_dev(nbd), "request %p: sending control (%s@%llu,%uB)\n",
		cmd, nbdcmd_to_ascii(type),
		(unsigned long long)blk_rq_pos(req) << 9, blk_rq_bytes(req));
	result = sock_xmit(nbd, index, 1, &request, sizeof(request),
			   (type == NBD_CMD_WRITE) ? MSG_MORE : 0);
	if (result <= 0) {
		dev_err_ratelimited(disk_to_dev(nbd->disk),
				    "Send control failed (result %d)\n", result);
		return -EIO;
	}

	if (type != NBD_CMD_WRITE)
		return 0;

	bio = req->bio;
	while (bio) {
		struct bio *next = bio->bi_next;
		struct bvec_iter iter;
		struct bio_vec bvec;

		bio_for_each_segment(bvec, bio, iter) {
			bool is_last = !next && bio_iter_last(bvec, iter);
			int flags = is_last ? 0 : MSG_MORE;

			dev_dbg(nbd_to_dev(nbd), "request %p: sending %d bytes data\n",
				cmd, bvec.bv_len);
			result = sock_send_bvec(nbd, index, &bvec, flags);
			if (result <= 0) {
				dev_err(disk_to_dev(nbd->disk),
					"Send data failed (result %d)\n",
					result);
				return -EIO;
			}
			/*
			 * The completion might already have come in,
			 * so break for the last one instead of letting
			 * the iterator do it. This prevents use-after-free
			 * of the bio.
			 */
			if (is_last)
				break;
		}
		bio = next;
	}
	return 0;
}

static inline int sock_recv_bvec(struct nbd_device *nbd, int index,
				 struct bio_vec *bvec)
{
	int result;
	void *kaddr = kmap(bvec->bv_page);
	result = sock_xmit(nbd, index, 0, kaddr + bvec->bv_offset,
			   bvec->bv_len, MSG_WAITALL);
	kunmap(bvec->bv_page);
	return result;
}

/* An ERR_PTR return means something went wrong; inform userspace */
static struct nbd_cmd *nbd_read_stat(struct nbd_device *nbd, int index)
{
	int result;
	struct nbd_reply reply;
	struct nbd_cmd *cmd;
	struct request *req = NULL;
	u16 hwq;
	u32 tag;

	reply.magic = 0;
	result = sock_xmit(nbd, index, 0, &reply, sizeof(reply), MSG_WAITALL);
	if (result <= 0) {
		if (!test_bit(NBD_DISCONNECTED, &nbd->runtime_flags) &&
		    !test_bit(NBD_DISCONNECT_REQUESTED, &nbd->runtime_flags))
			dev_err(disk_to_dev(nbd->disk),
				"Receive control failed (result %d)\n", result);
		return ERR_PTR(result);
	}

	if (ntohl(reply.magic) != NBD_REPLY_MAGIC) {
		dev_err(disk_to_dev(nbd->disk), "Wrong magic (0x%lx)\n",
			(unsigned long)ntohl(reply.magic));
		return ERR_PTR(-EPROTO);
	}

	memcpy(&tag, reply.handle, sizeof(u32));

	hwq = blk_mq_unique_tag_to_hwq(tag);
	if (hwq < nbd->tag_set.nr_hw_queues)
		req = blk_mq_tag_to_rq(nbd->tag_set.tags[hwq],
				       blk_mq_unique_tag_to_tag(tag));
	if (!req || !blk_mq_request_started(req)) {
		dev_err(disk_to_dev(nbd->disk), "Unexpected reply (%d) %p\n",
			tag, req);
		return ERR_PTR(-ENOENT);
	}
	cmd = blk_mq_rq_to_pdu(req);
	if (ntohl(reply.error)) {
		dev_err(disk_to_dev(nbd->disk), "Other side returned error (%d)\n",
			ntohl(reply.error));
		req->errors++;
		return cmd;
	}

	dev_dbg(nbd_to_dev(nbd), "request %p: got reply\n", cmd);
	if (rq_data_dir(req) != WRITE) {
		struct req_iterator iter;
		struct bio_vec bvec;

		rq_for_each_segment(bvec, req, iter) {
			result = sock_recv_bvec(nbd, index, &bvec);
			if (result <= 0) {
				dev_err(disk_to_dev(nbd->disk), "Receive data failed (result %d)\n",
					result);
				req->errors++;
				return cmd;
			}
			dev_dbg(nbd_to_dev(nbd), "request %p: got %d bytes data\n",
				cmd, bvec.bv_len);
		}
	} else {
		/* See the comment in nbd_queue_rq. */
		wait_for_completion(&cmd->send_complete);
	}
	return cmd;
}

static ssize_t pid_show(struct device *dev,
			struct device_attribute *attr, char *buf)
{
	struct gendisk *disk = dev_to_disk(dev);
	struct nbd_device *nbd = (struct nbd_device *)disk->private_data;

	return sprintf(buf, "%d\n", task_pid_nr(nbd->task_recv));
}

static struct device_attribute pid_attr = {
	.attr = { .name = "pid", .mode = S_IRUGO},
	.show = pid_show,
};

struct recv_thread_args {
	struct work_struct work;
	struct nbd_device *nbd;
	int index;
};
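
/*
 * One recv_work instance runs per connection on recv_workqueue.  Each loops
 * in nbd_read_stat() completing requests until its socket errors out; every
 * worker decrements nbd->recv_threads and wakes nbd->recv_wq on exit, and
 * NBD_DO_IT returns once the count reaches zero.
 */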
static void recv_work(struct work_struct *work)
{
	struct recv_thread_args *args = container_of(work,
						     struct recv_thread_args,
						     work);
	struct nbd_device *nbd = args->nbd;
	struct nbd_cmd *cmd;
	int ret = 0;

	BUG_ON(nbd->magic != NBD_MAGIC);
	while (1) {
		cmd = nbd_read_stat(nbd, args->index);
		if (IS_ERR(cmd)) {
			ret = PTR_ERR(cmd);
			break;
		}

		nbd_end_request(cmd);
	}

	/*
	 * We got an error, shut everybody down if this wasn't the result of a
	 * disconnect request.
	 */
	if (ret && !test_bit(NBD_DISCONNECT_REQUESTED, &nbd->runtime_flags))
		sock_shutdown(nbd);
	atomic_dec(&nbd->recv_threads);
	wake_up(&nbd->recv_wq);
}

static void nbd_clear_req(struct request *req, void *data, bool reserved)
{
	struct nbd_cmd *cmd;

	if (!blk_mq_request_started(req))
		return;
	cmd = blk_mq_rq_to_pdu(req);
	req->errors++;
	nbd_end_request(cmd);
}

static void nbd_clear_que(struct nbd_device *nbd)
{
	BUG_ON(nbd->magic != NBD_MAGIC);

	blk_mq_tagset_busy_iter(&nbd->tag_set, nbd_clear_req, NULL);
	dev_dbg(disk_to_dev(nbd->disk), "queue cleared\n");
}
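
/*
 * The index passed in here is the blk-mq hardware queue number; once
 * NBD_DO_IT resizes the tag set to one hardware queue per connection it
 * doubles as an index into nbd->socks[].  All sends on a socket are
 * serialized by that socket's tx_lock.
 */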
static void nbd_handle_cmd(struct nbd_cmd *cmd, int index)
{
	struct request *req = blk_mq_rq_from_pdu(cmd);
	struct nbd_device *nbd = cmd->nbd;
	struct nbd_sock *nsock;

	if (index >= nbd->num_connections) {
		dev_err_ratelimited(disk_to_dev(nbd->disk),
				    "Attempted send on invalid socket\n");
		goto error_out;
	}

	if (test_bit(NBD_DISCONNECTED, &nbd->runtime_flags)) {
		dev_err_ratelimited(disk_to_dev(nbd->disk),
				    "Attempted send on closed socket\n");
		goto error_out;
	}

	req->errors = 0;

	nsock = nbd->socks[index];
	mutex_lock(&nsock->tx_lock);
	if (unlikely(!nsock->sock)) {
		mutex_unlock(&nsock->tx_lock);
		dev_err_ratelimited(disk_to_dev(nbd->disk),
				    "Attempted send on closed socket\n");
		goto error_out;
	}

	if (nbd_send_cmd(nbd, cmd, index) != 0) {
		dev_err_ratelimited(disk_to_dev(nbd->disk),
				    "Request send failed\n");
		req->errors++;
		nbd_end_request(cmd);
	}

	mutex_unlock(&nsock->tx_lock);

	return;

error_out:
	req->errors++;
	nbd_end_request(cmd);
}

static int nbd_queue_rq(struct blk_mq_hw_ctx *hctx,
			const struct blk_mq_queue_data *bd)
{
	struct nbd_cmd *cmd = blk_mq_rq_to_pdu(bd->rq);

	/*
	 * Since we look at the bio's to send the request over the network we
	 * need to make sure the completion work doesn't mark this request done
	 * before we are done doing our send. This keeps us from dereferencing
	 * freed data if we have particularly fast completions (ie we get the
	 * completion before we exit sock_xmit on the last bvec) or in the case
	 * that the server is misbehaving (or there was an error) before we're
	 * done sending everything over the wire.
	 */
	init_completion(&cmd->send_complete);
	blk_mq_start_request(bd->rq);
	nbd_handle_cmd(cmd, hctx->queue_num);
	complete(&cmd->send_complete);

	return BLK_MQ_RQ_QUEUE_OK;
}

static int nbd_add_socket(struct nbd_device *nbd, struct block_device *bdev,
			  unsigned long arg)
{
	struct socket *sock;
	struct nbd_sock **socks;
	struct nbd_sock *nsock;
	int err;

	sock = sockfd_lookup(arg, &err);
	if (!sock)
		return err;

	if (!nbd->task_setup)
		nbd->task_setup = current;
	if (nbd->task_setup != current) {
		dev_err(disk_to_dev(nbd->disk),
			"Device being setup by another task");
		return -EINVAL;
	}

	socks = krealloc(nbd->socks, (nbd->num_connections + 1) *
			 sizeof(struct nbd_sock *), GFP_KERNEL);
	if (!socks)
		return -ENOMEM;
	nsock = kzalloc(sizeof(struct nbd_sock), GFP_KERNEL);
	if (!nsock)
		return -ENOMEM;

	nbd->socks = socks;

	mutex_init(&nsock->tx_lock);
	nsock->sock = sock;
	socks[nbd->num_connections++] = nsock;

	if (max_part)
		bdev->bd_invalidated = 1;
	return 0;
}

/* Reset all properties of an NBD device */
static void nbd_reset(struct nbd_device *nbd)
{
	nbd->runtime_flags = 0;
	nbd->blksize = 1024;
	nbd->bytesize = 0;
	set_capacity(nbd->disk, 0);
	nbd->flags = 0;
	nbd->tag_set.timeout = 0;
	queue_flag_clear_unlocked(QUEUE_FLAG_DISCARD, nbd->disk->queue);
}

static void nbd_bdev_reset(struct block_device *bdev)
{
	set_device_ro(bdev, false);
	bdev->bd_inode->i_size = 0;
	if (max_part > 0) {
		blkdev_reread_part(bdev);
		bdev->bd_invalidated = 1;
	}
}

static void nbd_parse_flags(struct nbd_device *nbd, struct block_device *bdev)
{
	if (nbd->flags & NBD_FLAG_READ_ONLY)
		set_device_ro(bdev, true);
	if (nbd->flags & NBD_FLAG_SEND_TRIM)
		queue_flag_set_unlocked(QUEUE_FLAG_DISCARD, nbd->disk->queue);
	if (nbd->flags & NBD_FLAG_SEND_FLUSH)
		blk_queue_write_cache(nbd->disk->queue, true, false);
	else
		blk_queue_write_cache(nbd->disk->queue, false, false);
}

static void send_disconnects(struct nbd_device *nbd)
{
	struct nbd_request request = {};
	int i, ret;

	request.magic = htonl(NBD_REQUEST_MAGIC);
	request.type = htonl(NBD_CMD_DISC);

	for (i = 0; i < nbd->num_connections; i++) {
		ret = sock_xmit(nbd, i, 1, &request, sizeof(request), 0);
		if (ret <= 0)
			dev_err(disk_to_dev(nbd->disk),
				"Send disconnect failed %d\n", ret);
	}
}

static int nbd_disconnect(struct nbd_device *nbd, struct block_device *bdev)
{
	dev_info(disk_to_dev(nbd->disk), "NBD_DISCONNECT\n");
	if (!nbd->socks)
		return -EINVAL;

	mutex_unlock(&nbd->config_lock);
	fsync_bdev(bdev);
	mutex_lock(&nbd->config_lock);

	/* Check again after getting mutex back. */
	if (!nbd->socks)
		return -EINVAL;

	if (!test_and_set_bit(NBD_DISCONNECT_REQUESTED,
			      &nbd->runtime_flags))
		send_disconnects(nbd);
	return 0;
}

static int nbd_clear_sock(struct nbd_device *nbd, struct block_device *bdev)
{
	sock_shutdown(nbd);
	nbd_clear_que(nbd);
	kill_bdev(bdev);
	nbd_bdev_reset(bdev);
	/*
	 * We want to give the run thread a chance to wait for everybody
	 * to clean up and then do its own cleanup.
	 */
	if (!test_bit(NBD_RUNNING, &nbd->runtime_flags) &&
	    nbd->num_connections) {
		int i;

		for (i = 0; i < nbd->num_connections; i++)
			kfree(nbd->socks[i]);
		kfree(nbd->socks);
		nbd->socks = NULL;
		nbd->num_connections = 0;
	}
	nbd->task_setup = NULL;

	return 0;
}

static int nbd_start_device(struct nbd_device *nbd, struct block_device *bdev)
{
	struct recv_thread_args *args;
	int num_connections = nbd->num_connections;
	int error = 0, i;

	if (nbd->task_recv)
		return -EBUSY;
	if (!nbd->socks)
		return -EINVAL;
	if (num_connections > 1 &&
	    !(nbd->flags & NBD_FLAG_CAN_MULTI_CONN)) {
		dev_err(disk_to_dev(nbd->disk), "server does not support multiple connections per device.\n");
		error = -EINVAL;
		goto out_err;
	}

	set_bit(NBD_RUNNING, &nbd->runtime_flags);
	blk_mq_update_nr_hw_queues(&nbd->tag_set, nbd->num_connections);
	args = kcalloc(num_connections, sizeof(*args), GFP_KERNEL);
	if (!args) {
		error = -ENOMEM;
		goto out_err;
	}
	nbd->task_recv = current;
	mutex_unlock(&nbd->config_lock);

	nbd_parse_flags(nbd, bdev);

	error = device_create_file(disk_to_dev(nbd->disk), &pid_attr);
	if (error) {
		dev_err(disk_to_dev(nbd->disk), "device_create_file failed!\n");
		goto out_recv;
	}

	nbd_size_update(nbd, bdev);

	nbd_dev_dbg_init(nbd);
	for (i = 0; i < num_connections; i++) {
		sk_set_memalloc(nbd->socks[i]->sock->sk);
		atomic_inc(&nbd->recv_threads);
		INIT_WORK(&args[i].work, recv_work);
		args[i].nbd = nbd;
		args[i].index = i;
		queue_work(recv_workqueue, &args[i].work);
	}
	wait_event_interruptible(nbd->recv_wq,
				 atomic_read(&nbd->recv_threads) == 0);
	for (i = 0; i < num_connections; i++)
		flush_work(&args[i].work);
	nbd_dev_dbg_close(nbd);
	nbd_size_clear(nbd, bdev);
	device_remove_file(disk_to_dev(nbd->disk), &pid_attr);
out_recv:
	mutex_lock(&nbd->config_lock);
	nbd->task_recv = NULL;
out_err:
	clear_bit(NBD_RUNNING, &nbd->runtime_flags);
	nbd_clear_sock(nbd, bdev);

	/* user requested, ignore socket errors */
	if (test_bit(NBD_DISCONNECT_REQUESTED, &nbd->runtime_flags))
		error = 0;
	if (test_bit(NBD_TIMEDOUT, &nbd->runtime_flags))
		error = -ETIMEDOUT;

	nbd_reset(nbd);
	return error;
}
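
/*
 * Typical userspace setup (e.g. what an nbd client does) is roughly:
 * NBD_SET_SOCK for each connection, optionally NBD_SET_BLKSIZE /
 * NBD_SET_SIZE(_BLOCKS) / NBD_SET_FLAGS / NBD_SET_TIMEOUT, then NBD_DO_IT,
 * which blocks in nbd_start_device() until the connections die or
 * NBD_DISCONNECT is issued from another task.
 */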
/* Must be called with config_lock held */
static int __nbd_ioctl(struct block_device *bdev, struct nbd_device *nbd,
		       unsigned int cmd, unsigned long arg)
{
	switch (cmd) {
	case NBD_DISCONNECT:
		return nbd_disconnect(nbd, bdev);
	case NBD_CLEAR_SOCK:
		return nbd_clear_sock(nbd, bdev);
	case NBD_SET_SOCK:
		return nbd_add_socket(nbd, bdev, arg);
	case NBD_SET_BLKSIZE:
		nbd_size_set(nbd, bdev, arg,
			     div_s64(nbd->bytesize, arg));
		return 0;
	case NBD_SET_SIZE:
		nbd_size_set(nbd, bdev, nbd->blksize,
			     div_s64(arg, nbd->blksize));
		return 0;
	case NBD_SET_SIZE_BLOCKS:
		nbd_size_set(nbd, bdev, nbd->blksize, arg);
		return 0;
	case NBD_SET_TIMEOUT:
		nbd->tag_set.timeout = arg * HZ;
		return 0;

	case NBD_SET_FLAGS:
		nbd->flags = arg;
		return 0;
	case NBD_DO_IT:
		return nbd_start_device(nbd, bdev);
	case NBD_CLEAR_QUE:
		/*
		 * This is for compatibility only. The queue is always cleared
		 * by NBD_DO_IT or NBD_CLEAR_SOCK.
		 */
		return 0;
	case NBD_PRINT_DEBUG:
		/*
		 * For compatibility only, we no longer keep a list of
		 * outstanding requests.
		 */
		return 0;
	}
	return -ENOTTY;
}

static int nbd_ioctl(struct block_device *bdev, fmode_t mode,
		     unsigned int cmd, unsigned long arg)
{
	struct nbd_device *nbd = bdev->bd_disk->private_data;
	int error;

	if (!capable(CAP_SYS_ADMIN))
		return -EPERM;

	BUG_ON(nbd->magic != NBD_MAGIC);

	mutex_lock(&nbd->config_lock);
	error = __nbd_ioctl(bdev, nbd, cmd, arg);
	mutex_unlock(&nbd->config_lock);

	return error;
}

static const struct block_device_operations nbd_fops =
{
	.owner =	THIS_MODULE,
	.ioctl =	nbd_ioctl,
	.compat_ioctl =	nbd_ioctl,
};
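
/*
 * Debugfs layout (assuming debugfs is mounted at the usual /sys/kernel/debug):
 * nbd/<disk_name>/ holds read-only "tasks", "size_bytes", "timeout",
 * "blocksize" and "flags" entries for each connected device.
 */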
#if IS_ENABLED(CONFIG_DEBUG_FS)

static int nbd_dbg_tasks_show(struct seq_file *s, void *unused)
{
	struct nbd_device *nbd = s->private;

	if (nbd->task_recv)
		seq_printf(s, "recv: %d\n", task_pid_nr(nbd->task_recv));

	return 0;
}

static int nbd_dbg_tasks_open(struct inode *inode, struct file *file)
{
	return single_open(file, nbd_dbg_tasks_show, inode->i_private);
}

static const struct file_operations nbd_dbg_tasks_ops = {
	.open = nbd_dbg_tasks_open,
	.read = seq_read,
	.llseek = seq_lseek,
	.release = single_release,
};

static int nbd_dbg_flags_show(struct seq_file *s, void *unused)
{
	struct nbd_device *nbd = s->private;
	u32 flags = nbd->flags;

	seq_printf(s, "Hex: 0x%08x\n\n", flags);

	seq_puts(s, "Known flags:\n");

	if (flags & NBD_FLAG_HAS_FLAGS)
		seq_puts(s, "NBD_FLAG_HAS_FLAGS\n");
	if (flags & NBD_FLAG_READ_ONLY)
		seq_puts(s, "NBD_FLAG_READ_ONLY\n");
	if (flags & NBD_FLAG_SEND_FLUSH)
		seq_puts(s, "NBD_FLAG_SEND_FLUSH\n");
	if (flags & NBD_FLAG_SEND_TRIM)
		seq_puts(s, "NBD_FLAG_SEND_TRIM\n");

	return 0;
}

static int nbd_dbg_flags_open(struct inode *inode, struct file *file)
{
	return single_open(file, nbd_dbg_flags_show, inode->i_private);
}

static const struct file_operations nbd_dbg_flags_ops = {
	.open = nbd_dbg_flags_open,
	.read = seq_read,
	.llseek = seq_lseek,
	.release = single_release,
};

static int nbd_dev_dbg_init(struct nbd_device *nbd)
{
	struct dentry *dir;

	if (!nbd_dbg_dir)
		return -EIO;

	dir = debugfs_create_dir(nbd_name(nbd), nbd_dbg_dir);
	if (!dir) {
		dev_err(nbd_to_dev(nbd), "Failed to create debugfs dir for '%s'\n",
			nbd_name(nbd));
		return -EIO;
	}
	nbd->dbg_dir = dir;

	debugfs_create_file("tasks", 0444, dir, nbd, &nbd_dbg_tasks_ops);
	debugfs_create_u64("size_bytes", 0444, dir, &nbd->bytesize);
	debugfs_create_u32("timeout", 0444, dir, &nbd->tag_set.timeout);
	debugfs_create_u64("blocksize", 0444, dir, &nbd->blksize);
	debugfs_create_file("flags", 0444, dir, nbd, &nbd_dbg_flags_ops);

	return 0;
}

static void nbd_dev_dbg_close(struct nbd_device *nbd)
{
	debugfs_remove_recursive(nbd->dbg_dir);
}

static int nbd_dbg_init(void)
{
	struct dentry *dbg_dir;

	dbg_dir = debugfs_create_dir("nbd", NULL);
	if (!dbg_dir)
		return -EIO;

	nbd_dbg_dir = dbg_dir;

	return 0;
}

static void nbd_dbg_close(void)
{
	debugfs_remove_recursive(nbd_dbg_dir);
}

#else  /* IS_ENABLED(CONFIG_DEBUG_FS) */

static int nbd_dev_dbg_init(struct nbd_device *nbd)
{
	return 0;
}

static void nbd_dev_dbg_close(struct nbd_device *nbd)
{
}

static int nbd_dbg_init(void)
{
	return 0;
}

static void nbd_dbg_close(void)
{
}

#endif

static int nbd_init_request(void *data, struct request *rq,
			    unsigned int hctx_idx, unsigned int request_idx,
			    unsigned int numa_node)
{
	struct nbd_cmd *cmd = blk_mq_rq_to_pdu(rq);
	cmd->nbd = data;
	return 0;
}

static struct blk_mq_ops nbd_mq_ops = {
	.queue_rq	= nbd_queue_rq,
	.init_request	= nbd_init_request,
	.timeout	= nbd_xmit_timeout,
};

static void nbd_dev_remove(struct nbd_device *nbd)
{
	struct gendisk *disk = nbd->disk;
	nbd->magic = 0;
	if (disk) {
		del_gendisk(disk);
		blk_cleanup_queue(disk->queue);
		blk_mq_free_tag_set(&nbd->tag_set);
		put_disk(disk);
	}
	kfree(nbd);
}
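
/*
 * Each device gets a single-queue blk-mq tag set (queue depth 128) whose
 * per-request PDU is the struct nbd_cmd above, a request queue, and a gendisk
 * named nbd<index> whose first minor is index << part_shift.
 */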
static int nbd_dev_add(int index)
{
	struct nbd_device *nbd;
	struct gendisk *disk;
	struct request_queue *q;
	int err = -ENOMEM;

	nbd = kzalloc(sizeof(struct nbd_device), GFP_KERNEL);
	if (!nbd)
		goto out;

	disk = alloc_disk(1 << part_shift);
	if (!disk)
		goto out_free_nbd;

	if (index >= 0) {
		err = idr_alloc(&nbd_index_idr, nbd, index, index + 1,
				GFP_KERNEL);
		if (err == -ENOSPC)
			err = -EEXIST;
	} else {
		err = idr_alloc(&nbd_index_idr, nbd, 0, 0, GFP_KERNEL);
		if (err >= 0)
			index = err;
	}
	if (err < 0)
		goto out_free_disk;

	nbd->disk = disk;
	nbd->tag_set.ops = &nbd_mq_ops;
	nbd->tag_set.nr_hw_queues = 1;
	nbd->tag_set.queue_depth = 128;
	nbd->tag_set.numa_node = NUMA_NO_NODE;
	nbd->tag_set.cmd_size = sizeof(struct nbd_cmd);
	nbd->tag_set.flags = BLK_MQ_F_SHOULD_MERGE |
		BLK_MQ_F_SG_MERGE | BLK_MQ_F_BLOCKING;
	nbd->tag_set.driver_data = nbd;

	err = blk_mq_alloc_tag_set(&nbd->tag_set);
	if (err)
		goto out_free_idr;

	q = blk_mq_init_queue(&nbd->tag_set);
	if (IS_ERR(q)) {
		err = PTR_ERR(q);
		goto out_free_tags;
	}
	disk->queue = q;

	/*
	 * Tell the block layer that we are not a rotational device
	 */
	queue_flag_set_unlocked(QUEUE_FLAG_NONROT, disk->queue);
	queue_flag_clear_unlocked(QUEUE_FLAG_ADD_RANDOM, disk->queue);
	disk->queue->limits.discard_granularity = 512;
	blk_queue_max_discard_sectors(disk->queue, UINT_MAX);
	disk->queue->limits.discard_zeroes_data = 0;
	blk_queue_max_hw_sectors(disk->queue, 65536);
	disk->queue->limits.max_sectors = 256;

	nbd->magic = NBD_MAGIC;
	mutex_init(&nbd->config_lock);
	disk->major = NBD_MAJOR;
	disk->first_minor = index << part_shift;
	disk->fops = &nbd_fops;
	disk->private_data = nbd;
	sprintf(disk->disk_name, "nbd%d", index);
	init_waitqueue_head(&nbd->recv_wq);
	nbd_reset(nbd);
	add_disk(disk);
	return index;

out_free_tags:
	blk_mq_free_tag_set(&nbd->tag_set);
out_free_idr:
	idr_remove(&nbd_index_idr, index);
out_free_disk:
	put_disk(disk);
out_free_nbd:
	kfree(nbd);
out:
	return err;
}

/*
 * And here should be modules and kernel interface
 *  (Just smiley confuses emacs :-)
 */
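
/*
 * Minor-number math, as a worked example: max_part=7 gives part_shift =
 * fls(7) = 3, so each device spans 1 << 3 = 8 minors (the whole disk plus
 * up to 7 partitions) and max_part is re-exported as (1 << 3) - 1 = 7.
 */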
static int __init nbd_init(void)
{
	int i;

	BUILD_BUG_ON(sizeof(struct nbd_request) != 28);

	if (max_part < 0) {
		printk(KERN_ERR "nbd: max_part must be >= 0\n");
		return -EINVAL;
	}

	part_shift = 0;
	if (max_part > 0) {
		part_shift = fls(max_part);

		/*
		 * Adjust max_part according to part_shift as it is exported
		 * to user space so that users can know the maximum number of
		 * partitions the kernel is able to manage.
		 *
		 * Note that -1 is required because partition 0 is reserved
		 * for the whole disk.
		 */
		max_part = (1UL << part_shift) - 1;
	}

	if ((1UL << part_shift) > DISK_MAX_PARTS)
		return -EINVAL;

	if (nbds_max > 1UL << (MINORBITS - part_shift))
		return -EINVAL;
	recv_workqueue = alloc_workqueue("knbd-recv",
					 WQ_MEM_RECLAIM | WQ_HIGHPRI, 0);
	if (!recv_workqueue)
		return -ENOMEM;

	if (register_blkdev(NBD_MAJOR, "nbd")) {
		destroy_workqueue(recv_workqueue);
		return -EIO;
	}

	nbd_dbg_init();

	mutex_lock(&nbd_index_mutex);
	for (i = 0; i < nbds_max; i++)
		nbd_dev_add(i);
	mutex_unlock(&nbd_index_mutex);
	return 0;
}

static int nbd_exit_cb(int id, void *ptr, void *data)
{
	struct nbd_device *nbd = ptr;
	nbd_dev_remove(nbd);
	return 0;
}

static void __exit nbd_cleanup(void)
{
	nbd_dbg_close();

	idr_for_each(&nbd_index_idr, &nbd_exit_cb, NULL);
	idr_destroy(&nbd_index_idr);
	destroy_workqueue(recv_workqueue);
	unregister_blkdev(NBD_MAJOR, "nbd");
}

module_init(nbd_init);
module_exit(nbd_cleanup);

MODULE_DESCRIPTION("Network Block Device");
MODULE_LICENSE("GPL");

module_param(nbds_max, int, 0444);
MODULE_PARM_DESC(nbds_max, "number of network block devices to initialize (default: 16)");
module_param(max_part, int, 0444);
MODULE_PARM_DESC(max_part, "number of partitions per device (default: 0)");
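
/*
 * Example (hypothetical values): "modprobe nbd nbds_max=4 max_part=7" creates
 * /dev/nbd0 .. /dev/nbd3, each with room for up to 7 partitions once a server
 * is attached via NBD_SET_SOCK and started with NBD_DO_IT.
 */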