1 /* 2 * Network block device - make block devices work over TCP 3 * 4 * Note that you can not swap over this thing, yet. Seems to work but 5 * deadlocks sometimes - you can not swap over TCP in general. 6 * 7 * Copyright 1997-2000, 2008 Pavel Machek <pavel@ucw.cz> 8 * Parts copyright 2001 Steven Whitehouse <steve@chygwyn.com> 9 * 10 * This file is released under GPLv2 or later. 11 * 12 * (part of code stolen from loop.c) 13 */ 14 15 #include <linux/major.h> 16 17 #include <linux/blkdev.h> 18 #include <linux/module.h> 19 #include <linux/init.h> 20 #include <linux/sched.h> 21 #include <linux/fs.h> 22 #include <linux/bio.h> 23 #include <linux/stat.h> 24 #include <linux/errno.h> 25 #include <linux/file.h> 26 #include <linux/ioctl.h> 27 #include <linux/mutex.h> 28 #include <linux/compiler.h> 29 #include <linux/err.h> 30 #include <linux/kernel.h> 31 #include <linux/slab.h> 32 #include <net/sock.h> 33 #include <linux/net.h> 34 #include <linux/kthread.h> 35 #include <linux/types.h> 36 #include <linux/debugfs.h> 37 #include <linux/blk-mq.h> 38 39 #include <linux/uaccess.h> 40 #include <asm/types.h> 41 42 #include <linux/nbd.h> 43 44 static DEFINE_IDR(nbd_index_idr); 45 static DEFINE_MUTEX(nbd_index_mutex); 46 47 struct nbd_sock { 48 struct socket *sock; 49 struct mutex tx_lock; 50 }; 51 52 #define NBD_TIMEDOUT 0 53 #define NBD_DISCONNECT_REQUESTED 1 54 #define NBD_DISCONNECTED 2 55 #define NBD_RUNNING 3 56 57 struct nbd_device { 58 u32 flags; 59 unsigned long runtime_flags; 60 struct nbd_sock **socks; 61 int magic; 62 63 struct blk_mq_tag_set tag_set; 64 65 struct mutex config_lock; 66 struct gendisk *disk; 67 int num_connections; 68 atomic_t recv_threads; 69 wait_queue_head_t recv_wq; 70 loff_t blksize; 71 loff_t bytesize; 72 73 struct task_struct *task_recv; 74 struct task_struct *task_setup; 75 76 #if IS_ENABLED(CONFIG_DEBUG_FS) 77 struct dentry *dbg_dir; 78 #endif 79 }; 80 81 struct nbd_cmd { 82 struct nbd_device *nbd; 83 struct completion send_complete; 84 }; 85 86 #if IS_ENABLED(CONFIG_DEBUG_FS) 87 static struct dentry *nbd_dbg_dir; 88 #endif 89 90 #define nbd_name(nbd) ((nbd)->disk->disk_name) 91 92 #define NBD_MAGIC 0x68797548 93 94 static unsigned int nbds_max = 16; 95 static int max_part; 96 static struct workqueue_struct *recv_workqueue; 97 static int part_shift; 98 99 static int nbd_dev_dbg_init(struct nbd_device *nbd); 100 static void nbd_dev_dbg_close(struct nbd_device *nbd); 101 102 103 static inline struct device *nbd_to_dev(struct nbd_device *nbd) 104 { 105 return disk_to_dev(nbd->disk); 106 } 107 108 static bool nbd_is_connected(struct nbd_device *nbd) 109 { 110 return !!nbd->task_recv; 111 } 112 113 static const char *nbdcmd_to_ascii(int cmd) 114 { 115 switch (cmd) { 116 case NBD_CMD_READ: return "read"; 117 case NBD_CMD_WRITE: return "write"; 118 case NBD_CMD_DISC: return "disconnect"; 119 case NBD_CMD_FLUSH: return "flush"; 120 case NBD_CMD_TRIM: return "trim/discard"; 121 } 122 return "invalid"; 123 } 124 125 static int nbd_size_clear(struct nbd_device *nbd, struct block_device *bdev) 126 { 127 bd_set_size(bdev, 0); 128 set_capacity(nbd->disk, 0); 129 kobject_uevent(&nbd_to_dev(nbd)->kobj, KOBJ_CHANGE); 130 131 return 0; 132 } 133 134 static void nbd_size_update(struct nbd_device *nbd, struct block_device *bdev) 135 { 136 blk_queue_logical_block_size(nbd->disk->queue, nbd->blksize); 137 blk_queue_physical_block_size(nbd->disk->queue, nbd->blksize); 138 bd_set_size(bdev, nbd->bytesize); 139 set_capacity(nbd->disk, nbd->bytesize >> 9); 140 kobject_uevent(&nbd_to_dev(nbd)->kobj, KOBJ_CHANGE); 141 } 142 143 static void nbd_size_set(struct nbd_device *nbd, struct block_device *bdev, 144 loff_t blocksize, loff_t nr_blocks) 145 { 146 nbd->blksize = blocksize; 147 nbd->bytesize = blocksize * nr_blocks; 148 if (nbd_is_connected(nbd)) 149 nbd_size_update(nbd, bdev); 150 } 151 152 static void nbd_end_request(struct nbd_cmd *cmd) 153 { 154 struct nbd_device *nbd = cmd->nbd; 155 struct request *req = blk_mq_rq_from_pdu(cmd); 156 int error = req->errors ? -EIO : 0; 157 158 dev_dbg(nbd_to_dev(nbd), "request %p: %s\n", cmd, 159 error ? "failed" : "done"); 160 161 blk_mq_complete_request(req, error); 162 } 163 164 /* 165 * Forcibly shutdown the socket causing all listeners to error 166 */ 167 static void sock_shutdown(struct nbd_device *nbd) 168 { 169 int i; 170 171 if (nbd->num_connections == 0) 172 return; 173 if (test_and_set_bit(NBD_DISCONNECTED, &nbd->runtime_flags)) 174 return; 175 176 for (i = 0; i < nbd->num_connections; i++) { 177 struct nbd_sock *nsock = nbd->socks[i]; 178 mutex_lock(&nsock->tx_lock); 179 kernel_sock_shutdown(nsock->sock, SHUT_RDWR); 180 mutex_unlock(&nsock->tx_lock); 181 } 182 dev_warn(disk_to_dev(nbd->disk), "shutting down sockets\n"); 183 } 184 185 static enum blk_eh_timer_return nbd_xmit_timeout(struct request *req, 186 bool reserved) 187 { 188 struct nbd_cmd *cmd = blk_mq_rq_to_pdu(req); 189 struct nbd_device *nbd = cmd->nbd; 190 191 dev_err(nbd_to_dev(nbd), "Connection timed out, shutting down connection\n"); 192 set_bit(NBD_TIMEDOUT, &nbd->runtime_flags); 193 req->errors++; 194 195 mutex_lock(&nbd->config_lock); 196 sock_shutdown(nbd); 197 mutex_unlock(&nbd->config_lock); 198 return BLK_EH_HANDLED; 199 } 200 201 /* 202 * Send or receive packet. 203 */ 204 static int sock_xmit(struct nbd_device *nbd, int index, int send, 205 struct iov_iter *iter, int msg_flags) 206 { 207 struct socket *sock = nbd->socks[index]->sock; 208 int result; 209 struct msghdr msg; 210 unsigned long pflags = current->flags; 211 212 if (unlikely(!sock)) { 213 dev_err_ratelimited(disk_to_dev(nbd->disk), 214 "Attempted %s on closed socket in sock_xmit\n", 215 (send ? "send" : "recv")); 216 return -EINVAL; 217 } 218 219 msg.msg_iter = *iter; 220 221 current->flags |= PF_MEMALLOC; 222 do { 223 sock->sk->sk_allocation = GFP_NOIO | __GFP_MEMALLOC; 224 msg.msg_name = NULL; 225 msg.msg_namelen = 0; 226 msg.msg_control = NULL; 227 msg.msg_controllen = 0; 228 msg.msg_flags = msg_flags | MSG_NOSIGNAL; 229 230 if (send) 231 result = sock_sendmsg(sock, &msg); 232 else 233 result = sock_recvmsg(sock, &msg, msg.msg_flags); 234 235 if (result <= 0) { 236 if (result == 0) 237 result = -EPIPE; /* short read */ 238 break; 239 } 240 } while (msg_data_left(&msg)); 241 242 tsk_restore_flags(current, pflags, PF_MEMALLOC); 243 244 return result; 245 } 246 247 /* always call with the tx_lock held */ 248 static int nbd_send_cmd(struct nbd_device *nbd, struct nbd_cmd *cmd, int index) 249 { 250 struct request *req = blk_mq_rq_from_pdu(cmd); 251 int result; 252 struct nbd_request request = {.magic = htonl(NBD_REQUEST_MAGIC)}; 253 struct kvec iov = {.iov_base = &request, .iov_len = sizeof(request)}; 254 struct iov_iter from; 255 unsigned long size = blk_rq_bytes(req); 256 struct bio *bio; 257 u32 type; 258 u32 tag = blk_mq_unique_tag(req); 259 260 iov_iter_kvec(&from, WRITE | ITER_KVEC, &iov, 1, sizeof(request)); 261 262 switch (req_op(req)) { 263 case REQ_OP_DISCARD: 264 type = NBD_CMD_TRIM; 265 break; 266 case REQ_OP_FLUSH: 267 type = NBD_CMD_FLUSH; 268 break; 269 case REQ_OP_WRITE: 270 type = NBD_CMD_WRITE; 271 break; 272 case REQ_OP_READ: 273 type = NBD_CMD_READ; 274 break; 275 default: 276 return -EIO; 277 } 278 279 if (rq_data_dir(req) == WRITE && 280 (nbd->flags & NBD_FLAG_READ_ONLY)) { 281 dev_err_ratelimited(disk_to_dev(nbd->disk), 282 "Write on read-only\n"); 283 return -EIO; 284 } 285 286 request.type = htonl(type); 287 if (type != NBD_CMD_FLUSH) { 288 request.from = cpu_to_be64((u64)blk_rq_pos(req) << 9); 289 request.len = htonl(size); 290 } 291 memcpy(request.handle, &tag, sizeof(tag)); 292 293 dev_dbg(nbd_to_dev(nbd), "request %p: sending control (%s@%llu,%uB)\n", 294 cmd, nbdcmd_to_ascii(type), 295 (unsigned long long)blk_rq_pos(req) << 9, blk_rq_bytes(req)); 296 result = sock_xmit(nbd, index, 1, &from, 297 (type == NBD_CMD_WRITE) ? MSG_MORE : 0); 298 if (result <= 0) { 299 dev_err_ratelimited(disk_to_dev(nbd->disk), 300 "Send control failed (result %d)\n", result); 301 return -EIO; 302 } 303 304 if (type != NBD_CMD_WRITE) 305 return 0; 306 307 bio = req->bio; 308 while (bio) { 309 struct bio *next = bio->bi_next; 310 struct bvec_iter iter; 311 struct bio_vec bvec; 312 313 bio_for_each_segment(bvec, bio, iter) { 314 bool is_last = !next && bio_iter_last(bvec, iter); 315 int flags = is_last ? 0 : MSG_MORE; 316 317 dev_dbg(nbd_to_dev(nbd), "request %p: sending %d bytes data\n", 318 cmd, bvec.bv_len); 319 iov_iter_bvec(&from, ITER_BVEC | WRITE, 320 &bvec, 1, bvec.bv_len); 321 result = sock_xmit(nbd, index, 1, &from, flags); 322 if (result <= 0) { 323 dev_err(disk_to_dev(nbd->disk), 324 "Send data failed (result %d)\n", 325 result); 326 return -EIO; 327 } 328 /* 329 * The completion might already have come in, 330 * so break for the last one instead of letting 331 * the iterator do it. This prevents use-after-free 332 * of the bio. 333 */ 334 if (is_last) 335 break; 336 } 337 bio = next; 338 } 339 return 0; 340 } 341 342 /* NULL returned = something went wrong, inform userspace */ 343 static struct nbd_cmd *nbd_read_stat(struct nbd_device *nbd, int index) 344 { 345 int result; 346 struct nbd_reply reply; 347 struct nbd_cmd *cmd; 348 struct request *req = NULL; 349 u16 hwq; 350 u32 tag; 351 struct kvec iov = {.iov_base = &reply, .iov_len = sizeof(reply)}; 352 struct iov_iter to; 353 354 reply.magic = 0; 355 iov_iter_kvec(&to, READ | ITER_KVEC, &iov, 1, sizeof(reply)); 356 result = sock_xmit(nbd, index, 0, &to, MSG_WAITALL); 357 if (result <= 0) { 358 if (!test_bit(NBD_DISCONNECTED, &nbd->runtime_flags) && 359 !test_bit(NBD_DISCONNECT_REQUESTED, &nbd->runtime_flags)) 360 dev_err(disk_to_dev(nbd->disk), 361 "Receive control failed (result %d)\n", result); 362 return ERR_PTR(result); 363 } 364 365 if (ntohl(reply.magic) != NBD_REPLY_MAGIC) { 366 dev_err(disk_to_dev(nbd->disk), "Wrong magic (0x%lx)\n", 367 (unsigned long)ntohl(reply.magic)); 368 return ERR_PTR(-EPROTO); 369 } 370 371 memcpy(&tag, reply.handle, sizeof(u32)); 372 373 hwq = blk_mq_unique_tag_to_hwq(tag); 374 if (hwq < nbd->tag_set.nr_hw_queues) 375 req = blk_mq_tag_to_rq(nbd->tag_set.tags[hwq], 376 blk_mq_unique_tag_to_tag(tag)); 377 if (!req || !blk_mq_request_started(req)) { 378 dev_err(disk_to_dev(nbd->disk), "Unexpected reply (%d) %p\n", 379 tag, req); 380 return ERR_PTR(-ENOENT); 381 } 382 cmd = blk_mq_rq_to_pdu(req); 383 if (ntohl(reply.error)) { 384 dev_err(disk_to_dev(nbd->disk), "Other side returned error (%d)\n", 385 ntohl(reply.error)); 386 req->errors++; 387 return cmd; 388 } 389 390 dev_dbg(nbd_to_dev(nbd), "request %p: got reply\n", cmd); 391 if (rq_data_dir(req) != WRITE) { 392 struct req_iterator iter; 393 struct bio_vec bvec; 394 395 rq_for_each_segment(bvec, req, iter) { 396 iov_iter_bvec(&to, ITER_BVEC | READ, 397 &bvec, 1, bvec.bv_len); 398 result = sock_xmit(nbd, index, 0, &to, MSG_WAITALL); 399 if (result <= 0) { 400 dev_err(disk_to_dev(nbd->disk), "Receive data failed (result %d)\n", 401 result); 402 req->errors++; 403 return cmd; 404 } 405 dev_dbg(nbd_to_dev(nbd), "request %p: got %d bytes data\n", 406 cmd, bvec.bv_len); 407 } 408 } else { 409 /* See the comment in nbd_queue_rq. */ 410 wait_for_completion(&cmd->send_complete); 411 } 412 return cmd; 413 } 414 415 static ssize_t pid_show(struct device *dev, 416 struct device_attribute *attr, char *buf) 417 { 418 struct gendisk *disk = dev_to_disk(dev); 419 struct nbd_device *nbd = (struct nbd_device *)disk->private_data; 420 421 return sprintf(buf, "%d\n", task_pid_nr(nbd->task_recv)); 422 } 423 424 static struct device_attribute pid_attr = { 425 .attr = { .name = "pid", .mode = S_IRUGO}, 426 .show = pid_show, 427 }; 428 429 struct recv_thread_args { 430 struct work_struct work; 431 struct nbd_device *nbd; 432 int index; 433 }; 434 435 static void recv_work(struct work_struct *work) 436 { 437 struct recv_thread_args *args = container_of(work, 438 struct recv_thread_args, 439 work); 440 struct nbd_device *nbd = args->nbd; 441 struct nbd_cmd *cmd; 442 int ret = 0; 443 444 BUG_ON(nbd->magic != NBD_MAGIC); 445 while (1) { 446 cmd = nbd_read_stat(nbd, args->index); 447 if (IS_ERR(cmd)) { 448 ret = PTR_ERR(cmd); 449 break; 450 } 451 452 nbd_end_request(cmd); 453 } 454 455 /* 456 * We got an error, shut everybody down if this wasn't the result of a 457 * disconnect request. 458 */ 459 if (ret && !test_bit(NBD_DISCONNECT_REQUESTED, &nbd->runtime_flags)) 460 sock_shutdown(nbd); 461 atomic_dec(&nbd->recv_threads); 462 wake_up(&nbd->recv_wq); 463 } 464 465 static void nbd_clear_req(struct request *req, void *data, bool reserved) 466 { 467 struct nbd_cmd *cmd; 468 469 if (!blk_mq_request_started(req)) 470 return; 471 cmd = blk_mq_rq_to_pdu(req); 472 req->errors++; 473 nbd_end_request(cmd); 474 } 475 476 static void nbd_clear_que(struct nbd_device *nbd) 477 { 478 BUG_ON(nbd->magic != NBD_MAGIC); 479 480 blk_mq_tagset_busy_iter(&nbd->tag_set, nbd_clear_req, NULL); 481 dev_dbg(disk_to_dev(nbd->disk), "queue cleared\n"); 482 } 483 484 485 static void nbd_handle_cmd(struct nbd_cmd *cmd, int index) 486 { 487 struct request *req = blk_mq_rq_from_pdu(cmd); 488 struct nbd_device *nbd = cmd->nbd; 489 struct nbd_sock *nsock; 490 491 if (index >= nbd->num_connections) { 492 dev_err_ratelimited(disk_to_dev(nbd->disk), 493 "Attempted send on invalid socket\n"); 494 goto error_out; 495 } 496 497 if (test_bit(NBD_DISCONNECTED, &nbd->runtime_flags)) { 498 dev_err_ratelimited(disk_to_dev(nbd->disk), 499 "Attempted send on closed socket\n"); 500 goto error_out; 501 } 502 503 req->errors = 0; 504 505 nsock = nbd->socks[index]; 506 mutex_lock(&nsock->tx_lock); 507 if (unlikely(!nsock->sock)) { 508 mutex_unlock(&nsock->tx_lock); 509 dev_err_ratelimited(disk_to_dev(nbd->disk), 510 "Attempted send on closed socket\n"); 511 goto error_out; 512 } 513 514 if (nbd_send_cmd(nbd, cmd, index) != 0) { 515 dev_err_ratelimited(disk_to_dev(nbd->disk), 516 "Request send failed\n"); 517 req->errors++; 518 nbd_end_request(cmd); 519 } 520 521 mutex_unlock(&nsock->tx_lock); 522 523 return; 524 525 error_out: 526 req->errors++; 527 nbd_end_request(cmd); 528 } 529 530 static int nbd_queue_rq(struct blk_mq_hw_ctx *hctx, 531 const struct blk_mq_queue_data *bd) 532 { 533 struct nbd_cmd *cmd = blk_mq_rq_to_pdu(bd->rq); 534 535 /* 536 * Since we look at the bio's to send the request over the network we 537 * need to make sure the completion work doesn't mark this request done 538 * before we are done doing our send. This keeps us from dereferencing 539 * freed data if we have particularly fast completions (ie we get the 540 * completion before we exit sock_xmit on the last bvec) or in the case 541 * that the server is misbehaving (or there was an error) before we're 542 * done sending everything over the wire. 543 */ 544 init_completion(&cmd->send_complete); 545 blk_mq_start_request(bd->rq); 546 nbd_handle_cmd(cmd, hctx->queue_num); 547 complete(&cmd->send_complete); 548 549 return BLK_MQ_RQ_QUEUE_OK; 550 } 551 552 static int nbd_add_socket(struct nbd_device *nbd, struct block_device *bdev, 553 unsigned long arg) 554 { 555 struct socket *sock; 556 struct nbd_sock **socks; 557 struct nbd_sock *nsock; 558 int err; 559 560 sock = sockfd_lookup(arg, &err); 561 if (!sock) 562 return err; 563 564 if (!nbd->task_setup) 565 nbd->task_setup = current; 566 if (nbd->task_setup != current) { 567 dev_err(disk_to_dev(nbd->disk), 568 "Device being setup by another task"); 569 return -EINVAL; 570 } 571 572 socks = krealloc(nbd->socks, (nbd->num_connections + 1) * 573 sizeof(struct nbd_sock *), GFP_KERNEL); 574 if (!socks) 575 return -ENOMEM; 576 nsock = kzalloc(sizeof(struct nbd_sock), GFP_KERNEL); 577 if (!nsock) 578 return -ENOMEM; 579 580 nbd->socks = socks; 581 582 mutex_init(&nsock->tx_lock); 583 nsock->sock = sock; 584 socks[nbd->num_connections++] = nsock; 585 586 if (max_part) 587 bdev->bd_invalidated = 1; 588 return 0; 589 } 590 591 /* Reset all properties of an NBD device */ 592 static void nbd_reset(struct nbd_device *nbd) 593 { 594 nbd->runtime_flags = 0; 595 nbd->blksize = 1024; 596 nbd->bytesize = 0; 597 set_capacity(nbd->disk, 0); 598 nbd->flags = 0; 599 nbd->tag_set.timeout = 0; 600 queue_flag_clear_unlocked(QUEUE_FLAG_DISCARD, nbd->disk->queue); 601 } 602 603 static void nbd_bdev_reset(struct block_device *bdev) 604 { 605 set_device_ro(bdev, false); 606 bdev->bd_inode->i_size = 0; 607 if (max_part > 0) { 608 blkdev_reread_part(bdev); 609 bdev->bd_invalidated = 1; 610 } 611 } 612 613 static void nbd_parse_flags(struct nbd_device *nbd, struct block_device *bdev) 614 { 615 if (nbd->flags & NBD_FLAG_READ_ONLY) 616 set_device_ro(bdev, true); 617 if (nbd->flags & NBD_FLAG_SEND_TRIM) 618 queue_flag_set_unlocked(QUEUE_FLAG_DISCARD, nbd->disk->queue); 619 if (nbd->flags & NBD_FLAG_SEND_FLUSH) 620 blk_queue_write_cache(nbd->disk->queue, true, false); 621 else 622 blk_queue_write_cache(nbd->disk->queue, false, false); 623 } 624 625 static void send_disconnects(struct nbd_device *nbd) 626 { 627 struct nbd_request request = { 628 .magic = htonl(NBD_REQUEST_MAGIC), 629 .type = htonl(NBD_CMD_DISC), 630 }; 631 struct kvec iov = {.iov_base = &request, .iov_len = sizeof(request)}; 632 struct iov_iter from; 633 int i, ret; 634 635 for (i = 0; i < nbd->num_connections; i++) { 636 iov_iter_kvec(&from, WRITE | ITER_KVEC, &iov, 1, sizeof(request)); 637 ret = sock_xmit(nbd, i, 1, &from, 0); 638 if (ret <= 0) 639 dev_err(disk_to_dev(nbd->disk), 640 "Send disconnect failed %d\n", ret); 641 } 642 } 643 644 static int nbd_disconnect(struct nbd_device *nbd, struct block_device *bdev) 645 { 646 dev_info(disk_to_dev(nbd->disk), "NBD_DISCONNECT\n"); 647 if (!nbd->socks) 648 return -EINVAL; 649 650 mutex_unlock(&nbd->config_lock); 651 fsync_bdev(bdev); 652 mutex_lock(&nbd->config_lock); 653 654 /* Check again after getting mutex back. */ 655 if (!nbd->socks) 656 return -EINVAL; 657 658 if (!test_and_set_bit(NBD_DISCONNECT_REQUESTED, 659 &nbd->runtime_flags)) 660 send_disconnects(nbd); 661 return 0; 662 } 663 664 static int nbd_clear_sock(struct nbd_device *nbd, struct block_device *bdev) 665 { 666 sock_shutdown(nbd); 667 nbd_clear_que(nbd); 668 kill_bdev(bdev); 669 nbd_bdev_reset(bdev); 670 /* 671 * We want to give the run thread a chance to wait for everybody 672 * to clean up and then do it's own cleanup. 673 */ 674 if (!test_bit(NBD_RUNNING, &nbd->runtime_flags) && 675 nbd->num_connections) { 676 int i; 677 678 for (i = 0; i < nbd->num_connections; i++) { 679 sockfd_put(nbd->socks[i]->sock); 680 kfree(nbd->socks[i]); 681 } 682 kfree(nbd->socks); 683 nbd->socks = NULL; 684 nbd->num_connections = 0; 685 } 686 nbd->task_setup = NULL; 687 688 return 0; 689 } 690 691 static int nbd_start_device(struct nbd_device *nbd, struct block_device *bdev) 692 { 693 struct recv_thread_args *args; 694 int num_connections = nbd->num_connections; 695 int error = 0, i; 696 697 if (nbd->task_recv) 698 return -EBUSY; 699 if (!nbd->socks) 700 return -EINVAL; 701 if (num_connections > 1 && 702 !(nbd->flags & NBD_FLAG_CAN_MULTI_CONN)) { 703 dev_err(disk_to_dev(nbd->disk), "server does not support multiple connections per device.\n"); 704 error = -EINVAL; 705 goto out_err; 706 } 707 708 set_bit(NBD_RUNNING, &nbd->runtime_flags); 709 blk_mq_update_nr_hw_queues(&nbd->tag_set, nbd->num_connections); 710 args = kcalloc(num_connections, sizeof(*args), GFP_KERNEL); 711 if (!args) { 712 error = -ENOMEM; 713 goto out_err; 714 } 715 nbd->task_recv = current; 716 mutex_unlock(&nbd->config_lock); 717 718 nbd_parse_flags(nbd, bdev); 719 720 error = device_create_file(disk_to_dev(nbd->disk), &pid_attr); 721 if (error) { 722 dev_err(disk_to_dev(nbd->disk), "device_create_file failed!\n"); 723 goto out_recv; 724 } 725 726 nbd_size_update(nbd, bdev); 727 728 nbd_dev_dbg_init(nbd); 729 for (i = 0; i < num_connections; i++) { 730 sk_set_memalloc(nbd->socks[i]->sock->sk); 731 atomic_inc(&nbd->recv_threads); 732 INIT_WORK(&args[i].work, recv_work); 733 args[i].nbd = nbd; 734 args[i].index = i; 735 queue_work(recv_workqueue, &args[i].work); 736 } 737 wait_event_interruptible(nbd->recv_wq, 738 atomic_read(&nbd->recv_threads) == 0); 739 for (i = 0; i < num_connections; i++) 740 flush_work(&args[i].work); 741 nbd_dev_dbg_close(nbd); 742 nbd_size_clear(nbd, bdev); 743 device_remove_file(disk_to_dev(nbd->disk), &pid_attr); 744 out_recv: 745 mutex_lock(&nbd->config_lock); 746 nbd->task_recv = NULL; 747 out_err: 748 clear_bit(NBD_RUNNING, &nbd->runtime_flags); 749 nbd_clear_sock(nbd, bdev); 750 751 /* user requested, ignore socket errors */ 752 if (test_bit(NBD_DISCONNECT_REQUESTED, &nbd->runtime_flags)) 753 error = 0; 754 if (test_bit(NBD_TIMEDOUT, &nbd->runtime_flags)) 755 error = -ETIMEDOUT; 756 757 nbd_reset(nbd); 758 return error; 759 } 760 761 /* Must be called with config_lock held */ 762 static int __nbd_ioctl(struct block_device *bdev, struct nbd_device *nbd, 763 unsigned int cmd, unsigned long arg) 764 { 765 switch (cmd) { 766 case NBD_DISCONNECT: 767 return nbd_disconnect(nbd, bdev); 768 case NBD_CLEAR_SOCK: 769 return nbd_clear_sock(nbd, bdev); 770 case NBD_SET_SOCK: 771 return nbd_add_socket(nbd, bdev, arg); 772 case NBD_SET_BLKSIZE: 773 nbd_size_set(nbd, bdev, arg, 774 div_s64(nbd->bytesize, arg)); 775 return 0; 776 case NBD_SET_SIZE: 777 nbd_size_set(nbd, bdev, nbd->blksize, 778 div_s64(arg, nbd->blksize)); 779 return 0; 780 case NBD_SET_SIZE_BLOCKS: 781 nbd_size_set(nbd, bdev, nbd->blksize, arg); 782 return 0; 783 case NBD_SET_TIMEOUT: 784 nbd->tag_set.timeout = arg * HZ; 785 return 0; 786 787 case NBD_SET_FLAGS: 788 nbd->flags = arg; 789 return 0; 790 case NBD_DO_IT: 791 return nbd_start_device(nbd, bdev); 792 case NBD_CLEAR_QUE: 793 /* 794 * This is for compatibility only. The queue is always cleared 795 * by NBD_DO_IT or NBD_CLEAR_SOCK. 796 */ 797 return 0; 798 case NBD_PRINT_DEBUG: 799 /* 800 * For compatibility only, we no longer keep a list of 801 * outstanding requests. 802 */ 803 return 0; 804 } 805 return -ENOTTY; 806 } 807 808 static int nbd_ioctl(struct block_device *bdev, fmode_t mode, 809 unsigned int cmd, unsigned long arg) 810 { 811 struct nbd_device *nbd = bdev->bd_disk->private_data; 812 int error; 813 814 if (!capable(CAP_SYS_ADMIN)) 815 return -EPERM; 816 817 BUG_ON(nbd->magic != NBD_MAGIC); 818 819 mutex_lock(&nbd->config_lock); 820 error = __nbd_ioctl(bdev, nbd, cmd, arg); 821 mutex_unlock(&nbd->config_lock); 822 823 return error; 824 } 825 826 static const struct block_device_operations nbd_fops = 827 { 828 .owner = THIS_MODULE, 829 .ioctl = nbd_ioctl, 830 .compat_ioctl = nbd_ioctl, 831 }; 832 833 #if IS_ENABLED(CONFIG_DEBUG_FS) 834 835 static int nbd_dbg_tasks_show(struct seq_file *s, void *unused) 836 { 837 struct nbd_device *nbd = s->private; 838 839 if (nbd->task_recv) 840 seq_printf(s, "recv: %d\n", task_pid_nr(nbd->task_recv)); 841 842 return 0; 843 } 844 845 static int nbd_dbg_tasks_open(struct inode *inode, struct file *file) 846 { 847 return single_open(file, nbd_dbg_tasks_show, inode->i_private); 848 } 849 850 static const struct file_operations nbd_dbg_tasks_ops = { 851 .open = nbd_dbg_tasks_open, 852 .read = seq_read, 853 .llseek = seq_lseek, 854 .release = single_release, 855 }; 856 857 static int nbd_dbg_flags_show(struct seq_file *s, void *unused) 858 { 859 struct nbd_device *nbd = s->private; 860 u32 flags = nbd->flags; 861 862 seq_printf(s, "Hex: 0x%08x\n\n", flags); 863 864 seq_puts(s, "Known flags:\n"); 865 866 if (flags & NBD_FLAG_HAS_FLAGS) 867 seq_puts(s, "NBD_FLAG_HAS_FLAGS\n"); 868 if (flags & NBD_FLAG_READ_ONLY) 869 seq_puts(s, "NBD_FLAG_READ_ONLY\n"); 870 if (flags & NBD_FLAG_SEND_FLUSH) 871 seq_puts(s, "NBD_FLAG_SEND_FLUSH\n"); 872 if (flags & NBD_FLAG_SEND_TRIM) 873 seq_puts(s, "NBD_FLAG_SEND_TRIM\n"); 874 875 return 0; 876 } 877 878 static int nbd_dbg_flags_open(struct inode *inode, struct file *file) 879 { 880 return single_open(file, nbd_dbg_flags_show, inode->i_private); 881 } 882 883 static const struct file_operations nbd_dbg_flags_ops = { 884 .open = nbd_dbg_flags_open, 885 .read = seq_read, 886 .llseek = seq_lseek, 887 .release = single_release, 888 }; 889 890 static int nbd_dev_dbg_init(struct nbd_device *nbd) 891 { 892 struct dentry *dir; 893 894 if (!nbd_dbg_dir) 895 return -EIO; 896 897 dir = debugfs_create_dir(nbd_name(nbd), nbd_dbg_dir); 898 if (!dir) { 899 dev_err(nbd_to_dev(nbd), "Failed to create debugfs dir for '%s'\n", 900 nbd_name(nbd)); 901 return -EIO; 902 } 903 nbd->dbg_dir = dir; 904 905 debugfs_create_file("tasks", 0444, dir, nbd, &nbd_dbg_tasks_ops); 906 debugfs_create_u64("size_bytes", 0444, dir, &nbd->bytesize); 907 debugfs_create_u32("timeout", 0444, dir, &nbd->tag_set.timeout); 908 debugfs_create_u64("blocksize", 0444, dir, &nbd->blksize); 909 debugfs_create_file("flags", 0444, dir, nbd, &nbd_dbg_flags_ops); 910 911 return 0; 912 } 913 914 static void nbd_dev_dbg_close(struct nbd_device *nbd) 915 { 916 debugfs_remove_recursive(nbd->dbg_dir); 917 } 918 919 static int nbd_dbg_init(void) 920 { 921 struct dentry *dbg_dir; 922 923 dbg_dir = debugfs_create_dir("nbd", NULL); 924 if (!dbg_dir) 925 return -EIO; 926 927 nbd_dbg_dir = dbg_dir; 928 929 return 0; 930 } 931 932 static void nbd_dbg_close(void) 933 { 934 debugfs_remove_recursive(nbd_dbg_dir); 935 } 936 937 #else /* IS_ENABLED(CONFIG_DEBUG_FS) */ 938 939 static int nbd_dev_dbg_init(struct nbd_device *nbd) 940 { 941 return 0; 942 } 943 944 static void nbd_dev_dbg_close(struct nbd_device *nbd) 945 { 946 } 947 948 static int nbd_dbg_init(void) 949 { 950 return 0; 951 } 952 953 static void nbd_dbg_close(void) 954 { 955 } 956 957 #endif 958 959 static int nbd_init_request(void *data, struct request *rq, 960 unsigned int hctx_idx, unsigned int request_idx, 961 unsigned int numa_node) 962 { 963 struct nbd_cmd *cmd = blk_mq_rq_to_pdu(rq); 964 cmd->nbd = data; 965 return 0; 966 } 967 968 static struct blk_mq_ops nbd_mq_ops = { 969 .queue_rq = nbd_queue_rq, 970 .init_request = nbd_init_request, 971 .timeout = nbd_xmit_timeout, 972 }; 973 974 static void nbd_dev_remove(struct nbd_device *nbd) 975 { 976 struct gendisk *disk = nbd->disk; 977 nbd->magic = 0; 978 if (disk) { 979 del_gendisk(disk); 980 blk_cleanup_queue(disk->queue); 981 blk_mq_free_tag_set(&nbd->tag_set); 982 put_disk(disk); 983 } 984 kfree(nbd); 985 } 986 987 static int nbd_dev_add(int index) 988 { 989 struct nbd_device *nbd; 990 struct gendisk *disk; 991 struct request_queue *q; 992 int err = -ENOMEM; 993 994 nbd = kzalloc(sizeof(struct nbd_device), GFP_KERNEL); 995 if (!nbd) 996 goto out; 997 998 disk = alloc_disk(1 << part_shift); 999 if (!disk) 1000 goto out_free_nbd; 1001 1002 if (index >= 0) { 1003 err = idr_alloc(&nbd_index_idr, nbd, index, index + 1, 1004 GFP_KERNEL); 1005 if (err == -ENOSPC) 1006 err = -EEXIST; 1007 } else { 1008 err = idr_alloc(&nbd_index_idr, nbd, 0, 0, GFP_KERNEL); 1009 if (err >= 0) 1010 index = err; 1011 } 1012 if (err < 0) 1013 goto out_free_disk; 1014 1015 nbd->disk = disk; 1016 nbd->tag_set.ops = &nbd_mq_ops; 1017 nbd->tag_set.nr_hw_queues = 1; 1018 nbd->tag_set.queue_depth = 128; 1019 nbd->tag_set.numa_node = NUMA_NO_NODE; 1020 nbd->tag_set.cmd_size = sizeof(struct nbd_cmd); 1021 nbd->tag_set.flags = BLK_MQ_F_SHOULD_MERGE | 1022 BLK_MQ_F_SG_MERGE | BLK_MQ_F_BLOCKING; 1023 nbd->tag_set.driver_data = nbd; 1024 1025 err = blk_mq_alloc_tag_set(&nbd->tag_set); 1026 if (err) 1027 goto out_free_idr; 1028 1029 q = blk_mq_init_queue(&nbd->tag_set); 1030 if (IS_ERR(q)) { 1031 err = PTR_ERR(q); 1032 goto out_free_tags; 1033 } 1034 disk->queue = q; 1035 1036 /* 1037 * Tell the block layer that we are not a rotational device 1038 */ 1039 queue_flag_set_unlocked(QUEUE_FLAG_NONROT, disk->queue); 1040 queue_flag_clear_unlocked(QUEUE_FLAG_ADD_RANDOM, disk->queue); 1041 disk->queue->limits.discard_granularity = 512; 1042 blk_queue_max_discard_sectors(disk->queue, UINT_MAX); 1043 disk->queue->limits.discard_zeroes_data = 0; 1044 blk_queue_max_hw_sectors(disk->queue, 65536); 1045 disk->queue->limits.max_sectors = 256; 1046 1047 nbd->magic = NBD_MAGIC; 1048 mutex_init(&nbd->config_lock); 1049 disk->major = NBD_MAJOR; 1050 disk->first_minor = index << part_shift; 1051 disk->fops = &nbd_fops; 1052 disk->private_data = nbd; 1053 sprintf(disk->disk_name, "nbd%d", index); 1054 init_waitqueue_head(&nbd->recv_wq); 1055 nbd_reset(nbd); 1056 add_disk(disk); 1057 return index; 1058 1059 out_free_tags: 1060 blk_mq_free_tag_set(&nbd->tag_set); 1061 out_free_idr: 1062 idr_remove(&nbd_index_idr, index); 1063 out_free_disk: 1064 put_disk(disk); 1065 out_free_nbd: 1066 kfree(nbd); 1067 out: 1068 return err; 1069 } 1070 1071 /* 1072 * And here should be modules and kernel interface 1073 * (Just smiley confuses emacs :-) 1074 */ 1075 1076 static int __init nbd_init(void) 1077 { 1078 int i; 1079 1080 BUILD_BUG_ON(sizeof(struct nbd_request) != 28); 1081 1082 if (max_part < 0) { 1083 printk(KERN_ERR "nbd: max_part must be >= 0\n"); 1084 return -EINVAL; 1085 } 1086 1087 part_shift = 0; 1088 if (max_part > 0) { 1089 part_shift = fls(max_part); 1090 1091 /* 1092 * Adjust max_part according to part_shift as it is exported 1093 * to user space so that user can know the max number of 1094 * partition kernel should be able to manage. 1095 * 1096 * Note that -1 is required because partition 0 is reserved 1097 * for the whole disk. 1098 */ 1099 max_part = (1UL << part_shift) - 1; 1100 } 1101 1102 if ((1UL << part_shift) > DISK_MAX_PARTS) 1103 return -EINVAL; 1104 1105 if (nbds_max > 1UL << (MINORBITS - part_shift)) 1106 return -EINVAL; 1107 recv_workqueue = alloc_workqueue("knbd-recv", 1108 WQ_MEM_RECLAIM | WQ_HIGHPRI, 0); 1109 if (!recv_workqueue) 1110 return -ENOMEM; 1111 1112 if (register_blkdev(NBD_MAJOR, "nbd")) { 1113 destroy_workqueue(recv_workqueue); 1114 return -EIO; 1115 } 1116 1117 nbd_dbg_init(); 1118 1119 mutex_lock(&nbd_index_mutex); 1120 for (i = 0; i < nbds_max; i++) 1121 nbd_dev_add(i); 1122 mutex_unlock(&nbd_index_mutex); 1123 return 0; 1124 } 1125 1126 static int nbd_exit_cb(int id, void *ptr, void *data) 1127 { 1128 struct nbd_device *nbd = ptr; 1129 nbd_dev_remove(nbd); 1130 return 0; 1131 } 1132 1133 static void __exit nbd_cleanup(void) 1134 { 1135 nbd_dbg_close(); 1136 1137 idr_for_each(&nbd_index_idr, &nbd_exit_cb, NULL); 1138 idr_destroy(&nbd_index_idr); 1139 destroy_workqueue(recv_workqueue); 1140 unregister_blkdev(NBD_MAJOR, "nbd"); 1141 } 1142 1143 module_init(nbd_init); 1144 module_exit(nbd_cleanup); 1145 1146 MODULE_DESCRIPTION("Network Block Device"); 1147 MODULE_LICENSE("GPL"); 1148 1149 module_param(nbds_max, int, 0444); 1150 MODULE_PARM_DESC(nbds_max, "number of network block devices to initialize (default: 16)"); 1151 module_param(max_part, int, 0444); 1152 MODULE_PARM_DESC(max_part, "number of partitions per device (default: 0)"); 1153