1 /* 2 * Network block device - make block devices work over TCP 3 * 4 * Note that you can not swap over this thing, yet. Seems to work but 5 * deadlocks sometimes - you can not swap over TCP in general. 6 * 7 * Copyright 1997-2000, 2008 Pavel Machek <pavel@ucw.cz> 8 * Parts copyright 2001 Steven Whitehouse <steve@chygwyn.com> 9 * 10 * This file is released under GPLv2 or later. 11 * 12 * (part of code stolen from loop.c) 13 */ 14 15 #include <linux/major.h> 16 17 #include <linux/blkdev.h> 18 #include <linux/module.h> 19 #include <linux/init.h> 20 #include <linux/sched.h> 21 #include <linux/fs.h> 22 #include <linux/bio.h> 23 #include <linux/stat.h> 24 #include <linux/errno.h> 25 #include <linux/file.h> 26 #include <linux/ioctl.h> 27 #include <linux/mutex.h> 28 #include <linux/compiler.h> 29 #include <linux/err.h> 30 #include <linux/kernel.h> 31 #include <linux/slab.h> 32 #include <net/sock.h> 33 #include <linux/net.h> 34 #include <linux/kthread.h> 35 36 #include <asm/uaccess.h> 37 #include <asm/system.h> 38 #include <asm/types.h> 39 40 #include <linux/nbd.h> 41 42 #define LO_MAGIC 0x68797548 43 44 #ifdef NDEBUG 45 #define dprintk(flags, fmt...) 46 #else /* NDEBUG */ 47 #define dprintk(flags, fmt...) do { \ 48 if (debugflags & (flags)) printk(KERN_DEBUG fmt); \ 49 } while (0) 50 #define DBG_IOCTL 0x0004 51 #define DBG_INIT 0x0010 52 #define DBG_EXIT 0x0020 53 #define DBG_BLKDEV 0x0100 54 #define DBG_RX 0x0200 55 #define DBG_TX 0x0400 56 static unsigned int debugflags; 57 #endif /* NDEBUG */ 58 59 static unsigned int nbds_max = 16; 60 static struct nbd_device *nbd_dev; 61 static int max_part; 62 63 /* 64 * Use just one lock (or at most 1 per NIC). Two arguments for this: 65 * 1. Each NIC is essentially a synchronization point for all servers 66 * accessed through that NIC so there's no need to have more locks 67 * than NICs anyway. 68 * 2. More locks lead to more "Dirty cache line bouncing" which will slow 69 * down each lock to the point where they're actually slower than just 70 * a single lock. 71 * Thanks go to Jens Axboe and Al Viro for their LKML emails explaining this! 72 */ 73 static DEFINE_SPINLOCK(nbd_lock); 74 75 #ifndef NDEBUG 76 static const char *ioctl_cmd_to_ascii(int cmd) 77 { 78 switch (cmd) { 79 case NBD_SET_SOCK: return "set-sock"; 80 case NBD_SET_BLKSIZE: return "set-blksize"; 81 case NBD_SET_SIZE: return "set-size"; 82 case NBD_DO_IT: return "do-it"; 83 case NBD_CLEAR_SOCK: return "clear-sock"; 84 case NBD_CLEAR_QUE: return "clear-que"; 85 case NBD_PRINT_DEBUG: return "print-debug"; 86 case NBD_SET_SIZE_BLOCKS: return "set-size-blocks"; 87 case NBD_DISCONNECT: return "disconnect"; 88 case BLKROSET: return "set-read-only"; 89 case BLKFLSBUF: return "flush-buffer-cache"; 90 } 91 return "unknown"; 92 } 93 94 static const char *nbdcmd_to_ascii(int cmd) 95 { 96 switch (cmd) { 97 case NBD_CMD_READ: return "read"; 98 case NBD_CMD_WRITE: return "write"; 99 case NBD_CMD_DISC: return "disconnect"; 100 } 101 return "invalid"; 102 } 103 #endif /* NDEBUG */ 104 105 static void nbd_end_request(struct request *req) 106 { 107 int error = req->errors ? -EIO : 0; 108 struct request_queue *q = req->q; 109 unsigned long flags; 110 111 dprintk(DBG_BLKDEV, "%s: request %p: %s\n", req->rq_disk->disk_name, 112 req, error ? "failed" : "done"); 113 114 spin_lock_irqsave(q->queue_lock, flags); 115 __blk_end_request_all(req, error); 116 spin_unlock_irqrestore(q->queue_lock, flags); 117 } 118 119 static void sock_shutdown(struct nbd_device *lo, int lock) 120 { 121 /* Forcibly shutdown the socket causing all listeners 122 * to error 123 * 124 * FIXME: This code is duplicated from sys_shutdown, but 125 * there should be a more generic interface rather than 126 * calling socket ops directly here */ 127 if (lock) 128 mutex_lock(&lo->tx_lock); 129 if (lo->sock) { 130 printk(KERN_WARNING "%s: shutting down socket\n", 131 lo->disk->disk_name); 132 kernel_sock_shutdown(lo->sock, SHUT_RDWR); 133 lo->sock = NULL; 134 } 135 if (lock) 136 mutex_unlock(&lo->tx_lock); 137 } 138 139 static void nbd_xmit_timeout(unsigned long arg) 140 { 141 struct task_struct *task = (struct task_struct *)arg; 142 143 printk(KERN_WARNING "nbd: killing hung xmit (%s, pid: %d)\n", 144 task->comm, task->pid); 145 force_sig(SIGKILL, task); 146 } 147 148 /* 149 * Send or receive packet. 150 */ 151 static int sock_xmit(struct nbd_device *lo, int send, void *buf, int size, 152 int msg_flags) 153 { 154 struct socket *sock = lo->sock; 155 int result; 156 struct msghdr msg; 157 struct kvec iov; 158 sigset_t blocked, oldset; 159 160 if (unlikely(!sock)) { 161 printk(KERN_ERR "%s: Attempted %s on closed socket in sock_xmit\n", 162 lo->disk->disk_name, (send ? "send" : "recv")); 163 return -EINVAL; 164 } 165 166 /* Allow interception of SIGKILL only 167 * Don't allow other signals to interrupt the transmission */ 168 siginitsetinv(&blocked, sigmask(SIGKILL)); 169 sigprocmask(SIG_SETMASK, &blocked, &oldset); 170 171 do { 172 sock->sk->sk_allocation = GFP_NOIO; 173 iov.iov_base = buf; 174 iov.iov_len = size; 175 msg.msg_name = NULL; 176 msg.msg_namelen = 0; 177 msg.msg_control = NULL; 178 msg.msg_controllen = 0; 179 msg.msg_flags = msg_flags | MSG_NOSIGNAL; 180 181 if (send) { 182 struct timer_list ti; 183 184 if (lo->xmit_timeout) { 185 init_timer(&ti); 186 ti.function = nbd_xmit_timeout; 187 ti.data = (unsigned long)current; 188 ti.expires = jiffies + lo->xmit_timeout; 189 add_timer(&ti); 190 } 191 result = kernel_sendmsg(sock, &msg, &iov, 1, size); 192 if (lo->xmit_timeout) 193 del_timer_sync(&ti); 194 } else 195 result = kernel_recvmsg(sock, &msg, &iov, 1, size, 196 msg.msg_flags); 197 198 if (signal_pending(current)) { 199 siginfo_t info; 200 printk(KERN_WARNING "nbd (pid %d: %s) got signal %d\n", 201 task_pid_nr(current), current->comm, 202 dequeue_signal_lock(current, ¤t->blocked, &info)); 203 result = -EINTR; 204 sock_shutdown(lo, !send); 205 break; 206 } 207 208 if (result <= 0) { 209 if (result == 0) 210 result = -EPIPE; /* short read */ 211 break; 212 } 213 size -= result; 214 buf += result; 215 } while (size > 0); 216 217 sigprocmask(SIG_SETMASK, &oldset, NULL); 218 219 return result; 220 } 221 222 static inline int sock_send_bvec(struct nbd_device *lo, struct bio_vec *bvec, 223 int flags) 224 { 225 int result; 226 void *kaddr = kmap(bvec->bv_page); 227 result = sock_xmit(lo, 1, kaddr + bvec->bv_offset, bvec->bv_len, flags); 228 kunmap(bvec->bv_page); 229 return result; 230 } 231 232 /* always call with the tx_lock held */ 233 static int nbd_send_req(struct nbd_device *lo, struct request *req) 234 { 235 int result, flags; 236 struct nbd_request request; 237 unsigned long size = blk_rq_bytes(req); 238 239 request.magic = htonl(NBD_REQUEST_MAGIC); 240 request.type = htonl(nbd_cmd(req)); 241 request.from = cpu_to_be64((u64)blk_rq_pos(req) << 9); 242 request.len = htonl(size); 243 memcpy(request.handle, &req, sizeof(req)); 244 245 dprintk(DBG_TX, "%s: request %p: sending control (%s@%llu,%uB)\n", 246 lo->disk->disk_name, req, 247 nbdcmd_to_ascii(nbd_cmd(req)), 248 (unsigned long long)blk_rq_pos(req) << 9, 249 blk_rq_bytes(req)); 250 result = sock_xmit(lo, 1, &request, sizeof(request), 251 (nbd_cmd(req) == NBD_CMD_WRITE) ? MSG_MORE : 0); 252 if (result <= 0) { 253 printk(KERN_ERR "%s: Send control failed (result %d)\n", 254 lo->disk->disk_name, result); 255 goto error_out; 256 } 257 258 if (nbd_cmd(req) == NBD_CMD_WRITE) { 259 struct req_iterator iter; 260 struct bio_vec *bvec; 261 /* 262 * we are really probing at internals to determine 263 * whether to set MSG_MORE or not... 264 */ 265 rq_for_each_segment(bvec, req, iter) { 266 flags = 0; 267 if (!rq_iter_last(req, iter)) 268 flags = MSG_MORE; 269 dprintk(DBG_TX, "%s: request %p: sending %d bytes data\n", 270 lo->disk->disk_name, req, bvec->bv_len); 271 result = sock_send_bvec(lo, bvec, flags); 272 if (result <= 0) { 273 printk(KERN_ERR "%s: Send data failed (result %d)\n", 274 lo->disk->disk_name, result); 275 goto error_out; 276 } 277 } 278 } 279 return 0; 280 281 error_out: 282 return -EIO; 283 } 284 285 static struct request *nbd_find_request(struct nbd_device *lo, 286 struct request *xreq) 287 { 288 struct request *req, *tmp; 289 int err; 290 291 err = wait_event_interruptible(lo->active_wq, lo->active_req != xreq); 292 if (unlikely(err)) 293 goto out; 294 295 spin_lock(&lo->queue_lock); 296 list_for_each_entry_safe(req, tmp, &lo->queue_head, queuelist) { 297 if (req != xreq) 298 continue; 299 list_del_init(&req->queuelist); 300 spin_unlock(&lo->queue_lock); 301 return req; 302 } 303 spin_unlock(&lo->queue_lock); 304 305 err = -ENOENT; 306 307 out: 308 return ERR_PTR(err); 309 } 310 311 static inline int sock_recv_bvec(struct nbd_device *lo, struct bio_vec *bvec) 312 { 313 int result; 314 void *kaddr = kmap(bvec->bv_page); 315 result = sock_xmit(lo, 0, kaddr + bvec->bv_offset, bvec->bv_len, 316 MSG_WAITALL); 317 kunmap(bvec->bv_page); 318 return result; 319 } 320 321 /* NULL returned = something went wrong, inform userspace */ 322 static struct request *nbd_read_stat(struct nbd_device *lo) 323 { 324 int result; 325 struct nbd_reply reply; 326 struct request *req; 327 328 reply.magic = 0; 329 result = sock_xmit(lo, 0, &reply, sizeof(reply), MSG_WAITALL); 330 if (result <= 0) { 331 printk(KERN_ERR "%s: Receive control failed (result %d)\n", 332 lo->disk->disk_name, result); 333 goto harderror; 334 } 335 336 if (ntohl(reply.magic) != NBD_REPLY_MAGIC) { 337 printk(KERN_ERR "%s: Wrong magic (0x%lx)\n", 338 lo->disk->disk_name, 339 (unsigned long)ntohl(reply.magic)); 340 result = -EPROTO; 341 goto harderror; 342 } 343 344 req = nbd_find_request(lo, *(struct request **)reply.handle); 345 if (IS_ERR(req)) { 346 result = PTR_ERR(req); 347 if (result != -ENOENT) 348 goto harderror; 349 350 printk(KERN_ERR "%s: Unexpected reply (%p)\n", 351 lo->disk->disk_name, reply.handle); 352 result = -EBADR; 353 goto harderror; 354 } 355 356 if (ntohl(reply.error)) { 357 printk(KERN_ERR "%s: Other side returned error (%d)\n", 358 lo->disk->disk_name, ntohl(reply.error)); 359 req->errors++; 360 return req; 361 } 362 363 dprintk(DBG_RX, "%s: request %p: got reply\n", 364 lo->disk->disk_name, req); 365 if (nbd_cmd(req) == NBD_CMD_READ) { 366 struct req_iterator iter; 367 struct bio_vec *bvec; 368 369 rq_for_each_segment(bvec, req, iter) { 370 result = sock_recv_bvec(lo, bvec); 371 if (result <= 0) { 372 printk(KERN_ERR "%s: Receive data failed (result %d)\n", 373 lo->disk->disk_name, result); 374 req->errors++; 375 return req; 376 } 377 dprintk(DBG_RX, "%s: request %p: got %d bytes data\n", 378 lo->disk->disk_name, req, bvec->bv_len); 379 } 380 } 381 return req; 382 harderror: 383 lo->harderror = result; 384 return NULL; 385 } 386 387 static ssize_t pid_show(struct device *dev, 388 struct device_attribute *attr, char *buf) 389 { 390 struct gendisk *disk = dev_to_disk(dev); 391 392 return sprintf(buf, "%ld\n", 393 (long) ((struct nbd_device *)disk->private_data)->pid); 394 } 395 396 static struct device_attribute pid_attr = { 397 .attr = { .name = "pid", .mode = S_IRUGO}, 398 .show = pid_show, 399 }; 400 401 static int nbd_do_it(struct nbd_device *lo) 402 { 403 struct request *req; 404 int ret; 405 406 BUG_ON(lo->magic != LO_MAGIC); 407 408 lo->pid = current->pid; 409 ret = sysfs_create_file(&disk_to_dev(lo->disk)->kobj, &pid_attr.attr); 410 if (ret) { 411 printk(KERN_ERR "nbd: sysfs_create_file failed!"); 412 lo->pid = 0; 413 return ret; 414 } 415 416 while ((req = nbd_read_stat(lo)) != NULL) 417 nbd_end_request(req); 418 419 sysfs_remove_file(&disk_to_dev(lo->disk)->kobj, &pid_attr.attr); 420 lo->pid = 0; 421 return 0; 422 } 423 424 static void nbd_clear_que(struct nbd_device *lo) 425 { 426 struct request *req; 427 428 BUG_ON(lo->magic != LO_MAGIC); 429 430 /* 431 * Because we have set lo->sock to NULL under the tx_lock, all 432 * modifications to the list must have completed by now. For 433 * the same reason, the active_req must be NULL. 434 * 435 * As a consequence, we don't need to take the spin lock while 436 * purging the list here. 437 */ 438 BUG_ON(lo->sock); 439 BUG_ON(lo->active_req); 440 441 while (!list_empty(&lo->queue_head)) { 442 req = list_entry(lo->queue_head.next, struct request, 443 queuelist); 444 list_del_init(&req->queuelist); 445 req->errors++; 446 nbd_end_request(req); 447 } 448 } 449 450 451 static void nbd_handle_req(struct nbd_device *lo, struct request *req) 452 { 453 if (req->cmd_type != REQ_TYPE_FS) 454 goto error_out; 455 456 nbd_cmd(req) = NBD_CMD_READ; 457 if (rq_data_dir(req) == WRITE) { 458 nbd_cmd(req) = NBD_CMD_WRITE; 459 if (lo->flags & NBD_READ_ONLY) { 460 printk(KERN_ERR "%s: Write on read-only\n", 461 lo->disk->disk_name); 462 goto error_out; 463 } 464 } 465 466 req->errors = 0; 467 468 mutex_lock(&lo->tx_lock); 469 if (unlikely(!lo->sock)) { 470 mutex_unlock(&lo->tx_lock); 471 printk(KERN_ERR "%s: Attempted send on closed socket\n", 472 lo->disk->disk_name); 473 goto error_out; 474 } 475 476 lo->active_req = req; 477 478 if (nbd_send_req(lo, req) != 0) { 479 printk(KERN_ERR "%s: Request send failed\n", 480 lo->disk->disk_name); 481 req->errors++; 482 nbd_end_request(req); 483 } else { 484 spin_lock(&lo->queue_lock); 485 list_add(&req->queuelist, &lo->queue_head); 486 spin_unlock(&lo->queue_lock); 487 } 488 489 lo->active_req = NULL; 490 mutex_unlock(&lo->tx_lock); 491 wake_up_all(&lo->active_wq); 492 493 return; 494 495 error_out: 496 req->errors++; 497 nbd_end_request(req); 498 } 499 500 static int nbd_thread(void *data) 501 { 502 struct nbd_device *lo = data; 503 struct request *req; 504 505 set_user_nice(current, -20); 506 while (!kthread_should_stop() || !list_empty(&lo->waiting_queue)) { 507 /* wait for something to do */ 508 wait_event_interruptible(lo->waiting_wq, 509 kthread_should_stop() || 510 !list_empty(&lo->waiting_queue)); 511 512 /* extract request */ 513 if (list_empty(&lo->waiting_queue)) 514 continue; 515 516 spin_lock_irq(&lo->queue_lock); 517 req = list_entry(lo->waiting_queue.next, struct request, 518 queuelist); 519 list_del_init(&req->queuelist); 520 spin_unlock_irq(&lo->queue_lock); 521 522 /* handle request */ 523 nbd_handle_req(lo, req); 524 } 525 return 0; 526 } 527 528 /* 529 * We always wait for result of write, for now. It would be nice to make it optional 530 * in future 531 * if ((rq_data_dir(req) == WRITE) && (lo->flags & NBD_WRITE_NOCHK)) 532 * { printk( "Warning: Ignoring result!\n"); nbd_end_request( req ); } 533 */ 534 535 static void do_nbd_request(struct request_queue *q) 536 { 537 struct request *req; 538 539 while ((req = blk_fetch_request(q)) != NULL) { 540 struct nbd_device *lo; 541 542 spin_unlock_irq(q->queue_lock); 543 544 dprintk(DBG_BLKDEV, "%s: request %p: dequeued (flags=%x)\n", 545 req->rq_disk->disk_name, req, req->cmd_type); 546 547 lo = req->rq_disk->private_data; 548 549 BUG_ON(lo->magic != LO_MAGIC); 550 551 if (unlikely(!lo->sock)) { 552 printk(KERN_ERR "%s: Attempted send on closed socket\n", 553 lo->disk->disk_name); 554 req->errors++; 555 nbd_end_request(req); 556 spin_lock_irq(q->queue_lock); 557 continue; 558 } 559 560 spin_lock_irq(&lo->queue_lock); 561 list_add_tail(&req->queuelist, &lo->waiting_queue); 562 spin_unlock_irq(&lo->queue_lock); 563 564 wake_up(&lo->waiting_wq); 565 566 spin_lock_irq(q->queue_lock); 567 } 568 } 569 570 /* Must be called with tx_lock held */ 571 572 static int __nbd_ioctl(struct block_device *bdev, struct nbd_device *lo, 573 unsigned int cmd, unsigned long arg) 574 { 575 switch (cmd) { 576 case NBD_DISCONNECT: { 577 struct request sreq; 578 579 printk(KERN_INFO "%s: NBD_DISCONNECT\n", lo->disk->disk_name); 580 581 blk_rq_init(NULL, &sreq); 582 sreq.cmd_type = REQ_TYPE_SPECIAL; 583 nbd_cmd(&sreq) = NBD_CMD_DISC; 584 if (!lo->sock) 585 return -EINVAL; 586 nbd_send_req(lo, &sreq); 587 return 0; 588 } 589 590 case NBD_CLEAR_SOCK: { 591 struct file *file; 592 593 lo->sock = NULL; 594 file = lo->file; 595 lo->file = NULL; 596 nbd_clear_que(lo); 597 BUG_ON(!list_empty(&lo->queue_head)); 598 if (file) 599 fput(file); 600 return 0; 601 } 602 603 case NBD_SET_SOCK: { 604 struct file *file; 605 if (lo->file) 606 return -EBUSY; 607 file = fget(arg); 608 if (file) { 609 struct inode *inode = file->f_path.dentry->d_inode; 610 if (S_ISSOCK(inode->i_mode)) { 611 lo->file = file; 612 lo->sock = SOCKET_I(inode); 613 if (max_part > 0) 614 bdev->bd_invalidated = 1; 615 return 0; 616 } else { 617 fput(file); 618 } 619 } 620 return -EINVAL; 621 } 622 623 case NBD_SET_BLKSIZE: 624 lo->blksize = arg; 625 lo->bytesize &= ~(lo->blksize-1); 626 bdev->bd_inode->i_size = lo->bytesize; 627 set_blocksize(bdev, lo->blksize); 628 set_capacity(lo->disk, lo->bytesize >> 9); 629 return 0; 630 631 case NBD_SET_SIZE: 632 lo->bytesize = arg & ~(lo->blksize-1); 633 bdev->bd_inode->i_size = lo->bytesize; 634 set_blocksize(bdev, lo->blksize); 635 set_capacity(lo->disk, lo->bytesize >> 9); 636 return 0; 637 638 case NBD_SET_TIMEOUT: 639 lo->xmit_timeout = arg * HZ; 640 return 0; 641 642 case NBD_SET_SIZE_BLOCKS: 643 lo->bytesize = ((u64) arg) * lo->blksize; 644 bdev->bd_inode->i_size = lo->bytesize; 645 set_blocksize(bdev, lo->blksize); 646 set_capacity(lo->disk, lo->bytesize >> 9); 647 return 0; 648 649 case NBD_DO_IT: { 650 struct task_struct *thread; 651 struct file *file; 652 int error; 653 654 if (lo->pid) 655 return -EBUSY; 656 if (!lo->file) 657 return -EINVAL; 658 659 mutex_unlock(&lo->tx_lock); 660 661 thread = kthread_create(nbd_thread, lo, lo->disk->disk_name); 662 if (IS_ERR(thread)) { 663 mutex_lock(&lo->tx_lock); 664 return PTR_ERR(thread); 665 } 666 wake_up_process(thread); 667 error = nbd_do_it(lo); 668 kthread_stop(thread); 669 670 mutex_lock(&lo->tx_lock); 671 if (error) 672 return error; 673 sock_shutdown(lo, 0); 674 file = lo->file; 675 lo->file = NULL; 676 nbd_clear_que(lo); 677 printk(KERN_WARNING "%s: queue cleared\n", lo->disk->disk_name); 678 if (file) 679 fput(file); 680 lo->bytesize = 0; 681 bdev->bd_inode->i_size = 0; 682 set_capacity(lo->disk, 0); 683 if (max_part > 0) 684 ioctl_by_bdev(bdev, BLKRRPART, 0); 685 return lo->harderror; 686 } 687 688 case NBD_CLEAR_QUE: 689 /* 690 * This is for compatibility only. The queue is always cleared 691 * by NBD_DO_IT or NBD_CLEAR_SOCK. 692 */ 693 BUG_ON(!lo->sock && !list_empty(&lo->queue_head)); 694 return 0; 695 696 case NBD_PRINT_DEBUG: 697 printk(KERN_INFO "%s: next = %p, prev = %p, head = %p\n", 698 bdev->bd_disk->disk_name, 699 lo->queue_head.next, lo->queue_head.prev, 700 &lo->queue_head); 701 return 0; 702 } 703 return -ENOTTY; 704 } 705 706 static int nbd_ioctl(struct block_device *bdev, fmode_t mode, 707 unsigned int cmd, unsigned long arg) 708 { 709 struct nbd_device *lo = bdev->bd_disk->private_data; 710 int error; 711 712 if (!capable(CAP_SYS_ADMIN)) 713 return -EPERM; 714 715 BUG_ON(lo->magic != LO_MAGIC); 716 717 /* Anyone capable of this syscall can do *real bad* things */ 718 dprintk(DBG_IOCTL, "%s: nbd_ioctl cmd=%s(0x%x) arg=%lu\n", 719 lo->disk->disk_name, ioctl_cmd_to_ascii(cmd), cmd, arg); 720 721 mutex_lock(&lo->tx_lock); 722 error = __nbd_ioctl(bdev, lo, cmd, arg); 723 mutex_unlock(&lo->tx_lock); 724 725 return error; 726 } 727 728 static const struct block_device_operations nbd_fops = 729 { 730 .owner = THIS_MODULE, 731 .ioctl = nbd_ioctl, 732 }; 733 734 /* 735 * And here should be modules and kernel interface 736 * (Just smiley confuses emacs :-) 737 */ 738 739 static int __init nbd_init(void) 740 { 741 int err = -ENOMEM; 742 int i; 743 int part_shift; 744 745 BUILD_BUG_ON(sizeof(struct nbd_request) != 28); 746 747 if (max_part < 0) { 748 printk(KERN_CRIT "nbd: max_part must be >= 0\n"); 749 return -EINVAL; 750 } 751 752 nbd_dev = kcalloc(nbds_max, sizeof(*nbd_dev), GFP_KERNEL); 753 if (!nbd_dev) 754 return -ENOMEM; 755 756 part_shift = 0; 757 if (max_part > 0) { 758 part_shift = fls(max_part); 759 760 /* 761 * Adjust max_part according to part_shift as it is exported 762 * to user space so that user can know the max number of 763 * partition kernel should be able to manage. 764 * 765 * Note that -1 is required because partition 0 is reserved 766 * for the whole disk. 767 */ 768 max_part = (1UL << part_shift) - 1; 769 } 770 771 if ((1UL << part_shift) > DISK_MAX_PARTS) 772 return -EINVAL; 773 774 if (nbds_max > 1UL << (MINORBITS - part_shift)) 775 return -EINVAL; 776 777 for (i = 0; i < nbds_max; i++) { 778 struct gendisk *disk = alloc_disk(1 << part_shift); 779 if (!disk) 780 goto out; 781 nbd_dev[i].disk = disk; 782 /* 783 * The new linux 2.5 block layer implementation requires 784 * every gendisk to have its very own request_queue struct. 785 * These structs are big so we dynamically allocate them. 786 */ 787 disk->queue = blk_init_queue(do_nbd_request, &nbd_lock); 788 if (!disk->queue) { 789 put_disk(disk); 790 goto out; 791 } 792 /* 793 * Tell the block layer that we are not a rotational device 794 */ 795 queue_flag_set_unlocked(QUEUE_FLAG_NONROT, disk->queue); 796 } 797 798 if (register_blkdev(NBD_MAJOR, "nbd")) { 799 err = -EIO; 800 goto out; 801 } 802 803 printk(KERN_INFO "nbd: registered device at major %d\n", NBD_MAJOR); 804 dprintk(DBG_INIT, "nbd: debugflags=0x%x\n", debugflags); 805 806 for (i = 0; i < nbds_max; i++) { 807 struct gendisk *disk = nbd_dev[i].disk; 808 nbd_dev[i].file = NULL; 809 nbd_dev[i].magic = LO_MAGIC; 810 nbd_dev[i].flags = 0; 811 INIT_LIST_HEAD(&nbd_dev[i].waiting_queue); 812 spin_lock_init(&nbd_dev[i].queue_lock); 813 INIT_LIST_HEAD(&nbd_dev[i].queue_head); 814 mutex_init(&nbd_dev[i].tx_lock); 815 init_waitqueue_head(&nbd_dev[i].active_wq); 816 init_waitqueue_head(&nbd_dev[i].waiting_wq); 817 nbd_dev[i].blksize = 1024; 818 nbd_dev[i].bytesize = 0; 819 disk->major = NBD_MAJOR; 820 disk->first_minor = i << part_shift; 821 disk->fops = &nbd_fops; 822 disk->private_data = &nbd_dev[i]; 823 sprintf(disk->disk_name, "nbd%d", i); 824 set_capacity(disk, 0); 825 add_disk(disk); 826 } 827 828 return 0; 829 out: 830 while (i--) { 831 blk_cleanup_queue(nbd_dev[i].disk->queue); 832 put_disk(nbd_dev[i].disk); 833 } 834 kfree(nbd_dev); 835 return err; 836 } 837 838 static void __exit nbd_cleanup(void) 839 { 840 int i; 841 for (i = 0; i < nbds_max; i++) { 842 struct gendisk *disk = nbd_dev[i].disk; 843 nbd_dev[i].magic = 0; 844 if (disk) { 845 del_gendisk(disk); 846 blk_cleanup_queue(disk->queue); 847 put_disk(disk); 848 } 849 } 850 unregister_blkdev(NBD_MAJOR, "nbd"); 851 kfree(nbd_dev); 852 printk(KERN_INFO "nbd: unregistered device at major %d\n", NBD_MAJOR); 853 } 854 855 module_init(nbd_init); 856 module_exit(nbd_cleanup); 857 858 MODULE_DESCRIPTION("Network Block Device"); 859 MODULE_LICENSE("GPL"); 860 861 module_param(nbds_max, int, 0444); 862 MODULE_PARM_DESC(nbds_max, "number of network block devices to initialize (default: 16)"); 863 module_param(max_part, int, 0444); 864 MODULE_PARM_DESC(max_part, "number of partitions per device (default: 0)"); 865 #ifndef NDEBUG 866 module_param(debugflags, int, 0644); 867 MODULE_PARM_DESC(debugflags, "flags for controlling debug output"); 868 #endif 869