/*
 * Network block device - make block devices work over TCP
 *
 * Note that you cannot swap over this thing, yet. It seems to work but
 * deadlocks sometimes - you cannot swap over TCP in general.
 *
 * Copyright 1997-2000, 2008 Pavel Machek <pavel@ucw.cz>
 * Parts copyright 2001 Steven Whitehouse <steve@chygwyn.com>
 *
 * This file is released under GPLv2 or later.
 *
 * (part of code stolen from loop.c)
 */

#include <linux/major.h>

#include <linux/blkdev.h>
#include <linux/module.h>
#include <linux/init.h>
#include <linux/sched.h>
#include <linux/fs.h>
#include <linux/bio.h>
#include <linux/stat.h>
#include <linux/errno.h>
#include <linux/file.h>
#include <linux/ioctl.h>
#include <linux/mutex.h>
#include <linux/compiler.h>
#include <linux/err.h>
#include <linux/kernel.h>
#include <linux/slab.h>
#include <net/sock.h>
#include <linux/net.h>
#include <linux/kthread.h>
#include <linux/types.h>
#include <linux/debugfs.h>

#include <asm/uaccess.h>
#include <asm/types.h>

#include <linux/nbd.h>

struct nbd_device {
	u32 flags;
	struct socket *sock;	/* If == NULL, device is not ready yet */
	int magic;

	spinlock_t queue_lock;
	struct list_head queue_head;	/* Requests awaiting a result */
	struct request *active_req;
	wait_queue_head_t active_wq;
	struct list_head waiting_queue;	/* Requests to be sent */
	wait_queue_head_t waiting_wq;

	struct mutex tx_lock;
	struct gendisk *disk;
	int blksize;
	loff_t bytesize;
	int xmit_timeout;
	bool timedout;
	bool disconnect; /* a disconnect has been requested by the user */

	struct timer_list timeout_timer;
	/* protects initialization and shutdown of the socket */
	spinlock_t sock_lock;
	struct task_struct *task_recv;
	struct task_struct *task_send;

#if IS_ENABLED(CONFIG_DEBUG_FS)
	struct dentry *dbg_dir;
#endif
};

#if IS_ENABLED(CONFIG_DEBUG_FS)
static struct dentry *nbd_dbg_dir;
#endif

#define nbd_name(nbd) ((nbd)->disk->disk_name)

#define NBD_MAGIC 0x68797548

static unsigned int nbds_max = 16;
static struct nbd_device *nbd_dev;
static int max_part;

/*
 * Use just one lock (or at most 1 per NIC). Two arguments for this:
 * 1. Each NIC is essentially a synchronization point for all servers
 *    accessed through that NIC so there's no need to have more locks
 *    than NICs anyway.
 * 2. More locks lead to more "Dirty cache line bouncing" which will slow
 *    down each lock to the point where they're actually slower than just
 *    a single lock.
 * Thanks go to Jens Axboe and Al Viro for their LKML emails explaining this!
 */
static DEFINE_SPINLOCK(nbd_lock);

static inline struct device *nbd_to_dev(struct nbd_device *nbd)
{
	return disk_to_dev(nbd->disk);
}

static bool nbd_is_connected(struct nbd_device *nbd)
{
	return !!nbd->task_recv;
}

static const char *nbdcmd_to_ascii(int cmd)
{
	switch (cmd) {
	case  NBD_CMD_READ: return "read";
	case NBD_CMD_WRITE: return "write";
	case  NBD_CMD_DISC: return "disconnect";
	case NBD_CMD_FLUSH: return "flush";
	case  NBD_CMD_TRIM: return "trim/discard";
	}
	return "invalid";
}

static int nbd_size_clear(struct nbd_device *nbd, struct block_device *bdev)
{
	bdev->bd_inode->i_size = 0;
	set_capacity(nbd->disk, 0);
	kobject_uevent(&nbd_to_dev(nbd)->kobj, KOBJ_CHANGE);

	return 0;
}

static void nbd_size_update(struct nbd_device *nbd, struct block_device *bdev)
{
	if (!nbd_is_connected(nbd))
		return;

	bdev->bd_inode->i_size = nbd->bytesize;
	set_capacity(nbd->disk, nbd->bytesize >> 9);
	kobject_uevent(&nbd_to_dev(nbd)->kobj, KOBJ_CHANGE);
}

static int nbd_size_set(struct nbd_device *nbd, struct block_device *bdev,
			int blocksize, int nr_blocks)
{
	int ret;

	ret = set_blocksize(bdev, blocksize);
	if (ret)
		return ret;

	nbd->blksize = blocksize;
	nbd->bytesize = (loff_t)blocksize * (loff_t)nr_blocks;

	nbd_size_update(nbd, bdev);

	return 0;
}

static void nbd_end_request(struct nbd_device *nbd, struct request *req)
{
	int error = req->errors ? -EIO : 0;
	struct request_queue *q = req->q;
	unsigned long flags;

	dev_dbg(nbd_to_dev(nbd), "request %p: %s\n", req,
		error ? "failed" : "done");

	spin_lock_irqsave(q->queue_lock, flags);
	__blk_end_request_all(req, error);
	spin_unlock_irqrestore(q->queue_lock, flags);
}

/*
 * Forcibly shut down the socket, causing all listeners to error out.
 */
static void sock_shutdown(struct nbd_device *nbd)
{
	spin_lock_irq(&nbd->sock_lock);

	if (!nbd->sock) {
		spin_unlock_irq(&nbd->sock_lock);
		return;
	}

	dev_warn(disk_to_dev(nbd->disk), "shutting down socket\n");
	kernel_sock_shutdown(nbd->sock, SHUT_RDWR);
	sockfd_put(nbd->sock);
	nbd->sock = NULL;
	spin_unlock_irq(&nbd->sock_lock);

	del_timer(&nbd->timeout_timer);
}

static void nbd_xmit_timeout(unsigned long arg)
{
	struct nbd_device *nbd = (struct nbd_device *)arg;
	unsigned long flags;

	if (list_empty(&nbd->queue_head))
		return;

	spin_lock_irqsave(&nbd->sock_lock, flags);

	nbd->timedout = true;

	if (nbd->sock)
		kernel_sock_shutdown(nbd->sock, SHUT_RDWR);

	spin_unlock_irqrestore(&nbd->sock_lock, flags);

	dev_err(nbd_to_dev(nbd), "Connection timed out, shutting down connection\n");
}

/*
 * Send or receive a packet.
 */
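/*
 * A note on the PF_MEMALLOC / __GFP_MEMALLOC handling in sock_xmit(): the
 * transfer may be running on behalf of memory reclaim (e.g. writeback of
 * dirty pages backed by an nbd device), so the task and the socket's
 * allocations are temporarily allowed to dip into the emergency reserves.
 * Otherwise reclaim could block on nbd I/O that is itself stuck waiting
 * for memory, deadlocking the machine.
 */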
static int sock_xmit(struct nbd_device *nbd, int send, void *buf, int size,
		int msg_flags)
{
	struct socket *sock = nbd->sock;
	int result;
	struct msghdr msg;
	struct kvec iov;
	unsigned long pflags = current->flags;

	if (unlikely(!sock)) {
		dev_err(disk_to_dev(nbd->disk),
			"Attempted %s on closed socket in sock_xmit\n",
			(send ? "send" : "recv"));
		return -EINVAL;
	}

	current->flags |= PF_MEMALLOC;
	do {
		sock->sk->sk_allocation = GFP_NOIO | __GFP_MEMALLOC;
		iov.iov_base = buf;
		iov.iov_len = size;
		msg.msg_name = NULL;
		msg.msg_namelen = 0;
		msg.msg_control = NULL;
		msg.msg_controllen = 0;
		msg.msg_flags = msg_flags | MSG_NOSIGNAL;

		if (send)
			result = kernel_sendmsg(sock, &msg, &iov, 1, size);
		else
			result = kernel_recvmsg(sock, &msg, &iov, 1, size,
						msg.msg_flags);

		if (result <= 0) {
			if (result == 0)
				result = -EPIPE; /* short read */
			break;
		}
		size -= result;
		buf += result;
	} while (size > 0);

	tsk_restore_flags(current, pflags, PF_MEMALLOC);

	if (!send && nbd->xmit_timeout)
		mod_timer(&nbd->timeout_timer, jiffies + nbd->xmit_timeout);

	return result;
}

static inline int sock_send_bvec(struct nbd_device *nbd, struct bio_vec *bvec,
		int flags)
{
	int result;
	void *kaddr = kmap(bvec->bv_page);
	result = sock_xmit(nbd, 1, kaddr + bvec->bv_offset,
			   bvec->bv_len, flags);
	kunmap(bvec->bv_page);
	return result;
}

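/*
 * On-the-wire layout, as defined by struct nbd_request and struct nbd_reply
 * in <linux/nbd.h> (all multi-byte fields are big-endian; the 28-byte
 * request size is asserted in nbd_init() below):
 *
 *	request:  __be32 magic (NBD_REQUEST_MAGIC)
 *	          __be32 type  (NBD_CMD_READ/WRITE/DISC/FLUSH/TRIM)
 *	          char   handle[8]  (opaque cookie echoed back in the reply;
 *	                             this driver stores the request pointer)
 *	          __be64 from  (byte offset)
 *	          __be32 len   (byte count)
 *	          ... followed by 'len' bytes of data for NBD_CMD_WRITE
 *
 *	reply:    __be32 magic (NBD_REPLY_MAGIC)
 *	          __be32 error (0 on success)
 *	          char   handle[8]
 *	          ... followed by the data for a successful NBD_CMD_READ
 */
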
/* always call with the tx_lock held */
static int nbd_send_req(struct nbd_device *nbd, struct request *req)
{
	int result, flags;
	struct nbd_request request;
	unsigned long size = blk_rq_bytes(req);
	u32 type;

	if (req->cmd_type == REQ_TYPE_DRV_PRIV)
		type = NBD_CMD_DISC;
	else if (req->cmd_flags & REQ_DISCARD)
		type = NBD_CMD_TRIM;
	else if (req->cmd_flags & REQ_FLUSH)
		type = NBD_CMD_FLUSH;
	else if (rq_data_dir(req) == WRITE)
		type = NBD_CMD_WRITE;
	else
		type = NBD_CMD_READ;

	memset(&request, 0, sizeof(request));
	request.magic = htonl(NBD_REQUEST_MAGIC);
	request.type = htonl(type);
	if (type != NBD_CMD_FLUSH && type != NBD_CMD_DISC) {
		request.from = cpu_to_be64((u64)blk_rq_pos(req) << 9);
		request.len = htonl(size);
	}
	memcpy(request.handle, &req, sizeof(req));

	dev_dbg(nbd_to_dev(nbd), "request %p: sending control (%s@%llu,%uB)\n",
		req, nbdcmd_to_ascii(type),
		(unsigned long long)blk_rq_pos(req) << 9, blk_rq_bytes(req));
	result = sock_xmit(nbd, 1, &request, sizeof(request),
			(type == NBD_CMD_WRITE) ? MSG_MORE : 0);
	if (result <= 0) {
		dev_err(disk_to_dev(nbd->disk),
			"Send control failed (result %d)\n", result);
		return -EIO;
	}

	if (type == NBD_CMD_WRITE) {
		struct req_iterator iter;
		struct bio_vec bvec;
		/*
		 * we are really probing at internals to determine
		 * whether to set MSG_MORE or not...
		 */
		rq_for_each_segment(bvec, req, iter) {
			flags = 0;
			if (!rq_iter_last(bvec, iter))
				flags = MSG_MORE;
			dev_dbg(nbd_to_dev(nbd), "request %p: sending %d bytes data\n",
				req, bvec.bv_len);
			result = sock_send_bvec(nbd, &bvec, flags);
			if (result <= 0) {
				dev_err(disk_to_dev(nbd->disk),
					"Send data failed (result %d)\n",
					result);
				return -EIO;
			}
		}
	}
	return 0;
}

static struct request *nbd_find_request(struct nbd_device *nbd,
					struct request *xreq)
{
	struct request *req, *tmp;
	int err;

	err = wait_event_interruptible(nbd->active_wq, nbd->active_req != xreq);
	if (unlikely(err))
		return ERR_PTR(err);

	spin_lock(&nbd->queue_lock);
	list_for_each_entry_safe(req, tmp, &nbd->queue_head, queuelist) {
		if (req != xreq)
			continue;
		list_del_init(&req->queuelist);
		spin_unlock(&nbd->queue_lock);
		return req;
	}
	spin_unlock(&nbd->queue_lock);

	return ERR_PTR(-ENOENT);
}

static inline int sock_recv_bvec(struct nbd_device *nbd, struct bio_vec *bvec)
{
	int result;
	void *kaddr = kmap(bvec->bv_page);
	result = sock_xmit(nbd, 0, kaddr + bvec->bv_offset, bvec->bv_len,
			MSG_WAITALL);
	kunmap(bvec->bv_page);
	return result;
}

/* An ERR_PTR return means something went wrong; inform userspace. */
static struct request *nbd_read_stat(struct nbd_device *nbd)
{
	int result;
	struct nbd_reply reply;
	struct request *req;

	reply.magic = 0;
	result = sock_xmit(nbd, 0, &reply, sizeof(reply), MSG_WAITALL);
	if (result <= 0) {
		dev_err(disk_to_dev(nbd->disk),
			"Receive control failed (result %d)\n", result);
		return ERR_PTR(result);
	}

	if (ntohl(reply.magic) != NBD_REPLY_MAGIC) {
		dev_err(disk_to_dev(nbd->disk), "Wrong magic (0x%lx)\n",
				(unsigned long)ntohl(reply.magic));
		return ERR_PTR(-EPROTO);
	}

	req = nbd_find_request(nbd, *(struct request **)reply.handle);
	if (IS_ERR(req)) {
		result = PTR_ERR(req);
		if (result != -ENOENT)
			return ERR_PTR(result);

		dev_err(disk_to_dev(nbd->disk), "Unexpected reply (%p)\n",
			reply.handle);
		return ERR_PTR(-EBADR);
	}

	if (ntohl(reply.error)) {
		dev_err(disk_to_dev(nbd->disk), "Other side returned error (%d)\n",
			ntohl(reply.error));
		req->errors++;
		return req;
	}

	dev_dbg(nbd_to_dev(nbd), "request %p: got reply\n", req);
	if (rq_data_dir(req) != WRITE) {
		struct req_iterator iter;
		struct bio_vec bvec;

		rq_for_each_segment(bvec, req, iter) {
			result = sock_recv_bvec(nbd, &bvec);
			if (result <= 0) {
				dev_err(disk_to_dev(nbd->disk), "Receive data failed (result %d)\n",
					result);
				req->errors++;
				return req;
			}
			dev_dbg(nbd_to_dev(nbd), "request %p: got %d bytes data\n",
				req, bvec.bv_len);
		}
	}
	return req;
}

static ssize_t pid_show(struct device *dev,
			struct device_attribute *attr, char *buf)
{
	struct gendisk *disk = dev_to_disk(dev);
	struct nbd_device *nbd = (struct nbd_device *)disk->private_data;

	return sprintf(buf, "%d\n", task_pid_nr(nbd->task_recv));
}

static struct device_attribute pid_attr = {
	.attr = { .name = "pid", .mode = S_IRUGO},
	.show = pid_show,
};

static int nbd_thread_recv(struct nbd_device *nbd, struct block_device *bdev)
{
	struct request *req;
	int ret;

	BUG_ON(nbd->magic != NBD_MAGIC);

	sk_set_memalloc(nbd->sock->sk);

	nbd->task_recv = current;

	ret = device_create_file(disk_to_dev(nbd->disk), &pid_attr);
	if (ret) {
		dev_err(disk_to_dev(nbd->disk), "device_create_file failed!\n");

		nbd->task_recv = NULL;

		return ret;
	}

	nbd_size_update(nbd, bdev);

	while (1) {
		req = nbd_read_stat(nbd);
		if (IS_ERR(req)) {
			ret = PTR_ERR(req);
			break;
		}

		nbd_end_request(nbd, req);
	}

	nbd_size_clear(nbd, bdev);

	device_remove_file(disk_to_dev(nbd->disk), &pid_attr);

	nbd->task_recv = NULL;

	return ret;
}

static void nbd_clear_que(struct nbd_device *nbd)
{
	struct request *req;

	BUG_ON(nbd->magic != NBD_MAGIC);

	/*
	 * Because we have set nbd->sock to NULL under the tx_lock, all
	 * modifications to the list must have completed by now.  For
	 * the same reason, the active_req must be NULL.
	 *
	 * As a consequence, we don't need to take the spin lock while
	 * purging the list here.
	 */
	BUG_ON(nbd->sock);
	BUG_ON(nbd->active_req);

	while (!list_empty(&nbd->queue_head)) {
		req = list_entry(nbd->queue_head.next, struct request,
				 queuelist);
		list_del_init(&req->queuelist);
		req->errors++;
		nbd_end_request(nbd, req);
	}

	while (!list_empty(&nbd->waiting_queue)) {
		req = list_entry(nbd->waiting_queue.next, struct request,
				 queuelist);
		list_del_init(&req->queuelist);
		req->errors++;
		nbd_end_request(nbd, req);
	}
	dev_dbg(disk_to_dev(nbd->disk), "queue cleared\n");
}

static void nbd_handle_req(struct nbd_device *nbd, struct request *req)
{
	if (req->cmd_type != REQ_TYPE_FS)
		goto error_out;

	if (rq_data_dir(req) == WRITE &&
	    (nbd->flags & NBD_FLAG_READ_ONLY)) {
		dev_err(disk_to_dev(nbd->disk),
			"Write on read-only\n");
		goto error_out;
	}

	req->errors = 0;

	mutex_lock(&nbd->tx_lock);
	if (unlikely(!nbd->sock)) {
		mutex_unlock(&nbd->tx_lock);
		dev_err(disk_to_dev(nbd->disk),
			"Attempted send on closed socket\n");
		goto error_out;
	}

	nbd->active_req = req;

	if (nbd->xmit_timeout && list_empty_careful(&nbd->queue_head))
		mod_timer(&nbd->timeout_timer, jiffies + nbd->xmit_timeout);

	if (nbd_send_req(nbd, req) != 0) {
		dev_err(disk_to_dev(nbd->disk), "Request send failed\n");
		req->errors++;
		nbd_end_request(nbd, req);
	} else {
		spin_lock(&nbd->queue_lock);
		list_add_tail(&req->queuelist, &nbd->queue_head);
		spin_unlock(&nbd->queue_lock);
	}

	nbd->active_req = NULL;
	mutex_unlock(&nbd->tx_lock);
	wake_up_all(&nbd->active_wq);

	return;

error_out:
	req->errors++;
	nbd_end_request(nbd, req);
}

static int nbd_thread_send(void *data)
{
	struct nbd_device *nbd = data;
	struct request *req;

	nbd->task_send = current;

	set_user_nice(current, MIN_NICE);
	while (!kthread_should_stop() || !list_empty(&nbd->waiting_queue)) {
		/* wait for something to do */
		wait_event_interruptible(nbd->waiting_wq,
					 kthread_should_stop() ||
					 !list_empty(&nbd->waiting_queue));

		/* extract request */
		if (list_empty(&nbd->waiting_queue))
			continue;

		spin_lock_irq(&nbd->queue_lock);
		req = list_entry(nbd->waiting_queue.next, struct request,
				 queuelist);
		list_del_init(&req->queuelist);
		spin_unlock_irq(&nbd->queue_lock);

		/* handle request */
		nbd_handle_req(nbd, req);
	}

	nbd->task_send = NULL;

	return 0;
}

/*
 * We always wait for the result of a write, for now. It would be nice to
 * make this optional in the future:
 * if ((rq_data_dir(req) == WRITE) && (nbd->flags & NBD_WRITE_NOCHK))
 *   { printk( "Warning: Ignoring result!\n"); nbd_end_request( req ); }
 */

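/*
 * Request flow through the driver, for orientation:
 *
 *	nbd_request_handler()  - dequeues requests from the block layer and
 *	                         parks them on nbd->waiting_queue
 *	nbd_thread_send()      - kthread; pulls from waiting_queue, calls
 *	                         nbd_handle_req() -> nbd_send_req(), then
 *	                         moves the request to nbd->queue_head
 *	nbd_thread_recv()      - runs inside the NBD_DO_IT ioctl; via
 *	                         nbd_read_stat() it matches each reply to a
 *	                         request on queue_head and completes it with
 *	                         nbd_end_request()
 */
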
static void nbd_request_handler(struct request_queue *q)
		__releases(q->queue_lock) __acquires(q->queue_lock)
{
	struct request *req;

	while ((req = blk_fetch_request(q)) != NULL) {
		struct nbd_device *nbd;

		spin_unlock_irq(q->queue_lock);

		nbd = req->rq_disk->private_data;

		BUG_ON(nbd->magic != NBD_MAGIC);

		dev_dbg(nbd_to_dev(nbd), "request %p: dequeued (flags=%x)\n",
			req, req->cmd_type);

		if (unlikely(!nbd->sock)) {
			dev_err_ratelimited(disk_to_dev(nbd->disk),
					    "Attempted send on closed socket\n");
			req->errors++;
			nbd_end_request(nbd, req);
			spin_lock_irq(q->queue_lock);
			continue;
		}

		spin_lock_irq(&nbd->queue_lock);
		list_add_tail(&req->queuelist, &nbd->waiting_queue);
		spin_unlock_irq(&nbd->queue_lock);

		wake_up(&nbd->waiting_wq);

		spin_lock_irq(q->queue_lock);
	}
}

static int nbd_set_socket(struct nbd_device *nbd, struct socket *sock)
{
	int ret = 0;

	spin_lock_irq(&nbd->sock_lock);

	if (nbd->sock) {
		ret = -EBUSY;
		goto out;
	}

	nbd->sock = sock;

out:
	spin_unlock_irq(&nbd->sock_lock);

	return ret;
}

/* Reset all properties of an NBD device */
static void nbd_reset(struct nbd_device *nbd)
{
	nbd->disconnect = false;
	nbd->timedout = false;
	nbd->blksize = 1024;
	nbd->bytesize = 0;
	set_capacity(nbd->disk, 0);
	nbd->flags = 0;
	nbd->xmit_timeout = 0;
	queue_flag_clear_unlocked(QUEUE_FLAG_DISCARD, nbd->disk->queue);
	del_timer_sync(&nbd->timeout_timer);
}

static void nbd_bdev_reset(struct block_device *bdev)
{
	set_device_ro(bdev, false);
	bdev->bd_inode->i_size = 0;
	if (max_part > 0) {
		blkdev_reread_part(bdev);
		bdev->bd_invalidated = 1;
	}
}

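/*
 * nbd->flags carries the export flags that userspace (typically nbd-client)
 * negotiated with the server and handed down via the NBD_SET_FLAGS ioctl;
 * translate them into block layer queue settings here.
 */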
static void nbd_parse_flags(struct nbd_device *nbd, struct block_device *bdev)
{
	if (nbd->flags & NBD_FLAG_READ_ONLY)
		set_device_ro(bdev, true);
	if (nbd->flags & NBD_FLAG_SEND_TRIM)
		queue_flag_set_unlocked(QUEUE_FLAG_DISCARD, nbd->disk->queue);
	if (nbd->flags & NBD_FLAG_SEND_FLUSH)
		blk_queue_flush(nbd->disk->queue, REQ_FLUSH);
	else
		blk_queue_flush(nbd->disk->queue, 0);
}

static int nbd_dev_dbg_init(struct nbd_device *nbd);
static void nbd_dev_dbg_close(struct nbd_device *nbd);

/* Must be called with tx_lock held */
static int __nbd_ioctl(struct block_device *bdev, struct nbd_device *nbd,
		       unsigned int cmd, unsigned long arg)
{
	switch (cmd) {
	case NBD_DISCONNECT: {
		struct request sreq;

		dev_info(disk_to_dev(nbd->disk), "NBD_DISCONNECT\n");
		if (!nbd->sock)
			return -EINVAL;

		mutex_unlock(&nbd->tx_lock);
		fsync_bdev(bdev);
		mutex_lock(&nbd->tx_lock);
		blk_rq_init(NULL, &sreq);
		sreq.cmd_type = REQ_TYPE_DRV_PRIV;

		/* Check again after getting mutex back.  */
		if (!nbd->sock)
			return -EINVAL;

		nbd->disconnect = true;

		nbd_send_req(nbd, &sreq);
		return 0;
	}

	case NBD_CLEAR_SOCK:
		sock_shutdown(nbd);
		nbd_clear_que(nbd);
		BUG_ON(!list_empty(&nbd->queue_head));
		BUG_ON(!list_empty(&nbd->waiting_queue));
		kill_bdev(bdev);
		return 0;

	case NBD_SET_SOCK: {
		int err;
		struct socket *sock = sockfd_lookup(arg, &err);

		if (!sock)
			return err;

		err = nbd_set_socket(nbd, sock);
		if (!err && max_part)
			bdev->bd_invalidated = 1;

		return err;
	}

	case NBD_SET_BLKSIZE: {
		loff_t bsize = div_s64(nbd->bytesize, arg);

		return nbd_size_set(nbd, bdev, arg, bsize);
	}

	case NBD_SET_SIZE:
		return nbd_size_set(nbd, bdev, nbd->blksize,
				    arg / nbd->blksize);

	case NBD_SET_SIZE_BLOCKS:
		return nbd_size_set(nbd, bdev, nbd->blksize, arg);

	case NBD_SET_TIMEOUT:
		nbd->xmit_timeout = arg * HZ;
		if (arg)
			mod_timer(&nbd->timeout_timer,
				  jiffies + nbd->xmit_timeout);
		else
			del_timer_sync(&nbd->timeout_timer);

		return 0;

	case NBD_SET_FLAGS:
		nbd->flags = arg;
		return 0;

	case NBD_DO_IT: {
		struct task_struct *thread;
		int error;

		if (nbd->task_recv)
			return -EBUSY;
		if (!nbd->sock)
			return -EINVAL;

		mutex_unlock(&nbd->tx_lock);

		nbd_parse_flags(nbd, bdev);

		thread = kthread_run(nbd_thread_send, nbd, "%s",
				     nbd_name(nbd));
		if (IS_ERR(thread)) {
			mutex_lock(&nbd->tx_lock);
			return PTR_ERR(thread);
		}

		nbd_dev_dbg_init(nbd);
		error = nbd_thread_recv(nbd, bdev);
		nbd_dev_dbg_close(nbd);
		kthread_stop(thread);

		mutex_lock(&nbd->tx_lock);

		sock_shutdown(nbd);
		nbd_clear_que(nbd);
		kill_bdev(bdev);
		nbd_bdev_reset(bdev);

		if (nbd->disconnect) /* user requested, ignore socket errors */
			error = 0;
		if (nbd->timedout)
			error = -ETIMEDOUT;

		nbd_reset(nbd);

		return error;
	}

	case NBD_CLEAR_QUE:
		/*
		 * This is for compatibility only.  The queue is always cleared
		 * by NBD_DO_IT or NBD_CLEAR_SOCK.
		 */
		return 0;

	case NBD_PRINT_DEBUG:
		dev_info(disk_to_dev(nbd->disk),
			"next = %p, prev = %p, head = %p\n",
			nbd->queue_head.next, nbd->queue_head.prev,
			&nbd->queue_head);
		return 0;
	}
	return -ENOTTY;
}
840 
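/*
 * For orientation, a sketch of how userspace drives these ioctls (roughly
 * what nbd-client does; error handling omitted, values illustrative):
 *
 *	int nbd = open("/dev/nbd0", O_RDWR);
 *	ioctl(nbd, NBD_SET_SOCK, sockfd);	// connected TCP socket
 *	ioctl(nbd, NBD_SET_BLKSIZE, 4096);
 *	ioctl(nbd, NBD_SET_SIZE_BLOCKS, nr_blocks);
 *	ioctl(nbd, NBD_DO_IT);			// blocks until disconnect
 *	ioctl(nbd, NBD_CLEAR_QUE);
 *	ioctl(nbd, NBD_CLEAR_SOCK);
 */
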
static int nbd_ioctl(struct block_device *bdev, fmode_t mode,
		     unsigned int cmd, unsigned long arg)
{
	struct nbd_device *nbd = bdev->bd_disk->private_data;
	int error;

	if (!capable(CAP_SYS_ADMIN))
		return -EPERM;

	BUG_ON(nbd->magic != NBD_MAGIC);

	mutex_lock(&nbd->tx_lock);
	error = __nbd_ioctl(bdev, nbd, cmd, arg);
	mutex_unlock(&nbd->tx_lock);

	return error;
}

static const struct block_device_operations nbd_fops =
{
	.owner =	THIS_MODULE,
	.ioctl =	nbd_ioctl,
	.compat_ioctl =	nbd_ioctl,
};

#if IS_ENABLED(CONFIG_DEBUG_FS)

static int nbd_dbg_tasks_show(struct seq_file *s, void *unused)
{
	struct nbd_device *nbd = s->private;

	if (nbd->task_recv)
		seq_printf(s, "recv: %d\n", task_pid_nr(nbd->task_recv));
	if (nbd->task_send)
		seq_printf(s, "send: %d\n", task_pid_nr(nbd->task_send));

	return 0;
}

static int nbd_dbg_tasks_open(struct inode *inode, struct file *file)
{
	return single_open(file, nbd_dbg_tasks_show, inode->i_private);
}

static const struct file_operations nbd_dbg_tasks_ops = {
	.open = nbd_dbg_tasks_open,
	.read = seq_read,
	.llseek = seq_lseek,
	.release = single_release,
};

static int nbd_dbg_flags_show(struct seq_file *s, void *unused)
{
	struct nbd_device *nbd = s->private;
	u32 flags = nbd->flags;

	seq_printf(s, "Hex: 0x%08x\n\n", flags);

	seq_puts(s, "Known flags:\n");

	if (flags & NBD_FLAG_HAS_FLAGS)
		seq_puts(s, "NBD_FLAG_HAS_FLAGS\n");
	if (flags & NBD_FLAG_READ_ONLY)
		seq_puts(s, "NBD_FLAG_READ_ONLY\n");
	if (flags & NBD_FLAG_SEND_FLUSH)
		seq_puts(s, "NBD_FLAG_SEND_FLUSH\n");
	if (flags & NBD_FLAG_SEND_TRIM)
		seq_puts(s, "NBD_FLAG_SEND_TRIM\n");

	return 0;
}

static int nbd_dbg_flags_open(struct inode *inode, struct file *file)
{
	return single_open(file, nbd_dbg_flags_show, inode->i_private);
}

static const struct file_operations nbd_dbg_flags_ops = {
	.open = nbd_dbg_flags_open,
	.read = seq_read,
	.llseek = seq_lseek,
	.release = single_release,
};

static int nbd_dev_dbg_init(struct nbd_device *nbd)
{
	struct dentry *dir;

	if (!nbd_dbg_dir)
		return -EIO;

	dir = debugfs_create_dir(nbd_name(nbd), nbd_dbg_dir);
	if (!dir) {
		dev_err(nbd_to_dev(nbd), "Failed to create debugfs dir for '%s'\n",
			nbd_name(nbd));
		return -EIO;
	}
	nbd->dbg_dir = dir;

	debugfs_create_file("tasks", 0444, dir, nbd, &nbd_dbg_tasks_ops);
	debugfs_create_u64("size_bytes", 0444, dir, &nbd->bytesize);
	debugfs_create_u32("timeout", 0444, dir, &nbd->xmit_timeout);
	debugfs_create_u32("blocksize", 0444, dir, &nbd->blksize);
	debugfs_create_file("flags", 0444, dir, nbd, &nbd_dbg_flags_ops);

	return 0;
}

static void nbd_dev_dbg_close(struct nbd_device *nbd)
{
	debugfs_remove_recursive(nbd->dbg_dir);
}

static int nbd_dbg_init(void)
{
	struct dentry *dbg_dir;

	dbg_dir = debugfs_create_dir("nbd", NULL);
	if (!dbg_dir)
		return -EIO;

	nbd_dbg_dir = dbg_dir;

	return 0;
}

static void nbd_dbg_close(void)
{
	debugfs_remove_recursive(nbd_dbg_dir);
}

#else  /* IS_ENABLED(CONFIG_DEBUG_FS) */

static int nbd_dev_dbg_init(struct nbd_device *nbd)
{
	return 0;
}

static void nbd_dev_dbg_close(struct nbd_device *nbd)
{
}

static int nbd_dbg_init(void)
{
	return 0;
}

static void nbd_dbg_close(void)
{
}

#endif

/*
 * And here is the module and kernel interface.
 * (Just a smiley confuses emacs :-)
 */

static int __init nbd_init(void)
{
	int err = -ENOMEM;
	int i;
	int part_shift;

	BUILD_BUG_ON(sizeof(struct nbd_request) != 28);

	if (max_part < 0) {
		printk(KERN_ERR "nbd: max_part must be >= 0\n");
		return -EINVAL;
	}

	part_shift = 0;
	if (max_part > 0) {
		part_shift = fls(max_part);

		/*
		 * Adjust max_part according to part_shift as it is exported
		 * to user space so that users can know the maximum number of
		 * partitions the kernel should be able to manage.
		 *
		 * Note that -1 is required because partition 0 is reserved
		 * for the whole disk.
		 */
		max_part = (1UL << part_shift) - 1;
	}
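
	/*
	 * Worked example: max_part=3 gives part_shift = fls(3) = 2, so each
	 * disk gets 1 << 2 = 4 minors: minor 0 for the whole disk plus up
	 * to max_part = (1 << 2) - 1 = 3 partitions.
	 */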

	if ((1UL << part_shift) > DISK_MAX_PARTS)
		return -EINVAL;

	if (nbds_max > 1UL << (MINORBITS - part_shift))
		return -EINVAL;

	nbd_dev = kcalloc(nbds_max, sizeof(*nbd_dev), GFP_KERNEL);
	if (!nbd_dev)
		return -ENOMEM;

	for (i = 0; i < nbds_max; i++) {
		struct gendisk *disk = alloc_disk(1 << part_shift);
		if (!disk)
			goto out;
		nbd_dev[i].disk = disk;
		/*
		 * Every gendisk must have its own request_queue; these
		 * structs are big, so we allocate them dynamically.
		 */
		disk->queue = blk_init_queue(nbd_request_handler, &nbd_lock);
		if (!disk->queue) {
			put_disk(disk);
			goto out;
		}
		/*
		 * Tell the block layer that we are not a rotational device
		 */
		queue_flag_set_unlocked(QUEUE_FLAG_NONROT, disk->queue);
		queue_flag_clear_unlocked(QUEUE_FLAG_ADD_RANDOM, disk->queue);
		disk->queue->limits.discard_granularity = 512;
		blk_queue_max_discard_sectors(disk->queue, UINT_MAX);
		disk->queue->limits.discard_zeroes_data = 0;
		blk_queue_max_hw_sectors(disk->queue, 65536);
		disk->queue->limits.max_sectors = 256;
	}

	if (register_blkdev(NBD_MAJOR, "nbd")) {
		err = -EIO;
		goto out;
	}

	printk(KERN_INFO "nbd: registered device at major %d\n", NBD_MAJOR);

	nbd_dbg_init();

	for (i = 0; i < nbds_max; i++) {
		struct gendisk *disk = nbd_dev[i].disk;
		nbd_dev[i].magic = NBD_MAGIC;
		INIT_LIST_HEAD(&nbd_dev[i].waiting_queue);
		spin_lock_init(&nbd_dev[i].queue_lock);
		spin_lock_init(&nbd_dev[i].sock_lock);
		INIT_LIST_HEAD(&nbd_dev[i].queue_head);
		mutex_init(&nbd_dev[i].tx_lock);
		init_timer(&nbd_dev[i].timeout_timer);
		nbd_dev[i].timeout_timer.function = nbd_xmit_timeout;
		nbd_dev[i].timeout_timer.data = (unsigned long)&nbd_dev[i];
		init_waitqueue_head(&nbd_dev[i].active_wq);
		init_waitqueue_head(&nbd_dev[i].waiting_wq);
		disk->major = NBD_MAJOR;
		disk->first_minor = i << part_shift;
		disk->fops = &nbd_fops;
		disk->private_data = &nbd_dev[i];
		sprintf(disk->disk_name, "nbd%d", i);
		nbd_reset(&nbd_dev[i]);
		add_disk(disk);
	}

	return 0;
out:
	while (i--) {
		blk_cleanup_queue(nbd_dev[i].disk->queue);
		put_disk(nbd_dev[i].disk);
	}
	kfree(nbd_dev);
	return err;
}

static void __exit nbd_cleanup(void)
{
	int i;

	nbd_dbg_close();

	for (i = 0; i < nbds_max; i++) {
		struct gendisk *disk = nbd_dev[i].disk;
		nbd_dev[i].magic = 0;
		if (disk) {
			del_gendisk(disk);
			blk_cleanup_queue(disk->queue);
			put_disk(disk);
		}
	}
	unregister_blkdev(NBD_MAJOR, "nbd");
	kfree(nbd_dev);
	printk(KERN_INFO "nbd: unregistered device at major %d\n", NBD_MAJOR);
}

module_init(nbd_init);
module_exit(nbd_cleanup);

MODULE_DESCRIPTION("Network Block Device");
MODULE_LICENSE("GPL");

module_param(nbds_max, uint, 0444);
MODULE_PARM_DESC(nbds_max, "number of network block devices to initialize (default: 16)");
module_param(max_part, int, 0444);
MODULE_PARM_DESC(max_part, "number of partitions per device (default: 0)");
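
/*
 * Example (illustrative values): load the driver with four devices, each
 * supporting up to eight partitions:
 *
 *	modprobe nbd nbds_max=4 max_part=8
 */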
1135