xref: /openbmc/linux/drivers/block/nbd.c (revision 6abeae2a)
1 // SPDX-License-Identifier: GPL-2.0-or-later
2 /*
3  * Network block device - make block devices work over TCP
4  *
5  * Note that you cannot swap over this thing yet. It seems to work, but it
6  * deadlocks sometimes - you cannot swap over TCP in general.
7  *
8  * Copyright 1997-2000, 2008 Pavel Machek <pavel@ucw.cz>
9  * Parts copyright 2001 Steven Whitehouse <steve@chygwyn.com>
10  *
11  * (part of code stolen from loop.c)
12  */
13 
14 #include <linux/major.h>
15 
16 #include <linux/blkdev.h>
17 #include <linux/module.h>
18 #include <linux/init.h>
19 #include <linux/sched.h>
20 #include <linux/sched/mm.h>
21 #include <linux/fs.h>
22 #include <linux/bio.h>
23 #include <linux/stat.h>
24 #include <linux/errno.h>
25 #include <linux/file.h>
26 #include <linux/ioctl.h>
27 #include <linux/mutex.h>
28 #include <linux/compiler.h>
29 #include <linux/completion.h>
30 #include <linux/err.h>
31 #include <linux/kernel.h>
32 #include <linux/slab.h>
33 #include <net/sock.h>
34 #include <linux/net.h>
35 #include <linux/kthread.h>
36 #include <linux/types.h>
37 #include <linux/debugfs.h>
38 #include <linux/blk-mq.h>
39 
40 #include <linux/uaccess.h>
41 #include <asm/types.h>
42 
43 #include <linux/nbd.h>
44 #include <linux/nbd-netlink.h>
45 #include <net/genetlink.h>
46 
47 #define CREATE_TRACE_POINTS
48 #include <trace/events/nbd.h>
49 
50 static DEFINE_IDR(nbd_index_idr);
51 static DEFINE_MUTEX(nbd_index_mutex);
52 static int nbd_total_devices = 0;
53 
54 struct nbd_sock {
55 	struct socket *sock;
56 	struct mutex tx_lock;
57 	struct request *pending;
58 	int sent;
59 	bool dead;
60 	int fallback_index;
61 	int cookie;
62 };
63 
64 struct recv_thread_args {
65 	struct work_struct work;
66 	struct nbd_device *nbd;
67 	int index;
68 };
69 
70 struct link_dead_args {
71 	struct work_struct work;
72 	int index;
73 };
74 
75 #define NBD_RT_TIMEDOUT			0
76 #define NBD_RT_DISCONNECT_REQUESTED	1
77 #define NBD_RT_DISCONNECTED		2
78 #define NBD_RT_HAS_PID_FILE		3
79 #define NBD_RT_HAS_CONFIG_REF		4
80 #define NBD_RT_BOUND			5
81 #define NBD_RT_DESTROY_ON_DISCONNECT	6
82 #define NBD_RT_DISCONNECT_ON_CLOSE	7
83 
84 #define NBD_DESTROY_ON_DISCONNECT	0
85 #define NBD_DISCONNECT_REQUESTED	1
86 
87 struct nbd_config {
88 	u32 flags;
89 	unsigned long runtime_flags;
90 	u64 dead_conn_timeout;
91 
92 	struct nbd_sock **socks;
93 	int num_connections;
94 	atomic_t live_connections;
95 	wait_queue_head_t conn_wait;
96 
97 	atomic_t recv_threads;
98 	wait_queue_head_t recv_wq;
99 	loff_t blksize;
100 	loff_t bytesize;
101 #if IS_ENABLED(CONFIG_DEBUG_FS)
102 	struct dentry *dbg_dir;
103 #endif
104 };
105 
106 struct nbd_device {
107 	struct blk_mq_tag_set tag_set;
108 
109 	int index;
110 	refcount_t config_refs;
111 	refcount_t refs;
112 	struct nbd_config *config;
113 	struct mutex config_lock;
114 	struct gendisk *disk;
115 	struct workqueue_struct *recv_workq;
116 
117 	struct list_head list;
118 	struct task_struct *task_recv;
119 	struct task_struct *task_setup;
120 
121 	struct completion *destroy_complete;
122 	unsigned long flags;
123 };
124 
125 #define NBD_CMD_REQUEUED	1
126 
127 struct nbd_cmd {
128 	struct nbd_device *nbd;
129 	struct mutex lock;
130 	int index;
131 	int cookie;
132 	int retries;
133 	blk_status_t status;
134 	unsigned long flags;
135 	u32 cmd_cookie;
136 };
137 
138 #if IS_ENABLED(CONFIG_DEBUG_FS)
139 static struct dentry *nbd_dbg_dir;
140 #endif
141 
142 #define nbd_name(nbd) ((nbd)->disk->disk_name)
143 
144 #define NBD_MAGIC 0x68797548
145 
146 #define NBD_DEF_BLKSIZE 1024
147 
148 static unsigned int nbds_max = 16;
149 static int max_part = 16;
150 static int part_shift;
151 
152 static int nbd_dev_dbg_init(struct nbd_device *nbd);
153 static void nbd_dev_dbg_close(struct nbd_device *nbd);
154 static void nbd_config_put(struct nbd_device *nbd);
155 static void nbd_connect_reply(struct genl_info *info, int index);
156 static int nbd_genl_status(struct sk_buff *skb, struct genl_info *info);
157 static void nbd_dead_link_work(struct work_struct *work);
158 static void nbd_disconnect_and_put(struct nbd_device *nbd);
159 
160 static inline struct device *nbd_to_dev(struct nbd_device *nbd)
161 {
162 	return disk_to_dev(nbd->disk);
163 }
164 
165 static void nbd_requeue_cmd(struct nbd_cmd *cmd)
166 {
167 	struct request *req = blk_mq_rq_from_pdu(cmd);
168 
169 	if (!test_and_set_bit(NBD_CMD_REQUEUED, &cmd->flags))
170 		blk_mq_requeue_request(req, true);
171 }
172 
173 #define NBD_COOKIE_BITS 32
174 
175 static u64 nbd_cmd_handle(struct nbd_cmd *cmd)
176 {
177 	struct request *req = blk_mq_rq_from_pdu(cmd);
178 	u32 tag = blk_mq_unique_tag(req);
179 	u64 cookie = cmd->cmd_cookie;
180 
181 	return (cookie << NBD_COOKIE_BITS) | tag;
182 }
183 
184 static u32 nbd_handle_to_tag(u64 handle)
185 {
186 	return (u32)handle;
187 }
188 
189 static u32 nbd_handle_to_cookie(u64 handle)
190 {
191 	return (u32)(handle >> NBD_COOKIE_BITS);
192 }
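
/*
 * A worked example of the handle layout, for illustration: the 64-bit
 * on-wire handle packs the command's reuse cookie in the upper 32 bits
 * and the blk-mq unique tag in the lower 32 bits, so a reply can be
 * matched back to its request and stale replies aimed at an earlier
 * incarnation of the command can be detected:
 *
 *	cmd->cmd_cookie = 0x00000002, tag = 0x00010005 (hw queue 1, tag 5)
 *	nbd_cmd_handle(cmd)          == 0x0000000200010005
 *	nbd_handle_to_tag(handle)    == 0x00010005
 *	nbd_handle_to_cookie(handle) == 0x00000002
 */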
193 
194 static const char *nbdcmd_to_ascii(int cmd)
195 {
196 	switch (cmd) {
197 	case  NBD_CMD_READ: return "read";
198 	case NBD_CMD_WRITE: return "write";
199 	case  NBD_CMD_DISC: return "disconnect";
200 	case NBD_CMD_FLUSH: return "flush";
201 	case  NBD_CMD_TRIM: return "trim/discard";
202 	}
203 	return "invalid";
204 }
205 
206 static ssize_t pid_show(struct device *dev,
207 			struct device_attribute *attr, char *buf)
208 {
209 	struct gendisk *disk = dev_to_disk(dev);
210 	struct nbd_device *nbd = (struct nbd_device *)disk->private_data;
211 
212 	return sprintf(buf, "%d\n", task_pid_nr(nbd->task_recv));
213 }
214 
215 static const struct device_attribute pid_attr = {
216 	.attr = { .name = "pid", .mode = 0444},
217 	.show = pid_show,
218 };
219 
220 static void nbd_dev_remove(struct nbd_device *nbd)
221 {
222 	struct gendisk *disk = nbd->disk;
223 	struct request_queue *q;
224 
225 	if (disk) {
226 		q = disk->queue;
227 		del_gendisk(disk);
228 		blk_cleanup_queue(q);
229 		blk_mq_free_tag_set(&nbd->tag_set);
230 		disk->private_data = NULL;
231 		put_disk(disk);
232 	}
233 
234 	/*
235 	 * Complete this last, just before the nbd is freed, to make sure
236 	 * that the disk and the related kobject have been completely
237 	 * removed first, so that a duplicate device with the same name
238 	 * cannot be created in the meantime.
239 	 */
240 	if (test_bit(NBD_DESTROY_ON_DISCONNECT, &nbd->flags) && nbd->destroy_complete)
241 		complete(nbd->destroy_complete);
242 
243 	kfree(nbd);
244 }
245 
246 static void nbd_put(struct nbd_device *nbd)
247 {
248 	if (refcount_dec_and_mutex_lock(&nbd->refs,
249 					&nbd_index_mutex)) {
250 		idr_remove(&nbd_index_idr, nbd->index);
251 		nbd_dev_remove(nbd);
252 		mutex_unlock(&nbd_index_mutex);
253 	}
254 }
255 
256 static int nbd_disconnected(struct nbd_config *config)
257 {
258 	return test_bit(NBD_RT_DISCONNECTED, &config->runtime_flags) ||
259 		test_bit(NBD_RT_DISCONNECT_REQUESTED, &config->runtime_flags);
260 }
261 
262 static void nbd_mark_nsock_dead(struct nbd_device *nbd, struct nbd_sock *nsock,
263 				int notify)
264 {
265 	if (!nsock->dead && notify && !nbd_disconnected(nbd->config)) {
266 		struct link_dead_args *args;
267 		args = kmalloc(sizeof(struct link_dead_args), GFP_NOIO);
268 		if (args) {
269 			INIT_WORK(&args->work, nbd_dead_link_work);
270 			args->index = nbd->index;
271 			queue_work(system_wq, &args->work);
272 		}
273 	}
274 	if (!nsock->dead) {
275 		kernel_sock_shutdown(nsock->sock, SHUT_RDWR);
276 		if (atomic_dec_return(&nbd->config->live_connections) == 0) {
277 			if (test_and_clear_bit(NBD_RT_DISCONNECT_REQUESTED,
278 					       &nbd->config->runtime_flags)) {
279 				set_bit(NBD_RT_DISCONNECTED,
280 					&nbd->config->runtime_flags);
281 				dev_info(nbd_to_dev(nbd),
282 					"Disconnected due to user request.\n");
283 			}
284 		}
285 	}
286 	nsock->dead = true;
287 	nsock->pending = NULL;
288 	nsock->sent = 0;
289 }
290 
291 static void nbd_size_clear(struct nbd_device *nbd)
292 {
293 	if (nbd->config->bytesize) {
294 		set_capacity(nbd->disk, 0);
295 		kobject_uevent(&nbd_to_dev(nbd)->kobj, KOBJ_CHANGE);
296 	}
297 }
298 
299 static int nbd_set_size(struct nbd_device *nbd, loff_t bytesize,
300 		loff_t blksize)
301 {
302 	if (!blksize)
303 		blksize = NBD_DEF_BLKSIZE;
304 	if (blksize < 512 || blksize > PAGE_SIZE || !is_power_of_2(blksize))
305 		return -EINVAL;
306 
307 	nbd->config->bytesize = bytesize;
308 	nbd->config->blksize = blksize;
309 
310 	if (!nbd->task_recv)
311 		return 0;
312 
313 	if (nbd->config->flags & NBD_FLAG_SEND_TRIM) {
314 		nbd->disk->queue->limits.discard_granularity = blksize;
315 		nbd->disk->queue->limits.discard_alignment = blksize;
316 		blk_queue_max_discard_sectors(nbd->disk->queue, UINT_MAX);
317 	}
318 	blk_queue_logical_block_size(nbd->disk->queue, blksize);
319 	blk_queue_physical_block_size(nbd->disk->queue, blksize);
320 
321 	if (max_part)
322 		set_bit(GD_NEED_PART_SCAN, &nbd->disk->state);
323 	if (!set_capacity_and_notify(nbd->disk, bytesize >> 9))
324 		kobject_uevent(&nbd_to_dev(nbd)->kobj, KOBJ_CHANGE);
325 	return 0;
326 }
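
/*
 * A small worked example of the sizing math above (the values are
 * illustrative only): with bytesize = 1 GiB and blksize = 4096, the
 * logical and physical block sizes become 4096 and the capacity passed
 * to set_capacity_and_notify() is bytesize >> 9 = 2097152 512-byte
 * sectors. A blksize of 0 falls back to NBD_DEF_BLKSIZE (1024); anything
 * below 512, above PAGE_SIZE, or not a power of two is rejected with
 * -EINVAL.
 */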
327 
328 static void nbd_complete_rq(struct request *req)
329 {
330 	struct nbd_cmd *cmd = blk_mq_rq_to_pdu(req);
331 
332 	dev_dbg(nbd_to_dev(cmd->nbd), "request %p: %s\n", req,
333 		cmd->status ? "failed" : "done");
334 
335 	blk_mq_end_request(req, cmd->status);
336 }
337 
338 /*
339  * Forcibly shut down the socket, causing all listeners to error out.
340  */
341 static void sock_shutdown(struct nbd_device *nbd)
342 {
343 	struct nbd_config *config = nbd->config;
344 	int i;
345 
346 	if (config->num_connections == 0)
347 		return;
348 	if (test_and_set_bit(NBD_RT_DISCONNECTED, &config->runtime_flags))
349 		return;
350 
351 	for (i = 0; i < config->num_connections; i++) {
352 		struct nbd_sock *nsock = config->socks[i];
353 		mutex_lock(&nsock->tx_lock);
354 		nbd_mark_nsock_dead(nbd, nsock, 0);
355 		mutex_unlock(&nsock->tx_lock);
356 	}
357 	dev_warn(disk_to_dev(nbd->disk), "shutting down sockets\n");
358 }
359 
360 static u32 req_to_nbd_cmd_type(struct request *req)
361 {
362 	switch (req_op(req)) {
363 	case REQ_OP_DISCARD:
364 		return NBD_CMD_TRIM;
365 	case REQ_OP_FLUSH:
366 		return NBD_CMD_FLUSH;
367 	case REQ_OP_WRITE:
368 		return NBD_CMD_WRITE;
369 	case REQ_OP_READ:
370 		return NBD_CMD_READ;
371 	default:
372 		return U32_MAX;
373 	}
374 }
375 
376 static enum blk_eh_timer_return nbd_xmit_timeout(struct request *req,
377 						 bool reserved)
378 {
379 	struct nbd_cmd *cmd = blk_mq_rq_to_pdu(req);
380 	struct nbd_device *nbd = cmd->nbd;
381 	struct nbd_config *config;
382 
383 	if (!mutex_trylock(&cmd->lock))
384 		return BLK_EH_RESET_TIMER;
385 
386 	if (!refcount_inc_not_zero(&nbd->config_refs)) {
387 		cmd->status = BLK_STS_TIMEOUT;
388 		mutex_unlock(&cmd->lock);
389 		goto done;
390 	}
391 	config = nbd->config;
392 
393 	if (config->num_connections > 1 ||
394 	    (config->num_connections == 1 && nbd->tag_set.timeout)) {
395 		dev_err_ratelimited(nbd_to_dev(nbd),
396 				    "Connection timed out, retrying (%d/%d alive)\n",
397 				    atomic_read(&config->live_connections),
398 				    config->num_connections);
399 		/*
400 		 * Hooray, we have more connections; requeue this IO and the
401 		 * submit path will put it on a real connection. Or, if only one
402 		 * connection is configured, the submit path will wait until a
403 		 * new connection is reconfigured or the dead connection timeout expires.
404 		 */
405 		if (config->socks) {
406 			if (cmd->index < config->num_connections) {
407 				struct nbd_sock *nsock =
408 					config->socks[cmd->index];
409 				mutex_lock(&nsock->tx_lock);
410 				/* We can have multiple outstanding requests, and
411 				 * we don't want to mark the nsock dead if we've
412 				 * already reconnected with a new socket, so
413 				 * only mark it dead if it's the same socket this
414 				 * request was sent out on.
415 				 */
416 				if (cmd->cookie == nsock->cookie)
417 					nbd_mark_nsock_dead(nbd, nsock, 1);
418 				mutex_unlock(&nsock->tx_lock);
419 			}
420 			mutex_unlock(&cmd->lock);
421 			nbd_requeue_cmd(cmd);
422 			nbd_config_put(nbd);
423 			return BLK_EH_DONE;
424 		}
425 	}
426 
427 	if (!nbd->tag_set.timeout) {
428 		/*
429 		 * Userspace sets timeout=0 to disable socket disconnection,
430 		 * so just warn and reset the timer.
431 		 */
432 		struct nbd_sock *nsock = config->socks[cmd->index];
433 		cmd->retries++;
434 		dev_info(nbd_to_dev(nbd), "Possible stuck request %p: control (%s@%llu,%uB). Runtime %u seconds\n",
435 			req, nbdcmd_to_ascii(req_to_nbd_cmd_type(req)),
436 			(unsigned long long)blk_rq_pos(req) << 9,
437 			blk_rq_bytes(req), (req->timeout / HZ) * cmd->retries);
438 
439 		mutex_lock(&nsock->tx_lock);
440 		if (cmd->cookie != nsock->cookie) {
441 			nbd_requeue_cmd(cmd);
442 			mutex_unlock(&nsock->tx_lock);
443 			mutex_unlock(&cmd->lock);
444 			nbd_config_put(nbd);
445 			return BLK_EH_DONE;
446 		}
447 		mutex_unlock(&nsock->tx_lock);
448 		mutex_unlock(&cmd->lock);
449 		nbd_config_put(nbd);
450 		return BLK_EH_RESET_TIMER;
451 	}
452 
453 	dev_err_ratelimited(nbd_to_dev(nbd), "Connection timed out\n");
454 	set_bit(NBD_RT_TIMEDOUT, &config->runtime_flags);
455 	cmd->status = BLK_STS_IOERR;
456 	mutex_unlock(&cmd->lock);
457 	sock_shutdown(nbd);
458 	nbd_config_put(nbd);
459 done:
460 	blk_mq_complete_request(req);
461 	return BLK_EH_DONE;
462 }
463 
464 /*
465  *  Send or receive packet.
466  */
467 static int sock_xmit(struct nbd_device *nbd, int index, int send,
468 		     struct iov_iter *iter, int msg_flags, int *sent)
469 {
470 	struct nbd_config *config = nbd->config;
471 	struct socket *sock = config->socks[index]->sock;
472 	int result;
473 	struct msghdr msg;
474 	unsigned int noreclaim_flag;
475 
476 	if (unlikely(!sock)) {
477 		dev_err_ratelimited(disk_to_dev(nbd->disk),
478 			"Attempted %s on closed socket in sock_xmit\n",
479 			(send ? "send" : "recv"));
480 		return -EINVAL;
481 	}
482 
483 	msg.msg_iter = *iter;
484 
485 	noreclaim_flag = memalloc_noreclaim_save();
486 	do {
487 		sock->sk->sk_allocation = GFP_NOIO | __GFP_MEMALLOC;
488 		msg.msg_name = NULL;
489 		msg.msg_namelen = 0;
490 		msg.msg_control = NULL;
491 		msg.msg_controllen = 0;
492 		msg.msg_flags = msg_flags | MSG_NOSIGNAL;
493 
494 		if (send)
495 			result = sock_sendmsg(sock, &msg);
496 		else
497 			result = sock_recvmsg(sock, &msg, msg.msg_flags);
498 
499 		if (result <= 0) {
500 			if (result == 0)
501 				result = -EPIPE; /* short read */
502 			break;
503 		}
504 		if (sent)
505 			*sent += result;
506 	} while (msg_data_left(&msg));
507 
508 	memalloc_noreclaim_restore(noreclaim_flag);
509 
510 	return result;
511 }
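
/*
 * A minimal usage sketch for sock_xmit(), mirroring how callers below
 * build their iterators (the reply buffer here is just an example):
 *
 *	struct nbd_reply reply;
 *	struct kvec iov = { .iov_base = &reply, .iov_len = sizeof(reply) };
 *	struct iov_iter to;
 *
 *	iov_iter_kvec(&to, READ, &iov, 1, sizeof(reply));
 *	result = sock_xmit(nbd, index, 0, &to, MSG_WAITALL, NULL);
 *
 * The memalloc_noreclaim_save()/__GFP_MEMALLOC handling above keeps
 * socket allocations from recursing into direct reclaim, which could
 * deadlock if this device is backing pages that are being written back.
 */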
512 
513 /*
514  * Different settings for sk->sk_sndtimeo can result in different return values
515  * (-ERESTARTSYS or -EINTR) if there is a signal pending when we enter sendmsg.
516  */
517 static inline int was_interrupted(int result)
518 {
519 	return result == -ERESTARTSYS || result == -EINTR;
520 }
521 
522 /* always call with the tx_lock held */
523 static int nbd_send_cmd(struct nbd_device *nbd, struct nbd_cmd *cmd, int index)
524 {
525 	struct request *req = blk_mq_rq_from_pdu(cmd);
526 	struct nbd_config *config = nbd->config;
527 	struct nbd_sock *nsock = config->socks[index];
528 	int result;
529 	struct nbd_request request = {.magic = htonl(NBD_REQUEST_MAGIC)};
530 	struct kvec iov = {.iov_base = &request, .iov_len = sizeof(request)};
531 	struct iov_iter from;
532 	unsigned long size = blk_rq_bytes(req);
533 	struct bio *bio;
534 	u64 handle;
535 	u32 type;
536 	u32 nbd_cmd_flags = 0;
537 	int sent = nsock->sent, skip = 0;
538 
539 	iov_iter_kvec(&from, WRITE, &iov, 1, sizeof(request));
540 
541 	type = req_to_nbd_cmd_type(req);
542 	if (type == U32_MAX)
543 		return -EIO;
544 
545 	if (rq_data_dir(req) == WRITE &&
546 	    (config->flags & NBD_FLAG_READ_ONLY)) {
547 		dev_err_ratelimited(disk_to_dev(nbd->disk),
548 				    "Write on read-only\n");
549 		return -EIO;
550 	}
551 
552 	if (req->cmd_flags & REQ_FUA)
553 		nbd_cmd_flags |= NBD_CMD_FLAG_FUA;
554 
555 	/* We did a partial send previously, and we at least sent the whole
556 	 * request struct, so just go and send the rest of the pages in the
557 	 * request.
558 	 */
559 	if (sent) {
560 		if (sent >= sizeof(request)) {
561 			skip = sent - sizeof(request);
562 
563 			/* initialize handle for tracing purposes */
564 			handle = nbd_cmd_handle(cmd);
565 
566 			goto send_pages;
567 		}
568 		iov_iter_advance(&from, sent);
569 	} else {
570 		cmd->cmd_cookie++;
571 	}
572 	cmd->index = index;
573 	cmd->cookie = nsock->cookie;
574 	cmd->retries = 0;
575 	request.type = htonl(type | nbd_cmd_flags);
576 	if (type != NBD_CMD_FLUSH) {
577 		request.from = cpu_to_be64((u64)blk_rq_pos(req) << 9);
578 		request.len = htonl(size);
579 	}
580 	handle = nbd_cmd_handle(cmd);
581 	memcpy(request.handle, &handle, sizeof(handle));
582 
583 	trace_nbd_send_request(&request, nbd->index, blk_mq_rq_from_pdu(cmd));
584 
585 	dev_dbg(nbd_to_dev(nbd), "request %p: sending control (%s@%llu,%uB)\n",
586 		req, nbdcmd_to_ascii(type),
587 		(unsigned long long)blk_rq_pos(req) << 9, blk_rq_bytes(req));
588 	result = sock_xmit(nbd, index, 1, &from,
589 			(type == NBD_CMD_WRITE) ? MSG_MORE : 0, &sent);
590 	trace_nbd_header_sent(req, handle);
591 	if (result <= 0) {
592 		if (was_interrupted(result)) {
593 			/* If we haven't sent anything we can just return BUSY,
594 			 * however if we have sent something we need to make
595 			 * sure we only allow this req to be sent until it is
596 			 * completely done.
597 			 */
598 			if (sent) {
599 				nsock->pending = req;
600 				nsock->sent = sent;
601 			}
602 			set_bit(NBD_CMD_REQUEUED, &cmd->flags);
603 			return BLK_STS_RESOURCE;
604 		}
605 		dev_err_ratelimited(disk_to_dev(nbd->disk),
606 			"Send control failed (result %d)\n", result);
607 		return -EAGAIN;
608 	}
609 send_pages:
610 	if (type != NBD_CMD_WRITE)
611 		goto out;
612 
613 	bio = req->bio;
614 	while (bio) {
615 		struct bio *next = bio->bi_next;
616 		struct bvec_iter iter;
617 		struct bio_vec bvec;
618 
619 		bio_for_each_segment(bvec, bio, iter) {
620 			bool is_last = !next && bio_iter_last(bvec, iter);
621 			int flags = is_last ? 0 : MSG_MORE;
622 
623 			dev_dbg(nbd_to_dev(nbd), "request %p: sending %d bytes data\n",
624 				req, bvec.bv_len);
625 			iov_iter_bvec(&from, WRITE, &bvec, 1, bvec.bv_len);
626 			if (skip) {
627 				if (skip >= iov_iter_count(&from)) {
628 					skip -= iov_iter_count(&from);
629 					continue;
630 				}
631 				iov_iter_advance(&from, skip);
632 				skip = 0;
633 			}
634 			result = sock_xmit(nbd, index, 1, &from, flags, &sent);
635 			if (result <= 0) {
636 				if (was_interrupted(result)) {
637 					/* We've already sent the header, we
638 					 * have no choice but to set pending and
639 					 * return BUSY.
640 					 */
641 					nsock->pending = req;
642 					nsock->sent = sent;
643 					set_bit(NBD_CMD_REQUEUED, &cmd->flags);
644 					return BLK_STS_RESOURCE;
645 				}
646 				dev_err(disk_to_dev(nbd->disk),
647 					"Send data failed (result %d)\n",
648 					result);
649 				return -EAGAIN;
650 			}
651 			/*
652 			 * The completion might already have come in,
653 			 * so break for the last one instead of letting
654 			 * the iterator do it. This prevents use-after-free
655 			 * of the bio.
656 			 */
657 			if (is_last)
658 				break;
659 		}
660 		bio = next;
661 	}
662 out:
663 	trace_nbd_payload_sent(req, handle);
664 	nsock->pending = NULL;
665 	nsock->sent = 0;
666 	return 0;
667 }
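
/*
 * For reference, the control header sent above is the NBD simple request
 * layout from <linux/nbd.h>, with all fields big-endian on the wire:
 *
 *	struct nbd_request {
 *		__be32 magic;		// NBD_REQUEST_MAGIC
 *		__be32 type;		// NBD_CMD_* | NBD_CMD_FLAG_*
 *		char   handle[8];	// see nbd_cmd_handle()
 *		__be64 from;		// byte offset, blk_rq_pos(req) << 9
 *		__be32 len;		// payload length in bytes
 *	};
 *
 * For NBD_CMD_WRITE the data payload follows the header immediately; for
 * NBD_CMD_FLUSH the from and len fields stay zero.
 */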
668 
669 /* An ERR_PTR return means something went wrong, inform userspace */
670 static struct nbd_cmd *nbd_read_stat(struct nbd_device *nbd, int index)
671 {
672 	struct nbd_config *config = nbd->config;
673 	int result;
674 	struct nbd_reply reply;
675 	struct nbd_cmd *cmd;
676 	struct request *req = NULL;
677 	u64 handle;
678 	u16 hwq;
679 	u32 tag;
680 	struct kvec iov = {.iov_base = &reply, .iov_len = sizeof(reply)};
681 	struct iov_iter to;
682 	int ret = 0;
683 
684 	reply.magic = 0;
685 	iov_iter_kvec(&to, READ, &iov, 1, sizeof(reply));
686 	result = sock_xmit(nbd, index, 0, &to, MSG_WAITALL, NULL);
687 	if (result <= 0) {
688 		if (!nbd_disconnected(config))
689 			dev_err(disk_to_dev(nbd->disk),
690 				"Receive control failed (result %d)\n", result);
691 		return ERR_PTR(result);
692 	}
693 
694 	if (ntohl(reply.magic) != NBD_REPLY_MAGIC) {
695 		dev_err(disk_to_dev(nbd->disk), "Wrong magic (0x%lx)\n",
696 				(unsigned long)ntohl(reply.magic));
697 		return ERR_PTR(-EPROTO);
698 	}
699 
700 	memcpy(&handle, reply.handle, sizeof(handle));
701 	tag = nbd_handle_to_tag(handle);
702 	hwq = blk_mq_unique_tag_to_hwq(tag);
703 	if (hwq < nbd->tag_set.nr_hw_queues)
704 		req = blk_mq_tag_to_rq(nbd->tag_set.tags[hwq],
705 				       blk_mq_unique_tag_to_tag(tag));
706 	if (!req || !blk_mq_request_started(req)) {
707 		dev_err(disk_to_dev(nbd->disk), "Unexpected reply (%d) %p\n",
708 			tag, req);
709 		return ERR_PTR(-ENOENT);
710 	}
711 	trace_nbd_header_received(req, handle);
712 	cmd = blk_mq_rq_to_pdu(req);
713 
714 	mutex_lock(&cmd->lock);
715 	if (cmd->cmd_cookie != nbd_handle_to_cookie(handle)) {
716 		dev_err(disk_to_dev(nbd->disk), "Double reply on req %p, cmd_cookie %u, handle cookie %u\n",
717 			req, cmd->cmd_cookie, nbd_handle_to_cookie(handle));
718 		ret = -ENOENT;
719 		goto out;
720 	}
721 	if (cmd->status != BLK_STS_OK) {
722 		dev_err(disk_to_dev(nbd->disk), "Command already handled %p\n",
723 			req);
724 		ret = -ENOENT;
725 		goto out;
726 	}
727 	if (test_bit(NBD_CMD_REQUEUED, &cmd->flags)) {
728 		dev_err(disk_to_dev(nbd->disk), "Raced with timeout on req %p\n",
729 			req);
730 		ret = -ENOENT;
731 		goto out;
732 	}
733 	if (ntohl(reply.error)) {
734 		dev_err(disk_to_dev(nbd->disk), "Other side returned error (%d)\n",
735 			ntohl(reply.error));
736 		cmd->status = BLK_STS_IOERR;
737 		goto out;
738 	}
739 
740 	dev_dbg(nbd_to_dev(nbd), "request %p: got reply\n", req);
741 	if (rq_data_dir(req) != WRITE) {
742 		struct req_iterator iter;
743 		struct bio_vec bvec;
744 
745 		rq_for_each_segment(bvec, req, iter) {
746 			iov_iter_bvec(&to, READ, &bvec, 1, bvec.bv_len);
747 			result = sock_xmit(nbd, index, 0, &to, MSG_WAITALL, NULL);
748 			if (result <= 0) {
749 				dev_err(disk_to_dev(nbd->disk), "Receive data failed (result %d)\n",
750 					result);
751 				/*
752 				 * If we've disconnected, we need to make sure we
753 				 * complete this request, otherwise error out
754 				 * and let the timeout stuff handle resubmitting
755 				 * this request onto another connection.
756 				 */
757 				if (nbd_disconnected(config)) {
758 					cmd->status = BLK_STS_IOERR;
759 					goto out;
760 				}
761 				ret = -EIO;
762 				goto out;
763 			}
764 			dev_dbg(nbd_to_dev(nbd), "request %p: got %d bytes data\n",
765 				req, bvec.bv_len);
766 		}
767 	}
768 out:
769 	trace_nbd_payload_received(req, handle);
770 	mutex_unlock(&cmd->lock);
771 	return ret ? ERR_PTR(ret) : cmd;
772 }
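
/*
 * The reply parsed above is the corresponding NBD simple reply from
 * <linux/nbd.h>, again big-endian on the wire:
 *
 *	struct nbd_reply {
 *		__be32 magic;		// NBD_REPLY_MAGIC
 *		__be32 error;		// 0 on success
 *		char   handle[8];	// echoed back from the request
 *	};
 *
 * Read payloads follow the reply header, which is why nbd_read_stat()
 * pulls the data into the request's bvecs before completing it.
 */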
773 
774 static void recv_work(struct work_struct *work)
775 {
776 	struct recv_thread_args *args = container_of(work,
777 						     struct recv_thread_args,
778 						     work);
779 	struct nbd_device *nbd = args->nbd;
780 	struct nbd_config *config = nbd->config;
781 	struct nbd_cmd *cmd;
782 	struct request *rq;
783 
784 	while (1) {
785 		cmd = nbd_read_stat(nbd, args->index);
786 		if (IS_ERR(cmd)) {
787 			struct nbd_sock *nsock = config->socks[args->index];
788 
789 			mutex_lock(&nsock->tx_lock);
790 			nbd_mark_nsock_dead(nbd, nsock, 1);
791 			mutex_unlock(&nsock->tx_lock);
792 			break;
793 		}
794 
795 		rq = blk_mq_rq_from_pdu(cmd);
796 		if (likely(!blk_should_fake_timeout(rq->q)))
797 			blk_mq_complete_request(rq);
798 	}
799 	nbd_config_put(nbd);
800 	atomic_dec(&config->recv_threads);
801 	wake_up(&config->recv_wq);
802 	kfree(args);
803 }
804 
805 static bool nbd_clear_req(struct request *req, void *data, bool reserved)
806 {
807 	struct nbd_cmd *cmd = blk_mq_rq_to_pdu(req);
808 
809 	mutex_lock(&cmd->lock);
810 	cmd->status = BLK_STS_IOERR;
811 	mutex_unlock(&cmd->lock);
812 
813 	blk_mq_complete_request(req);
814 	return true;
815 }
816 
817 static void nbd_clear_que(struct nbd_device *nbd)
818 {
819 	blk_mq_quiesce_queue(nbd->disk->queue);
820 	blk_mq_tagset_busy_iter(&nbd->tag_set, nbd_clear_req, NULL);
821 	blk_mq_unquiesce_queue(nbd->disk->queue);
822 	dev_dbg(disk_to_dev(nbd->disk), "queue cleared\n");
823 }
824 
825 static int find_fallback(struct nbd_device *nbd, int index)
826 {
827 	struct nbd_config *config = nbd->config;
828 	int new_index = -1;
829 	struct nbd_sock *nsock = config->socks[index];
830 	int fallback = nsock->fallback_index;
831 
832 	if (test_bit(NBD_RT_DISCONNECTED, &config->runtime_flags))
833 		return new_index;
834 
835 	if (config->num_connections <= 1) {
836 		dev_err_ratelimited(disk_to_dev(nbd->disk),
837 				    "Dead connection, failed to find a fallback\n");
838 		return new_index;
839 	}
840 
841 	if (fallback >= 0 && fallback < config->num_connections &&
842 	    !config->socks[fallback]->dead)
843 		return fallback;
844 
845 	if (nsock->fallback_index < 0 ||
846 	    nsock->fallback_index >= config->num_connections ||
847 	    config->socks[nsock->fallback_index]->dead) {
848 		int i;
849 		for (i = 0; i < config->num_connections; i++) {
850 			if (i == index)
851 				continue;
852 			if (!config->socks[i]->dead) {
853 				new_index = i;
854 				break;
855 			}
856 		}
857 		nsock->fallback_index = new_index;
858 		if (new_index < 0) {
859 			dev_err_ratelimited(disk_to_dev(nbd->disk),
860 					    "Dead connection, failed to find a fallback\n");
861 			return new_index;
862 		}
863 	}
864 	new_index = nsock->fallback_index;
865 	return new_index;
866 }
867 
868 static int wait_for_reconnect(struct nbd_device *nbd)
869 {
870 	struct nbd_config *config = nbd->config;
871 	if (!config->dead_conn_timeout)
872 		return 0;
873 	if (test_bit(NBD_RT_DISCONNECTED, &config->runtime_flags))
874 		return 0;
875 	return wait_event_timeout(config->conn_wait,
876 				  atomic_read(&config->live_connections) > 0,
877 				  config->dead_conn_timeout) > 0;
878 }
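
/*
 * Note that dead_conn_timeout is kept in jiffies: the netlink path
 * stores NBD_ATTR_DEAD_CONN_TIMEOUT (given in seconds) multiplied by HZ,
 * so e.g. a value of 30 makes wait_for_reconnect() block for up to 30
 * seconds waiting for live_connections to become nonzero.
 */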
879 
880 static int nbd_handle_cmd(struct nbd_cmd *cmd, int index)
881 {
882 	struct request *req = blk_mq_rq_from_pdu(cmd);
883 	struct nbd_device *nbd = cmd->nbd;
884 	struct nbd_config *config;
885 	struct nbd_sock *nsock;
886 	int ret;
887 
888 	if (!refcount_inc_not_zero(&nbd->config_refs)) {
889 		dev_err_ratelimited(disk_to_dev(nbd->disk),
890 				    "Socks array is empty\n");
891 		blk_mq_start_request(req);
892 		return -EINVAL;
893 	}
894 	config = nbd->config;
895 
896 	if (index >= config->num_connections) {
897 		dev_err_ratelimited(disk_to_dev(nbd->disk),
898 				    "Attempted send on invalid socket\n");
899 		nbd_config_put(nbd);
900 		blk_mq_start_request(req);
901 		return -EINVAL;
902 	}
903 	cmd->status = BLK_STS_OK;
904 again:
905 	nsock = config->socks[index];
906 	mutex_lock(&nsock->tx_lock);
907 	if (nsock->dead) {
908 		int old_index = index;
909 		index = find_fallback(nbd, index);
910 		mutex_unlock(&nsock->tx_lock);
911 		if (index < 0) {
912 			if (wait_for_reconnect(nbd)) {
913 				index = old_index;
914 				goto again;
915 			}
916 			/* All the sockets should already be down at this point,
917 			 * we just want to make sure that DISCONNECTED is set so
918 			 * any requests that come in that were queued waiting
919 			 * for the reconnect timer don't trigger the timer again
920 			 * and instead just error out.
921 			 */
922 			sock_shutdown(nbd);
923 			nbd_config_put(nbd);
924 			blk_mq_start_request(req);
925 			return -EIO;
926 		}
927 		goto again;
928 	}
929 
930 	/* Handle the case that we have a pending request that was partially
931 	 * transmitted that _has_ to be serviced first.  We need to call requeue
932 	 * here so that it gets put _after_ the request that is already on the
933 	 * dispatch list.
934 	 */
935 	blk_mq_start_request(req);
936 	if (unlikely(nsock->pending && nsock->pending != req)) {
937 		nbd_requeue_cmd(cmd);
938 		ret = 0;
939 		goto out;
940 	}
941 	/*
942 	 * Some failures are related to the link going down, so anything that
943 	 * returns EAGAIN can be retried on a different socket.
944 	 */
945 	ret = nbd_send_cmd(nbd, cmd, index);
946 	if (ret == -EAGAIN) {
947 		dev_err_ratelimited(disk_to_dev(nbd->disk),
948 				    "Request send failed, requeueing\n");
949 		nbd_mark_nsock_dead(nbd, nsock, 1);
950 		nbd_requeue_cmd(cmd);
951 		ret = 0;
952 	}
953 out:
954 	mutex_unlock(&nsock->tx_lock);
955 	nbd_config_put(nbd);
956 	return ret;
957 }
958 
959 static blk_status_t nbd_queue_rq(struct blk_mq_hw_ctx *hctx,
960 			const struct blk_mq_queue_data *bd)
961 {
962 	struct nbd_cmd *cmd = blk_mq_rq_to_pdu(bd->rq);
963 	int ret;
964 
965 	/*
966 	 * Since we look at the bios to send the request over the network, we
967 	 * need to make sure the completion work doesn't mark this request done
968 	 * before we are done doing our send.  This keeps us from dereferencing
969 	 * freed data if we have particularly fast completions (i.e. we get the
970 	 * completion before we exit sock_xmit on the last bvec) or in the case
971 	 * that the server is misbehaving (or there was an error) before we're
972 	 * done sending everything over the wire.
973 	 */
974 	mutex_lock(&cmd->lock);
975 	clear_bit(NBD_CMD_REQUEUED, &cmd->flags);
976 
977 	/* We can be called directly from the user space process, which means we
978 	 * could possibly have signals pending, so our sendmsg can fail.  In
979 	 * this case we need to return that we are busy, otherwise error out as
980 	 * appropriate.
981 	 */
982 	ret = nbd_handle_cmd(cmd, hctx->queue_num);
983 	if (ret < 0)
984 		ret = BLK_STS_IOERR;
985 	else if (!ret)
986 		ret = BLK_STS_OK;
987 	mutex_unlock(&cmd->lock);
988 
989 	return ret;
990 }
991 
992 static struct socket *nbd_get_socket(struct nbd_device *nbd, unsigned long fd,
993 				     int *err)
994 {
995 	struct socket *sock;
996 
997 	*err = 0;
998 	sock = sockfd_lookup(fd, err);
999 	if (!sock)
1000 		return NULL;
1001 
1002 	if (sock->ops->shutdown == sock_no_shutdown) {
1003 		dev_err(disk_to_dev(nbd->disk), "Unsupported socket: shutdown callout must be supported.\n");
1004 		*err = -EINVAL;
1005 		sockfd_put(sock);
1006 		return NULL;
1007 	}
1008 
1009 	return sock;
1010 }
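
/*
 * A hypothetical userspace sketch of where the fd handed to
 * nbd_get_socket() comes from ("server_addr" and "nbd_dev_fd" are
 * illustrative names):
 *
 *	int fd = socket(AF_INET, SOCK_STREAM, 0);
 *	connect(fd, (struct sockaddr *)&server_addr, sizeof(server_addr));
 *	ioctl(nbd_dev_fd, NBD_SET_SOCK, fd);	// arrives here via sockfd_lookup()
 *
 * Sockets whose protocol has no real ->shutdown() are rejected because
 * sock_shutdown() relies on kernel_sock_shutdown() to kick the recv
 * threads out of their blocking reads.
 */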
1011 
1012 static int nbd_add_socket(struct nbd_device *nbd, unsigned long arg,
1013 			  bool netlink)
1014 {
1015 	struct nbd_config *config = nbd->config;
1016 	struct socket *sock;
1017 	struct nbd_sock **socks;
1018 	struct nbd_sock *nsock;
1019 	int err;
1020 
1021 	sock = nbd_get_socket(nbd, arg, &err);
1022 	if (!sock)
1023 		return err;
1024 
1025 	if (!netlink && !nbd->task_setup &&
1026 	    !test_bit(NBD_RT_BOUND, &config->runtime_flags))
1027 		nbd->task_setup = current;
1028 
1029 	if (!netlink &&
1030 	    (nbd->task_setup != current ||
1031 	     test_bit(NBD_RT_BOUND, &config->runtime_flags))) {
1032 		dev_err(disk_to_dev(nbd->disk),
1033 			"Device being setup by another task");
1034 		err = -EBUSY;
1035 		goto put_socket;
1036 	}
1037 
1038 	nsock = kzalloc(sizeof(*nsock), GFP_KERNEL);
1039 	if (!nsock) {
1040 		err = -ENOMEM;
1041 		goto put_socket;
1042 	}
1043 
1044 	socks = krealloc(config->socks, (config->num_connections + 1) *
1045 			 sizeof(struct nbd_sock *), GFP_KERNEL);
1046 	if (!socks) {
1047 		kfree(nsock);
1048 		err = -ENOMEM;
1049 		goto put_socket;
1050 	}
1051 
1052 	config->socks = socks;
1053 
1054 	nsock->fallback_index = -1;
1055 	nsock->dead = false;
1056 	mutex_init(&nsock->tx_lock);
1057 	nsock->sock = sock;
1058 	nsock->pending = NULL;
1059 	nsock->sent = 0;
1060 	nsock->cookie = 0;
1061 	socks[config->num_connections++] = nsock;
1062 	atomic_inc(&config->live_connections);
1063 
1064 	return 0;
1065 
1066 put_socket:
1067 	sockfd_put(sock);
1068 	return err;
1069 }
1070 
1071 static int nbd_reconnect_socket(struct nbd_device *nbd, unsigned long arg)
1072 {
1073 	struct nbd_config *config = nbd->config;
1074 	struct socket *sock, *old;
1075 	struct recv_thread_args *args;
1076 	int i;
1077 	int err;
1078 
1079 	sock = nbd_get_socket(nbd, arg, &err);
1080 	if (!sock)
1081 		return err;
1082 
1083 	args = kzalloc(sizeof(*args), GFP_KERNEL);
1084 	if (!args) {
1085 		sockfd_put(sock);
1086 		return -ENOMEM;
1087 	}
1088 
1089 	for (i = 0; i < config->num_connections; i++) {
1090 		struct nbd_sock *nsock = config->socks[i];
1091 
1092 		if (!nsock->dead)
1093 			continue;
1094 
1095 		mutex_lock(&nsock->tx_lock);
1096 		if (!nsock->dead) {
1097 			mutex_unlock(&nsock->tx_lock);
1098 			continue;
1099 		}
1100 		sk_set_memalloc(sock->sk);
1101 		if (nbd->tag_set.timeout)
1102 			sock->sk->sk_sndtimeo = nbd->tag_set.timeout;
1103 		atomic_inc(&config->recv_threads);
1104 		refcount_inc(&nbd->config_refs);
1105 		old = nsock->sock;
1106 		nsock->fallback_index = -1;
1107 		nsock->sock = sock;
1108 		nsock->dead = false;
1109 		INIT_WORK(&args->work, recv_work);
1110 		args->index = i;
1111 		args->nbd = nbd;
1112 		nsock->cookie++;
1113 		mutex_unlock(&nsock->tx_lock);
1114 		sockfd_put(old);
1115 
1116 		clear_bit(NBD_RT_DISCONNECTED, &config->runtime_flags);
1117 
1118 		/* We take the tx_lock in an error path in recv_work, so we
1119 		 * need to queue_work() outside of the tx_lock.
1120 		 */
1121 		queue_work(nbd->recv_workq, &args->work);
1122 
1123 		atomic_inc(&config->live_connections);
1124 		wake_up(&config->conn_wait);
1125 		return 0;
1126 	}
1127 	sockfd_put(sock);
1128 	kfree(args);
1129 	return -ENOSPC;
1130 }
1131 
1132 static void nbd_bdev_reset(struct block_device *bdev)
1133 {
1134 	if (bdev->bd_openers > 1)
1135 		return;
1136 	set_capacity(bdev->bd_disk, 0);
1137 }
1138 
1139 static void nbd_parse_flags(struct nbd_device *nbd)
1140 {
1141 	struct nbd_config *config = nbd->config;
1142 	if (config->flags & NBD_FLAG_READ_ONLY)
1143 		set_disk_ro(nbd->disk, true);
1144 	else
1145 		set_disk_ro(nbd->disk, false);
1146 	if (config->flags & NBD_FLAG_SEND_TRIM)
1147 		blk_queue_flag_set(QUEUE_FLAG_DISCARD, nbd->disk->queue);
1148 	if (config->flags & NBD_FLAG_SEND_FLUSH) {
1149 		if (config->flags & NBD_FLAG_SEND_FUA)
1150 			blk_queue_write_cache(nbd->disk->queue, true, true);
1151 		else
1152 			blk_queue_write_cache(nbd->disk->queue, true, false);
1153 	}
1154 	else
1155 		blk_queue_write_cache(nbd->disk->queue, false, false);
1156 }
1157 
1158 static void send_disconnects(struct nbd_device *nbd)
1159 {
1160 	struct nbd_config *config = nbd->config;
1161 	struct nbd_request request = {
1162 		.magic = htonl(NBD_REQUEST_MAGIC),
1163 		.type = htonl(NBD_CMD_DISC),
1164 	};
1165 	struct kvec iov = {.iov_base = &request, .iov_len = sizeof(request)};
1166 	struct iov_iter from;
1167 	int i, ret;
1168 
1169 	for (i = 0; i < config->num_connections; i++) {
1170 		struct nbd_sock *nsock = config->socks[i];
1171 
1172 		iov_iter_kvec(&from, WRITE, &iov, 1, sizeof(request));
1173 		mutex_lock(&nsock->tx_lock);
1174 		ret = sock_xmit(nbd, i, 1, &from, 0, NULL);
1175 		if (ret <= 0)
1176 			dev_err(disk_to_dev(nbd->disk),
1177 				"Send disconnect failed %d\n", ret);
1178 		mutex_unlock(&nsock->tx_lock);
1179 	}
1180 }
1181 
1182 static int nbd_disconnect(struct nbd_device *nbd)
1183 {
1184 	struct nbd_config *config = nbd->config;
1185 
1186 	dev_info(disk_to_dev(nbd->disk), "NBD_DISCONNECT\n");
1187 	set_bit(NBD_RT_DISCONNECT_REQUESTED, &config->runtime_flags);
1188 	set_bit(NBD_DISCONNECT_REQUESTED, &nbd->flags);
1189 	send_disconnects(nbd);
1190 	return 0;
1191 }
1192 
1193 static void nbd_clear_sock(struct nbd_device *nbd)
1194 {
1195 	sock_shutdown(nbd);
1196 	nbd_clear_que(nbd);
1197 	nbd->task_setup = NULL;
1198 }
1199 
1200 static void nbd_config_put(struct nbd_device *nbd)
1201 {
1202 	if (refcount_dec_and_mutex_lock(&nbd->config_refs,
1203 					&nbd->config_lock)) {
1204 		struct nbd_config *config = nbd->config;
1205 		nbd_dev_dbg_close(nbd);
1206 		nbd_size_clear(nbd);
1207 		if (test_and_clear_bit(NBD_RT_HAS_PID_FILE,
1208 				       &config->runtime_flags))
1209 			device_remove_file(disk_to_dev(nbd->disk), &pid_attr);
1210 		nbd->task_recv = NULL;
1211 		nbd_clear_sock(nbd);
1212 		if (config->num_connections) {
1213 			int i;
1214 			for (i = 0; i < config->num_connections; i++) {
1215 				sockfd_put(config->socks[i]->sock);
1216 				kfree(config->socks[i]);
1217 			}
1218 			kfree(config->socks);
1219 		}
1220 		kfree(nbd->config);
1221 		nbd->config = NULL;
1222 
1223 		if (nbd->recv_workq)
1224 			destroy_workqueue(nbd->recv_workq);
1225 		nbd->recv_workq = NULL;
1226 
1227 		nbd->tag_set.timeout = 0;
1228 		nbd->disk->queue->limits.discard_granularity = 0;
1229 		nbd->disk->queue->limits.discard_alignment = 0;
1230 		blk_queue_max_discard_sectors(nbd->disk->queue, UINT_MAX);
1231 		blk_queue_flag_clear(QUEUE_FLAG_DISCARD, nbd->disk->queue);
1232 
1233 		mutex_unlock(&nbd->config_lock);
1234 		nbd_put(nbd);
1235 		module_put(THIS_MODULE);
1236 	}
1237 }
1238 
1239 static int nbd_start_device(struct nbd_device *nbd)
1240 {
1241 	struct nbd_config *config = nbd->config;
1242 	int num_connections = config->num_connections;
1243 	int error = 0, i;
1244 
1245 	if (nbd->task_recv)
1246 		return -EBUSY;
1247 	if (!config->socks)
1248 		return -EINVAL;
1249 	if (num_connections > 1 &&
1250 	    !(config->flags & NBD_FLAG_CAN_MULTI_CONN)) {
1251 		dev_err(disk_to_dev(nbd->disk), "server does not support multiple connections per device.\n");
1252 		return -EINVAL;
1253 	}
1254 
1255 	nbd->recv_workq = alloc_workqueue("knbd%d-recv",
1256 					  WQ_MEM_RECLAIM | WQ_HIGHPRI |
1257 					  WQ_UNBOUND, 0, nbd->index);
1258 	if (!nbd->recv_workq) {
1259 		dev_err(disk_to_dev(nbd->disk), "Could not allocate knbd recv work queue.\n");
1260 		return -ENOMEM;
1261 	}
1262 
1263 	blk_mq_update_nr_hw_queues(&nbd->tag_set, config->num_connections);
1264 	nbd->task_recv = current;
1265 
1266 	nbd_parse_flags(nbd);
1267 
1268 	error = device_create_file(disk_to_dev(nbd->disk), &pid_attr);
1269 	if (error) {
1270 		dev_err(disk_to_dev(nbd->disk), "device_create_file failed!\n");
1271 		return error;
1272 	}
1273 	set_bit(NBD_RT_HAS_PID_FILE, &config->runtime_flags);
1274 
1275 	nbd_dev_dbg_init(nbd);
1276 	for (i = 0; i < num_connections; i++) {
1277 		struct recv_thread_args *args;
1278 
1279 		args = kzalloc(sizeof(*args), GFP_KERNEL);
1280 		if (!args) {
1281 			sock_shutdown(nbd);
1282 			/*
1283 			 * If num_connections is m (m > 2), and the first n
1284 			 * (1 < n < m) kzallocs succeed but allocation n + 1
1285 			 * fails, we still have n recv threads running. So
1286 			 * flush_workqueue() here to prevent those recv threads
1287 			 * from dropping the last config_refs and trying to
1288 			 * destroy the workqueue from inside the workqueue.
1289 			 */
1290 			if (i)
1291 				flush_workqueue(nbd->recv_workq);
1292 			return -ENOMEM;
1293 		}
1294 		sk_set_memalloc(config->socks[i]->sock->sk);
1295 		if (nbd->tag_set.timeout)
1296 			config->socks[i]->sock->sk->sk_sndtimeo =
1297 				nbd->tag_set.timeout;
1298 		atomic_inc(&config->recv_threads);
1299 		refcount_inc(&nbd->config_refs);
1300 		INIT_WORK(&args->work, recv_work);
1301 		args->nbd = nbd;
1302 		args->index = i;
1303 		queue_work(nbd->recv_workq, &args->work);
1304 	}
1305 	return nbd_set_size(nbd, config->bytesize, config->blksize);
1306 }
1307 
1308 static int nbd_start_device_ioctl(struct nbd_device *nbd, struct block_device *bdev)
1309 {
1310 	struct nbd_config *config = nbd->config;
1311 	int ret;
1312 
1313 	ret = nbd_start_device(nbd);
1314 	if (ret)
1315 		return ret;
1316 
1317 	if (max_part)
1318 		set_bit(GD_NEED_PART_SCAN, &nbd->disk->state);
1319 	mutex_unlock(&nbd->config_lock);
1320 	ret = wait_event_interruptible(config->recv_wq,
1321 					 atomic_read(&config->recv_threads) == 0);
1322 	if (ret)
1323 		sock_shutdown(nbd);
1324 	flush_workqueue(nbd->recv_workq);
1325 
1326 	mutex_lock(&nbd->config_lock);
1327 	nbd_bdev_reset(bdev);
1328 	/* user requested, ignore socket errors */
1329 	if (test_bit(NBD_RT_DISCONNECT_REQUESTED, &config->runtime_flags))
1330 		ret = 0;
1331 	if (test_bit(NBD_RT_TIMEDOUT, &config->runtime_flags))
1332 		ret = -ETIMEDOUT;
1333 	return ret;
1334 }
1335 
1336 static void nbd_clear_sock_ioctl(struct nbd_device *nbd,
1337 				 struct block_device *bdev)
1338 {
1339 	sock_shutdown(nbd);
1340 	__invalidate_device(bdev, true);
1341 	nbd_bdev_reset(bdev);
1342 	if (test_and_clear_bit(NBD_RT_HAS_CONFIG_REF,
1343 			       &nbd->config->runtime_flags))
1344 		nbd_config_put(nbd);
1345 }
1346 
1347 static void nbd_set_cmd_timeout(struct nbd_device *nbd, u64 timeout)
1348 {
1349 	nbd->tag_set.timeout = timeout * HZ;
1350 	if (timeout)
1351 		blk_queue_rq_timeout(nbd->disk->queue, timeout * HZ);
1352 	else
1353 		blk_queue_rq_timeout(nbd->disk->queue, 30 * HZ);
1354 }
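
/*
 * Timeout semantics, summarized: a nonzero NBD_SET_TIMEOUT (or
 * NBD_ATTR_TIMEOUT) of N seconds lets nbd_xmit_timeout() mark the
 * connection dead and requeue the request after N seconds without a
 * reply. A timeout of 0 disables that teardown but still arms a 30
 * second block-layer timer, so the "Possible stuck request" path above
 * just logs and resets the timer indefinitely.
 */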
1355 
1356 /* Must be called with config_lock held */
1357 static int __nbd_ioctl(struct block_device *bdev, struct nbd_device *nbd,
1358 		       unsigned int cmd, unsigned long arg)
1359 {
1360 	struct nbd_config *config = nbd->config;
1361 
1362 	switch (cmd) {
1363 	case NBD_DISCONNECT:
1364 		return nbd_disconnect(nbd);
1365 	case NBD_CLEAR_SOCK:
1366 		nbd_clear_sock_ioctl(nbd, bdev);
1367 		return 0;
1368 	case NBD_SET_SOCK:
1369 		return nbd_add_socket(nbd, arg, false);
1370 	case NBD_SET_BLKSIZE:
1371 		return nbd_set_size(nbd, config->bytesize, arg);
1372 	case NBD_SET_SIZE:
1373 		return nbd_set_size(nbd, arg, config->blksize);
1374 	case NBD_SET_SIZE_BLOCKS:
1375 		return nbd_set_size(nbd, arg * config->blksize,
1376 				    config->blksize);
1377 	case NBD_SET_TIMEOUT:
1378 		nbd_set_cmd_timeout(nbd, arg);
1379 		return 0;
1380 
1381 	case NBD_SET_FLAGS:
1382 		config->flags = arg;
1383 		return 0;
1384 	case NBD_DO_IT:
1385 		return nbd_start_device_ioctl(nbd, bdev);
1386 	case NBD_CLEAR_QUE:
1387 		/*
1388 		 * This is for compatibility only.  The queue is always cleared
1389 		 * by NBD_DO_IT or NBD_CLEAR_SOCK.
1390 		 */
1391 		return 0;
1392 	case NBD_PRINT_DEBUG:
1393 		/*
1394 		 * For compatibility only, we no longer keep a list of
1395 		 * outstanding requests.
1396 		 */
1397 		return 0;
1398 	}
1399 	return -ENOTTY;
1400 }
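
/*
 * A minimal, hypothetical userspace sequence against __nbd_ioctl()
 * (error handling omitted; "sock_fd" is a connected socket as in the
 * nbd_get_socket() sketch above):
 *
 *	int nbd_fd = open("/dev/nbd0", O_RDWR);
 *
 *	ioctl(nbd_fd, NBD_SET_BLKSIZE, 4096);
 *	ioctl(nbd_fd, NBD_SET_SIZE_BLOCKS, nr_blocks);
 *	ioctl(nbd_fd, NBD_SET_FLAGS, server_flags);
 *	ioctl(nbd_fd, NBD_SET_SOCK, sock_fd);
 *	ioctl(nbd_fd, NBD_DO_IT, 0);	// blocks until disconnect
 *
 * NBD_DO_IT only returns once the recv threads exit, which is why
 * nbd_start_device_ioctl() drops config_lock around its wait.
 */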
1401 
1402 static int nbd_ioctl(struct block_device *bdev, fmode_t mode,
1403 		     unsigned int cmd, unsigned long arg)
1404 {
1405 	struct nbd_device *nbd = bdev->bd_disk->private_data;
1406 	struct nbd_config *config = nbd->config;
1407 	int error = -EINVAL;
1408 
1409 	if (!capable(CAP_SYS_ADMIN))
1410 		return -EPERM;
1411 
1412 	/* The block layer will pass back some non-nbd ioctls in case we have
1413 	 * special handling for them, but we don't, so just return an error.
1414 	 */
1415 	if (_IOC_TYPE(cmd) != 0xab)
1416 		return -EINVAL;
1417 
1418 	mutex_lock(&nbd->config_lock);
1419 
1420 	/* Don't allow ioctl operations on an nbd device that was created with
1421 	 * netlink, unless it's DISCONNECT or CLEAR_SOCK, which are fine.
1422 	 */
1423 	if (!test_bit(NBD_RT_BOUND, &config->runtime_flags) ||
1424 	    (cmd == NBD_DISCONNECT || cmd == NBD_CLEAR_SOCK))
1425 		error = __nbd_ioctl(bdev, nbd, cmd, arg);
1426 	else
1427 		dev_err(nbd_to_dev(nbd), "Cannot use ioctl interface on a netlink controlled device.\n");
1428 	mutex_unlock(&nbd->config_lock);
1429 	return error;
1430 }
1431 
1432 static struct nbd_config *nbd_alloc_config(void)
1433 {
1434 	struct nbd_config *config;
1435 
1436 	config = kzalloc(sizeof(struct nbd_config), GFP_NOFS);
1437 	if (!config)
1438 		return NULL;
1439 	atomic_set(&config->recv_threads, 0);
1440 	init_waitqueue_head(&config->recv_wq);
1441 	init_waitqueue_head(&config->conn_wait);
1442 	config->blksize = NBD_DEF_BLKSIZE;
1443 	atomic_set(&config->live_connections, 0);
1444 	try_module_get(THIS_MODULE);
1445 	return config;
1446 }
1447 
1448 static int nbd_open(struct block_device *bdev, fmode_t mode)
1449 {
1450 	struct nbd_device *nbd;
1451 	int ret = 0;
1452 
1453 	mutex_lock(&nbd_index_mutex);
1454 	nbd = bdev->bd_disk->private_data;
1455 	if (!nbd) {
1456 		ret = -ENXIO;
1457 		goto out;
1458 	}
1459 	if (!refcount_inc_not_zero(&nbd->refs)) {
1460 		ret = -ENXIO;
1461 		goto out;
1462 	}
1463 	if (!refcount_inc_not_zero(&nbd->config_refs)) {
1464 		struct nbd_config *config;
1465 
1466 		mutex_lock(&nbd->config_lock);
1467 		if (refcount_inc_not_zero(&nbd->config_refs)) {
1468 			mutex_unlock(&nbd->config_lock);
1469 			goto out;
1470 		}
1471 		config = nbd->config = nbd_alloc_config();
1472 		if (!config) {
1473 			ret = -ENOMEM;
1474 			mutex_unlock(&nbd->config_lock);
1475 			goto out;
1476 		}
1477 		refcount_set(&nbd->config_refs, 1);
1478 		refcount_inc(&nbd->refs);
1479 		mutex_unlock(&nbd->config_lock);
1480 		if (max_part)
1481 			set_bit(GD_NEED_PART_SCAN, &bdev->bd_disk->state);
1482 	} else if (nbd_disconnected(nbd->config)) {
1483 		if (max_part)
1484 			set_bit(GD_NEED_PART_SCAN, &bdev->bd_disk->state);
1485 	}
1486 out:
1487 	mutex_unlock(&nbd_index_mutex);
1488 	return ret;
1489 }
1490 
1491 static void nbd_release(struct gendisk *disk, fmode_t mode)
1492 {
1493 	struct nbd_device *nbd = disk->private_data;
1494 
1495 	if (test_bit(NBD_RT_DISCONNECT_ON_CLOSE, &nbd->config->runtime_flags) &&
1496 			disk->part0->bd_openers == 0)
1497 		nbd_disconnect_and_put(nbd);
1498 
1499 	nbd_config_put(nbd);
1500 	nbd_put(nbd);
1501 }
1502 
1503 static const struct block_device_operations nbd_fops =
1504 {
1505 	.owner =	THIS_MODULE,
1506 	.open =		nbd_open,
1507 	.release =	nbd_release,
1508 	.ioctl =	nbd_ioctl,
1509 	.compat_ioctl =	nbd_ioctl,
1510 };
1511 
1512 #if IS_ENABLED(CONFIG_DEBUG_FS)
1513 
1514 static int nbd_dbg_tasks_show(struct seq_file *s, void *unused)
1515 {
1516 	struct nbd_device *nbd = s->private;
1517 
1518 	if (nbd->task_recv)
1519 		seq_printf(s, "recv: %d\n", task_pid_nr(nbd->task_recv));
1520 
1521 	return 0;
1522 }
1523 
1524 static int nbd_dbg_tasks_open(struct inode *inode, struct file *file)
1525 {
1526 	return single_open(file, nbd_dbg_tasks_show, inode->i_private);
1527 }
1528 
1529 static const struct file_operations nbd_dbg_tasks_ops = {
1530 	.open = nbd_dbg_tasks_open,
1531 	.read = seq_read,
1532 	.llseek = seq_lseek,
1533 	.release = single_release,
1534 };
1535 
1536 static int nbd_dbg_flags_show(struct seq_file *s, void *unused)
1537 {
1538 	struct nbd_device *nbd = s->private;
1539 	u32 flags = nbd->config->flags;
1540 
1541 	seq_printf(s, "Hex: 0x%08x\n\n", flags);
1542 
1543 	seq_puts(s, "Known flags:\n");
1544 
1545 	if (flags & NBD_FLAG_HAS_FLAGS)
1546 		seq_puts(s, "NBD_FLAG_HAS_FLAGS\n");
1547 	if (flags & NBD_FLAG_READ_ONLY)
1548 		seq_puts(s, "NBD_FLAG_READ_ONLY\n");
1549 	if (flags & NBD_FLAG_SEND_FLUSH)
1550 		seq_puts(s, "NBD_FLAG_SEND_FLUSH\n");
1551 	if (flags & NBD_FLAG_SEND_FUA)
1552 		seq_puts(s, "NBD_FLAG_SEND_FUA\n");
1553 	if (flags & NBD_FLAG_SEND_TRIM)
1554 		seq_puts(s, "NBD_FLAG_SEND_TRIM\n");
1555 
1556 	return 0;
1557 }
1558 
1559 static int nbd_dbg_flags_open(struct inode *inode, struct file *file)
1560 {
1561 	return single_open(file, nbd_dbg_flags_show, inode->i_private);
1562 }
1563 
1564 static const struct file_operations nbd_dbg_flags_ops = {
1565 	.open = nbd_dbg_flags_open,
1566 	.read = seq_read,
1567 	.llseek = seq_lseek,
1568 	.release = single_release,
1569 };
1570 
1571 static int nbd_dev_dbg_init(struct nbd_device *nbd)
1572 {
1573 	struct dentry *dir;
1574 	struct nbd_config *config = nbd->config;
1575 
1576 	if (!nbd_dbg_dir)
1577 		return -EIO;
1578 
1579 	dir = debugfs_create_dir(nbd_name(nbd), nbd_dbg_dir);
1580 	if (!dir) {
1581 		dev_err(nbd_to_dev(nbd), "Failed to create debugfs dir for '%s'\n",
1582 			nbd_name(nbd));
1583 		return -EIO;
1584 	}
1585 	config->dbg_dir = dir;
1586 
1587 	debugfs_create_file("tasks", 0444, dir, nbd, &nbd_dbg_tasks_ops);
1588 	debugfs_create_u64("size_bytes", 0444, dir, &config->bytesize);
1589 	debugfs_create_u32("timeout", 0444, dir, &nbd->tag_set.timeout);
1590 	debugfs_create_u64("blocksize", 0444, dir, &config->blksize);
1591 	debugfs_create_file("flags", 0444, dir, nbd, &nbd_dbg_flags_ops);
1592 
1593 	return 0;
1594 }
1595 
1596 static void nbd_dev_dbg_close(struct nbd_device *nbd)
1597 {
1598 	debugfs_remove_recursive(nbd->config->dbg_dir);
1599 }
1600 
1601 static int nbd_dbg_init(void)
1602 {
1603 	struct dentry *dbg_dir;
1604 
1605 	dbg_dir = debugfs_create_dir("nbd", NULL);
1606 	if (!dbg_dir)
1607 		return -EIO;
1608 
1609 	nbd_dbg_dir = dbg_dir;
1610 
1611 	return 0;
1612 }
1613 
1614 static void nbd_dbg_close(void)
1615 {
1616 	debugfs_remove_recursive(nbd_dbg_dir);
1617 }
1618 
1619 #else  /* IS_ENABLED(CONFIG_DEBUG_FS) */
1620 
1621 static int nbd_dev_dbg_init(struct nbd_device *nbd)
1622 {
1623 	return 0;
1624 }
1625 
1626 static void nbd_dev_dbg_close(struct nbd_device *nbd)
1627 {
1628 }
1629 
1630 static int nbd_dbg_init(void)
1631 {
1632 	return 0;
1633 }
1634 
1635 static void nbd_dbg_close(void)
1636 {
1637 }
1638 
1639 #endif
1640 
1641 static int nbd_init_request(struct blk_mq_tag_set *set, struct request *rq,
1642 			    unsigned int hctx_idx, unsigned int numa_node)
1643 {
1644 	struct nbd_cmd *cmd = blk_mq_rq_to_pdu(rq);
1645 	cmd->nbd = set->driver_data;
1646 	cmd->flags = 0;
1647 	mutex_init(&cmd->lock);
1648 	return 0;
1649 }
1650 
1651 static const struct blk_mq_ops nbd_mq_ops = {
1652 	.queue_rq	= nbd_queue_rq,
1653 	.complete	= nbd_complete_rq,
1654 	.init_request	= nbd_init_request,
1655 	.timeout	= nbd_xmit_timeout,
1656 };
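
/*
 * The tag set wired up here uses the usual blk-mq "pdu" pattern: a
 * struct nbd_cmd is allocated immediately behind every struct request
 * (via cmd_size in nbd_dev_add() below), so the two convert back and
 * forth without any lookup:
 *
 *	struct nbd_cmd *cmd = blk_mq_rq_to_pdu(rq);
 *	struct request *same_rq = blk_mq_rq_from_pdu(cmd);
 */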
1657 
1658 static int nbd_dev_add(int index)
1659 {
1660 	struct nbd_device *nbd;
1661 	struct gendisk *disk;
1662 	struct request_queue *q;
1663 	int err = -ENOMEM;
1664 
1665 	nbd = kzalloc(sizeof(struct nbd_device), GFP_KERNEL);
1666 	if (!nbd)
1667 		goto out;
1668 
1669 	disk = alloc_disk(1 << part_shift);
1670 	if (!disk)
1671 		goto out_free_nbd;
1672 
1673 	if (index >= 0) {
1674 		err = idr_alloc(&nbd_index_idr, nbd, index, index + 1,
1675 				GFP_KERNEL);
1676 		if (err == -ENOSPC)
1677 			err = -EEXIST;
1678 	} else {
1679 		err = idr_alloc(&nbd_index_idr, nbd, 0, 0, GFP_KERNEL);
1680 		if (err >= 0)
1681 			index = err;
1682 	}
1683 	if (err < 0)
1684 		goto out_free_disk;
1685 
1686 	nbd->index = index;
1687 	nbd->disk = disk;
1688 	nbd->tag_set.ops = &nbd_mq_ops;
1689 	nbd->tag_set.nr_hw_queues = 1;
1690 	nbd->tag_set.queue_depth = 128;
1691 	nbd->tag_set.numa_node = NUMA_NO_NODE;
1692 	nbd->tag_set.cmd_size = sizeof(struct nbd_cmd);
1693 	nbd->tag_set.flags = BLK_MQ_F_SHOULD_MERGE |
1694 		BLK_MQ_F_BLOCKING;
1695 	nbd->tag_set.driver_data = nbd;
1696 	nbd->destroy_complete = NULL;
1697 
1698 	err = blk_mq_alloc_tag_set(&nbd->tag_set);
1699 	if (err)
1700 		goto out_free_idr;
1701 
1702 	q = blk_mq_init_queue(&nbd->tag_set);
1703 	if (IS_ERR(q)) {
1704 		err = PTR_ERR(q);
1705 		goto out_free_tags;
1706 	}
1707 	disk->queue = q;
1708 
1709 	/*
1710 	 * Tell the block layer that we are not a rotational device
1711 	 */
1712 	blk_queue_flag_set(QUEUE_FLAG_NONROT, disk->queue);
1713 	blk_queue_flag_clear(QUEUE_FLAG_ADD_RANDOM, disk->queue);
1714 	disk->queue->limits.discard_granularity = 0;
1715 	disk->queue->limits.discard_alignment = 0;
1716 	blk_queue_max_discard_sectors(disk->queue, 0);
1717 	blk_queue_max_segment_size(disk->queue, UINT_MAX);
1718 	blk_queue_max_segments(disk->queue, USHRT_MAX);
1719 	blk_queue_max_hw_sectors(disk->queue, 65536);
1720 	disk->queue->limits.max_sectors = 256;
1721 
1722 	mutex_init(&nbd->config_lock);
1723 	refcount_set(&nbd->config_refs, 0);
1724 	refcount_set(&nbd->refs, 1);
1725 	INIT_LIST_HEAD(&nbd->list);
1726 	disk->major = NBD_MAJOR;
1727 	disk->first_minor = index << part_shift;
1728 	disk->fops = &nbd_fops;
1729 	disk->private_data = nbd;
1730 	sprintf(disk->disk_name, "nbd%d", index);
1731 	add_disk(disk);
1732 	nbd_total_devices++;
1733 	return index;
1734 
1735 out_free_tags:
1736 	blk_mq_free_tag_set(&nbd->tag_set);
1737 out_free_idr:
1738 	idr_remove(&nbd_index_idr, index);
1739 out_free_disk:
1740 	put_disk(disk);
1741 out_free_nbd:
1742 	kfree(nbd);
1743 out:
1744 	return err;
1745 }
1746 
1747 static int find_free_cb(int id, void *ptr, void *data)
1748 {
1749 	struct nbd_device *nbd = ptr;
1750 	struct nbd_device **found = data;
1751 
1752 	if (!refcount_read(&nbd->config_refs)) {
1753 		*found = nbd;
1754 		return 1;
1755 	}
1756 	return 0;
1757 }
1758 
1759 /* Netlink interface. */
1760 static const struct nla_policy nbd_attr_policy[NBD_ATTR_MAX + 1] = {
1761 	[NBD_ATTR_INDEX]		=	{ .type = NLA_U32 },
1762 	[NBD_ATTR_SIZE_BYTES]		=	{ .type = NLA_U64 },
1763 	[NBD_ATTR_BLOCK_SIZE_BYTES]	=	{ .type = NLA_U64 },
1764 	[NBD_ATTR_TIMEOUT]		=	{ .type = NLA_U64 },
1765 	[NBD_ATTR_SERVER_FLAGS]		=	{ .type = NLA_U64 },
1766 	[NBD_ATTR_CLIENT_FLAGS]		=	{ .type = NLA_U64 },
1767 	[NBD_ATTR_SOCKETS]		=	{ .type = NLA_NESTED},
1768 	[NBD_ATTR_DEAD_CONN_TIMEOUT]	=	{ .type = NLA_U64 },
1769 	[NBD_ATTR_DEVICE_LIST]		=	{ .type = NLA_NESTED},
1770 };
1771 
1772 static const struct nla_policy nbd_sock_policy[NBD_SOCK_MAX + 1] = {
1773 	[NBD_SOCK_FD]			=	{ .type = NLA_U32 },
1774 };
1775 
1776 /* We don't use this right now since we don't parse the incoming list, but we
1777  * still want it here so userspace knows what to expect.
1778  */
1779 static const struct nla_policy __attribute__((unused))
1780 nbd_device_policy[NBD_DEVICE_ATTR_MAX + 1] = {
1781 	[NBD_DEVICE_INDEX]		=	{ .type = NLA_U32 },
1782 	[NBD_DEVICE_CONNECTED]		=	{ .type = NLA_U8 },
1783 };
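
/*
 * A sketch of the attribute layout a connect request is expected to
 * carry, matching the policies above (the values are illustrative):
 *
 *	NBD_ATTR_INDEX            (u32)  0	// optional, else first free
 *	NBD_ATTR_SIZE_BYTES       (u64)  1073741824
 *	NBD_ATTR_BLOCK_SIZE_BYTES (u64)  4096
 *	NBD_ATTR_SOCKETS          (nested)
 *	  NBD_SOCK_ITEM           (nested)
 *	    NBD_SOCK_FD           (u32)  <connected socket fd>
 *
 * nbd_genl_connect() below requires at least NBD_ATTR_SOCKETS and
 * NBD_ATTR_SIZE_BYTES and rejects the request otherwise.
 */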
1784 
1785 static int nbd_genl_size_set(struct genl_info *info, struct nbd_device *nbd)
1786 {
1787 	struct nbd_config *config = nbd->config;
1788 	u64 bsize = config->blksize;
1789 	u64 bytes = config->bytesize;
1790 
1791 	if (info->attrs[NBD_ATTR_SIZE_BYTES])
1792 		bytes = nla_get_u64(info->attrs[NBD_ATTR_SIZE_BYTES]);
1793 
1794 	if (info->attrs[NBD_ATTR_BLOCK_SIZE_BYTES])
1795 		bsize = nla_get_u64(info->attrs[NBD_ATTR_BLOCK_SIZE_BYTES]);
1796 
1797 	if (bytes != config->bytesize || bsize != config->blksize)
1798 		return nbd_set_size(nbd, bytes, bsize);
1799 	return 0;
1800 }
1801 
1802 static int nbd_genl_connect(struct sk_buff *skb, struct genl_info *info)
1803 {
1804 	DECLARE_COMPLETION_ONSTACK(destroy_complete);
1805 	struct nbd_device *nbd = NULL;
1806 	struct nbd_config *config;
1807 	int index = -1;
1808 	int ret;
1809 	bool put_dev = false;
1810 
1811 	if (!netlink_capable(skb, CAP_SYS_ADMIN))
1812 		return -EPERM;
1813 
1814 	if (info->attrs[NBD_ATTR_INDEX])
1815 		index = nla_get_u32(info->attrs[NBD_ATTR_INDEX]);
1816 	if (!info->attrs[NBD_ATTR_SOCKETS]) {
1817 		printk(KERN_ERR "nbd: must specify at least one socket\n");
1818 		return -EINVAL;
1819 	}
1820 	if (!info->attrs[NBD_ATTR_SIZE_BYTES]) {
1821 		printk(KERN_ERR "nbd: must specify a size in bytes for the device\n");
1822 		return -EINVAL;
1823 	}
1824 again:
1825 	mutex_lock(&nbd_index_mutex);
1826 	if (index == -1) {
1827 		ret = idr_for_each(&nbd_index_idr, &find_free_cb, &nbd);
1828 		if (ret == 0) {
1829 			int new_index;
1830 			new_index = nbd_dev_add(-1);
1831 			if (new_index < 0) {
1832 				mutex_unlock(&nbd_index_mutex);
1833 				printk(KERN_ERR "nbd: failed to add new device\n");
1834 				return new_index;
1835 			}
1836 			nbd = idr_find(&nbd_index_idr, new_index);
1837 		}
1838 	} else {
1839 		nbd = idr_find(&nbd_index_idr, index);
1840 		if (!nbd) {
1841 			ret = nbd_dev_add(index);
1842 			if (ret < 0) {
1843 				mutex_unlock(&nbd_index_mutex);
1844 				printk(KERN_ERR "nbd: failed to add new device\n");
1845 				return ret;
1846 			}
1847 			nbd = idr_find(&nbd_index_idr, index);
1848 		}
1849 	}
1850 	if (!nbd) {
1851 		printk(KERN_ERR "nbd: couldn't find device at index %d\n",
1852 		       index);
1853 		mutex_unlock(&nbd_index_mutex);
1854 		return -EINVAL;
1855 	}
1856 
1857 	if (test_bit(NBD_DESTROY_ON_DISCONNECT, &nbd->flags) &&
1858 	    test_bit(NBD_DISCONNECT_REQUESTED, &nbd->flags)) {
1859 		nbd->destroy_complete = &destroy_complete;
1860 		mutex_unlock(&nbd_index_mutex);
1861 
1862 		/* Wait until the nbd device is totally destroyed */
1863 		wait_for_completion(&destroy_complete);
1864 		goto again;
1865 	}
1866 
1867 	if (!refcount_inc_not_zero(&nbd->refs)) {
1868 		mutex_unlock(&nbd_index_mutex);
1869 		if (index == -1)
1870 			goto again;
1871 		printk(KERN_ERR "nbd: device at index %d is going down\n",
1872 		       index);
1873 		return -EINVAL;
1874 	}
1875 	mutex_unlock(&nbd_index_mutex);
1876 
1877 	mutex_lock(&nbd->config_lock);
1878 	if (refcount_read(&nbd->config_refs)) {
1879 		mutex_unlock(&nbd->config_lock);
1880 		nbd_put(nbd);
1881 		if (index == -1)
1882 			goto again;
1883 		printk(KERN_ERR "nbd: nbd%d already in use\n", index);
1884 		return -EBUSY;
1885 	}
1886 	if (WARN_ON(nbd->config)) {
1887 		mutex_unlock(&nbd->config_lock);
1888 		nbd_put(nbd);
1889 		return -EINVAL;
1890 	}
1891 	config = nbd->config = nbd_alloc_config();
1892 	if (!nbd->config) {
1893 		mutex_unlock(&nbd->config_lock);
1894 		nbd_put(nbd);
1895 		printk(KERN_ERR "nbd: couldn't allocate config\n");
1896 		return -ENOMEM;
1897 	}
1898 	refcount_set(&nbd->config_refs, 1);
1899 	set_bit(NBD_RT_BOUND, &config->runtime_flags);
1900 
1901 	ret = nbd_genl_size_set(info, nbd);
1902 	if (ret)
1903 		goto out;
1904 
1905 	if (info->attrs[NBD_ATTR_TIMEOUT])
1906 		nbd_set_cmd_timeout(nbd,
1907 				    nla_get_u64(info->attrs[NBD_ATTR_TIMEOUT]));
1908 	if (info->attrs[NBD_ATTR_DEAD_CONN_TIMEOUT]) {
1909 		config->dead_conn_timeout =
1910 			nla_get_u64(info->attrs[NBD_ATTR_DEAD_CONN_TIMEOUT]);
1911 		config->dead_conn_timeout *= HZ;
1912 	}
1913 	if (info->attrs[NBD_ATTR_SERVER_FLAGS])
1914 		config->flags =
1915 			nla_get_u64(info->attrs[NBD_ATTR_SERVER_FLAGS]);
1916 	if (info->attrs[NBD_ATTR_CLIENT_FLAGS]) {
1917 		u64 flags = nla_get_u64(info->attrs[NBD_ATTR_CLIENT_FLAGS]);
1918 		if (flags & NBD_CFLAG_DESTROY_ON_DISCONNECT) {
1919 			set_bit(NBD_RT_DESTROY_ON_DISCONNECT,
1920 				&config->runtime_flags);
1921 			set_bit(NBD_DESTROY_ON_DISCONNECT, &nbd->flags);
1922 			put_dev = true;
1923 		} else {
1924 			clear_bit(NBD_DESTROY_ON_DISCONNECT, &nbd->flags);
1925 		}
1926 		if (flags & NBD_CFLAG_DISCONNECT_ON_CLOSE) {
1927 			set_bit(NBD_RT_DISCONNECT_ON_CLOSE,
1928 				&config->runtime_flags);
1929 		}
1930 	}
1931 
1932 	if (info->attrs[NBD_ATTR_SOCKETS]) {
1933 		struct nlattr *attr;
1934 		int rem, fd;
1935 
1936 		nla_for_each_nested(attr, info->attrs[NBD_ATTR_SOCKETS],
1937 				    rem) {
1938 			struct nlattr *socks[NBD_SOCK_MAX+1];
1939 
1940 			if (nla_type(attr) != NBD_SOCK_ITEM) {
1941 				printk(KERN_ERR "nbd: socks must be embedded in a SOCK_ITEM attr\n");
1942 				ret = -EINVAL;
1943 				goto out;
1944 			}
1945 			ret = nla_parse_nested_deprecated(socks, NBD_SOCK_MAX,
1946 							  attr,
1947 							  nbd_sock_policy,
1948 							  info->extack);
1949 			if (ret != 0) {
1950 				printk(KERN_ERR "nbd: error processing sock list\n");
1951 				ret = -EINVAL;
1952 				goto out;
1953 			}
1954 			if (!socks[NBD_SOCK_FD])
1955 				continue;
1956 			fd = (int)nla_get_u32(socks[NBD_SOCK_FD]);
1957 			ret = nbd_add_socket(nbd, fd, true);
1958 			if (ret)
1959 				goto out;
1960 		}
1961 	}
1962 	ret = nbd_start_device(nbd);
1963 out:
1964 	mutex_unlock(&nbd->config_lock);
1965 	if (!ret) {
1966 		set_bit(NBD_RT_HAS_CONFIG_REF, &config->runtime_flags);
1967 		refcount_inc(&nbd->config_refs);
1968 		nbd_connect_reply(info, nbd->index);
1969 	}
1970 	nbd_config_put(nbd);
1971 	if (put_dev)
1972 		nbd_put(nbd);
1973 	return ret;
1974 }
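
/*
 * Hedged userspace sketch (illustration only): a minimal NBD_CMD_CONNECT
 * request, modeled loosely on how nbd-client drives this interface with
 * libnl-genl-3.  "sock_fd" is assumed to be a TCP socket that has already
 * completed the NBD negotiation with the server; "sk", "msg" and "family"
 * are set up as in the sketch after nbd_genl_size_set() above.
 *
 *	struct nlattr *socks, *item;
 *
 *	genlmsg_put(msg, NL_AUTO_PORT, NL_AUTO_SEQ, family, 0, 0,
 *		    NBD_CMD_CONNECT, 0);
 *	nla_put_u32(msg, NBD_ATTR_INDEX, 0);	  // omit to auto-pick
 *	nla_put_u64(msg, NBD_ATTR_SIZE_BYTES, size);	// mandatory
 *	socks = nla_nest_start(msg, NBD_ATTR_SOCKETS);	// mandatory
 *	item = nla_nest_start(msg, NBD_SOCK_ITEM);
 *	nla_put_u32(msg, NBD_SOCK_FD, sock_fd);
 *	nla_nest_end(msg, item);
 *	nla_nest_end(msg, socks);
 *	nl_send_sync(sk, msg);
 *
 * On success the kernel answers with an NBD_ATTR_INDEX attribute via
 * nbd_connect_reply() below.
 */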
1975 
1976 static void nbd_disconnect_and_put(struct nbd_device *nbd)
1977 {
1978 	mutex_lock(&nbd->config_lock);
1979 	nbd_disconnect(nbd);
1980 	nbd_clear_sock(nbd);
1981 	mutex_unlock(&nbd->config_lock);
1982 	/*
1983 	 * Make sure recv thread has finished, so it does not drop the last
1984 	 * config ref and try to destroy the workqueue from inside the work
1985 	 * queue.
1986 	 */
1987 	flush_workqueue(nbd->recv_workq);
1988 	if (test_and_clear_bit(NBD_RT_HAS_CONFIG_REF,
1989 			       &nbd->config->runtime_flags))
1990 		nbd_config_put(nbd);
1991 }
1992 
1993 static int nbd_genl_disconnect(struct sk_buff *skb, struct genl_info *info)
1994 {
1995 	struct nbd_device *nbd;
1996 	int index;
1997 
1998 	if (!netlink_capable(skb, CAP_SYS_ADMIN))
1999 		return -EPERM;
2000 
2001 	if (!info->attrs[NBD_ATTR_INDEX]) {
2002 		printk(KERN_ERR "nbd: must specify an index to disconnect\n");
2003 		return -EINVAL;
2004 	}
2005 	index = nla_get_u32(info->attrs[NBD_ATTR_INDEX]);
2006 	mutex_lock(&nbd_index_mutex);
2007 	nbd = idr_find(&nbd_index_idr, index);
2008 	if (!nbd) {
2009 		mutex_unlock(&nbd_index_mutex);
2010 		printk(KERN_ERR "nbd: couldn't find device at index %d\n",
2011 		       index);
2012 		return -EINVAL;
2013 	}
2014 	if (!refcount_inc_not_zero(&nbd->refs)) {
2015 		mutex_unlock(&nbd_index_mutex);
2016 		printk(KERN_ERR "nbd: device at index %d is going down\n",
2017 		       index);
2018 		return -EINVAL;
2019 	}
2020 	mutex_unlock(&nbd_index_mutex);
2021 	if (!refcount_inc_not_zero(&nbd->config_refs)) {
2022 		nbd_put(nbd);
2023 		return 0;
2024 	}
2025 	nbd_disconnect_and_put(nbd);
2026 	nbd_config_put(nbd);
2027 	nbd_put(nbd);
2028 	return 0;
2029 }
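
/*
 * Hedged userspace sketch (illustration only): the matching disconnect
 * request only needs the device index.
 *
 *	genlmsg_put(msg, NL_AUTO_PORT, NL_AUTO_SEQ, family, 0, 0,
 *		    NBD_CMD_DISCONNECT, 0);
 *	nla_put_u32(msg, NBD_ATTR_INDEX, 0);
 *	nl_send_sync(sk, msg);
 *
 * Note that a device with no active config is treated as already
 * disconnected and the call still returns 0.
 */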
2030 
2031 static int nbd_genl_reconfigure(struct sk_buff *skb, struct genl_info *info)
2032 {
2033 	struct nbd_device *nbd = NULL;
2034 	struct nbd_config *config;
2035 	int index;
2036 	int ret = 0;
2037 	bool put_dev = false;
2038 
2039 	if (!netlink_capable(skb, CAP_SYS_ADMIN))
2040 		return -EPERM;
2041 
2042 	if (!info->attrs[NBD_ATTR_INDEX]) {
2043 		printk(KERN_ERR "nbd: must specify a device to reconfigure\n");
2044 		return -EINVAL;
2045 	}
2046 	index = nla_get_u32(info->attrs[NBD_ATTR_INDEX]);
2047 	mutex_lock(&nbd_index_mutex);
2048 	nbd = idr_find(&nbd_index_idr, index);
2049 	if (!nbd) {
2050 		mutex_unlock(&nbd_index_mutex);
2051 		printk(KERN_ERR "nbd: couldn't find a device at index %d\n",
2052 		       index);
2053 		return -EINVAL;
2054 	}
2055 	if (!refcount_inc_not_zero(&nbd->refs)) {
2056 		mutex_unlock(&nbd_index_mutex);
2057 		printk(KERN_ERR "nbd: device at index %d is going down\n",
2058 		       index);
2059 		return -EINVAL;
2060 	}
2061 	mutex_unlock(&nbd_index_mutex);
2062 
2063 	if (!refcount_inc_not_zero(&nbd->config_refs)) {
2064 		dev_err(nbd_to_dev(nbd),
2065 			"not configured, cannot reconfigure\n");
2066 		nbd_put(nbd);
2067 		return -EINVAL;
2068 	}
2069 
2070 	mutex_lock(&nbd->config_lock);
2071 	config = nbd->config;
2072 	if (!test_bit(NBD_RT_BOUND, &config->runtime_flags) ||
2073 	    !nbd->task_recv) {
2074 		dev_err(nbd_to_dev(nbd),
2075 			"not configured, cannot reconfigure\n");
2076 		ret = -EINVAL;
2077 		goto out;
2078 	}
2079 
2080 	ret = nbd_genl_size_set(info, nbd);
2081 	if (ret)
2082 		goto out;
2083 
2084 	if (info->attrs[NBD_ATTR_TIMEOUT])
2085 		nbd_set_cmd_timeout(nbd,
2086 				    nla_get_u64(info->attrs[NBD_ATTR_TIMEOUT]));
2087 	if (info->attrs[NBD_ATTR_DEAD_CONN_TIMEOUT]) {
2088 		config->dead_conn_timeout =
2089 			nla_get_u64(info->attrs[NBD_ATTR_DEAD_CONN_TIMEOUT]);
2090 		config->dead_conn_timeout *= HZ;
2091 	}
2092 	if (info->attrs[NBD_ATTR_CLIENT_FLAGS]) {
2093 		u64 flags = nla_get_u64(info->attrs[NBD_ATTR_CLIENT_FLAGS]);
2094 		if (flags & NBD_CFLAG_DESTROY_ON_DISCONNECT) {
2095 			if (!test_and_set_bit(NBD_RT_DESTROY_ON_DISCONNECT,
2096 					      &config->runtime_flags))
2097 				put_dev = true;
2098 			set_bit(NBD_DESTROY_ON_DISCONNECT, &nbd->flags);
2099 		} else {
2100 			if (test_and_clear_bit(NBD_RT_DESTROY_ON_DISCONNECT,
2101 					       &config->runtime_flags))
2102 				refcount_inc(&nbd->refs);
2103 			clear_bit(NBD_DESTROY_ON_DISCONNECT, &nbd->flags);
2104 		}
2105 
2106 		if (flags & NBD_CFLAG_DISCONNECT_ON_CLOSE) {
2107 			set_bit(NBD_RT_DISCONNECT_ON_CLOSE,
2108 					&config->runtime_flags);
2109 		} else {
2110 			clear_bit(NBD_RT_DISCONNECT_ON_CLOSE,
2111 					&config->runtime_flags);
2112 		}
2113 	}
2114 
2115 	if (info->attrs[NBD_ATTR_SOCKETS]) {
2116 		struct nlattr *attr;
2117 		int rem, fd;
2118 
2119 		nla_for_each_nested(attr, info->attrs[NBD_ATTR_SOCKETS],
2120 				    rem) {
2121 			struct nlattr *socks[NBD_SOCK_MAX+1];
2122 
2123 			if (nla_type(attr) != NBD_SOCK_ITEM) {
2124 				printk(KERN_ERR "nbd: socks must be embedded in a SOCK_ITEM attr\n");
2125 				ret = -EINVAL;
2126 				goto out;
2127 			}
2128 			ret = nla_parse_nested_deprecated(socks, NBD_SOCK_MAX,
2129 							  attr,
2130 							  nbd_sock_policy,
2131 							  info->extack);
2132 			if (ret != 0) {
2133 				printk(KERN_ERR "nbd: error processing sock list\n");
2134 				ret = -EINVAL;
2135 				goto out;
2136 			}
2137 			if (!socks[NBD_SOCK_FD])
2138 				continue;
2139 			fd = (int)nla_get_u32(socks[NBD_SOCK_FD]);
2140 			ret = nbd_reconnect_socket(nbd, fd);
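			/*
			 * nbd_reconnect_socket() is expected to return
			 * -ENOSPC when no dead connection was waiting
			 * for a replacement; that is not an error from
			 * the caller's point of view.
			 */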
2141 			if (ret) {
2142 				if (ret == -ENOSPC)
2143 					ret = 0;
2144 				goto out;
2145 			}
2146 			dev_info(nbd_to_dev(nbd), "reconnected socket\n");
2147 		}
2148 	}
2149 out:
2150 	mutex_unlock(&nbd->config_lock);
2151 	nbd_config_put(nbd);
2152 	nbd_put(nbd);
2153 	if (put_dev)
2154 		nbd_put(nbd);
2155 	return ret;
2156 }
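
/*
 * Hedged usage note (illustration only): to replace a dead link, a
 * reconfigure message carries a fresh, already-negotiated socket in the
 * same NBD_ATTR_SOCKETS/NBD_SOCK_ITEM/NBD_SOCK_FD nesting shown in the
 * connect sketch above, alongside NBD_ATTR_INDEX for the device.
 */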
2157 
2158 static const struct genl_small_ops nbd_connect_genl_ops[] = {
2159 	{
2160 		.cmd	= NBD_CMD_CONNECT,
2161 		.validate = GENL_DONT_VALIDATE_STRICT | GENL_DONT_VALIDATE_DUMP,
2162 		.doit	= nbd_genl_connect,
2163 	},
2164 	{
2165 		.cmd	= NBD_CMD_DISCONNECT,
2166 		.validate = GENL_DONT_VALIDATE_STRICT | GENL_DONT_VALIDATE_DUMP,
2167 		.doit	= nbd_genl_disconnect,
2168 	},
2169 	{
2170 		.cmd	= NBD_CMD_RECONFIGURE,
2171 		.validate = GENL_DONT_VALIDATE_STRICT | GENL_DONT_VALIDATE_DUMP,
2172 		.doit	= nbd_genl_reconfigure,
2173 	},
2174 	{
2175 		.cmd	= NBD_CMD_STATUS,
2176 		.validate = GENL_DONT_VALIDATE_STRICT | GENL_DONT_VALIDATE_DUMP,
2177 		.doit	= nbd_genl_status,
2178 	},
2179 };
2180 
2181 static const struct genl_multicast_group nbd_mcast_grps[] = {
2182 	{ .name = NBD_GENL_MCAST_GROUP_NAME, },
2183 };
2184 
2185 static struct genl_family nbd_genl_family __ro_after_init = {
2186 	.hdrsize	= 0,
2187 	.name		= NBD_GENL_FAMILY_NAME,
2188 	.version	= NBD_GENL_VERSION,
2189 	.module		= THIS_MODULE,
2190 	.small_ops	= nbd_connect_genl_ops,
2191 	.n_small_ops	= ARRAY_SIZE(nbd_connect_genl_ops),
2192 	.maxattr	= NBD_ATTR_MAX,
2193 	.policy		= nbd_attr_policy,
2194 	.mcgrps		= nbd_mcast_grps,
2195 	.n_mcgrps	= ARRAY_SIZE(nbd_mcast_grps),
2196 };
2197 
2198 static int populate_nbd_status(struct nbd_device *nbd, struct sk_buff *reply)
2199 {
2200 	struct nlattr *dev_opt;
2201 	u8 connected = 0;
2202 	int ret;
2203 
2204 	/* This is a little racy, but for status it's ok.  The
2205 	 * reason we don't take a ref here is that we can't
2206 	 * take a ref in the index == -1 case, as we would need
2207 	 * to put it under the nbd_index_mutex, which could
2208 	 * deadlock if we are configured to remove ourselves
2209 	 * once we're disconnected.
2210 	 */
2211 	if (refcount_read(&nbd->config_refs))
2212 		connected = 1;
2213 	dev_opt = nla_nest_start_noflag(reply, NBD_DEVICE_ITEM);
2214 	if (!dev_opt)
2215 		return -EMSGSIZE;
2216 	ret = nla_put_u32(reply, NBD_DEVICE_INDEX, nbd->index);
2217 	if (ret)
2218 		return -EMSGSIZE;
2219 	ret = nla_put_u8(reply, NBD_DEVICE_CONNECTED,
2220 			 connected);
2221 	if (ret)
2222 		return -EMSGSIZE;
2223 	nla_nest_end(reply, dev_opt);
2224 	return 0;
2225 }
2226 
2227 static int status_cb(int id, void *ptr, void *data)
2228 {
2229 	struct nbd_device *nbd = ptr;
2230 	return populate_nbd_status(nbd, (struct sk_buff *)data);
2231 }
2232 
2233 static int nbd_genl_status(struct sk_buff *skb, struct genl_info *info)
2234 {
2235 	struct nlattr *dev_list;
2236 	struct sk_buff *reply;
2237 	void *reply_head;
2238 	size_t msg_size;
2239 	int index = -1;
2240 	int ret = -ENOMEM;
2241 
2242 	if (info->attrs[NBD_ATTR_INDEX])
2243 		index = nla_get_u32(info->attrs[NBD_ATTR_INDEX]);
2244 
2245 	mutex_lock(&nbd_index_mutex);
2246 
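	/*
	 * Worked example (derived, not from the original source): with
	 * NLA_HDRLEN == 4, nla_attr_size(4) + nla_attr_size(1) == 13
	 * bytes of payload, so each device entry below reserves
	 * nla_total_size(13) == 20 bytes in the reply.
	 */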
2247 	msg_size = nla_total_size(nla_attr_size(sizeof(u32)) +
2248 				  nla_attr_size(sizeof(u8)));
2249 	msg_size *= (index == -1) ? nbd_total_devices : 1;
2250 
2251 	reply = genlmsg_new(msg_size, GFP_KERNEL);
2252 	if (!reply)
2253 		goto out;
2254 	reply_head = genlmsg_put_reply(reply, info, &nbd_genl_family, 0,
2255 				       NBD_CMD_STATUS);
2256 	if (!reply_head) {
2257 		nlmsg_free(reply);
2258 		goto out;
2259 	}
2260 
2261 	dev_list = nla_nest_start_noflag(reply, NBD_ATTR_DEVICE_LIST);
2262 	if (index == -1) {
2263 		ret = idr_for_each(&nbd_index_idr, &status_cb, reply);
2264 		if (ret) {
2265 			nlmsg_free(reply);
2266 			goto out;
2267 		}
2268 	} else {
2269 		struct nbd_device *nbd;
2270 		nbd = idr_find(&nbd_index_idr, index);
2271 		if (nbd) {
2272 			ret = populate_nbd_status(nbd, reply);
2273 			if (ret) {
2274 				nlmsg_free(reply);
2275 				goto out;
2276 			}
2277 		}
2278 	}
2279 	nla_nest_end(reply, dev_list);
2280 	genlmsg_end(reply, reply_head);
2281 	ret = genlmsg_reply(reply, info);
2282 out:
2283 	mutex_unlock(&nbd_index_mutex);
2284 	return ret;
2285 }
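
/*
 * Hedged userspace sketch (illustration only): querying status and
 * walking the reply with libnl-genl-3.
 *
 *	genlmsg_put(msg, NL_AUTO_PORT, NL_AUTO_SEQ, family, 0, 0,
 *		    NBD_CMD_STATUS, 0);
 *	nla_put_u32(msg, NBD_ATTR_INDEX, 0);  // omit for all devices
 *	nl_socket_modify_cb(sk, NL_CB_VALID, NL_CB_CUSTOM,
 *			    parse_status_cb, NULL);
 *	nl_send_auto(sk, msg);
 *	nl_recvmsgs_default(sk);
 *
 * where parse_status_cb() is a hypothetical callback that parses
 * NBD_ATTR_DEVICE_LIST into NBD_DEVICE_ITEM entries, each holding
 * NBD_DEVICE_INDEX (u32) and NBD_DEVICE_CONNECTED (u8), matching
 * populate_nbd_status() above.
 */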
2286 
2287 static void nbd_connect_reply(struct genl_info *info, int index)
2288 {
2289 	struct sk_buff *skb;
2290 	void *msg_head;
2291 	int ret;
2292 
2293 	skb = genlmsg_new(nla_total_size(sizeof(u32)), GFP_KERNEL);
2294 	if (!skb)
2295 		return;
2296 	msg_head = genlmsg_put_reply(skb, info, &nbd_genl_family, 0,
2297 				     NBD_CMD_CONNECT);
2298 	if (!msg_head) {
2299 		nlmsg_free(skb);
2300 		return;
2301 	}
2302 	ret = nla_put_u32(skb, NBD_ATTR_INDEX, index);
2303 	if (ret) {
2304 		nlmsg_free(skb);
2305 		return;
2306 	}
2307 	genlmsg_end(skb, msg_head);
2308 	genlmsg_reply(skb, info);
2309 }
2310 
2311 static void nbd_mcast_index(int index)
2312 {
2313 	struct sk_buff *skb;
2314 	void *msg_head;
2315 	int ret;
2316 
2317 	skb = genlmsg_new(nla_total_size(sizeof(u32)), GFP_KERNEL);
2318 	if (!skb)
2319 		return;
2320 	msg_head = genlmsg_put(skb, 0, 0, &nbd_genl_family, 0,
2321 			       NBD_CMD_LINK_DEAD);
2322 	if (!msg_head) {
2323 		nlmsg_free(skb);
2324 		return;
2325 	}
2326 	ret = nla_put_u32(skb, NBD_ATTR_INDEX, index);
2327 	if (ret) {
2328 		nlmsg_free(skb);
2329 		return;
2330 	}
2331 	genlmsg_end(skb, msg_head);
2332 	genlmsg_multicast(&nbd_genl_family, skb, 0, 0, GFP_KERNEL);
2333 }
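
/*
 * Hedged userspace sketch (illustration only): a monitor can subscribe
 * to these dead-link notifications with libnl-genl-3.
 *
 *	int grp = genl_ctrl_resolve_grp(sk, NBD_GENL_FAMILY_NAME,
 *					NBD_GENL_MCAST_GROUP_NAME);
 *
 *	nl_socket_add_membership(sk, grp);
 *	nl_recvmsgs_default(sk);  // NBD_CMD_LINK_DEAD + NBD_ATTR_INDEX
 */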
2334 
2335 static void nbd_dead_link_work(struct work_struct *work)
2336 {
2337 	struct link_dead_args *args = container_of(work, struct link_dead_args,
2338 						   work);
2339 	nbd_mcast_index(args->index);
2340 	kfree(args);
2341 }
2342 
2343 static int __init nbd_init(void)
2344 {
2345 	int i;
2346 
2347 	BUILD_BUG_ON(sizeof(struct nbd_request) != 28);
2348 
2349 	if (max_part < 0) {
2350 		printk(KERN_ERR "nbd: max_part must be >= 0\n");
2351 		return -EINVAL;
2352 	}
2353 
2354 	part_shift = 0;
2355 	if (max_part > 0) {
2356 		part_shift = fls(max_part);
2357 
2358 		/*
2359 		 * Adjust max_part according to part_shift as it is exported
2360 		 * to user space so that users can know the max number of
2361 		 * partitions the kernel is able to manage.
2362 		 *
2363 		 * Note that -1 is required because partition 0 is reserved
2364 		 * for the whole disk.
2365 		 */
2366 		max_part = (1UL << part_shift) - 1;
2367 	}
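
	/*
	 * Worked example (derived, not from the original source):
	 * max_part=16 gives part_shift = fls(16) = 5, so max_part is
	 * rounded to (1 << 5) - 1 = 31 and each nbd device spans
	 * 1 << 5 = 32 minor numbers.
	 */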
2368 
2369 	if ((1UL << part_shift) > DISK_MAX_PARTS)
2370 		return -EINVAL;
2371 
2372 	if (nbds_max > 1UL << (MINORBITS - part_shift))
2373 		return -EINVAL;
2374 
2375 	if (register_blkdev(NBD_MAJOR, "nbd"))
2376 		return -EIO;
2377 
2378 	if (genl_register_family(&nbd_genl_family)) {
2379 		unregister_blkdev(NBD_MAJOR, "nbd");
2380 		return -EINVAL;
2381 	}
2382 	nbd_dbg_init();
2383 
2384 	mutex_lock(&nbd_index_mutex);
2385 	for (i = 0; i < nbds_max; i++)
2386 		nbd_dev_add(i);
2387 	mutex_unlock(&nbd_index_mutex);
2388 	return 0;
2389 }
2390 
2391 static int nbd_exit_cb(int id, void *ptr, void *data)
2392 {
2393 	struct list_head *list = (struct list_head *)data;
2394 	struct nbd_device *nbd = ptr;
2395 
2396 	list_add_tail(&nbd->list, list);
2397 	return 0;
2398 }
2399 
2400 static void __exit nbd_cleanup(void)
2401 {
2402 	struct nbd_device *nbd;
2403 	LIST_HEAD(del_list);
2404 
2405 	nbd_dbg_close();
2406 
2407 	mutex_lock(&nbd_index_mutex);
2408 	idr_for_each(&nbd_index_idr, &nbd_exit_cb, &del_list);
2409 	mutex_unlock(&nbd_index_mutex);
2410 
2411 	while (!list_empty(&del_list)) {
2412 		nbd = list_first_entry(&del_list, struct nbd_device, list);
2413 		list_del_init(&nbd->list);
2414 		if (refcount_read(&nbd->refs) != 1)
2415 			printk(KERN_ERR "nbd: possibly leaking a device\n");
2416 		nbd_put(nbd);
2417 	}
2418 
2419 	idr_destroy(&nbd_index_idr);
2420 	genl_unregister_family(&nbd_genl_family);
2421 	unregister_blkdev(NBD_MAJOR, "nbd");
2422 }
2423 
2424 module_init(nbd_init);
2425 module_exit(nbd_cleanup);
2426 
2427 MODULE_DESCRIPTION("Network Block Device");
2428 MODULE_LICENSE("GPL");
2429 
2430 module_param(nbds_max, int, 0444);
2431 MODULE_PARM_DESC(nbds_max, "number of network block devices to initialize (default: 16)");
2432 module_param(max_part, int, 0444);
2433 MODULE_PARM_DESC(max_part, "number of partitions per device (default: 16)");
2434