xref: /openbmc/linux/drivers/block/xen-blkfront.c (revision 3805e6a1)
1 /*
2  * blkfront.c
3  *
4  * XenLinux virtual block device driver.
5  *
6  * Copyright (c) 2003-2004, Keir Fraser & Steve Hand
7  * Modifications by Mark A. Williamson are (c) Intel Research Cambridge
8  * Copyright (c) 2004, Christian Limpach
9  * Copyright (c) 2004, Andrew Warfield
10  * Copyright (c) 2005, Christopher Clark
11  * Copyright (c) 2005, XenSource Ltd
12  *
13  * This program is free software; you can redistribute it and/or
14  * modify it under the terms of the GNU General Public License version 2
15  * as published by the Free Software Foundation; or, when distributed
16  * separately from the Linux kernel or incorporated into other
17  * software packages, subject to the following license:
18  *
19  * Permission is hereby granted, free of charge, to any person obtaining a copy
20  * of this source file (the "Software"), to deal in the Software without
21  * restriction, including without limitation the rights to use, copy, modify,
22  * merge, publish, distribute, sublicense, and/or sell copies of the Software,
23  * and to permit persons to whom the Software is furnished to do so, subject to
24  * the following conditions:
25  *
26  * The above copyright notice and this permission notice shall be included in
27  * all copies or substantial portions of the Software.
28  *
29  * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
30  * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
31  * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
32  * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
33  * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
34  * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
35  * IN THE SOFTWARE.
36  */
37 
38 #include <linux/interrupt.h>
39 #include <linux/blkdev.h>
40 #include <linux/blk-mq.h>
41 #include <linux/hdreg.h>
42 #include <linux/cdrom.h>
43 #include <linux/module.h>
44 #include <linux/slab.h>
45 #include <linux/mutex.h>
46 #include <linux/scatterlist.h>
47 #include <linux/bitmap.h>
48 #include <linux/list.h>
49 
50 #include <xen/xen.h>
51 #include <xen/xenbus.h>
52 #include <xen/grant_table.h>
53 #include <xen/events.h>
54 #include <xen/page.h>
55 #include <xen/platform_pci.h>
56 
57 #include <xen/interface/grant_table.h>
58 #include <xen/interface/io/blkif.h>
59 #include <xen/interface/io/protocols.h>
60 
61 #include <asm/xen/hypervisor.h>
62 
63 /*
64  * The minimal size of a segment supported by the block framework is PAGE_SIZE.
65  * When Linux is using a different page size than Xen, it may not be possible
66  * to put all the data in a single segment.
67  * This can happen when the backend doesn't support indirect descriptors and
68  * therefore the maximum amount of data that a request can carry is
69  * BLKIF_MAX_SEGMENTS_PER_REQUEST * XEN_PAGE_SIZE = 44KB
70  *
71  * Note that we only support one extra request. So the Linux page size
72  * should be <= ( 2 * BLKIF_MAX_SEGMENTS_PER_REQUEST * XEN_PAGE_SIZE) =
73  * 88KB.
74  */
75 #define HAS_EXTRA_REQ (BLKIF_MAX_SEGMENTS_PER_REQUEST < XEN_PFN_PER_PAGE)
76 
77 enum blkif_state {
78 	BLKIF_STATE_DISCONNECTED,
79 	BLKIF_STATE_CONNECTED,
80 	BLKIF_STATE_SUSPENDED,
81 };
82 
83 struct grant {
84 	grant_ref_t gref;
85 	struct page *page;
86 	struct list_head node;
87 };
88 
89 enum blk_req_status {
90 	REQ_WAITING,
91 	REQ_DONE,
92 	REQ_ERROR,
93 	REQ_EOPNOTSUPP,
94 };
95 
96 struct blk_shadow {
97 	struct blkif_request req;
98 	struct request *request;
99 	struct grant **grants_used;
100 	struct grant **indirect_grants;
101 	struct scatterlist *sg;
102 	unsigned int num_sg;
103 	enum blk_req_status status;
104 
105 	#define NO_ASSOCIATED_ID ~0UL
106 	/*
107 	 * Id of the sibling if we ever need 2 requests when handling a
108 	 * block I/O request
109 	 */
110 	unsigned long associated_id;
111 };
112 
113 struct split_bio {
114 	struct bio *bio;
115 	atomic_t pending;
116 };
117 
118 static DEFINE_MUTEX(blkfront_mutex);
119 static const struct block_device_operations xlvbd_block_fops;
120 
121 /*
122  * Maximum number of segments in indirect requests. The actual value used by
123  * the frontend driver is the minimum of this value and the value provided
124  * by the backend driver.
125  */
126 
127 static unsigned int xen_blkif_max_segments = 32;
128 module_param_named(max_indirect_segments, xen_blkif_max_segments, uint,
129 		   S_IRUGO);
130 MODULE_PARM_DESC(max_indirect_segments,
131 		 "Maximum number of segments in indirect requests (default is 32)");
132 
133 static unsigned int xen_blkif_max_queues = 4;
134 module_param_named(max_queues, xen_blkif_max_queues, uint, S_IRUGO);
135 MODULE_PARM_DESC(max_queues, "Maximum number of hardware queues/rings used per virtual disk");
136 
137 /*
138  * Maximum order of pages to be used for the shared ring between frontend and
139  * backend; 4KB page granularity is used.
140  */
141 static unsigned int xen_blkif_max_ring_order;
142 module_param_named(max_ring_page_order, xen_blkif_max_ring_order, int, S_IRUGO);
143 MODULE_PARM_DESC(max_ring_page_order, "Maximum order of pages to be used for the shared ring");
144 
145 #define BLK_RING_SIZE(info)	\
146 	__CONST_RING_SIZE(blkif, XEN_PAGE_SIZE * (info)->nr_ring_pages)
147 
148 #define BLK_MAX_RING_SIZE	\
149 	__CONST_RING_SIZE(blkif, XEN_PAGE_SIZE * XENBUS_MAX_RING_GRANTS)
150 
151 /*
152  * ring-ref%u with i = (-1UL) would take 11 characters + 'ring-ref' is 8, so 19
153  * characters are enough. Define it as 20 to keep consistent with the backend.
154  */
155 #define RINGREF_NAME_LEN (20)
156 /*
157  * queue-%u would take 7 + 10(UINT_MAX) = 17 characters.
158  */
159 #define QUEUE_NAME_LEN (17)
160 
161 /*
162  *  Per-ring info.
163  *  Every blkfront device can be associated with one or more blkfront_ring_info,
164  *  depending on how many hardware queues/rings are to be used.
165  */
166 struct blkfront_ring_info {
167 	/* Lock to protect data in every ring buffer. */
168 	spinlock_t ring_lock;
169 	struct blkif_front_ring ring;
170 	unsigned int ring_ref[XENBUS_MAX_RING_GRANTS];
171 	unsigned int evtchn, irq;
172 	struct work_struct work;
173 	struct gnttab_free_callback callback;
174 	struct blk_shadow shadow[BLK_MAX_RING_SIZE];
175 	struct list_head indirect_pages;
176 	struct list_head grants;
177 	unsigned int persistent_gnts_c;
178 	unsigned long shadow_free;
179 	struct blkfront_info *dev_info;
180 };
181 
182 /*
183  * We have one of these per vbd, whether ide, scsi or 'other'.  They
184  * hang in private_data off the gendisk structure. We may end up
185  * putting all kinds of interesting stuff here :-)
186  */
187 struct blkfront_info
188 {
189 	struct mutex mutex;
190 	struct xenbus_device *xbdev;
191 	struct gendisk *gd;
192 	int vdevice;
193 	blkif_vdev_t handle;
194 	enum blkif_state connected;
195 	/* Number of pages per ring buffer. */
196 	unsigned int nr_ring_pages;
197 	struct request_queue *rq;
198 	unsigned int feature_flush;
199 	unsigned int feature_discard:1;
200 	unsigned int feature_secdiscard:1;
201 	unsigned int discard_granularity;
202 	unsigned int discard_alignment;
203 	unsigned int feature_persistent:1;
204 	/* Number of 4KB segments handled */
205 	unsigned int max_indirect_segments;
206 	int is_ready;
207 	struct blk_mq_tag_set tag_set;
208 	struct blkfront_ring_info *rinfo;
209 	unsigned int nr_rings;
210 };
211 
212 static unsigned int nr_minors;
213 static unsigned long *minors;
214 static DEFINE_SPINLOCK(minor_lock);
215 
216 #define GRANT_INVALID_REF	0
217 
218 #define PARTS_PER_DISK		16
219 #define PARTS_PER_EXT_DISK      256
220 
221 #define BLKIF_MAJOR(dev) ((dev)>>8)
222 #define BLKIF_MINOR(dev) ((dev) & 0xff)
223 
224 #define EXT_SHIFT 28
225 #define EXTENDED (1<<EXT_SHIFT)
226 #define VDEV_IS_EXTENDED(dev) ((dev)&(EXTENDED))
227 #define BLKIF_MINOR_EXT(dev) ((dev)&(~EXTENDED))
228 #define EMULATED_HD_DISK_MINOR_OFFSET (0)
229 #define EMULATED_HD_DISK_NAME_OFFSET (EMULATED_HD_DISK_MINOR_OFFSET / 256)
230 #define EMULATED_SD_DISK_MINOR_OFFSET (0)
231 #define EMULATED_SD_DISK_NAME_OFFSET (EMULATED_SD_DISK_MINOR_OFFSET / 256)
232 
233 #define DEV_NAME	"xvd"	/* name in /dev */
234 
235 /*
236  * Grants are always the same size as a Xen page (i.e. 4KB).
237  * A physical segment is always the same size as a Linux page.
238  * Number of grants per physical segment
239  */
240 #define GRANTS_PER_PSEG	(PAGE_SIZE / XEN_PAGE_SIZE)
241 
242 #define GRANTS_PER_INDIRECT_FRAME \
243 	(XEN_PAGE_SIZE / sizeof(struct blkif_request_segment))
244 
245 #define PSEGS_PER_INDIRECT_FRAME	\
246 	(GRANTS_PER_INDIRECT_FRAME / GRANTS_PER_PSEG)
247 
248 #define INDIRECT_GREFS(_grants)		\
249 	DIV_ROUND_UP(_grants, GRANTS_PER_INDIRECT_FRAME)
250 
251 #define GREFS(_psegs)	((_psegs) * GRANTS_PER_PSEG)
252 
253 static int blkfront_setup_indirect(struct blkfront_ring_info *rinfo);
254 static void blkfront_gather_backend_features(struct blkfront_info *info);
255 
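/*
 * The shadow free list is threaded through the otherwise unused
 * req.u.rw.id field of each free shadow entry; rinfo->shadow_free holds
 * the index of the first free entry.
 */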
256 static int get_id_from_freelist(struct blkfront_ring_info *rinfo)
257 {
258 	unsigned long free = rinfo->shadow_free;
259 
260 	BUG_ON(free >= BLK_RING_SIZE(rinfo->dev_info));
261 	rinfo->shadow_free = rinfo->shadow[free].req.u.rw.id;
262 	rinfo->shadow[free].req.u.rw.id = 0x0fffffee; /* debug */
263 	return free;
264 }
265 
266 static int add_id_to_freelist(struct blkfront_ring_info *rinfo,
267 			      unsigned long id)
268 {
269 	if (rinfo->shadow[id].req.u.rw.id != id)
270 		return -EINVAL;
271 	if (rinfo->shadow[id].request == NULL)
272 		return -EINVAL;
273 	rinfo->shadow[id].req.u.rw.id  = rinfo->shadow_free;
274 	rinfo->shadow[id].request = NULL;
275 	rinfo->shadow_free = id;
276 	return 0;
277 }
278 
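/*
 * Pre-allocate 'num' grant tracking entries on rinfo->grants. When
 * persistent grants are used, also allocate a backing page for each entry.
 */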
279 static int fill_grant_buffer(struct blkfront_ring_info *rinfo, int num)
280 {
281 	struct blkfront_info *info = rinfo->dev_info;
282 	struct page *granted_page;
283 	struct grant *gnt_list_entry, *n;
284 	int i = 0;
285 
286 	while (i < num) {
287 		gnt_list_entry = kzalloc(sizeof(struct grant), GFP_NOIO);
288 		if (!gnt_list_entry)
289 			goto out_of_memory;
290 
291 		if (info->feature_persistent) {
292 			granted_page = alloc_page(GFP_NOIO);
293 			if (!granted_page) {
294 				kfree(gnt_list_entry);
295 				goto out_of_memory;
296 			}
297 			gnt_list_entry->page = granted_page;
298 		}
299 
300 		gnt_list_entry->gref = GRANT_INVALID_REF;
301 		list_add(&gnt_list_entry->node, &rinfo->grants);
302 		i++;
303 	}
304 
305 	return 0;
306 
307 out_of_memory:
308 	list_for_each_entry_safe(gnt_list_entry, n,
309 	                         &rinfo->grants, node) {
310 		list_del(&gnt_list_entry->node);
311 		if (info->feature_persistent)
312 			__free_page(gnt_list_entry->page);
313 		kfree(gnt_list_entry);
314 		i--;
315 	}
316 	BUG_ON(i != 0);
317 	return -ENOMEM;
318 }
319 
320 static struct grant *get_free_grant(struct blkfront_ring_info *rinfo)
321 {
322 	struct grant *gnt_list_entry;
323 
324 	BUG_ON(list_empty(&rinfo->grants));
325 	gnt_list_entry = list_first_entry(&rinfo->grants, struct grant,
326 					  node);
327 	list_del(&gnt_list_entry->node);
328 
329 	if (gnt_list_entry->gref != GRANT_INVALID_REF)
330 		rinfo->persistent_gnts_c--;
331 
332 	return gnt_list_entry;
333 }
334 
335 static inline void grant_foreign_access(const struct grant *gnt_list_entry,
336 					const struct blkfront_info *info)
337 {
338 	gnttab_page_grant_foreign_access_ref_one(gnt_list_entry->gref,
339 						 info->xbdev->otherend_id,
340 						 gnt_list_entry->page,
341 						 0);
342 }
343 
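/*
 * Return a grant for the frame at 'gfn'. A previously granted (persistent)
 * entry is reused as-is; otherwise a new grant reference is claimed and
 * foreign access is granted to the backend.
 */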
344 static struct grant *get_grant(grant_ref_t *gref_head,
345 			       unsigned long gfn,
346 			       struct blkfront_ring_info *rinfo)
347 {
348 	struct grant *gnt_list_entry = get_free_grant(rinfo);
349 	struct blkfront_info *info = rinfo->dev_info;
350 
351 	if (gnt_list_entry->gref != GRANT_INVALID_REF)
352 		return gnt_list_entry;
353 
354 	/* Assign a gref to this page */
355 	gnt_list_entry->gref = gnttab_claim_grant_reference(gref_head);
356 	BUG_ON(gnt_list_entry->gref == -ENOSPC);
357 	if (info->feature_persistent)
358 		grant_foreign_access(gnt_list_entry, info);
359 	else {
360 		/* Grant access to the GFN passed by the caller */
361 		gnttab_grant_foreign_access_ref(gnt_list_entry->gref,
362 						info->xbdev->otherend_id,
363 						gfn, 0);
364 	}
365 
366 	return gnt_list_entry;
367 }
368 
369 static struct grant *get_indirect_grant(grant_ref_t *gref_head,
370 					struct blkfront_ring_info *rinfo)
371 {
372 	struct grant *gnt_list_entry = get_free_grant(rinfo);
373 	struct blkfront_info *info = rinfo->dev_info;
374 
375 	if (gnt_list_entry->gref != GRANT_INVALID_REF)
376 		return gnt_list_entry;
377 
378 	/* Assign a gref to this page */
379 	gnt_list_entry->gref = gnttab_claim_grant_reference(gref_head);
380 	BUG_ON(gnt_list_entry->gref == -ENOSPC);
381 	if (!info->feature_persistent) {
382 		struct page *indirect_page;
383 
384 		/* Fetch a pre-allocated page to use for indirect grefs */
385 		BUG_ON(list_empty(&rinfo->indirect_pages));
386 		indirect_page = list_first_entry(&rinfo->indirect_pages,
387 						 struct page, lru);
388 		list_del(&indirect_page->lru);
389 		gnt_list_entry->page = indirect_page;
390 	}
391 	grant_foreign_access(gnt_list_entry, info);
392 
393 	return gnt_list_entry;
394 }
395 
396 static const char *op_name(int op)
397 {
398 	static const char *const names[] = {
399 		[BLKIF_OP_READ] = "read",
400 		[BLKIF_OP_WRITE] = "write",
401 		[BLKIF_OP_WRITE_BARRIER] = "barrier",
402 		[BLKIF_OP_FLUSH_DISKCACHE] = "flush",
403 		[BLKIF_OP_DISCARD] = "discard" };
404 
405 	if (op < 0 || op >= ARRAY_SIZE(names))
406 		return "unknown";
407 
408 	if (!names[op])
409 		return "reserved";
410 
411 	return names[op];
412 }
413 static int xlbd_reserve_minors(unsigned int minor, unsigned int nr)
414 {
415 	unsigned int end = minor + nr;
416 	int rc;
417 
418 	if (end > nr_minors) {
419 		unsigned long *bitmap, *old;
420 
421 		bitmap = kcalloc(BITS_TO_LONGS(end), sizeof(*bitmap),
422 				 GFP_KERNEL);
423 		if (bitmap == NULL)
424 			return -ENOMEM;
425 
426 		spin_lock(&minor_lock);
427 		if (end > nr_minors) {
428 			old = minors;
429 			memcpy(bitmap, minors,
430 			       BITS_TO_LONGS(nr_minors) * sizeof(*bitmap));
431 			minors = bitmap;
432 			nr_minors = BITS_TO_LONGS(end) * BITS_PER_LONG;
433 		} else
434 			old = bitmap;
435 		spin_unlock(&minor_lock);
436 		kfree(old);
437 	}
438 
439 	spin_lock(&minor_lock);
440 	if (find_next_bit(minors, end, minor) >= end) {
441 		bitmap_set(minors, minor, nr);
442 		rc = 0;
443 	} else
444 		rc = -EBUSY;
445 	spin_unlock(&minor_lock);
446 
447 	return rc;
448 }
449 
450 static void xlbd_release_minors(unsigned int minor, unsigned int nr)
451 {
452 	unsigned int end = minor + nr;
453 
454 	BUG_ON(end > nr_minors);
455 	spin_lock(&minor_lock);
456 	bitmap_clear(minors,  minor, nr);
457 	spin_unlock(&minor_lock);
458 }
459 
460 static void blkif_restart_queue_callback(void *arg)
461 {
462 	struct blkfront_ring_info *rinfo = (struct blkfront_ring_info *)arg;
463 	schedule_work(&rinfo->work);
464 }
465 
466 static int blkif_getgeo(struct block_device *bd, struct hd_geometry *hg)
467 {
468 	/* We don't have real geometry info, but let's at least return
469 	   values consistent with the size of the device */
470 	sector_t nsect = get_capacity(bd->bd_disk);
471 	sector_t cylinders = nsect;
472 
473 	hg->heads = 0xff;
474 	hg->sectors = 0x3f;
475 	sector_div(cylinders, hg->heads * hg->sectors);
476 	hg->cylinders = cylinders;
477 	if ((sector_t)(hg->cylinders + 1) * hg->heads * hg->sectors < nsect)
478 		hg->cylinders = 0xffff;
479 	return 0;
480 }
481 
482 static int blkif_ioctl(struct block_device *bdev, fmode_t mode,
483 		       unsigned command, unsigned long argument)
484 {
485 	struct blkfront_info *info = bdev->bd_disk->private_data;
486 	int i;
487 
488 	dev_dbg(&info->xbdev->dev, "command: 0x%x, argument: 0x%lx\n",
489 		command, (long)argument);
490 
491 	switch (command) {
492 	case CDROMMULTISESSION:
493 		dev_dbg(&info->xbdev->dev, "FIXME: support multisession CDs later\n");
494 		for (i = 0; i < sizeof(struct cdrom_multisession); i++)
495 			if (put_user(0, (char __user *)(argument + i)))
496 				return -EFAULT;
497 		return 0;
498 
499 	case CDROM_GET_CAPABILITY: {
500 		struct gendisk *gd = info->gd;
501 		if (gd->flags & GENHD_FL_CD)
502 			return 0;
503 		return -EINVAL;
504 	}
505 
506 	default:
507 		/*printk(KERN_ALERT "ioctl %08x not supported by Xen blkdev\n",
508 		  command);*/
509 		return -EINVAL; /* same return as native Linux */
510 	}
511 
512 	return 0;
513 }
514 
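/*
 * Claim the next slot on the ring and a matching shadow entry for 'req'.
 * Returns the shadow id, which is also stored in the ring request.
 */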
515 static unsigned long blkif_ring_get_request(struct blkfront_ring_info *rinfo,
516 					    struct request *req,
517 					    struct blkif_request **ring_req)
518 {
519 	unsigned long id;
520 
521 	*ring_req = RING_GET_REQUEST(&rinfo->ring, rinfo->ring.req_prod_pvt);
522 	rinfo->ring.req_prod_pvt++;
523 
524 	id = get_id_from_freelist(rinfo);
525 	rinfo->shadow[id].request = req;
526 	rinfo->shadow[id].status = REQ_WAITING;
527 	rinfo->shadow[id].associated_id = NO_ASSOCIATED_ID;
528 
529 	(*ring_req)->u.rw.id = id;
530 
531 	return id;
532 }
533 
534 static int blkif_queue_discard_req(struct request *req, struct blkfront_ring_info *rinfo)
535 {
536 	struct blkfront_info *info = rinfo->dev_info;
537 	struct blkif_request *ring_req;
538 	unsigned long id;
539 
540 	/* Fill out a communications ring structure. */
541 	id = blkif_ring_get_request(rinfo, req, &ring_req);
542 
543 	ring_req->operation = BLKIF_OP_DISCARD;
544 	ring_req->u.discard.nr_sectors = blk_rq_sectors(req);
545 	ring_req->u.discard.id = id;
546 	ring_req->u.discard.sector_number = (blkif_sector_t)blk_rq_pos(req);
547 	if ((req->cmd_flags & REQ_SECURE) && info->feature_secdiscard)
548 		ring_req->u.discard.flag = BLKIF_DISCARD_SECURE;
549 	else
550 		ring_req->u.discard.flag = 0;
551 
552 	/* Keep a private copy so we can reissue requests when recovering. */
553 	rinfo->shadow[id].req = *ring_req;
554 
555 	return 0;
556 }
557 
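/*
 * Per-request state passed to blkif_setup_rw_req_grant() for each
 * grant-sized chunk of the scatter-gather list.
 */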
558 struct setup_rw_req {
559 	unsigned int grant_idx;
560 	struct blkif_request_segment *segments;
561 	struct blkfront_ring_info *rinfo;
562 	struct blkif_request *ring_req;
563 	grant_ref_t gref_head;
564 	unsigned int id;
565 	/* Only used when persistent grant is used and it's a read request */
566 	bool need_copy;
567 	unsigned int bvec_off;
568 	char *bvec_data;
569 
570 	bool require_extra_req;
571 	struct blkif_request *extra_ring_req;
572 };
573 
574 static void blkif_setup_rw_req_grant(unsigned long gfn, unsigned int offset,
575 				     unsigned int len, void *data)
576 {
577 	struct setup_rw_req *setup = data;
578 	int n, ref;
579 	struct grant *gnt_list_entry;
580 	unsigned int fsect, lsect;
581 	/* Convenient aliases */
582 	unsigned int grant_idx = setup->grant_idx;
583 	struct blkif_request *ring_req = setup->ring_req;
584 	struct blkfront_ring_info *rinfo = setup->rinfo;
585 	/*
586 	 * We always use the shadow of the first request to store the list
587 	 * of grants associated with the block I/O request. This makes the
588 	 * completion easier to handle even if the block I/O request is
589 	 * split.
590 	 */
591 	struct blk_shadow *shadow = &rinfo->shadow[setup->id];
592 
593 	if (unlikely(setup->require_extra_req &&
594 		     grant_idx >= BLKIF_MAX_SEGMENTS_PER_REQUEST)) {
595 		/*
596 		 * We are using the second request; set up grant_idx
597 		 * to be the index into its segment array.
598 		 */
599 		grant_idx -= BLKIF_MAX_SEGMENTS_PER_REQUEST;
600 		ring_req = setup->extra_ring_req;
601 	}
602 
603 	if ((ring_req->operation == BLKIF_OP_INDIRECT) &&
604 	    (grant_idx % GRANTS_PER_INDIRECT_FRAME == 0)) {
605 		if (setup->segments)
606 			kunmap_atomic(setup->segments);
607 
608 		n = grant_idx / GRANTS_PER_INDIRECT_FRAME;
609 		gnt_list_entry = get_indirect_grant(&setup->gref_head, rinfo);
610 		shadow->indirect_grants[n] = gnt_list_entry;
611 		setup->segments = kmap_atomic(gnt_list_entry->page);
612 		ring_req->u.indirect.indirect_grefs[n] = gnt_list_entry->gref;
613 	}
614 
615 	gnt_list_entry = get_grant(&setup->gref_head, gfn, rinfo);
616 	ref = gnt_list_entry->gref;
617 	/*
618 	 * All the grants are stored in the shadow of the first
619 	 * request. Therefore we have to use the global index.
620 	 */
621 	shadow->grants_used[setup->grant_idx] = gnt_list_entry;
622 
623 	if (setup->need_copy) {
624 		void *shared_data;
625 
626 		shared_data = kmap_atomic(gnt_list_entry->page);
627 		/*
628 		 * This does not wipe data stored outside the
629 		 * range sg->offset..sg->offset+sg->length.
630 		 * Therefore, blkback *could* see data from
631 		 * previous requests. This is OK as long as
632 		 * persistent grants are shared with just one
633 		 * domain. It may need refactoring if this
634 		 * changes.
635 		 */
636 		memcpy(shared_data + offset,
637 		       setup->bvec_data + setup->bvec_off,
638 		       len);
639 
640 		kunmap_atomic(shared_data);
641 		setup->bvec_off += len;
642 	}
643 
644 	fsect = offset >> 9;
645 	lsect = fsect + (len >> 9) - 1;
646 	if (ring_req->operation != BLKIF_OP_INDIRECT) {
647 		ring_req->u.rw.seg[grant_idx] =
648 			(struct blkif_request_segment) {
649 				.gref       = ref,
650 				.first_sect = fsect,
651 				.last_sect  = lsect };
652 	} else {
653 		setup->segments[grant_idx % GRANTS_PER_INDIRECT_FRAME] =
654 			(struct blkif_request_segment) {
655 				.gref       = ref,
656 				.first_sect = fsect,
657 				.last_sect  = lsect };
658 	}
659 
660 	(setup->grant_idx)++;
661 }
662 
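/*
 * When a request needs more segments than fit in a single ring request
 * (see HAS_EXTRA_REQ), it is split in two: the second ring request
 * continues where the first one left off.
 */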
663 static void blkif_setup_extra_req(struct blkif_request *first,
664 				  struct blkif_request *second)
665 {
666 	uint16_t nr_segments = first->u.rw.nr_segments;
667 
668 	/*
669 	 * The second request is only present when the first request uses
670 	 * all its segments. It's always the continuation of the first one.
671 	 */
672 	first->u.rw.nr_segments = BLKIF_MAX_SEGMENTS_PER_REQUEST;
673 
674 	second->u.rw.nr_segments = nr_segments - BLKIF_MAX_SEGMENTS_PER_REQUEST;
675 	second->u.rw.sector_number = first->u.rw.sector_number +
676 		(BLKIF_MAX_SEGMENTS_PER_REQUEST * XEN_PAGE_SIZE) / 512;
677 
678 	second->u.rw.handle = first->u.rw.handle;
679 	second->operation = first->operation;
680 }
681 
682 static int blkif_queue_rw_req(struct request *req, struct blkfront_ring_info *rinfo)
683 {
684 	struct blkfront_info *info = rinfo->dev_info;
685 	struct blkif_request *ring_req, *extra_ring_req = NULL;
686 	unsigned long id, extra_id = NO_ASSOCIATED_ID;
687 	bool require_extra_req = false;
688 	int i;
689 	struct setup_rw_req setup = {
690 		.grant_idx = 0,
691 		.segments = NULL,
692 		.rinfo = rinfo,
693 		.need_copy = rq_data_dir(req) && info->feature_persistent,
694 	};
695 
696 	/*
697 	 * Used to store if we are able to queue the request by just using
698 	 * existing persistent grants, or if we have to get new grants,
699 	 * as there are not sufficiently many free.
700 	 */
701 	struct scatterlist *sg;
702 	int num_sg, max_grefs, num_grant;
703 
704 	max_grefs = req->nr_phys_segments * GRANTS_PER_PSEG;
705 	if (max_grefs > BLKIF_MAX_SEGMENTS_PER_REQUEST)
706 		/*
707 		 * If we are using indirect segments we need to account
708 		 * for the indirect grefs used in the request.
709 		 */
710 		max_grefs += INDIRECT_GREFS(max_grefs);
711 
712 	/*
713 	 * We have to reserve 'max_grefs' grants because persistent
714 	 * grants are shared by all rings.
715 	 */
716 	if (max_grefs > 0)
717 		if (gnttab_alloc_grant_references(max_grefs, &setup.gref_head) < 0) {
718 			gnttab_request_free_callback(
719 				&rinfo->callback,
720 				blkif_restart_queue_callback,
721 				rinfo,
722 				max_grefs);
723 			return 1;
724 		}
725 
726 	/* Fill out a communications ring structure. */
727 	id = blkif_ring_get_request(rinfo, req, &ring_req);
728 
729 	num_sg = blk_rq_map_sg(req->q, req, rinfo->shadow[id].sg);
730 	num_grant = 0;
731 	/* Calculate the number of grants used */
732 	for_each_sg(rinfo->shadow[id].sg, sg, num_sg, i)
733 	       num_grant += gnttab_count_grant(sg->offset, sg->length);
734 
735 	require_extra_req = info->max_indirect_segments == 0 &&
736 		num_grant > BLKIF_MAX_SEGMENTS_PER_REQUEST;
737 	BUG_ON(!HAS_EXTRA_REQ && require_extra_req);
738 
739 	rinfo->shadow[id].num_sg = num_sg;
740 	if (num_grant > BLKIF_MAX_SEGMENTS_PER_REQUEST &&
741 	    likely(!require_extra_req)) {
742 		/*
743 		 * The indirect operation can only be a BLKIF_OP_READ or
744 		 * BLKIF_OP_WRITE
745 		 */
746 		BUG_ON(req->cmd_flags & (REQ_FLUSH | REQ_FUA));
747 		ring_req->operation = BLKIF_OP_INDIRECT;
748 		ring_req->u.indirect.indirect_op = rq_data_dir(req) ?
749 			BLKIF_OP_WRITE : BLKIF_OP_READ;
750 		ring_req->u.indirect.sector_number = (blkif_sector_t)blk_rq_pos(req);
751 		ring_req->u.indirect.handle = info->handle;
752 		ring_req->u.indirect.nr_segments = num_grant;
753 	} else {
754 		ring_req->u.rw.sector_number = (blkif_sector_t)blk_rq_pos(req);
755 		ring_req->u.rw.handle = info->handle;
756 		ring_req->operation = rq_data_dir(req) ?
757 			BLKIF_OP_WRITE : BLKIF_OP_READ;
758 		if (req->cmd_flags & (REQ_FLUSH | REQ_FUA)) {
759 			/*
760 			 * Ideally we can do an unordered flush-to-disk.
761 			 * In case the backend only supports barriers, use that.
762 			 * A barrier request is a superset of FUA, so we can
763 			 * implement it the same way.  (It's also a FLUSH+FUA,
764 			 * since it is guaranteed ordered WRT previous writes.)
765 			 */
766 			switch (info->feature_flush &
767 				((REQ_FLUSH|REQ_FUA))) {
768 			case REQ_FLUSH|REQ_FUA:
769 				ring_req->operation =
770 					BLKIF_OP_WRITE_BARRIER;
771 				break;
772 			case REQ_FLUSH:
773 				ring_req->operation =
774 					BLKIF_OP_FLUSH_DISKCACHE;
775 				break;
776 			default:
777 				ring_req->operation = 0;
778 			}
779 		}
780 		ring_req->u.rw.nr_segments = num_grant;
781 		if (unlikely(require_extra_req)) {
782 			extra_id = blkif_ring_get_request(rinfo, req,
783 							  &extra_ring_req);
784 			/*
785 			 * Only the first request contains the scatter-gather
786 			 * list.
787 			 */
788 			rinfo->shadow[extra_id].num_sg = 0;
789 
790 			blkif_setup_extra_req(ring_req, extra_ring_req);
791 
792 			/* Link the 2 requests together */
793 			rinfo->shadow[extra_id].associated_id = id;
794 			rinfo->shadow[id].associated_id = extra_id;
795 		}
796 	}
797 
798 	setup.ring_req = ring_req;
799 	setup.id = id;
800 
801 	setup.require_extra_req = require_extra_req;
802 	if (unlikely(require_extra_req))
803 		setup.extra_ring_req = extra_ring_req;
804 
805 	for_each_sg(rinfo->shadow[id].sg, sg, num_sg, i) {
806 		BUG_ON(sg->offset + sg->length > PAGE_SIZE);
807 
808 		if (setup.need_copy) {
809 			setup.bvec_off = sg->offset;
810 			setup.bvec_data = kmap_atomic(sg_page(sg));
811 		}
812 
813 		gnttab_foreach_grant_in_range(sg_page(sg),
814 					      sg->offset,
815 					      sg->length,
816 					      blkif_setup_rw_req_grant,
817 					      &setup);
818 
819 		if (setup.need_copy)
820 			kunmap_atomic(setup.bvec_data);
821 	}
822 	if (setup.segments)
823 		kunmap_atomic(setup.segments);
824 
825 	/* Keep a private copy so we can reissue requests when recovering. */
826 	rinfo->shadow[id].req = *ring_req;
827 	if (unlikely(require_extra_req))
828 		rinfo->shadow[extra_id].req = *extra_ring_req;
829 
830 	if (max_grefs > 0)
831 		gnttab_free_grant_references(setup.gref_head);
832 
833 	return 0;
834 }
835 
836 /*
837  * Generate a Xen blkfront IO request from a blk layer request.  Reads
838  * and writes are handled as expected.
839  *
840  * @req: a request struct
841  */
842 static int blkif_queue_request(struct request *req, struct blkfront_ring_info *rinfo)
843 {
844 	if (unlikely(rinfo->dev_info->connected != BLKIF_STATE_CONNECTED))
845 		return 1;
846 
847 	if (unlikely(req->cmd_flags & (REQ_DISCARD | REQ_SECURE)))
848 		return blkif_queue_discard_req(req, rinfo);
849 	else
850 		return blkif_queue_rw_req(req, rinfo);
851 }
852 
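/* Push queued requests to the backend and notify it if required. */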
853 static inline void flush_requests(struct blkfront_ring_info *rinfo)
854 {
855 	int notify;
856 
857 	RING_PUSH_REQUESTS_AND_CHECK_NOTIFY(&rinfo->ring, notify);
858 
859 	if (notify)
860 		notify_remote_via_irq(rinfo->irq);
861 }
862 
863 static inline bool blkif_request_flush_invalid(struct request *req,
864 					       struct blkfront_info *info)
865 {
866 	return ((req->cmd_type != REQ_TYPE_FS) ||
867 		((req->cmd_flags & REQ_FLUSH) &&
868 		 !(info->feature_flush & REQ_FLUSH)) ||
869 		((req->cmd_flags & REQ_FUA) &&
870 		 !(info->feature_flush & REQ_FUA)));
871 }
872 
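/* blk-mq ->queue_rq callback: queue one request on this hardware queue's ring. */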
873 static int blkif_queue_rq(struct blk_mq_hw_ctx *hctx,
874 			  const struct blk_mq_queue_data *qd)
875 {
876 	unsigned long flags;
877 	struct blkfront_ring_info *rinfo = (struct blkfront_ring_info *)hctx->driver_data;
878 
879 	blk_mq_start_request(qd->rq);
880 	spin_lock_irqsave(&rinfo->ring_lock, flags);
881 	if (RING_FULL(&rinfo->ring))
882 		goto out_busy;
883 
884 	if (blkif_request_flush_invalid(qd->rq, rinfo->dev_info))
885 		goto out_err;
886 
887 	if (blkif_queue_request(qd->rq, rinfo))
888 		goto out_busy;
889 
890 	flush_requests(rinfo);
891 	spin_unlock_irqrestore(&rinfo->ring_lock, flags);
892 	return BLK_MQ_RQ_QUEUE_OK;
893 
894 out_err:
895 	spin_unlock_irqrestore(&rinfo->ring_lock, flags);
896 	return BLK_MQ_RQ_QUEUE_ERROR;
897 
898 out_busy:
899 	spin_unlock_irqrestore(&rinfo->ring_lock, flags);
900 	blk_mq_stop_hw_queue(hctx);
901 	return BLK_MQ_RQ_QUEUE_BUSY;
902 }
903 
904 static int blk_mq_init_hctx(struct blk_mq_hw_ctx *hctx, void *data,
905 			    unsigned int index)
906 {
907 	struct blkfront_info *info = (struct blkfront_info *)data;
908 
909 	BUG_ON(info->nr_rings <= index);
910 	hctx->driver_data = &info->rinfo[index];
911 	return 0;
912 }
913 
914 static struct blk_mq_ops blkfront_mq_ops = {
915 	.queue_rq = blkif_queue_rq,
916 	.map_queue = blk_mq_map_queue,
917 	.init_hctx = blk_mq_init_hctx,
918 };
919 
920 static int xlvbd_init_blk_queue(struct gendisk *gd, u16 sector_size,
921 				unsigned int physical_sector_size,
922 				unsigned int segments)
923 {
924 	struct request_queue *rq;
925 	struct blkfront_info *info = gd->private_data;
926 
927 	memset(&info->tag_set, 0, sizeof(info->tag_set));
928 	info->tag_set.ops = &blkfront_mq_ops;
929 	info->tag_set.nr_hw_queues = info->nr_rings;
930 	if (HAS_EXTRA_REQ && info->max_indirect_segments == 0) {
931 		/*
932 		 * When indirect descriptors are not supported, the I/O request
933 		 * will be split between multiple requests in the ring.
934 		 * To avoid problems when sending the requests, divide the
935 		 * depth of the queue by 2.
936 		 */
937 		info->tag_set.queue_depth =  BLK_RING_SIZE(info) / 2;
938 	} else
939 		info->tag_set.queue_depth = BLK_RING_SIZE(info);
940 	info->tag_set.numa_node = NUMA_NO_NODE;
941 	info->tag_set.flags = BLK_MQ_F_SHOULD_MERGE | BLK_MQ_F_SG_MERGE;
942 	info->tag_set.cmd_size = 0;
943 	info->tag_set.driver_data = info;
944 
945 	if (blk_mq_alloc_tag_set(&info->tag_set))
946 		return -EINVAL;
947 	rq = blk_mq_init_queue(&info->tag_set);
948 	if (IS_ERR(rq)) {
949 		blk_mq_free_tag_set(&info->tag_set);
950 		return PTR_ERR(rq);
951 	}
952 
953 	queue_flag_set_unlocked(QUEUE_FLAG_VIRT, rq);
954 
955 	if (info->feature_discard) {
956 		queue_flag_set_unlocked(QUEUE_FLAG_DISCARD, rq);
957 		blk_queue_max_discard_sectors(rq, get_capacity(gd));
958 		rq->limits.discard_granularity = info->discard_granularity;
959 		rq->limits.discard_alignment = info->discard_alignment;
960 		if (info->feature_secdiscard)
961 			queue_flag_set_unlocked(QUEUE_FLAG_SECDISCARD, rq);
962 	}
963 
964 	/* Hard sector size and max sectors impersonate the equiv. hardware. */
965 	blk_queue_logical_block_size(rq, sector_size);
966 	blk_queue_physical_block_size(rq, physical_sector_size);
967 	blk_queue_max_hw_sectors(rq, (segments * XEN_PAGE_SIZE) / 512);
968 
969 	/* Each segment in a request is up to an aligned page in size. */
970 	blk_queue_segment_boundary(rq, PAGE_SIZE - 1);
971 	blk_queue_max_segment_size(rq, PAGE_SIZE);
972 
973 	/* Ensure a merged request will fit in a single I/O ring slot. */
974 	blk_queue_max_segments(rq, segments / GRANTS_PER_PSEG);
975 
976 	/* Make sure buffer addresses are sector-aligned. */
977 	blk_queue_dma_alignment(rq, 511);
978 
979 	/* Make sure we don't use bounce buffers. */
980 	blk_queue_bounce_limit(rq, BLK_BOUNCE_ANY);
981 
982 	gd->queue = rq;
983 
984 	return 0;
985 }
986 
987 static const char *flush_info(unsigned int feature_flush)
988 {
989 	switch (feature_flush & ((REQ_FLUSH | REQ_FUA))) {
990 	case REQ_FLUSH|REQ_FUA:
991 		return "barrier: enabled;";
992 	case REQ_FLUSH:
993 		return "flush diskcache: enabled;";
994 	default:
995 		return "barrier or flush: disabled;";
996 	}
997 }
998 
999 static void xlvbd_flush(struct blkfront_info *info)
1000 {
1001 	blk_queue_write_cache(info->rq, info->feature_flush & REQ_FLUSH,
1002 				info->feature_flush & REQ_FUA);
1003 	pr_info("blkfront: %s: %s %s %s %s %s\n",
1004 		info->gd->disk_name, flush_info(info->feature_flush),
1005 		"persistent grants:", info->feature_persistent ?
1006 		"enabled;" : "disabled;", "indirect descriptors:",
1007 		info->max_indirect_segments ? "enabled;" : "disabled;");
1008 }
1009 
1010 static int xen_translate_vdev(int vdevice, int *minor, unsigned int *offset)
1011 {
1012 	int major;
1013 	major = BLKIF_MAJOR(vdevice);
1014 	*minor = BLKIF_MINOR(vdevice);
1015 	switch (major) {
1016 		case XEN_IDE0_MAJOR:
1017 			*offset = (*minor / 64) + EMULATED_HD_DISK_NAME_OFFSET;
1018 			*minor = ((*minor / 64) * PARTS_PER_DISK) +
1019 				EMULATED_HD_DISK_MINOR_OFFSET;
1020 			break;
1021 		case XEN_IDE1_MAJOR:
1022 			*offset = (*minor / 64) + 2 + EMULATED_HD_DISK_NAME_OFFSET;
1023 			*minor = (((*minor / 64) + 2) * PARTS_PER_DISK) +
1024 				EMULATED_HD_DISK_MINOR_OFFSET;
1025 			break;
1026 		case XEN_SCSI_DISK0_MAJOR:
1027 			*offset = (*minor / PARTS_PER_DISK) + EMULATED_SD_DISK_NAME_OFFSET;
1028 			*minor = *minor + EMULATED_SD_DISK_MINOR_OFFSET;
1029 			break;
1030 		case XEN_SCSI_DISK1_MAJOR:
1031 		case XEN_SCSI_DISK2_MAJOR:
1032 		case XEN_SCSI_DISK3_MAJOR:
1033 		case XEN_SCSI_DISK4_MAJOR:
1034 		case XEN_SCSI_DISK5_MAJOR:
1035 		case XEN_SCSI_DISK6_MAJOR:
1036 		case XEN_SCSI_DISK7_MAJOR:
1037 			*offset = (*minor / PARTS_PER_DISK) +
1038 				((major - XEN_SCSI_DISK1_MAJOR + 1) * 16) +
1039 				EMULATED_SD_DISK_NAME_OFFSET;
1040 			*minor = *minor +
1041 				((major - XEN_SCSI_DISK1_MAJOR + 1) * 16 * PARTS_PER_DISK) +
1042 				EMULATED_SD_DISK_MINOR_OFFSET;
1043 			break;
1044 		case XEN_SCSI_DISK8_MAJOR:
1045 		case XEN_SCSI_DISK9_MAJOR:
1046 		case XEN_SCSI_DISK10_MAJOR:
1047 		case XEN_SCSI_DISK11_MAJOR:
1048 		case XEN_SCSI_DISK12_MAJOR:
1049 		case XEN_SCSI_DISK13_MAJOR:
1050 		case XEN_SCSI_DISK14_MAJOR:
1051 		case XEN_SCSI_DISK15_MAJOR:
1052 			*offset = (*minor / PARTS_PER_DISK) +
1053 				((major - XEN_SCSI_DISK8_MAJOR + 8) * 16) +
1054 				EMULATED_SD_DISK_NAME_OFFSET;
1055 			*minor = *minor +
1056 				((major - XEN_SCSI_DISK8_MAJOR + 8) * 16 * PARTS_PER_DISK) +
1057 				EMULATED_SD_DISK_MINOR_OFFSET;
1058 			break;
1059 		case XENVBD_MAJOR:
1060 			*offset = *minor / PARTS_PER_DISK;
1061 			break;
1062 		default:
1063 			printk(KERN_WARNING "blkfront: your disk configuration is "
1064 					"incorrect, please use an xvd device instead\n");
1065 			return -ENODEV;
1066 	}
1067 	return 0;
1068 }
1069 
1070 static char *encode_disk_name(char *ptr, unsigned int n)
1071 {
1072 	if (n >= 26)
1073 		ptr = encode_disk_name(ptr, n / 26 - 1);
1074 	*ptr = 'a' + n % 26;
1075 	return ptr + 1;
1076 }
1077 
1078 static int xlvbd_alloc_gendisk(blkif_sector_t capacity,
1079 			       struct blkfront_info *info,
1080 			       u16 vdisk_info, u16 sector_size,
1081 			       unsigned int physical_sector_size)
1082 {
1083 	struct gendisk *gd;
1084 	int nr_minors = 1;
1085 	int err;
1086 	unsigned int offset;
1087 	int minor;
1088 	int nr_parts;
1089 	char *ptr;
1090 
1091 	BUG_ON(info->gd != NULL);
1092 	BUG_ON(info->rq != NULL);
1093 
1094 	if ((info->vdevice>>EXT_SHIFT) > 1) {
1095 		/* this is above the extended range; something is wrong */
1096 		printk(KERN_WARNING "blkfront: vdevice 0x%x is above the extended range; ignoring\n", info->vdevice);
1097 		return -ENODEV;
1098 	}
1099 
1100 	if (!VDEV_IS_EXTENDED(info->vdevice)) {
1101 		err = xen_translate_vdev(info->vdevice, &minor, &offset);
1102 		if (err)
1103 			return err;
1104 		nr_parts = PARTS_PER_DISK;
1105 	} else {
1106 		minor = BLKIF_MINOR_EXT(info->vdevice);
1107 		nr_parts = PARTS_PER_EXT_DISK;
1108 		offset = minor / nr_parts;
1109 		if (xen_hvm_domain() && offset < EMULATED_HD_DISK_NAME_OFFSET + 4)
1110 			printk(KERN_WARNING "blkfront: vdevice 0x%x might conflict with "
1111 					"emulated IDE disks,\n\t choose an xvd device name "
1112 					"from xvde on\n", info->vdevice);
1113 	}
1114 	if (minor >> MINORBITS) {
1115 		pr_warn("blkfront: %#x's minor (%#x) out of range; ignoring\n",
1116 			info->vdevice, minor);
1117 		return -ENODEV;
1118 	}
1119 
1120 	if ((minor % nr_parts) == 0)
1121 		nr_minors = nr_parts;
1122 
1123 	err = xlbd_reserve_minors(minor, nr_minors);
1124 	if (err)
1125 		goto out;
1126 	err = -ENODEV;
1127 
1128 	gd = alloc_disk(nr_minors);
1129 	if (gd == NULL)
1130 		goto release;
1131 
1132 	strcpy(gd->disk_name, DEV_NAME);
1133 	ptr = encode_disk_name(gd->disk_name + sizeof(DEV_NAME) - 1, offset);
1134 	BUG_ON(ptr >= gd->disk_name + DISK_NAME_LEN);
1135 	if (nr_minors > 1)
1136 		*ptr = 0;
1137 	else
1138 		snprintf(ptr, gd->disk_name + DISK_NAME_LEN - ptr,
1139 			 "%d", minor & (nr_parts - 1));
1140 
1141 	gd->major = XENVBD_MAJOR;
1142 	gd->first_minor = minor;
1143 	gd->fops = &xlvbd_block_fops;
1144 	gd->private_data = info;
1145 	gd->driverfs_dev = &(info->xbdev->dev);
1146 	set_capacity(gd, capacity);
1147 
1148 	if (xlvbd_init_blk_queue(gd, sector_size, physical_sector_size,
1149 				 info->max_indirect_segments ? :
1150 				 BLKIF_MAX_SEGMENTS_PER_REQUEST)) {
1151 		del_gendisk(gd);
1152 		goto release;
1153 	}
1154 
1155 	info->rq = gd->queue;
1156 	info->gd = gd;
1157 
1158 	xlvbd_flush(info);
1159 
1160 	if (vdisk_info & VDISK_READONLY)
1161 		set_disk_ro(gd, 1);
1162 
1163 	if (vdisk_info & VDISK_REMOVABLE)
1164 		gd->flags |= GENHD_FL_REMOVABLE;
1165 
1166 	if (vdisk_info & VDISK_CDROM)
1167 		gd->flags |= GENHD_FL_CD;
1168 
1169 	return 0;
1170 
1171  release:
1172 	xlbd_release_minors(minor, nr_minors);
1173  out:
1174 	return err;
1175 }
1176 
1177 static void xlvbd_release_gendisk(struct blkfront_info *info)
1178 {
1179 	unsigned int minor, nr_minors, i;
1180 
1181 	if (info->rq == NULL)
1182 		return;
1183 
1184 	/* No more blkif_request(). */
1185 	blk_mq_stop_hw_queues(info->rq);
1186 
1187 	for (i = 0; i < info->nr_rings; i++) {
1188 		struct blkfront_ring_info *rinfo = &info->rinfo[i];
1189 
1190 		/* No more gnttab callback work. */
1191 		gnttab_cancel_free_callback(&rinfo->callback);
1192 
1193 		/* Flush gnttab callback work. Must be done with no locks held. */
1194 		flush_work(&rinfo->work);
1195 	}
1196 
1197 	del_gendisk(info->gd);
1198 
1199 	minor = info->gd->first_minor;
1200 	nr_minors = info->gd->minors;
1201 	xlbd_release_minors(minor, nr_minors);
1202 
1203 	blk_cleanup_queue(info->rq);
1204 	blk_mq_free_tag_set(&info->tag_set);
1205 	info->rq = NULL;
1206 
1207 	put_disk(info->gd);
1208 	info->gd = NULL;
1209 }
1210 
1211 /* Already hold rinfo->ring_lock. */
1212 static inline void kick_pending_request_queues_locked(struct blkfront_ring_info *rinfo)
1213 {
1214 	if (!RING_FULL(&rinfo->ring))
1215 		blk_mq_start_stopped_hw_queues(rinfo->dev_info->rq, true);
1216 }
1217 
1218 static void kick_pending_request_queues(struct blkfront_ring_info *rinfo)
1219 {
1220 	unsigned long flags;
1221 
1222 	spin_lock_irqsave(&rinfo->ring_lock, flags);
1223 	kick_pending_request_queues_locked(rinfo);
1224 	spin_unlock_irqrestore(&rinfo->ring_lock, flags);
1225 }
1226 
1227 static void blkif_restart_queue(struct work_struct *work)
1228 {
1229 	struct blkfront_ring_info *rinfo = container_of(work, struct blkfront_ring_info, work);
1230 
1231 	if (rinfo->dev_info->connected == BLKIF_STATE_CONNECTED)
1232 		kick_pending_request_queues(rinfo);
1233 }
1234 
1235 static void blkif_free_ring(struct blkfront_ring_info *rinfo)
1236 {
1237 	struct grant *persistent_gnt, *n;
1238 	struct blkfront_info *info = rinfo->dev_info;
1239 	int i, j, segs;
1240 
1241 	/*
1242 	 * Remove indirect pages; this only happens when using indirect
1243 	 * descriptors but not persistent grants.
1244 	 */
1245 	if (!list_empty(&rinfo->indirect_pages)) {
1246 		struct page *indirect_page, *n;
1247 
1248 		BUG_ON(info->feature_persistent);
1249 		list_for_each_entry_safe(indirect_page, n, &rinfo->indirect_pages, lru) {
1250 			list_del(&indirect_page->lru);
1251 			__free_page(indirect_page);
1252 		}
1253 	}
1254 
1255 	/* Remove all persistent grants. */
1256 	if (!list_empty(&rinfo->grants)) {
1257 		list_for_each_entry_safe(persistent_gnt, n,
1258 					 &rinfo->grants, node) {
1259 			list_del(&persistent_gnt->node);
1260 			if (persistent_gnt->gref != GRANT_INVALID_REF) {
1261 				gnttab_end_foreign_access(persistent_gnt->gref,
1262 							  0, 0UL);
1263 				rinfo->persistent_gnts_c--;
1264 			}
1265 			if (info->feature_persistent)
1266 				__free_page(persistent_gnt->page);
1267 			kfree(persistent_gnt);
1268 		}
1269 	}
1270 	BUG_ON(rinfo->persistent_gnts_c != 0);
1271 
1272 	for (i = 0; i < BLK_RING_SIZE(info); i++) {
1273 		/*
1274 		 * Clear persistent grants present in requests already
1275 		 * on the shared ring
1276 		 */
1277 		if (!rinfo->shadow[i].request)
1278 			goto free_shadow;
1279 
1280 		segs = rinfo->shadow[i].req.operation == BLKIF_OP_INDIRECT ?
1281 		       rinfo->shadow[i].req.u.indirect.nr_segments :
1282 		       rinfo->shadow[i].req.u.rw.nr_segments;
1283 		for (j = 0; j < segs; j++) {
1284 			persistent_gnt = rinfo->shadow[i].grants_used[j];
1285 			gnttab_end_foreign_access(persistent_gnt->gref, 0, 0UL);
1286 			if (info->feature_persistent)
1287 				__free_page(persistent_gnt->page);
1288 			kfree(persistent_gnt);
1289 		}
1290 
1291 		if (rinfo->shadow[i].req.operation != BLKIF_OP_INDIRECT)
1292 			/*
1293 			 * If this is not an indirect operation, don't try to
1294 			 * free indirect segments.
1295 			 */
1296 			goto free_shadow;
1297 
1298 		for (j = 0; j < INDIRECT_GREFS(segs); j++) {
1299 			persistent_gnt = rinfo->shadow[i].indirect_grants[j];
1300 			gnttab_end_foreign_access(persistent_gnt->gref, 0, 0UL);
1301 			__free_page(persistent_gnt->page);
1302 			kfree(persistent_gnt);
1303 		}
1304 
1305 free_shadow:
1306 		kfree(rinfo->shadow[i].grants_used);
1307 		rinfo->shadow[i].grants_used = NULL;
1308 		kfree(rinfo->shadow[i].indirect_grants);
1309 		rinfo->shadow[i].indirect_grants = NULL;
1310 		kfree(rinfo->shadow[i].sg);
1311 		rinfo->shadow[i].sg = NULL;
1312 	}
1313 
1314 	/* No more gnttab callback work. */
1315 	gnttab_cancel_free_callback(&rinfo->callback);
1316 
1317 	/* Flush gnttab callback work. Must be done with no locks held. */
1318 	flush_work(&rinfo->work);
1319 
1320 	/* Free resources associated with old device channel. */
1321 	for (i = 0; i < info->nr_ring_pages; i++) {
1322 		if (rinfo->ring_ref[i] != GRANT_INVALID_REF) {
1323 			gnttab_end_foreign_access(rinfo->ring_ref[i], 0, 0);
1324 			rinfo->ring_ref[i] = GRANT_INVALID_REF;
1325 		}
1326 	}
1327 	free_pages((unsigned long)rinfo->ring.sring, get_order(info->nr_ring_pages * PAGE_SIZE));
1328 	rinfo->ring.sring = NULL;
1329 
1330 	if (rinfo->irq)
1331 		unbind_from_irqhandler(rinfo->irq, rinfo);
1332 	rinfo->evtchn = rinfo->irq = 0;
1333 }
1334 
1335 static void blkif_free(struct blkfront_info *info, int suspend)
1336 {
1337 	unsigned int i;
1338 
1339 	/* Prevent new requests being issued until we fix things up. */
1340 	info->connected = suspend ?
1341 		BLKIF_STATE_SUSPENDED : BLKIF_STATE_DISCONNECTED;
1342 	/* No more blkif_request(). */
1343 	if (info->rq)
1344 		blk_mq_stop_hw_queues(info->rq);
1345 
1346 	for (i = 0; i < info->nr_rings; i++)
1347 		blkif_free_ring(&info->rinfo[i]);
1348 
1349 	kfree(info->rinfo);
1350 	info->rinfo = NULL;
1351 	info->nr_rings = 0;
1352 }
1353 
1354 struct copy_from_grant {
1355 	const struct blk_shadow *s;
1356 	unsigned int grant_idx;
1357 	unsigned int bvec_offset;
1358 	char *bvec_data;
1359 };
1360 
1361 static void blkif_copy_from_grant(unsigned long gfn, unsigned int offset,
1362 				  unsigned int len, void *data)
1363 {
1364 	struct copy_from_grant *info = data;
1365 	char *shared_data;
1366 	/* Convenient aliases */
1367 	const struct blk_shadow *s = info->s;
1368 
1369 	shared_data = kmap_atomic(s->grants_used[info->grant_idx]->page);
1370 
1371 	memcpy(info->bvec_data + info->bvec_offset,
1372 	       shared_data + offset, len);
1373 
1374 	info->bvec_offset += len;
1375 	info->grant_idx++;
1376 
1377 	kunmap_atomic(shared_data);
1378 }
1379 
1380 static enum blk_req_status blkif_rsp_to_req_status(int rsp)
1381 {
1382 	switch (rsp)
1383 	{
1384 	case BLKIF_RSP_OKAY:
1385 		return REQ_DONE;
1386 	case BLKIF_RSP_EOPNOTSUPP:
1387 		return REQ_EOPNOTSUPP;
1388 	case BLKIF_RSP_ERROR:
1389 		/* Fallthrough. */
1390 	default:
1391 		return REQ_ERROR;
1392 	}
1393 }
1394 
1395 /*
1396  * Get the final status of the block request based on two ring responses.
1397  */
1398 static int blkif_get_final_status(enum blk_req_status s1,
1399 				  enum blk_req_status s2)
1400 {
1401 	BUG_ON(s1 == REQ_WAITING);
1402 	BUG_ON(s2 == REQ_WAITING);
1403 
1404 	if (s1 == REQ_ERROR || s2 == REQ_ERROR)
1405 		return BLKIF_RSP_ERROR;
1406 	else if (s1 == REQ_EOPNOTSUPP || s2 == REQ_EOPNOTSUPP)
1407 		return BLKIF_RSP_EOPNOTSUPP;
1408 	return BLKIF_RSP_OKAY;
1409 }
1410 
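/*
 * Handle the response(s) for one shadow entry: deal with split requests,
 * copy read data out of persistent grants and recycle the grants used.
 * Returns 0 while still waiting for the second half of a split request.
 */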
1411 static bool blkif_completion(unsigned long *id,
1412 			     struct blkfront_ring_info *rinfo,
1413 			     struct blkif_response *bret)
1414 {
1415 	int i = 0;
1416 	struct scatterlist *sg;
1417 	int num_sg, num_grant;
1418 	struct blkfront_info *info = rinfo->dev_info;
1419 	struct blk_shadow *s = &rinfo->shadow[*id];
1420 	struct copy_from_grant data = {
1421 		.grant_idx = 0,
1422 	};
1423 
1424 	num_grant = s->req.operation == BLKIF_OP_INDIRECT ?
1425 		s->req.u.indirect.nr_segments : s->req.u.rw.nr_segments;
1426 
1427 	/* The I/O request may be split in two. */
1428 	if (unlikely(s->associated_id != NO_ASSOCIATED_ID)) {
1429 		struct blk_shadow *s2 = &rinfo->shadow[s->associated_id];
1430 
1431 		/* Keep the status of the current response in shadow. */
1432 		s->status = blkif_rsp_to_req_status(bret->status);
1433 
1434 		/* Wait for the second response if it is not here yet. */
1435 		if (s2->status == REQ_WAITING)
1436 			return 0;
1437 
1438 		bret->status = blkif_get_final_status(s->status,
1439 						      s2->status);
1440 
1441 		/*
1442 		 * All the grants are stored in the first shadow in order
1443 		 * to make the completion code simpler.
1444 		 */
1445 		num_grant += s2->req.u.rw.nr_segments;
1446 
1447 		/*
1448 		 * The two responses may not come in order. Only the
1449 		 * first request will store the scatter-gather list.
1450 		 */
1451 		if (s2->num_sg != 0) {
1452 			/* Update "id" with the ID of the first response. */
1453 			*id = s->associated_id;
1454 			s = s2;
1455 		}
1456 
1457 		/*
1458 		 * We don't need the second request anymore, so recycle
1459 		 * it now.
1460 		 */
1461 		if (add_id_to_freelist(rinfo, s->associated_id))
1462 			WARN(1, "%s: can't recycle the second part (id = %ld) of the request\n",
1463 			     info->gd->disk_name, s->associated_id);
1464 	}
1465 
1466 	data.s = s;
1467 	num_sg = s->num_sg;
1468 
1469 	if (bret->operation == BLKIF_OP_READ && info->feature_persistent) {
1470 		for_each_sg(s->sg, sg, num_sg, i) {
1471 			BUG_ON(sg->offset + sg->length > PAGE_SIZE);
1472 
1473 			data.bvec_offset = sg->offset;
1474 			data.bvec_data = kmap_atomic(sg_page(sg));
1475 
1476 			gnttab_foreach_grant_in_range(sg_page(sg),
1477 						      sg->offset,
1478 						      sg->length,
1479 						      blkif_copy_from_grant,
1480 						      &data);
1481 
1482 			kunmap_atomic(data.bvec_data);
1483 		}
1484 	}
1485 	/* Add the persistent grant into the list of free grants */
1486 	for (i = 0; i < num_grant; i++) {
1487 		if (gnttab_query_foreign_access(s->grants_used[i]->gref)) {
1488 			/*
1489 			 * If the grant is still mapped by the backend (the
1490 			 * backend has chosen to make this grant persistent)
1491 			 * we add it at the head of the list, so it will be
1492 			 * reused first.
1493 			 */
1494 			if (!info->feature_persistent)
1495 				pr_alert_ratelimited("backend has not unmapped grant: %u\n",
1496 						     s->grants_used[i]->gref);
1497 			list_add(&s->grants_used[i]->node, &rinfo->grants);
1498 			rinfo->persistent_gnts_c++;
1499 		} else {
1500 			/*
1501 			 * If the grant is not mapped by the backend we end the
1502 			 * foreign access and add it to the tail of the list,
1503 			 * so it will not be picked again unless we run out of
1504 			 * persistent grants.
1505 			 */
1506 			gnttab_end_foreign_access(s->grants_used[i]->gref, 0, 0UL);
1507 			s->grants_used[i]->gref = GRANT_INVALID_REF;
1508 			list_add_tail(&s->grants_used[i]->node, &rinfo->grants);
1509 		}
1510 	}
1511 	if (s->req.operation == BLKIF_OP_INDIRECT) {
1512 		for (i = 0; i < INDIRECT_GREFS(num_grant); i++) {
1513 			if (gnttab_query_foreign_access(s->indirect_grants[i]->gref)) {
1514 				if (!info->feature_persistent)
1515 					pr_alert_ratelimited("backend has not unmapped grant: %u\n",
1516 							     s->indirect_grants[i]->gref);
1517 				list_add(&s->indirect_grants[i]->node, &rinfo->grants);
1518 				rinfo->persistent_gnts_c++;
1519 			} else {
1520 				struct page *indirect_page;
1521 
1522 				gnttab_end_foreign_access(s->indirect_grants[i]->gref, 0, 0UL);
1523 				/*
1524 				 * Add the used indirect page back to the list of
1525 				 * available pages for indirect grefs.
1526 				 */
1527 				if (!info->feature_persistent) {
1528 					indirect_page = s->indirect_grants[i]->page;
1529 					list_add(&indirect_page->lru, &rinfo->indirect_pages);
1530 				}
1531 				s->indirect_grants[i]->gref = GRANT_INVALID_REF;
1532 				list_add_tail(&s->indirect_grants[i]->node, &rinfo->grants);
1533 			}
1534 		}
1535 	}
1536 
1537 	return 1;
1538 }
1539 
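/* Ring interrupt handler: consume responses and complete the corresponding requests. */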
1540 static irqreturn_t blkif_interrupt(int irq, void *dev_id)
1541 {
1542 	struct request *req;
1543 	struct blkif_response *bret;
1544 	RING_IDX i, rp;
1545 	unsigned long flags;
1546 	struct blkfront_ring_info *rinfo = (struct blkfront_ring_info *)dev_id;
1547 	struct blkfront_info *info = rinfo->dev_info;
1548 	int error;
1549 
1550 	if (unlikely(info->connected != BLKIF_STATE_CONNECTED))
1551 		return IRQ_HANDLED;
1552 
1553 	spin_lock_irqsave(&rinfo->ring_lock, flags);
1554  again:
1555 	rp = rinfo->ring.sring->rsp_prod;
1556 	rmb(); /* Ensure we see queued responses up to 'rp'. */
1557 
1558 	for (i = rinfo->ring.rsp_cons; i != rp; i++) {
1559 		unsigned long id;
1560 
1561 		bret = RING_GET_RESPONSE(&rinfo->ring, i);
1562 		id   = bret->id;
1563 		/*
1564 		 * The backend has messed up and given us an id that we would
1565 		 * never have given to it (we stamp it up to BLK_RING_SIZE -
1566 		 * look in get_id_from_freelist).
1567 		 */
1568 		if (id >= BLK_RING_SIZE(info)) {
1569 			WARN(1, "%s: response to %s has incorrect id (%ld)\n",
1570 			     info->gd->disk_name, op_name(bret->operation), id);
1571 			/* We can't safely get the 'struct request' as
1572 			 * the id is busted. */
1573 			continue;
1574 		}
1575 		req  = rinfo->shadow[id].request;
1576 
1577 		if (bret->operation != BLKIF_OP_DISCARD) {
1578 			/*
1579 			 * We may need to wait for an extra response if the
1580 			 * I/O request is split in 2
1581 			 */
1582 			if (!blkif_completion(&id, rinfo, bret))
1583 				continue;
1584 		}
1585 
1586 		if (add_id_to_freelist(rinfo, id)) {
1587 			WARN(1, "%s: response to %s (id %ld) couldn't be recycled!\n",
1588 			     info->gd->disk_name, op_name(bret->operation), id);
1589 			continue;
1590 		}
1591 
1592 		error = (bret->status == BLKIF_RSP_OKAY) ? 0 : -EIO;
1593 		switch (bret->operation) {
1594 		case BLKIF_OP_DISCARD:
1595 			if (unlikely(bret->status == BLKIF_RSP_EOPNOTSUPP)) {
1596 				struct request_queue *rq = info->rq;
1597 				printk(KERN_WARNING "blkfront: %s: %s op failed\n",
1598 					   info->gd->disk_name, op_name(bret->operation));
1599 				error = -EOPNOTSUPP;
1600 				info->feature_discard = 0;
1601 				info->feature_secdiscard = 0;
1602 				queue_flag_clear(QUEUE_FLAG_DISCARD, rq);
1603 				queue_flag_clear(QUEUE_FLAG_SECDISCARD, rq);
1604 			}
1605 			blk_mq_complete_request(req, error);
1606 			break;
1607 		case BLKIF_OP_FLUSH_DISKCACHE:
1608 		case BLKIF_OP_WRITE_BARRIER:
1609 			if (unlikely(bret->status == BLKIF_RSP_EOPNOTSUPP)) {
1610 				printk(KERN_WARNING "blkfront: %s: %s op failed\n",
1611 				       info->gd->disk_name, op_name(bret->operation));
1612 				error = -EOPNOTSUPP;
1613 			}
1614 			if (unlikely(bret->status == BLKIF_RSP_ERROR &&
1615 				     rinfo->shadow[id].req.u.rw.nr_segments == 0)) {
1616 				printk(KERN_WARNING "blkfront: %s: empty %s op failed\n",
1617 				       info->gd->disk_name, op_name(bret->operation));
1618 				error = -EOPNOTSUPP;
1619 			}
1620 			if (unlikely(error)) {
1621 				if (error == -EOPNOTSUPP)
1622 					error = 0;
1623 				info->feature_flush = 0;
1624 				xlvbd_flush(info);
1625 			}
1626 			/* fall through */
1627 		case BLKIF_OP_READ:
1628 		case BLKIF_OP_WRITE:
1629 			if (unlikely(bret->status != BLKIF_RSP_OKAY))
1630 				dev_dbg(&info->xbdev->dev, "Bad return from blkdev data "
1631 					"request: %x\n", bret->status);
1632 
1633 			blk_mq_complete_request(req, error);
1634 			break;
1635 		default:
1636 			BUG();
1637 		}
1638 	}
1639 
1640 	rinfo->ring.rsp_cons = i;
1641 
1642 	if (i != rinfo->ring.req_prod_pvt) {
1643 		int more_to_do;
1644 		RING_FINAL_CHECK_FOR_RESPONSES(&rinfo->ring, more_to_do);
1645 		if (more_to_do)
1646 			goto again;
1647 	} else
1648 		rinfo->ring.sring->rsp_event = i + 1;
1649 
1650 	kick_pending_request_queues_locked(rinfo);
1651 
1652 	spin_unlock_irqrestore(&rinfo->ring_lock, flags);
1653 
1654 	return IRQ_HANDLED;
1655 }
1656 
1657 
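/*
 * Allocate the shared ring for one queue, grant its pages to the backend
 * and bind an event channel plus interrupt handler to it.
 */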
1658 static int setup_blkring(struct xenbus_device *dev,
1659 			 struct blkfront_ring_info *rinfo)
1660 {
1661 	struct blkif_sring *sring;
1662 	int err, i;
1663 	struct blkfront_info *info = rinfo->dev_info;
1664 	unsigned long ring_size = info->nr_ring_pages * XEN_PAGE_SIZE;
1665 	grant_ref_t gref[XENBUS_MAX_RING_GRANTS];
1666 
1667 	for (i = 0; i < info->nr_ring_pages; i++)
1668 		rinfo->ring_ref[i] = GRANT_INVALID_REF;
1669 
1670 	sring = (struct blkif_sring *)__get_free_pages(GFP_NOIO | __GFP_HIGH,
1671 						       get_order(ring_size));
1672 	if (!sring) {
1673 		xenbus_dev_fatal(dev, -ENOMEM, "allocating shared ring");
1674 		return -ENOMEM;
1675 	}
1676 	SHARED_RING_INIT(sring);
1677 	FRONT_RING_INIT(&rinfo->ring, sring, ring_size);
1678 
1679 	err = xenbus_grant_ring(dev, rinfo->ring.sring, info->nr_ring_pages, gref);
1680 	if (err < 0) {
1681 		free_pages((unsigned long)sring, get_order(ring_size));
1682 		rinfo->ring.sring = NULL;
1683 		goto fail;
1684 	}
1685 	for (i = 0; i < info->nr_ring_pages; i++)
1686 		rinfo->ring_ref[i] = gref[i];
1687 
1688 	err = xenbus_alloc_evtchn(dev, &rinfo->evtchn);
1689 	if (err)
1690 		goto fail;
1691 
1692 	err = bind_evtchn_to_irqhandler(rinfo->evtchn, blkif_interrupt, 0,
1693 					"blkif", rinfo);
1694 	if (err <= 0) {
1695 		xenbus_dev_fatal(dev, err,
1696 				 "bind_evtchn_to_irqhandler failed");
1697 		goto fail;
1698 	}
1699 	rinfo->irq = err;
1700 
1701 	return 0;
1702 fail:
1703 	blkif_free(info, 0);
1704 	return err;
1705 }
1706 
1707 /*
1708  * Write out per-ring/queue nodes including ring-ref and event-channel. Each
1709  * ring buffer may have multiple pages depending on ->nr_ring_pages.
1710  */
1711 static int write_per_ring_nodes(struct xenbus_transaction xbt,
1712 				struct blkfront_ring_info *rinfo, const char *dir)
1713 {
1714 	int err;
1715 	unsigned int i;
1716 	const char *message = NULL;
1717 	struct blkfront_info *info = rinfo->dev_info;
1718 
1719 	if (info->nr_ring_pages == 1) {
1720 		err = xenbus_printf(xbt, dir, "ring-ref", "%u", rinfo->ring_ref[0]);
1721 		if (err) {
1722 			message = "writing ring-ref";
1723 			goto abort_transaction;
1724 		}
1725 	} else {
1726 		for (i = 0; i < info->nr_ring_pages; i++) {
1727 			char ring_ref_name[RINGREF_NAME_LEN];
1728 
1729 			snprintf(ring_ref_name, RINGREF_NAME_LEN, "ring-ref%u", i);
1730 			err = xenbus_printf(xbt, dir, ring_ref_name,
1731 					    "%u", rinfo->ring_ref[i]);
1732 			if (err) {
1733 				message = "writing ring-ref";
1734 				goto abort_transaction;
1735 			}
1736 		}
1737 	}
1738 
1739 	err = xenbus_printf(xbt, dir, "event-channel", "%u", rinfo->evtchn);
1740 	if (err) {
1741 		message = "writing event-channel";
1742 		goto abort_transaction;
1743 	}
1744 
1745 	return 0;
1746 
1747 abort_transaction:
1748 	xenbus_transaction_end(xbt, 1);
1749 	if (message)
1750 		xenbus_dev_fatal(info->xbdev, err, "%s", message);
1751 
1752 	return err;
1753 }
1754 
1755 /* Common code used when first setting up, and when resuming. */
1756 static int talk_to_blkback(struct xenbus_device *dev,
1757 			   struct blkfront_info *info)
1758 {
1759 	const char *message = NULL;
1760 	struct xenbus_transaction xbt;
1761 	int err;
1762 	unsigned int i, max_page_order = 0;
1763 	unsigned int ring_page_order = 0;
1764 
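	/*
	 * Negotiate the ring size: honour the backend's advertised
	 * max-ring-page-order, capped by xen_blkif_max_ring_order, and
	 * fall back to a single page if the backend does not publish it.
	 */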
1765 	err = xenbus_scanf(XBT_NIL, info->xbdev->otherend,
1766 			   "max-ring-page-order", "%u", &max_page_order);
1767 	if (err != 1)
1768 		info->nr_ring_pages = 1;
1769 	else {
1770 		ring_page_order = min(xen_blkif_max_ring_order, max_page_order);
1771 		info->nr_ring_pages = 1 << ring_page_order;
1772 	}
1773 
1774 	for (i = 0; i < info->nr_rings; i++) {
1775 		struct blkfront_ring_info *rinfo = &info->rinfo[i];
1776 
1777 		/* Create shared ring, alloc event channel. */
1778 		err = setup_blkring(dev, rinfo);
1779 		if (err)
1780 			goto destroy_blkring;
1781 	}
1782 
1783 again:
1784 	err = xenbus_transaction_start(&xbt);
1785 	if (err) {
1786 		xenbus_dev_fatal(dev, err, "starting transaction");
1787 		goto destroy_blkring;
1788 	}
1789 
1790 	if (info->nr_ring_pages > 1) {
1791 		err = xenbus_printf(xbt, dev->nodename, "ring-page-order", "%u",
1792 				    ring_page_order);
1793 		if (err) {
1794 			message = "writing ring-page-order";
1795 			goto abort_transaction;
1796 		}
1797 	}
1798 
1799 	/* We already got the number of queues/rings in _probe */
1800 	if (info->nr_rings == 1) {
1801 		err = write_per_ring_nodes(xbt, &info->rinfo[0], dev->nodename);
1802 		if (err)
1803 			goto destroy_blkring;
1804 	} else {
1805 		char *path;
1806 		size_t pathsize;
1807 
1808 		err = xenbus_printf(xbt, dev->nodename, "multi-queue-num-queues", "%u",
1809 				    info->nr_rings);
1810 		if (err) {
1811 			message = "writing multi-queue-num-queues";
1812 			goto abort_transaction;
1813 		}
1814 
1815 		pathsize = strlen(dev->nodename) + QUEUE_NAME_LEN;
1816 		path = kmalloc(pathsize, GFP_KERNEL);
1817 		if (!path) {
1818 			err = -ENOMEM;
1819 			message = "ENOMEM while writing ring references";
1820 			goto abort_transaction;
1821 		}
1822 
1823 		for (i = 0; i < info->nr_rings; i++) {
1824 			memset(path, 0, pathsize);
1825 			snprintf(path, pathsize, "%s/queue-%u", dev->nodename, i);
1826 			err = write_per_ring_nodes(xbt, &info->rinfo[i], path);
1827 			if (err) {
1828 				kfree(path);
1829 				goto destroy_blkring;
1830 			}
1831 		}
1832 		kfree(path);
1833 	}
1834 	err = xenbus_printf(xbt, dev->nodename, "protocol", "%s",
1835 			    XEN_IO_PROTO_ABI_NATIVE);
1836 	if (err) {
1837 		message = "writing protocol";
1838 		goto abort_transaction;
1839 	}
1840 	err = xenbus_printf(xbt, dev->nodename,
1841 			    "feature-persistent", "%u", 1);
1842 	if (err)
1843 		dev_warn(&dev->dev,
1844 			 "writing persistent grants feature to xenbus");
1845 
1846 	err = xenbus_transaction_end(xbt, 0);
1847 	if (err) {
1848 		if (err == -EAGAIN)
1849 			goto again;
1850 		xenbus_dev_fatal(dev, err, "completing transaction");
1851 		goto destroy_blkring;
1852 	}
1853 
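	/*
	 * Chain each ring's shadow entries into a free list: entry j
	 * points at j + 1, and the last entry carries the 0x0fffffff
	 * end-of-list marker.
	 */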
1854 	for (i = 0; i < info->nr_rings; i++) {
1855 		unsigned int j;
1856 		struct blkfront_ring_info *rinfo = &info->rinfo[i];
1857 
1858 		for (j = 0; j < BLK_RING_SIZE(info); j++)
1859 			rinfo->shadow[j].req.u.rw.id = j + 1;
1860 		rinfo->shadow[BLK_RING_SIZE(info)-1].req.u.rw.id = 0x0fffffff;
1861 	}
1862 	xenbus_switch_state(dev, XenbusStateInitialised);
1863 
1864 	return 0;
1865 
1866  abort_transaction:
1867 	xenbus_transaction_end(xbt, 1);
1868 	if (message)
1869 		xenbus_dev_fatal(dev, err, "%s", message);
1870  destroy_blkring:
1871 	blkif_free(info, 0);
1872 
1873 	kfree(info);
1874 	dev_set_drvdata(&dev->dev, NULL);
1875 
1876 	return err;
1877 }
1878 
1879 static int negotiate_mq(struct blkfront_info *info)
1880 {
1881 	unsigned int backend_max_queues = 0;
1882 	int err;
1883 	unsigned int i;
1884 
1885 	BUG_ON(info->nr_rings);
1886 
1887 	/* Check if backend supports multiple queues. */
1888 	err = xenbus_scanf(XBT_NIL, info->xbdev->otherend,
1889 			   "multi-queue-max-queues", "%u", &backend_max_queues);
1890 	if (err < 0)
1891 		backend_max_queues = 1;
1892 
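	/*
	 * Use the smaller of the backend's limit and our own
	 * xen_blkif_max_queues, but never fewer than one ring.
	 */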
1893 	info->nr_rings = min(backend_max_queues, xen_blkif_max_queues);
1894 	/* We need at least one ring. */
1895 	if (!info->nr_rings)
1896 		info->nr_rings = 1;
1897 
1898 	info->rinfo = kzalloc(sizeof(struct blkfront_ring_info) * info->nr_rings, GFP_KERNEL);
1899 	if (!info->rinfo) {
1900 		xenbus_dev_fatal(info->xbdev, -ENOMEM, "allocating ring_info structure");
1901 		return -ENOMEM;
1902 	}
1903 
1904 	for (i = 0; i < info->nr_rings; i++) {
1905 		struct blkfront_ring_info *rinfo;
1906 
1907 		rinfo = &info->rinfo[i];
1908 		INIT_LIST_HEAD(&rinfo->indirect_pages);
1909 		INIT_LIST_HEAD(&rinfo->grants);
1910 		rinfo->dev_info = info;
1911 		INIT_WORK(&rinfo->work, blkif_restart_queue);
1912 		spin_lock_init(&rinfo->ring_lock);
1913 	}
1914 	return 0;
1915 }
1916 /**
1917  * Entry point to this code when a new device is created.  Allocate the basic
1918  * structures and record them with dev_set_drvdata().  The shared rings and
1919  * event channels are set up later, in talk_to_blkback(), which also switches
1920  * the device to the Initialised state.
1921  */
1922 static int blkfront_probe(struct xenbus_device *dev,
1923 			  const struct xenbus_device_id *id)
1924 {
1925 	int err, vdevice;
1926 	struct blkfront_info *info;
1927 
1928 	/* FIXME: Use dynamic device id if this is not set. */
1929 	err = xenbus_scanf(XBT_NIL, dev->nodename,
1930 			   "virtual-device", "%i", &vdevice);
1931 	if (err != 1) {
1932 		/* go looking in the extended area instead */
1933 		err = xenbus_scanf(XBT_NIL, dev->nodename, "virtual-device-ext",
1934 				   "%i", &vdevice);
1935 		if (err != 1) {
1936 			xenbus_dev_fatal(dev, err, "reading virtual-device");
1937 			return err;
1938 		}
1939 	}
1940 
1941 	if (xen_hvm_domain()) {
1942 		char *type;
1943 		int len;
1944 		/* no unplug has been done: do not hook devices != xen vbds */
1945 		if (xen_has_pv_and_legacy_disk_devices()) {
1946 			int major;
1947 
1948 			if (!VDEV_IS_EXTENDED(vdevice))
1949 				major = BLKIF_MAJOR(vdevice);
1950 			else
1951 				major = XENVBD_MAJOR;
1952 
1953 			if (major != XENVBD_MAJOR) {
1954 				printk(KERN_INFO
1955 						"%s: HVM does not support vbd %d as xen block device\n",
1956 						__func__, vdevice);
1957 				return -ENODEV;
1958 			}
1959 		}
1960 		/* do not create a PV cdrom device if we are an HVM guest */
1961 		type = xenbus_read(XBT_NIL, dev->nodename, "device-type", &len);
1962 		if (IS_ERR(type))
1963 			return -ENODEV;
1964 		if (strncmp(type, "cdrom", 5) == 0) {
1965 			kfree(type);
1966 			return -ENODEV;
1967 		}
1968 		kfree(type);
1969 	}
1970 	info = kzalloc(sizeof(*info), GFP_KERNEL);
1971 	if (!info) {
1972 		xenbus_dev_fatal(dev, -ENOMEM, "allocating info structure");
1973 		return -ENOMEM;
1974 	}
1975 
1976 	info->xbdev = dev;
1977 	err = negotiate_mq(info);
1978 	if (err) {
1979 		kfree(info);
1980 		return err;
1981 	}
1982 
1983 	mutex_init(&info->mutex);
1984 	info->vdevice = vdevice;
1985 	info->connected = BLKIF_STATE_DISCONNECTED;
1986 
1987 	/* Front end dir is a number, which is used as the id. */
1988 	info->handle = simple_strtoul(strrchr(dev->nodename, '/')+1, NULL, 0);
1989 	dev_set_drvdata(&dev->dev, info);
1990 
1991 	return 0;
1992 }
1993 
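/*
 * Completion handler for the bio clones created in blkif_recover(): when the
 * last clone finishes, copy its completion status to the original bio and
 * end it.
 */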
1994 static void split_bio_end(struct bio *bio)
1995 {
1996 	struct split_bio *split_bio = bio->bi_private;
1997 
1998 	if (atomic_dec_and_test(&split_bio->pending)) {
1999 		split_bio->bio->bi_phys_segments = 0;
2000 		split_bio->bio->bi_error = bio->bi_error;
2001 		bio_endio(split_bio->bio);
2002 		kfree(split_bio);
2003 	}
2004 	bio_put(bio);
2005 }
2006 
2007 static int blkif_recover(struct blkfront_info *info)
2008 {
2009 	unsigned int i, r_index;
2010 	struct request *req, *n;
2011 	struct blk_shadow *copy;
2012 	int rc;
2013 	struct bio *bio, *cloned_bio;
2014 	struct bio_list bio_list, merge_bio;
2015 	unsigned int segs, offset;
2016 	int pending, size;
2017 	struct split_bio *split_bio;
2018 	struct list_head requests;
2019 
2020 	blkfront_gather_backend_features(info);
2021 	segs = info->max_indirect_segments ? : BLKIF_MAX_SEGMENTS_PER_REQUEST;
2022 	blk_queue_max_segments(info->rq, segs);
2023 	bio_list_init(&bio_list);
2024 	INIT_LIST_HEAD(&requests);
2025 
2026 	for (r_index = 0; r_index < info->nr_rings; r_index++) {
2027 		struct blkfront_ring_info *rinfo;
2028 
2029 		rinfo = &info->rinfo[r_index];
2030 		/* Stage 1: Make a safe copy of the shadow state. */
2031 		copy = kmemdup(rinfo->shadow, sizeof(rinfo->shadow),
2032 			       GFP_NOIO | __GFP_REPEAT | __GFP_HIGH);
2033 		if (!copy)
2034 			return -ENOMEM;
2035 
2036 		/* Stage 2: Set up free list. */
2037 		memset(&rinfo->shadow, 0, sizeof(rinfo->shadow));
2038 		for (i = 0; i < BLK_RING_SIZE(info); i++)
2039 			rinfo->shadow[i].req.u.rw.id = i+1;
2040 		rinfo->shadow_free = rinfo->ring.req_prod_pvt;
2041 		rinfo->shadow[BLK_RING_SIZE(info)-1].req.u.rw.id = 0x0fffffff;
2042 
2043 		rc = blkfront_setup_indirect(rinfo);
2044 		if (rc) {
2045 			kfree(copy);
2046 			return rc;
2047 		}
2048 
2049 		for (i = 0; i < BLK_RING_SIZE(info); i++) {
2050 			/* Not in use? */
2051 			if (!copy[i].request)
2052 				continue;
2053 
2054 			/*
2055 			 * Get the bios in the request so we can re-queue them.
2056 			 */
2057 			if (copy[i].request->cmd_flags &
2058 			    (REQ_FLUSH | REQ_FUA | REQ_DISCARD | REQ_SECURE)) {
2059 				/*
2060 				 * Flush operations don't contain bios, so
2061 				 * we need to requeue the whole request
2062 				 */
2063 				list_add(&copy[i].request->queuelist, &requests);
2064 				continue;
2065 			}
2066 			merge_bio.head = copy[i].request->bio;
2067 			merge_bio.tail = copy[i].request->biotail;
2068 			bio_list_merge(&bio_list, &merge_bio);
2069 			copy[i].request->bio = NULL;
2070 			blk_end_request_all(copy[i].request, 0);
2071 		}
2072 
2073 		kfree(copy);
2074 	}
2075 	xenbus_switch_state(info->xbdev, XenbusStateConnected);
2076 
2077 	/* Now safe for us to use the shared ring */
2078 	info->connected = BLKIF_STATE_CONNECTED;
2079 
2080 	for (r_index = 0; r_index < info->nr_rings; r_index++) {
2081 		struct blkfront_ring_info *rinfo;
2082 
2083 		rinfo = &info->rinfo[r_index];
2084 		/* Kick any other new requests queued since we resumed */
2085 		kick_pending_request_queues(rinfo);
2086 	}
2087 
2088 	list_for_each_entry_safe(req, n, &requests, queuelist) {
2089 		/* Requeue pending requests (flush or discard) */
2090 		list_del_init(&req->queuelist);
2091 		BUG_ON(req->nr_phys_segments > segs);
2092 		blk_mq_requeue_request(req);
2093 	}
2094 	blk_mq_kick_requeue_list(info->rq);
2095 
2096 	while ((bio = bio_list_pop(&bio_list)) != NULL) {
2097 		/* Traverse the list of pending bios and re-queue them */
2098 		if (bio_segments(bio) > segs) {
2099 			/*
2100 			 * This bio has more segments than we can
2101 			 * handle, so we have to split it.
2102 			 */
2103 			pending = (bio_segments(bio) + segs - 1) / segs;
2104 			split_bio = kzalloc(sizeof(*split_bio), GFP_NOIO);
2105 			BUG_ON(split_bio == NULL);
2106 			atomic_set(&split_bio->pending, pending);
2107 			split_bio->bio = bio;
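			/*
			 * Each clone covers at most segs * XEN_PAGE_SIZE
			 * bytes; offset and size are expressed in 512-byte
			 * sectors, hence the >> 9 conversions below.
			 */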
2108 			for (i = 0; i < pending; i++) {
2109 				offset = (i * segs * XEN_PAGE_SIZE) >> 9;
2110 				size = min((unsigned int)(segs * XEN_PAGE_SIZE) >> 9,
2111 					   (unsigned int)bio_sectors(bio) - offset);
2112 				cloned_bio = bio_clone(bio, GFP_NOIO);
2113 				BUG_ON(cloned_bio == NULL);
2114 				bio_trim(cloned_bio, offset, size);
2115 				cloned_bio->bi_private = split_bio;
2116 				cloned_bio->bi_end_io = split_bio_end;
2117 				submit_bio(cloned_bio->bi_rw, cloned_bio);
2118 			}
2119 			/*
2120 			 * Now we have to wait for all those smaller bios to
2121 			 * end, so we can also end the "parent" bio.
2122 			 */
2123 			continue;
2124 		}
2125 		/* We don't need to split this bio */
2126 		submit_bio(bio->bi_rw, bio);
2127 	}
2128 
2129 	return 0;
2130 }
2131 
2132 /**
2133  * We are reconnecting to the backend, due to a suspend/resume, or a backend
2134  * driver restart.  We tear down our blkif structure and recreate it, but
2135  * leave the device-layer structures intact so that this is transparent to the
2136  * rest of the kernel.
2137  */
2138 static int blkfront_resume(struct xenbus_device *dev)
2139 {
2140 	struct blkfront_info *info = dev_get_drvdata(&dev->dev);
2141 	int err = 0;
2142 
2143 	dev_dbg(&dev->dev, "blkfront_resume: %s\n", dev->nodename);
2144 
2145 	blkif_free(info, info->connected == BLKIF_STATE_CONNECTED);
2146 
2147 	err = negotiate_mq(info);
2148 	if (err)
2149 		return err;
2150 
2151 	err = talk_to_blkback(dev, info);
2152 
2153 	/*
2154 	 * We have to wait for the backend to switch to
2155 	 * connected state, since we want to read which
2156 	 * features it supports.
2157 	 */
2158 
2159 	return err;
2160 }
2161 
2162 static void blkfront_closing(struct blkfront_info *info)
2163 {
2164 	struct xenbus_device *xbdev = info->xbdev;
2165 	struct block_device *bdev = NULL;
2166 
2167 	mutex_lock(&info->mutex);
2168 
2169 	if (xbdev->state == XenbusStateClosing) {
2170 		mutex_unlock(&info->mutex);
2171 		return;
2172 	}
2173 
2174 	if (info->gd)
2175 		bdev = bdget_disk(info->gd, 0);
2176 
2177 	mutex_unlock(&info->mutex);
2178 
2179 	if (!bdev) {
2180 		xenbus_frontend_closed(xbdev);
2181 		return;
2182 	}
2183 
2184 	mutex_lock(&bdev->bd_mutex);
2185 
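	/*
	 * Only release the gendisk if nobody still has the device open;
	 * otherwise switch to Closing and let blkif_release() finish the
	 * job once the last opener goes away.
	 */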
2186 	if (bdev->bd_openers) {
2187 		xenbus_dev_error(xbdev, -EBUSY,
2188 				 "Device in use; refusing to close");
2189 		xenbus_switch_state(xbdev, XenbusStateClosing);
2190 	} else {
2191 		xlvbd_release_gendisk(info);
2192 		xenbus_frontend_closed(xbdev);
2193 	}
2194 
2195 	mutex_unlock(&bdev->bd_mutex);
2196 	bdput(bdev);
2197 }
2198 
2199 static void blkfront_setup_discard(struct blkfront_info *info)
2200 {
2201 	int err;
2202 	unsigned int discard_granularity;
2203 	unsigned int discard_alignment;
2204 	unsigned int discard_secure;
2205 
2206 	info->feature_discard = 1;
2207 	err = xenbus_gather(XBT_NIL, info->xbdev->otherend,
2208 		"discard-granularity", "%u", &discard_granularity,
2209 		"discard-alignment", "%u", &discard_alignment,
2210 		NULL);
2211 	if (!err) {
2212 		info->discard_granularity = discard_granularity;
2213 		info->discard_alignment = discard_alignment;
2214 	}
2215 	err = xenbus_gather(XBT_NIL, info->xbdev->otherend,
2216 		    "discard-secure", "%d", &discard_secure,
2217 		    NULL);
2218 	if (!err)
2219 		info->feature_secdiscard = !!discard_secure;
2220 }
2221 
2222 static int blkfront_setup_indirect(struct blkfront_ring_info *rinfo)
2223 {
2224 	unsigned int psegs, grants;
2225 	int err, i;
2226 	struct blkfront_info *info = rinfo->dev_info;
2227 
2228 	if (info->max_indirect_segments == 0) {
2229 		if (!HAS_EXTRA_REQ)
2230 			grants = BLKIF_MAX_SEGMENTS_PER_REQUEST;
2231 		else {
2232 			/*
2233 			 * When an extra request is required, the maximum
2234 			 * number of grants supported is limited by the
2235 			 * size of the Linux block segment.
2236 			 */
2237 			grants = GRANTS_PER_PSEG;
2238 		}
2239 	}
2240 	else
2241 		grants = info->max_indirect_segments;
2242 	psegs = grants / GRANTS_PER_PSEG;
2243 
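	/*
	 * Pre-populate the grant buffer with enough grants for every
	 * possible in-flight request: the data grants plus those used
	 * for the indirect descriptor pages themselves.
	 */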
2244 	err = fill_grant_buffer(rinfo,
2245 				(grants + INDIRECT_GREFS(grants)) * BLK_RING_SIZE(info));
2246 	if (err)
2247 		goto out_of_memory;
2248 
2249 	if (!info->feature_persistent && info->max_indirect_segments) {
2250 		/*
2251 		 * We are using indirect descriptors but not persistent
2252 		 * grants, so we need to allocate a set of pages that can
2253 		 * be used for mapping indirect grefs.
2254 		 */
2255 		int num = INDIRECT_GREFS(grants) * BLK_RING_SIZE(info);
2256 
2257 		BUG_ON(!list_empty(&rinfo->indirect_pages));
2258 		for (i = 0; i < num; i++) {
2259 			struct page *indirect_page = alloc_page(GFP_NOIO);
2260 			if (!indirect_page)
2261 				goto out_of_memory;
2262 			list_add(&indirect_page->lru, &rinfo->indirect_pages);
2263 		}
2264 	}
2265 
2266 	for (i = 0; i < BLK_RING_SIZE(info); i++) {
2267 		rinfo->shadow[i].grants_used = kzalloc(
2268 			sizeof(rinfo->shadow[i].grants_used[0]) * grants,
2269 			GFP_NOIO);
2270 		rinfo->shadow[i].sg = kzalloc(sizeof(rinfo->shadow[i].sg[0]) * psegs, GFP_NOIO);
2271 		if (info->max_indirect_segments)
2272 			rinfo->shadow[i].indirect_grants = kzalloc(
2273 				sizeof(rinfo->shadow[i].indirect_grants[0]) *
2274 				INDIRECT_GREFS(grants),
2275 				GFP_NOIO);
2276 		if ((rinfo->shadow[i].grants_used == NULL) ||
2277 			(rinfo->shadow[i].sg == NULL) ||
2278 		     (info->max_indirect_segments &&
2279 		     (rinfo->shadow[i].indirect_grants == NULL)))
2280 			goto out_of_memory;
2281 		sg_init_table(rinfo->shadow[i].sg, psegs);
2282 	}
2283 
2284 
2285 	return 0;
2286 
2287 out_of_memory:
2288 	for (i = 0; i < BLK_RING_SIZE(info); i++) {
2289 		kfree(rinfo->shadow[i].grants_used);
2290 		rinfo->shadow[i].grants_used = NULL;
2291 		kfree(rinfo->shadow[i].sg);
2292 		rinfo->shadow[i].sg = NULL;
2293 		kfree(rinfo->shadow[i].indirect_grants);
2294 		rinfo->shadow[i].indirect_grants = NULL;
2295 	}
2296 	if (!list_empty(&rinfo->indirect_pages)) {
2297 		struct page *indirect_page, *n;
2298 		list_for_each_entry_safe(indirect_page, n, &rinfo->indirect_pages, lru) {
2299 			list_del(&indirect_page->lru);
2300 			__free_page(indirect_page);
2301 		}
2302 	}
2303 	return -ENOMEM;
2304 }
2305 
2306 /*
2307  * Gather all backend feature-*
2308  */
2309 static void blkfront_gather_backend_features(struct blkfront_info *info)
2310 {
2311 	int err;
2312 	int barrier, flush, discard, persistent;
2313 	unsigned int indirect_segments;
2314 
2315 	info->feature_flush = 0;
2316 
2317 	err = xenbus_gather(XBT_NIL, info->xbdev->otherend,
2318 			"feature-barrier", "%d", &barrier,
2319 			NULL);
2320 
2321 	/*
2322 	 * If there's no "feature-barrier" defined, then it means
2323 	 * we're dealing with a very old backend which writes
2324 	 * synchronously; nothing to do.
2325 	 *
2326 	 * If there are barriers, then we use flush.
2327 	 */
2328 	if (!err && barrier)
2329 		info->feature_flush = REQ_FLUSH | REQ_FUA;
2330 	/*
2331 	 * And if "feature-flush-cache" is present, use it in preference
2332 	 * to barriers.
2333 	 */
2334 	err = xenbus_gather(XBT_NIL, info->xbdev->otherend,
2335 			"feature-flush-cache", "%d", &flush,
2336 			NULL);
2337 
2338 	if (!err && flush)
2339 		info->feature_flush = REQ_FLUSH;
2340 
2341 	err = xenbus_gather(XBT_NIL, info->xbdev->otherend,
2342 			"feature-discard", "%d", &discard,
2343 			NULL);
2344 
2345 	if (!err && discard)
2346 		blkfront_setup_discard(info);
2347 
2348 	err = xenbus_gather(XBT_NIL, info->xbdev->otherend,
2349 			"feature-persistent", "%u", &persistent,
2350 			NULL);
2351 	if (err)
2352 		info->feature_persistent = 0;
2353 	else
2354 		info->feature_persistent = persistent;
2355 
2356 	err = xenbus_gather(XBT_NIL, info->xbdev->otherend,
2357 			    "feature-max-indirect-segments", "%u", &indirect_segments,
2358 			    NULL);
2359 	if (err)
2360 		info->max_indirect_segments = 0;
2361 	else
2362 		info->max_indirect_segments = min(indirect_segments,
2363 						  xen_blkif_max_segments);
2364 }
2365 
2366 /*
2367  * Invoked when the backend is finally 'ready' (and has provided
2368  * the details about the physical device - #sectors, size, etc).
2369  */
2370 static void blkfront_connect(struct blkfront_info *info)
2371 {
2372 	unsigned long long sectors;
2373 	unsigned long sector_size;
2374 	unsigned int physical_sector_size;
2375 	unsigned int binfo;
2376 	int err, i;
2377 
2378 	switch (info->connected) {
2379 	case BLKIF_STATE_CONNECTED:
2380 		/*
2381 		 * Potentially, the back-end may be signalling
2382 		 * a capacity change; update the capacity.
2383 		 */
2384 		err = xenbus_scanf(XBT_NIL, info->xbdev->otherend,
2385 				   "sectors", "%Lu", &sectors);
2386 		if (XENBUS_EXIST_ERR(err))
2387 			return;
2388 		printk(KERN_INFO "Setting capacity to %Lu\n",
2389 		       sectors);
2390 		set_capacity(info->gd, sectors);
2391 		revalidate_disk(info->gd);
2392 
2393 		return;
2394 	case BLKIF_STATE_SUSPENDED:
2395 		/*
2396 		 * If we are recovering from suspension, we need to wait
2397 		 * for the backend to announce its features before
2398 		 * reconnecting; at the very least we need to know whether
2399 		 * the backend supports indirect descriptors, and how many.
2400 		 */
2401 		blkif_recover(info);
2402 		return;
2403 
2404 	default:
2405 		break;
2406 	}
2407 
2408 	dev_dbg(&info->xbdev->dev, "%s:%s.\n",
2409 		__func__, info->xbdev->otherend);
2410 
2411 	err = xenbus_gather(XBT_NIL, info->xbdev->otherend,
2412 			    "sectors", "%llu", &sectors,
2413 			    "info", "%u", &binfo,
2414 			    "sector-size", "%lu", &sector_size,
2415 			    NULL);
2416 	if (err) {
2417 		xenbus_dev_fatal(info->xbdev, err,
2418 				 "reading backend fields at %s",
2419 				 info->xbdev->otherend);
2420 		return;
2421 	}
2422 
2423 	/*
2424 	 * physical-sector-size is a newer field, so old backends may not
2425 	 * provide this. Assume physical sector size to be the same as
2426 	 * sector_size in that case.
2427 	 */
2428 	err = xenbus_scanf(XBT_NIL, info->xbdev->otherend,
2429 			   "physical-sector-size", "%u", &physical_sector_size);
2430 	if (err != 1)
2431 		physical_sector_size = sector_size;
2432 
2433 	blkfront_gather_backend_features(info);
2434 	for (i = 0; i < info->nr_rings; i++) {
2435 		err = blkfront_setup_indirect(&info->rinfo[i]);
2436 		if (err) {
2437 			xenbus_dev_fatal(info->xbdev, err, "setup_indirect at %s",
2438 					 info->xbdev->otherend);
2439 			blkif_free(info, 0);
2440 			break;
2441 		}
2442 	}
2443 
2444 	err = xlvbd_alloc_gendisk(sectors, info, binfo, sector_size,
2445 				  physical_sector_size);
2446 	if (err) {
2447 		xenbus_dev_fatal(info->xbdev, err, "xlvbd_add at %s",
2448 				 info->xbdev->otherend);
2449 		return;
2450 	}
2451 
2452 	xenbus_switch_state(info->xbdev, XenbusStateConnected);
2453 
2454 	/* Kick pending requests. */
2455 	info->connected = BLKIF_STATE_CONNECTED;
2456 	for (i = 0; i < info->nr_rings; i++)
2457 		kick_pending_request_queues(&info->rinfo[i]);
2458 
2459 	add_disk(info->gd);
2460 
2461 	info->is_ready = 1;
2462 }
2463 
2464 /**
2465  * Callback received when the backend's state changes.
2466  */
2467 static void blkback_changed(struct xenbus_device *dev,
2468 			    enum xenbus_state backend_state)
2469 {
2470 	struct blkfront_info *info = dev_get_drvdata(&dev->dev);
2471 
2472 	dev_dbg(&dev->dev, "blkfront:blkback_changed to state %d.\n", backend_state);
2473 
2474 	switch (backend_state) {
2475 	case XenbusStateInitWait:
2476 		if (dev->state != XenbusStateInitialising)
2477 			break;
2478 		if (talk_to_blkback(dev, info))
2479 			break;
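		/* fall through */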
2480 	case XenbusStateInitialising:
2481 	case XenbusStateInitialised:
2482 	case XenbusStateReconfiguring:
2483 	case XenbusStateReconfigured:
2484 	case XenbusStateUnknown:
2485 		break;
2486 
2487 	case XenbusStateConnected:
2488 		if (dev->state != XenbusStateInitialised) {
2489 			if (talk_to_blkback(dev, info))
2490 				break;
2491 		}
2492 		blkfront_connect(info);
2493 		break;
2494 
2495 	case XenbusStateClosed:
2496 		if (dev->state == XenbusStateClosed)
2497 			break;
2498 		/* Missed the backend's Closing state -- fallthrough */
2499 	case XenbusStateClosing:
2500 		if (info)
2501 			blkfront_closing(info);
2502 		break;
2503 	}
2504 }
2505 
2506 static int blkfront_remove(struct xenbus_device *xbdev)
2507 {
2508 	struct blkfront_info *info = dev_get_drvdata(&xbdev->dev);
2509 	struct block_device *bdev = NULL;
2510 	struct gendisk *disk;
2511 
2512 	dev_dbg(&xbdev->dev, "%s removed", xbdev->nodename);
2513 
2514 	blkif_free(info, 0);
2515 
2516 	mutex_lock(&info->mutex);
2517 
2518 	disk = info->gd;
2519 	if (disk)
2520 		bdev = bdget_disk(disk, 0);
2521 
2522 	info->xbdev = NULL;
2523 	mutex_unlock(&info->mutex);
2524 
2525 	if (!bdev) {
2526 		kfree(info);
2527 		return 0;
2528 	}
2529 
2530 	/*
2531 	 * The xbdev was removed before we reached the Closed
2532 	 * state. See if it's safe to remove the disk. If the bdev
2533 	 * isn't closed yet, we let release take care of it.
2534 	 */
2535 
2536 	mutex_lock(&bdev->bd_mutex);
2537 	info = disk->private_data;
2538 
2539 	dev_warn(disk_to_dev(disk),
2540 		 "%s was hot-unplugged, %d stale handles\n",
2541 		 xbdev->nodename, bdev->bd_openers);
2542 
2543 	if (info && !bdev->bd_openers) {
2544 		xlvbd_release_gendisk(info);
2545 		disk->private_data = NULL;
2546 		kfree(info);
2547 	}
2548 
2549 	mutex_unlock(&bdev->bd_mutex);
2550 	bdput(bdev);
2551 
2552 	return 0;
2553 }
2554 
2555 static int blkfront_is_ready(struct xenbus_device *dev)
2556 {
2557 	struct blkfront_info *info = dev_get_drvdata(&dev->dev);
2558 
2559 	return info->is_ready && info->xbdev;
2560 }
2561 
2562 static int blkif_open(struct block_device *bdev, fmode_t mode)
2563 {
2564 	struct gendisk *disk = bdev->bd_disk;
2565 	struct blkfront_info *info;
2566 	int err = 0;
2567 
2568 	mutex_lock(&blkfront_mutex);
2569 
2570 	info = disk->private_data;
2571 	if (!info) {
2572 		/* xbdev gone */
2573 		err = -ERESTARTSYS;
2574 		goto out;
2575 	}
2576 
2577 	mutex_lock(&info->mutex);
2578 
2579 	if (!info->gd)
2580 		/* xbdev is closed */
2581 		err = -ERESTARTSYS;
2582 
2583 	mutex_unlock(&info->mutex);
2584 
2585 out:
2586 	mutex_unlock(&blkfront_mutex);
2587 	return err;
2588 }
2589 
2590 static void blkif_release(struct gendisk *disk, fmode_t mode)
2591 {
2592 	struct blkfront_info *info = disk->private_data;
2593 	struct block_device *bdev;
2594 	struct xenbus_device *xbdev;
2595 
2596 	mutex_lock(&blkfront_mutex);
2597 
2598 	bdev = bdget_disk(disk, 0);
2599 
2600 	if (!bdev) {
2601 		WARN(1, "Block device %s yanked out from us!\n", disk->disk_name);
2602 		goto out_mutex;
2603 	}
2604 	if (bdev->bd_openers)
2605 		goto out;
2606 
2607 	/*
2608 	 * Check if we have been instructed to close. We will have
2609 	 * deferred this request, because the bdev was still open.
2610 	 */
2611 
2612 	mutex_lock(&info->mutex);
2613 	xbdev = info->xbdev;
2614 
2615 	if (xbdev && xbdev->state == XenbusStateClosing) {
2616 		/* pending switch to state closed */
2617 		dev_info(disk_to_dev(bdev->bd_disk), "releasing disk\n");
2618 		xlvbd_release_gendisk(info);
2619 		xenbus_frontend_closed(info->xbdev);
2620 	}
2621 
2622 	mutex_unlock(&info->mutex);
2623 
2624 	if (!xbdev) {
2625 		/* sudden device removal */
2626 		dev_info(disk_to_dev(bdev->bd_disk), "releasing disk\n");
2627 		xlvbd_release_gendisk(info);
2628 		disk->private_data = NULL;
2629 		kfree(info);
2630 	}
2631 
2632 out:
2633 	bdput(bdev);
2634 out_mutex:
2635 	mutex_unlock(&blkfront_mutex);
2636 }
2637 
2638 static const struct block_device_operations xlvbd_block_fops =
2639 {
2640 	.owner = THIS_MODULE,
2641 	.open = blkif_open,
2642 	.release = blkif_release,
2643 	.getgeo = blkif_getgeo,
2644 	.ioctl = blkif_ioctl,
2645 };
2646 
2647 
2648 static const struct xenbus_device_id blkfront_ids[] = {
2649 	{ "vbd" },
2650 	{ "" }
2651 };
2652 
2653 static struct xenbus_driver blkfront_driver = {
2654 	.ids  = blkfront_ids,
2655 	.probe = blkfront_probe,
2656 	.remove = blkfront_remove,
2657 	.resume = blkfront_resume,
2658 	.otherend_changed = blkback_changed,
2659 	.is_ready = blkfront_is_ready,
2660 };
2661 
2662 static int __init xlblk_init(void)
2663 {
2664 	int ret;
2665 	int nr_cpus = num_online_cpus();
2666 
2667 	if (!xen_domain())
2668 		return -ENODEV;
2669 
2670 	if (xen_blkif_max_ring_order > XENBUS_MAX_RING_GRANT_ORDER) {
2671 		pr_info("Invalid max_ring_order (%d), will use default max: %d.\n",
2672 			xen_blkif_max_ring_order, XENBUS_MAX_RING_GRANT_ORDER);
2673 		xen_blkif_max_ring_order = XENBUS_MAX_RING_GRANT_ORDER;
2674 	}
2675 
2676 	if (xen_blkif_max_queues > nr_cpus) {
2677 		pr_info("Invalid max_queues (%d), will use default max: %d.\n",
2678 			xen_blkif_max_queues, nr_cpus);
2679 		xen_blkif_max_queues = nr_cpus;
2680 	}
2681 
2682 	if (!xen_has_pv_disk_devices())
2683 		return -ENODEV;
2684 
2685 	if (register_blkdev(XENVBD_MAJOR, DEV_NAME)) {
2686 		printk(KERN_WARNING "xen_blk: can't get major %d with name %s\n",
2687 		       XENVBD_MAJOR, DEV_NAME);
2688 		return -ENODEV;
2689 	}
2690 
2691 	ret = xenbus_register_frontend(&blkfront_driver);
2692 	if (ret) {
2693 		unregister_blkdev(XENVBD_MAJOR, DEV_NAME);
2694 		return ret;
2695 	}
2696 
2697 	return 0;
2698 }
2699 module_init(xlblk_init);
2700 
2701 
2702 static void __exit xlblk_exit(void)
2703 {
2704 	xenbus_unregister_driver(&blkfront_driver);
2705 	unregister_blkdev(XENVBD_MAJOR, DEV_NAME);
2706 	kfree(minors);
2707 }
2708 module_exit(xlblk_exit);
2709 
2710 MODULE_DESCRIPTION("Xen virtual block device frontend");
2711 MODULE_LICENSE("GPL");
2712 MODULE_ALIAS_BLOCKDEV_MAJOR(XENVBD_MAJOR);
2713 MODULE_ALIAS("xen:vbd");
2714 MODULE_ALIAS("xenblk");
2715