1 /******************************************************************************
2  *
3  * Back-end of the driver for virtual block devices. This portion of the
4  * driver exports a 'unified' block-device interface that can be accessed
5  * by any operating system that implements a compatible front end. A
6  * reference front-end implementation can be found in:
7  *  drivers/block/xen-blkfront.c
8  *
9  * Copyright (c) 2003-2004, Keir Fraser & Steve Hand
10  * Copyright (c) 2005, Christopher Clark
11  *
12  * This program is free software; you can redistribute it and/or
13  * modify it under the terms of the GNU General Public License version 2
14  * as published by the Free Software Foundation; or, when distributed
15  * separately from the Linux kernel or incorporated into other
16  * software packages, subject to the following license:
17  *
18  * Permission is hereby granted, free of charge, to any person obtaining a copy
19  * of this source file (the "Software"), to deal in the Software without
20  * restriction, including without limitation the rights to use, copy, modify,
21  * merge, publish, distribute, sublicense, and/or sell copies of the Software,
22  * and to permit persons to whom the Software is furnished to do so, subject to
23  * the following conditions:
24  *
25  * The above copyright notice and this permission notice shall be included in
26  * all copies or substantial portions of the Software.
27  *
28  * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
29  * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
30  * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
31  * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
32  * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
33  * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
34  * IN THE SOFTWARE.
35  */
36 
37 #include <linux/spinlock.h>
38 #include <linux/kthread.h>
39 #include <linux/list.h>
40 #include <linux/delay.h>
41 #include <linux/freezer.h>
42 #include <linux/bitmap.h>
43 
44 #include <xen/events.h>
45 #include <xen/page.h>
46 #include <xen/xen.h>
47 #include <asm/xen/hypervisor.h>
48 #include <asm/xen/hypercall.h>
49 #include <xen/balloon.h>
50 #include "common.h"
51 
52 /*
53  * Maximum number of unused free pages to keep in the internal buffer.
54  * Setting this to a value too low will reduce memory used in each backend,
55  * but can have a performance penalty.
56  *
57  * A sane value is xen_blkif_reqs * BLKIF_MAX_SEGMENTS_PER_REQUEST, but can
58  * be set to a lower value that might degrade performance on some intensive
59  * IO workloads.
60  */
61 
62 static int xen_blkif_max_buffer_pages = 1024;
63 module_param_named(max_buffer_pages, xen_blkif_max_buffer_pages, int, 0644);
MODULE_PARM_DESC(max_buffer_pages,
		 "Maximum number of free pages to keep in each block backend buffer");
66 
67 /*
68  * Maximum number of grants to map persistently in blkback. For maximum
69  * performance this should be the total numbers of grants that can be used
70  * to fill the ring, but since this might become too high, specially with
71  * the use of indirect descriptors, we set it to a value that provides good
72  * performance without using too much memory.
73  *
74  * When the list of persistent grants is full we clean it up using a LRU
75  * algorithm.
76  */
77 
78 static int xen_blkif_max_pgrants = 1056;
79 module_param_named(max_persistent_grants, xen_blkif_max_pgrants, int, 0644);
80 MODULE_PARM_DESC(max_persistent_grants,
81                  "Maximum number of grants to map persistently");
82 
83 /*
84  * The LRU mechanism to clean the lists of persistent grants needs to
85  * be executed periodically. The time interval between consecutive executions
86  * of the purge mechanism is set in ms.
87  */
88 #define LRU_INTERVAL 100
89 
90 /*
91  * When the persistent grants list is full we will remove unused grants
92  * from the list. The percent number of grants to be removed at each LRU
93  * execution.
94  */
95 #define LRU_PERCENT_CLEAN 5
96 
/* Run-time switchable: /sys/module/xen_blkback/parameters/ */
static unsigned int log_stats;
module_param(log_stats, uint, 0644);
100 
101 #define BLKBACK_INVALID_HANDLE (~0)
102 
103 /* Number of free pages to remove on each call to free_xenballooned_pages */
104 #define NUM_BATCH_FREE_PAGES 10
105 
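/*
 * Pop one page from the per-backend pool of free pages, falling back to
 * the balloon allocator when the pool is empty. Returns 0 on success.
 */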
106 static inline int get_free_page(struct xen_blkif *blkif, struct page **page)
107 {
108 	unsigned long flags;
109 
110 	spin_lock_irqsave(&blkif->free_pages_lock, flags);
111 	if (list_empty(&blkif->free_pages)) {
112 		BUG_ON(blkif->free_pages_num != 0);
113 		spin_unlock_irqrestore(&blkif->free_pages_lock, flags);
114 		return alloc_xenballooned_pages(1, page, false);
115 	}
116 	BUG_ON(blkif->free_pages_num == 0);
117 	page[0] = list_first_entry(&blkif->free_pages, struct page, lru);
118 	list_del(&page[0]->lru);
119 	blkif->free_pages_num--;
120 	spin_unlock_irqrestore(&blkif->free_pages_lock, flags);
121 
122 	return 0;
123 }
124 
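/* Return a batch of pages to the per-backend pool of free pages. */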
125 static inline void put_free_pages(struct xen_blkif *blkif, struct page **page,
126                                   int num)
127 {
128 	unsigned long flags;
129 	int i;
130 
131 	spin_lock_irqsave(&blkif->free_pages_lock, flags);
132 	for (i = 0; i < num; i++)
133 		list_add(&page[i]->lru, &blkif->free_pages);
134 	blkif->free_pages_num += num;
135 	spin_unlock_irqrestore(&blkif->free_pages_lock, flags);
136 }
137 
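/*
 * Hand ballooned pages back until at most 'num' remain in the pool,
 * releasing them in batches of NUM_BATCH_FREE_PAGES.
 */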
138 static inline void shrink_free_pagepool(struct xen_blkif *blkif, int num)
139 {
140 	/* Remove requested pages in batches of NUM_BATCH_FREE_PAGES */
141 	struct page *page[NUM_BATCH_FREE_PAGES];
142 	unsigned int num_pages = 0;
143 	unsigned long flags;
144 
145 	spin_lock_irqsave(&blkif->free_pages_lock, flags);
146 	while (blkif->free_pages_num > num) {
147 		BUG_ON(list_empty(&blkif->free_pages));
148 		page[num_pages] = list_first_entry(&blkif->free_pages,
149 		                                   struct page, lru);
150 		list_del(&page[num_pages]->lru);
151 		blkif->free_pages_num--;
152 		if (++num_pages == NUM_BATCH_FREE_PAGES) {
153 			spin_unlock_irqrestore(&blkif->free_pages_lock, flags);
154 			free_xenballooned_pages(num_pages, page);
155 			spin_lock_irqsave(&blkif->free_pages_lock, flags);
156 			num_pages = 0;
157 		}
158 	}
159 	spin_unlock_irqrestore(&blkif->free_pages_lock, flags);
160 	if (num_pages != 0)
161 		free_xenballooned_pages(num_pages, page);
162 }
163 
164 #define vaddr(page) ((unsigned long)pfn_to_kaddr(page_to_pfn(page)))
165 
166 static int do_block_io_op(struct xen_blkif *blkif);
167 static int dispatch_rw_block_io(struct xen_blkif *blkif,
168 				struct blkif_request *req,
169 				struct pending_req *pending_req);
170 static void make_response(struct xen_blkif *blkif, u64 id,
171 			  unsigned short op, int st);
172 
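/*
 * Iterate over a red-black tree of grants while it is safe to erase and
 * free the current entry: the next node is fetched before the loop body
 * runs, in the spirit of list_for_each_entry_safe.
 */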
173 #define foreach_grant_safe(pos, n, rbtree, node) \
174 	for ((pos) = container_of(rb_first((rbtree)), typeof(*(pos)), node), \
175 	     (n) = (&(pos)->node != NULL) ? rb_next(&(pos)->node) : NULL; \
176 	     &(pos)->node != NULL; \
177 	     (pos) = container_of(n, typeof(*(pos)), node), \
178 	     (n) = (&(pos)->node != NULL) ? rb_next(&(pos)->node) : NULL)
180 
181 /*
182  * We don't need locking around the persistent grant helpers
183  * because blkback uses a single-thread for each backed, so we
184  * can be sure that this functions will never be called recursively.
185  *
186  * The only exception to that is put_persistent_grant, that can be called
187  * from interrupt context (by xen_blkbk_unmap), so we have to use atomic
188  * bit operations to modify the flags of a persistent grant and to count
189  * the number of used grants.
190  */
191 static int add_persistent_gnt(struct xen_blkif *blkif,
192 			       struct persistent_gnt *persistent_gnt)
193 {
194 	struct rb_node **new = NULL, *parent = NULL;
195 	struct persistent_gnt *this;
196 
197 	if (blkif->persistent_gnt_c >= xen_blkif_max_pgrants) {
198 		if (!blkif->vbd.overflow_max_grants)
199 			blkif->vbd.overflow_max_grants = 1;
200 		return -EBUSY;
201 	}
202 	/* Figure out where to put new node */
203 	new = &blkif->persistent_gnts.rb_node;
204 	while (*new) {
205 		this = container_of(*new, struct persistent_gnt, node);
206 
207 		parent = *new;
208 		if (persistent_gnt->gnt < this->gnt)
209 			new = &((*new)->rb_left);
210 		else if (persistent_gnt->gnt > this->gnt)
211 			new = &((*new)->rb_right);
212 		else {
213 			pr_alert_ratelimited(DRV_PFX " trying to add a gref that's already in the tree\n");
214 			return -EINVAL;
215 		}
216 	}
217 
218 	bitmap_zero(persistent_gnt->flags, PERSISTENT_GNT_FLAGS_SIZE);
219 	set_bit(PERSISTENT_GNT_ACTIVE, persistent_gnt->flags);
220 	/* Add new node and rebalance tree. */
221 	rb_link_node(&(persistent_gnt->node), parent, new);
222 	rb_insert_color(&(persistent_gnt->node), &blkif->persistent_gnts);
223 	blkif->persistent_gnt_c++;
224 	atomic_inc(&blkif->persistent_gnt_in_use);
225 	return 0;
226 }
227 
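/*
 * Look up a grant reference in the tree of persistent grants. On success
 * the grant is marked active and the in-use counter is incremented; NULL
 * is returned if the gref is not in the tree or is already in use.
 */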
228 static struct persistent_gnt *get_persistent_gnt(struct xen_blkif *blkif,
229 						 grant_ref_t gref)
230 {
231 	struct persistent_gnt *data;
232 	struct rb_node *node = NULL;
233 
234 	node = blkif->persistent_gnts.rb_node;
235 	while (node) {
236 		data = container_of(node, struct persistent_gnt, node);
237 
238 		if (gref < data->gnt)
239 			node = node->rb_left;
240 		else if (gref > data->gnt)
241 			node = node->rb_right;
242 		else {
			if (test_bit(PERSISTENT_GNT_ACTIVE, data->flags)) {
244 				pr_alert_ratelimited(DRV_PFX " requesting a grant already in use\n");
245 				return NULL;
246 			}
247 			set_bit(PERSISTENT_GNT_ACTIVE, data->flags);
248 			atomic_inc(&blkif->persistent_gnt_in_use);
249 			return data;
250 		}
251 	}
252 	return NULL;
253 }
254 
255 static void put_persistent_gnt(struct xen_blkif *blkif,
256                                struct persistent_gnt *persistent_gnt)
257 {
	if (!test_bit(PERSISTENT_GNT_ACTIVE, persistent_gnt->flags))
		pr_alert_ratelimited(DRV_PFX " freeing a grant already unused\n");
260 	set_bit(PERSISTENT_GNT_WAS_ACTIVE, persistent_gnt->flags);
261 	clear_bit(PERSISTENT_GNT_ACTIVE, persistent_gnt->flags);
262 	atomic_dec(&blkif->persistent_gnt_in_use);
263 }
264 
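/*
 * Unmap and free all persistent grants in 'root' (used on shutdown),
 * unmapping in batches of BLKIF_MAX_SEGMENTS_PER_REQUEST and returning
 * the pages to the free pool.
 */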
265 static void free_persistent_gnts(struct xen_blkif *blkif, struct rb_root *root,
266                                  unsigned int num)
267 {
268 	struct gnttab_unmap_grant_ref unmap[BLKIF_MAX_SEGMENTS_PER_REQUEST];
269 	struct page *pages[BLKIF_MAX_SEGMENTS_PER_REQUEST];
270 	struct persistent_gnt *persistent_gnt;
271 	struct rb_node *n;
272 	int ret = 0;
273 	int segs_to_unmap = 0;
274 
275 	foreach_grant_safe(persistent_gnt, n, root, node) {
276 		BUG_ON(persistent_gnt->handle ==
277 			BLKBACK_INVALID_HANDLE);
278 		gnttab_set_unmap_op(&unmap[segs_to_unmap],
279 			(unsigned long) pfn_to_kaddr(page_to_pfn(
280 				persistent_gnt->page)),
281 			GNTMAP_host_map,
282 			persistent_gnt->handle);
283 
284 		pages[segs_to_unmap] = persistent_gnt->page;
285 
286 		if (++segs_to_unmap == BLKIF_MAX_SEGMENTS_PER_REQUEST ||
287 			!rb_next(&persistent_gnt->node)) {
288 			ret = gnttab_unmap_refs(unmap, pages, segs_to_unmap);
289 			BUG_ON(ret);
290 			put_free_pages(blkif, pages, segs_to_unmap);
291 			segs_to_unmap = 0;
292 		}
293 
294 		rb_erase(&persistent_gnt->node, root);
295 		kfree(persistent_gnt);
296 		num--;
297 	}
298 	BUG_ON(num != 0);
299 }
300 
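/*
 * Deferred work scheduled by purge_persistent_gnt: unmap the grants that
 * were moved onto persistent_purge_list and recycle their pages.
 */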
301 static void unmap_purged_grants(struct work_struct *work)
302 {
303 	struct gnttab_unmap_grant_ref unmap[BLKIF_MAX_SEGMENTS_PER_REQUEST];
304 	struct page *pages[BLKIF_MAX_SEGMENTS_PER_REQUEST];
305 	struct persistent_gnt *persistent_gnt;
306 	int ret, segs_to_unmap = 0;
307 	struct xen_blkif *blkif = container_of(work, typeof(*blkif), persistent_purge_work);
308 
	while (!list_empty(&blkif->persistent_purge_list)) {
310 		persistent_gnt = list_first_entry(&blkif->persistent_purge_list,
311 		                                  struct persistent_gnt,
312 		                                  remove_node);
313 		list_del(&persistent_gnt->remove_node);
314 
315 		gnttab_set_unmap_op(&unmap[segs_to_unmap],
316 			vaddr(persistent_gnt->page),
317 			GNTMAP_host_map,
318 			persistent_gnt->handle);
319 
320 		pages[segs_to_unmap] = persistent_gnt->page;
321 
322 		if (++segs_to_unmap == BLKIF_MAX_SEGMENTS_PER_REQUEST) {
323 			ret = gnttab_unmap_refs(unmap, pages, segs_to_unmap);
324 			BUG_ON(ret);
325 			put_free_pages(blkif, pages, segs_to_unmap);
326 			segs_to_unmap = 0;
327 		}
328 		kfree(persistent_gnt);
329 	}
330 	if (segs_to_unmap > 0) {
331 		ret = gnttab_unmap_refs(unmap, pages, segs_to_unmap);
332 		BUG_ON(ret);
333 		put_free_pages(blkif, pages, segs_to_unmap);
334 	}
335 }
336 
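/*
 * LRU cleaning of the persistent grant tree, in up to three passes:
 * first remove inactive grants that were not used since the last purge,
 * then (if still short) any inactive grant, and finally clear the
 * WAS_ACTIVE flag on the survivors for the next purge.
 */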
337 static void purge_persistent_gnt(struct xen_blkif *blkif)
338 {
339 	struct persistent_gnt *persistent_gnt;
340 	struct rb_node *n;
341 	unsigned int num_clean, total;
342 	bool scan_used = false, clean_used = false;
343 	struct rb_root *root;
344 
345 	if (blkif->persistent_gnt_c < xen_blkif_max_pgrants ||
346 	    (blkif->persistent_gnt_c == xen_blkif_max_pgrants &&
347 	    !blkif->vbd.overflow_max_grants)) {
348 		return;
349 	}
350 
351 	if (work_pending(&blkif->persistent_purge_work)) {
352 		pr_alert_ratelimited(DRV_PFX "Scheduled work from previous purge is still pending, cannot purge list\n");
353 		return;
354 	}
355 
356 	num_clean = (xen_blkif_max_pgrants / 100) * LRU_PERCENT_CLEAN;
357 	num_clean = blkif->persistent_gnt_c - xen_blkif_max_pgrants + num_clean;
358 	num_clean = min(blkif->persistent_gnt_c, num_clean);
359 	if ((num_clean == 0) ||
360 	    (num_clean > (blkif->persistent_gnt_c - atomic_read(&blkif->persistent_gnt_in_use))))
361 		return;
362 
363 	/*
364 	 * At this point, we can assure that there will be no calls
365          * to get_persistent_grant (because we are executing this code from
366          * xen_blkif_schedule), there can only be calls to put_persistent_gnt,
367          * which means that the number of currently used grants will go down,
368          * but never up, so we will always be able to remove the requested
369          * number of grants.
370 	 */
371 
372 	total = num_clean;
373 
374 	pr_debug(DRV_PFX "Going to purge %u persistent grants\n", num_clean);
375 
376 	INIT_LIST_HEAD(&blkif->persistent_purge_list);
377 	root = &blkif->persistent_gnts;
378 purge_list:
379 	foreach_grant_safe(persistent_gnt, n, root, node) {
380 		BUG_ON(persistent_gnt->handle ==
381 			BLKBACK_INVALID_HANDLE);
382 
383 		if (clean_used) {
384 			clear_bit(PERSISTENT_GNT_WAS_ACTIVE, persistent_gnt->flags);
385 			continue;
386 		}
387 
388 		if (test_bit(PERSISTENT_GNT_ACTIVE, persistent_gnt->flags))
389 			continue;
390 		if (!scan_used &&
391 		    (test_bit(PERSISTENT_GNT_WAS_ACTIVE, persistent_gnt->flags)))
392 			continue;
393 
394 		rb_erase(&persistent_gnt->node, root);
395 		list_add(&persistent_gnt->remove_node,
396 		         &blkif->persistent_purge_list);
397 		if (--num_clean == 0)
398 			goto finished;
399 	}
400 	/*
401 	 * If we get here it means we also need to start cleaning
402 	 * grants that were used since last purge in order to cope
403 	 * with the requested num
404 	 */
405 	if (!scan_used && !clean_used) {
406 		pr_debug(DRV_PFX "Still missing %u purged frames\n", num_clean);
407 		scan_used = true;
408 		goto purge_list;
409 	}
410 finished:
411 	if (!clean_used) {
412 		pr_debug(DRV_PFX "Finished scanning for grants to clean, removing used flag\n");
413 		clean_used = true;
414 		goto purge_list;
415 	}
416 
417 	blkif->persistent_gnt_c -= (total - num_clean);
418 	blkif->vbd.overflow_max_grants = 0;
419 
420 	/* We can defer this work */
421 	INIT_WORK(&blkif->persistent_purge_work, unmap_purged_grants);
422 	schedule_work(&blkif->persistent_purge_work);
423 	pr_debug(DRV_PFX "Purged %u/%u\n", (total - num_clean), total);
424 	return;
425 }
426 
427 /*
428  * Retrieve from the 'pending_reqs' a free pending_req structure to be used.
429  */
430 static struct pending_req *alloc_req(struct xen_blkif *blkif)
431 {
432 	struct pending_req *req = NULL;
433 	unsigned long flags;
434 
435 	spin_lock_irqsave(&blkif->pending_free_lock, flags);
436 	if (!list_empty(&blkif->pending_free)) {
437 		req = list_entry(blkif->pending_free.next, struct pending_req,
438 				 free_list);
439 		list_del(&req->free_list);
440 	}
441 	spin_unlock_irqrestore(&blkif->pending_free_lock, flags);
442 	return req;
443 }
444 
445 /*
446  * Return the 'pending_req' structure back to the freepool. We also
447  * wake up the thread if it was waiting for a free page.
448  */
449 static void free_req(struct xen_blkif *blkif, struct pending_req *req)
450 {
451 	unsigned long flags;
452 	int was_empty;
453 
454 	spin_lock_irqsave(&blkif->pending_free_lock, flags);
455 	was_empty = list_empty(&blkif->pending_free);
456 	list_add(&req->free_list, &blkif->pending_free);
457 	spin_unlock_irqrestore(&blkif->pending_free_lock, flags);
458 	if (was_empty)
459 		wake_up(&blkif->pending_free_wq);
460 }
461 
462 /*
463  * Routines for managing virtual block devices (vbds).
464  */
465 static int xen_vbd_translate(struct phys_req *req, struct xen_blkif *blkif,
466 			     int operation)
467 {
468 	struct xen_vbd *vbd = &blkif->vbd;
469 	int rc = -EACCES;
470 
471 	if ((operation != READ) && vbd->readonly)
472 		goto out;
473 
474 	if (likely(req->nr_sects)) {
475 		blkif_sector_t end = req->sector_number + req->nr_sects;
476 
477 		if (unlikely(end < req->sector_number))
478 			goto out;
479 		if (unlikely(end > vbd_sz(vbd)))
480 			goto out;
481 	}
482 
483 	req->dev  = vbd->pdevice;
484 	req->bdev = vbd->bdev;
485 	rc = 0;
486 
487  out:
488 	return rc;
489 }
490 
491 static void xen_vbd_resize(struct xen_blkif *blkif)
492 {
493 	struct xen_vbd *vbd = &blkif->vbd;
494 	struct xenbus_transaction xbt;
495 	int err;
496 	struct xenbus_device *dev = xen_blkbk_xenbus(blkif->be);
497 	unsigned long long new_size = vbd_sz(vbd);
498 
499 	pr_info(DRV_PFX "VBD Resize: Domid: %d, Device: (%d, %d)\n",
500 		blkif->domid, MAJOR(vbd->pdevice), MINOR(vbd->pdevice));
501 	pr_info(DRV_PFX "VBD Resize: new size %llu\n", new_size);
502 	vbd->size = new_size;
503 again:
504 	err = xenbus_transaction_start(&xbt);
505 	if (err) {
		pr_warn(DRV_PFX "Error starting transaction\n");
507 		return;
508 	}
509 	err = xenbus_printf(xbt, dev->nodename, "sectors", "%llu",
510 			    (unsigned long long)vbd_sz(vbd));
511 	if (err) {
		pr_warn(DRV_PFX "Error writing new size\n");
513 		goto abort;
514 	}
515 	/*
516 	 * Write the current state; we will use this to synchronize
517 	 * the front-end. If the current state is "connected" the
518 	 * front-end will get the new size information online.
519 	 */
520 	err = xenbus_printf(xbt, dev->nodename, "state", "%d", dev->state);
521 	if (err) {
		pr_warn(DRV_PFX "Error writing the state\n");
523 		goto abort;
524 	}
525 
526 	err = xenbus_transaction_end(xbt, 0);
527 	if (err == -EAGAIN)
528 		goto again;
529 	if (err)
		pr_warn(DRV_PFX "Error ending transaction\n");
531 	return;
532 abort:
533 	xenbus_transaction_end(xbt, 1);
534 }
535 
536 /*
537  * Notification from the guest OS.
538  */
539 static void blkif_notify_work(struct xen_blkif *blkif)
540 {
541 	blkif->waiting_reqs = 1;
542 	wake_up(&blkif->wq);
543 }
544 
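/* Interrupt handler for the event channel: kick the backend thread. */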
545 irqreturn_t xen_blkif_be_int(int irq, void *dev_id)
546 {
547 	blkif_notify_work(dev_id);
548 	return IRQ_HANDLED;
549 }
550 
551 /*
552  * SCHEDULER FUNCTIONS
553  */
554 
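/* Log the per-backend request counters, then reset them. */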
555 static void print_stats(struct xen_blkif *blkif)
556 {
557 	pr_info("xen-blkback (%s): oo %3llu  |  rd %4llu  |  wr %4llu  |  f %4llu"
558 		 "  |  ds %4llu | pg: %4u/%4d\n",
559 		 current->comm, blkif->st_oo_req,
560 		 blkif->st_rd_req, blkif->st_wr_req,
561 		 blkif->st_f_req, blkif->st_ds_req,
562 		 blkif->persistent_gnt_c,
563 		 xen_blkif_max_pgrants);
564 	blkif->st_print = jiffies + msecs_to_jiffies(10 * 1000);
565 	blkif->st_rd_req = 0;
566 	blkif->st_wr_req = 0;
567 	blkif->st_oo_req = 0;
568 	blkif->st_ds_req = 0;
569 }
570 
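/*
 * Main loop of the per-backend kernel thread: wait for requests, process
 * the ring, periodically purge the persistent grants and shrink the free
 * page pool, and release everything when asked to stop.
 */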
571 int xen_blkif_schedule(void *arg)
572 {
573 	struct xen_blkif *blkif = arg;
574 	struct xen_vbd *vbd = &blkif->vbd;
575 	unsigned long timeout;
576 	int ret;
577 
578 	xen_blkif_get(blkif);
579 
580 	while (!kthread_should_stop()) {
581 		if (try_to_freeze())
582 			continue;
583 		if (unlikely(vbd->size != vbd_sz(vbd)))
584 			xen_vbd_resize(blkif);
585 
586 		timeout = msecs_to_jiffies(LRU_INTERVAL);
587 
588 		timeout = wait_event_interruptible_timeout(
589 			blkif->wq,
590 			blkif->waiting_reqs || kthread_should_stop(),
591 			timeout);
592 		if (timeout == 0)
593 			goto purge_gnt_list;
594 		timeout = wait_event_interruptible_timeout(
595 			blkif->pending_free_wq,
596 			!list_empty(&blkif->pending_free) ||
597 			kthread_should_stop(),
598 			timeout);
599 		if (timeout == 0)
600 			goto purge_gnt_list;
601 
602 		blkif->waiting_reqs = 0;
603 		smp_mb(); /* clear flag *before* checking for work */
604 
605 		ret = do_block_io_op(blkif);
606 		if (ret > 0)
607 			blkif->waiting_reqs = 1;
608 		if (ret == -EACCES)
609 			wait_event_interruptible(blkif->shutdown_wq,
610 						 kthread_should_stop());
611 
612 purge_gnt_list:
613 		if (blkif->vbd.feature_gnt_persistent &&
614 		    time_after(jiffies, blkif->next_lru)) {
615 			purge_persistent_gnt(blkif);
616 			blkif->next_lru = jiffies + msecs_to_jiffies(LRU_INTERVAL);
617 		}
618 
619 		/* Shrink if we have more than xen_blkif_max_buffer_pages */
620 		shrink_free_pagepool(blkif, xen_blkif_max_buffer_pages);
621 
622 		if (log_stats && time_after(jiffies, blkif->st_print))
623 			print_stats(blkif);
624 	}
625 
	/* Since we are shutting down, remove all pages from the buffer. */
627 	shrink_free_pagepool(blkif, 0 /* All */);
628 
629 	/* Free all persistent grant pages */
630 	if (!RB_EMPTY_ROOT(&blkif->persistent_gnts))
631 		free_persistent_gnts(blkif, &blkif->persistent_gnts,
632 			blkif->persistent_gnt_c);
633 
634 	BUG_ON(!RB_EMPTY_ROOT(&blkif->persistent_gnts));
635 	blkif->persistent_gnt_c = 0;
636 
637 	if (log_stats)
638 		print_stats(blkif);
639 
640 	blkif->xenblkd = NULL;
641 	xen_blkif_put(blkif);
642 
643 	return 0;
644 }
645 
646 /*
647  * Unmap the grant references, and also remove the M2P over-rides
648  * used in the 'pending_req'.
649  */
650 static void xen_blkbk_unmap(struct xen_blkif *blkif,
651                             struct grant_page *pages[],
652                             int num)
653 {
654 	struct gnttab_unmap_grant_ref unmap[BLKIF_MAX_SEGMENTS_PER_REQUEST];
655 	struct page *unmap_pages[BLKIF_MAX_SEGMENTS_PER_REQUEST];
656 	unsigned int i, invcount = 0;
657 	int ret;
658 
659 	for (i = 0; i < num; i++) {
660 		if (pages[i]->persistent_gnt != NULL) {
661 			put_persistent_gnt(blkif, pages[i]->persistent_gnt);
662 			continue;
663 		}
664 		if (pages[i]->handle == BLKBACK_INVALID_HANDLE)
665 			continue;
666 		unmap_pages[invcount] = pages[i]->page;
667 		gnttab_set_unmap_op(&unmap[invcount], vaddr(pages[i]->page),
668 				    GNTMAP_host_map, pages[i]->handle);
669 		pages[i]->handle = BLKBACK_INVALID_HANDLE;
670 		if (++invcount == BLKIF_MAX_SEGMENTS_PER_REQUEST) {
671 			ret = gnttab_unmap_refs(unmap, unmap_pages, invcount);
672 			BUG_ON(ret);
673 			put_free_pages(blkif, unmap_pages, invcount);
674 			invcount = 0;
675 		}
676 	}
677 	if (invcount) {
678 		ret = gnttab_unmap_refs(unmap, unmap_pages, invcount);
679 		BUG_ON(ret);
680 		put_free_pages(blkif, unmap_pages, invcount);
681 	}
682 }
683 
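/*
 * Map the grant references of a request in batches of up to
 * BLKIF_MAX_SEGMENTS_PER_REQUEST. Already-mapped persistent grants are
 * reused; newly mapped grants are added to the persistent tree when
 * persistent grants are enabled and there is still room for them.
 */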
684 static int xen_blkbk_map(struct xen_blkif *blkif,
685 			 struct grant_page *pages[],
686 			 int num, bool ro)
687 {
688 	struct gnttab_map_grant_ref map[BLKIF_MAX_SEGMENTS_PER_REQUEST];
689 	struct page *pages_to_gnt[BLKIF_MAX_SEGMENTS_PER_REQUEST];
690 	struct persistent_gnt *persistent_gnt = NULL;
691 	phys_addr_t addr = 0;
692 	int i, seg_idx, new_map_idx;
693 	int segs_to_map = 0;
694 	int ret = 0;
695 	int last_map = 0, map_until = 0;
696 	int use_persistent_gnts;
697 
698 	use_persistent_gnts = (blkif->vbd.feature_gnt_persistent);
699 
700 	/*
701 	 * Fill out preq.nr_sects with proper amount of sectors, and setup
702 	 * assign map[..] with the PFN of the page in our domain with the
703 	 * corresponding grant reference for each page.
704 	 */
705 again:
706 	for (i = map_until; i < num; i++) {
707 		uint32_t flags;
708 
709 		if (use_persistent_gnts)
710 			persistent_gnt = get_persistent_gnt(
711 				blkif,
712 				pages[i]->gref);
713 
714 		if (persistent_gnt) {
715 			/*
716 			 * We are using persistent grants and
717 			 * the grant is already mapped
718 			 */
719 			pages[i]->page = persistent_gnt->page;
720 			pages[i]->persistent_gnt = persistent_gnt;
721 		} else {
722 			if (get_free_page(blkif, &pages[i]->page))
723 				goto out_of_memory;
724 			addr = vaddr(pages[i]->page);
725 			pages_to_gnt[segs_to_map] = pages[i]->page;
726 			pages[i]->persistent_gnt = NULL;
727 			flags = GNTMAP_host_map;
728 			if (!use_persistent_gnts && ro)
729 				flags |= GNTMAP_readonly;
730 			gnttab_set_map_op(&map[segs_to_map++], addr,
731 					  flags, pages[i]->gref,
732 					  blkif->domid);
733 		}
734 		map_until = i + 1;
735 		if (segs_to_map == BLKIF_MAX_SEGMENTS_PER_REQUEST)
736 			break;
737 	}
738 
739 	if (segs_to_map) {
740 		ret = gnttab_map_refs(map, pages_to_gnt, segs_to_map);
741 		BUG_ON(ret);
742 	}
743 
744 	/*
745 	 * Now swizzle the MFN in our domain with the MFN from the other domain
746 	 * so that when we access vaddr(pending_req,i) it has the contents of
747 	 * the page from the other domain.
748 	 */
749 	for (seg_idx = last_map, new_map_idx = 0; seg_idx < map_until; seg_idx++) {
750 		if (!pages[seg_idx]->persistent_gnt) {
751 			/* This is a newly mapped grant */
752 			BUG_ON(new_map_idx >= segs_to_map);
753 			if (unlikely(map[new_map_idx].status != 0)) {
754 				pr_debug(DRV_PFX "invalid buffer -- could not remap it\n");
755 				pages[seg_idx]->handle = BLKBACK_INVALID_HANDLE;
756 				ret |= 1;
757 				goto next;
758 			}
759 			pages[seg_idx]->handle = map[new_map_idx].handle;
760 		} else {
761 			continue;
762 		}
763 		if (use_persistent_gnts &&
764 		    blkif->persistent_gnt_c < xen_blkif_max_pgrants) {
765 			/*
766 			 * We are using persistent grants, the grant is
767 			 * not mapped but we might have room for it.
768 			 */
769 			persistent_gnt = kmalloc(sizeof(struct persistent_gnt),
770 				                 GFP_KERNEL);
771 			if (!persistent_gnt) {
772 				/*
773 				 * If we don't have enough memory to
774 				 * allocate the persistent_gnt struct
775 				 * map this grant non-persistenly
776 				 */
777 				goto next;
778 			}
779 			persistent_gnt->gnt = map[new_map_idx].ref;
780 			persistent_gnt->handle = map[new_map_idx].handle;
781 			persistent_gnt->page = pages[seg_idx]->page;
782 			if (add_persistent_gnt(blkif,
783 			                       persistent_gnt)) {
784 				kfree(persistent_gnt);
785 				persistent_gnt = NULL;
786 				goto next;
787 			}
788 			pages[seg_idx]->persistent_gnt = persistent_gnt;
789 			pr_debug(DRV_PFX " grant %u added to the tree of persistent grants, using %u/%u\n",
790 				 persistent_gnt->gnt, blkif->persistent_gnt_c,
791 				 xen_blkif_max_pgrants);
792 			goto next;
793 		}
794 		if (use_persistent_gnts && !blkif->vbd.overflow_max_grants) {
795 			blkif->vbd.overflow_max_grants = 1;
796 			pr_debug(DRV_PFX " domain %u, device %#x is using maximum number of persistent grants\n",
797 			         blkif->domid, blkif->vbd.handle);
798 		}
799 		/*
800 		 * We could not map this grant persistently, so use it as
801 		 * a non-persistent grant.
802 		 */
803 next:
804 		new_map_idx++;
805 	}
806 	segs_to_map = 0;
807 	last_map = map_until;
808 	if (map_until != num)
809 		goto again;
810 
811 	return ret;
812 
813 out_of_memory:
814 	pr_alert(DRV_PFX "%s: out of memory\n", __func__);
815 	put_free_pages(blkif, pages_to_gnt, segs_to_map);
816 	return -ENOMEM;
817 }
818 
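/*
 * Map the data pages of a request. Anything other than a read only needs
 * the guest pages mapped read-only (persistent grants, when in use, are
 * always mapped writable).
 */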
819 static int xen_blkbk_map_seg(struct pending_req *pending_req)
820 {
821 	int rc;
822 
823 	rc = xen_blkbk_map(pending_req->blkif, pending_req->segments,
824 			   pending_req->nr_pages,
825 	                   (pending_req->operation != BLKIF_OP_READ));
826 
827 	return rc;
828 }
829 
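/*
 * Map the indirect descriptor pages of a BLKIF_OP_INDIRECT request and
 * decode the segments they describe, validating each sector range.
 */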
830 static int xen_blkbk_parse_indirect(struct blkif_request *req,
831 				    struct pending_req *pending_req,
832 				    struct seg_buf seg[],
833 				    struct phys_req *preq)
834 {
835 	struct grant_page **pages = pending_req->indirect_pages;
836 	struct xen_blkif *blkif = pending_req->blkif;
837 	int indirect_grefs, rc, n, nseg, i;
838 	struct blkif_request_segment_aligned *segments = NULL;
839 
840 	nseg = pending_req->nr_pages;
841 	indirect_grefs = INDIRECT_PAGES(nseg);
842 	BUG_ON(indirect_grefs > BLKIF_MAX_INDIRECT_PAGES_PER_REQUEST);
843 
844 	for (i = 0; i < indirect_grefs; i++)
845 		pages[i]->gref = req->u.indirect.indirect_grefs[i];
846 
847 	rc = xen_blkbk_map(blkif, pages, indirect_grefs, true);
848 	if (rc)
849 		goto unmap;
850 
851 	for (n = 0, i = 0; n < nseg; n++) {
852 		if ((n % SEGS_PER_INDIRECT_FRAME) == 0) {
853 			/* Map indirect segments */
854 			if (segments)
855 				kunmap_atomic(segments);
856 			segments = kmap_atomic(pages[n/SEGS_PER_INDIRECT_FRAME]->page);
857 		}
858 		i = n % SEGS_PER_INDIRECT_FRAME;
859 		pending_req->segments[n]->gref = segments[i].gref;
860 		seg[n].nsec = segments[i].last_sect -
861 			segments[i].first_sect + 1;
862 		seg[n].offset = (segments[i].first_sect << 9);
863 		if ((segments[i].last_sect >= (PAGE_SIZE >> 9)) ||
864 		    (segments[i].last_sect < segments[i].first_sect)) {
865 			rc = -EINVAL;
866 			goto unmap;
867 		}
868 		preq->nr_sects += seg[n].nsec;
869 	}
870 
871 unmap:
872 	if (segments)
873 		kunmap_atomic(segments);
874 	xen_blkbk_unmap(blkif, pages, indirect_grefs);
875 	return rc;
876 }
877 
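/*
 * Handle a BLKIF_OP_DISCARD request: validate the range against the vbd
 * and forward it to blkdev_issue_discard, requesting a secure discard
 * when both backend and frontend support it.
 */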
878 static int dispatch_discard_io(struct xen_blkif *blkif,
879 				struct blkif_request *req)
880 {
881 	int err = 0;
882 	int status = BLKIF_RSP_OKAY;
883 	struct block_device *bdev = blkif->vbd.bdev;
884 	unsigned long secure;
885 	struct phys_req preq;
886 
887 	xen_blkif_get(blkif);
888 
889 	preq.sector_number = req->u.discard.sector_number;
890 	preq.nr_sects      = req->u.discard.nr_sectors;
891 
892 	err = xen_vbd_translate(&preq, blkif, WRITE);
893 	if (err) {
894 		pr_warn(DRV_PFX "access denied: DISCARD [%llu->%llu] on dev=%04x\n",
895 			preq.sector_number,
896 			preq.sector_number + preq.nr_sects, blkif->vbd.pdevice);
897 		goto fail_response;
898 	}
899 	blkif->st_ds_req++;
900 
901 	secure = (blkif->vbd.discard_secure &&
902 		 (req->u.discard.flag & BLKIF_DISCARD_SECURE)) ?
903 		 BLKDEV_DISCARD_SECURE : 0;
904 
905 	err = blkdev_issue_discard(bdev, req->u.discard.sector_number,
906 				   req->u.discard.nr_sectors,
907 				   GFP_KERNEL, secure);
908 fail_response:
909 	if (err == -EOPNOTSUPP) {
910 		pr_debug(DRV_PFX "discard op failed, not supported\n");
911 		status = BLKIF_RSP_EOPNOTSUPP;
912 	} else if (err)
913 		status = BLKIF_RSP_ERROR;
914 
915 	make_response(blkif, req->u.discard.id, req->operation, status);
916 	xen_blkif_put(blkif);
917 	return err;
918 }
919 
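/* Reject an unknown request type with BLKIF_RSP_EOPNOTSUPP. */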
920 static int dispatch_other_io(struct xen_blkif *blkif,
921 			     struct blkif_request *req,
922 			     struct pending_req *pending_req)
923 {
924 	free_req(blkif, pending_req);
925 	make_response(blkif, req->u.other.id, req->operation,
926 		      BLKIF_RSP_EOPNOTSUPP);
927 	return -EIO;
928 }
929 
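/*
 * Wait until all I/O outstanding on this backend has completed, i.e.
 * until the refcount drops to the two base references (or the thread is
 * asked to stop).
 */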
930 static void xen_blk_drain_io(struct xen_blkif *blkif)
931 {
932 	atomic_set(&blkif->drain, 1);
933 	do {
		/*
		 * The initial value is one, and one refcnt taken at the
		 * start of the xen_blkif_schedule thread.
		 */
936 		if (atomic_read(&blkif->refcnt) <= 2)
937 			break;
938 		wait_for_completion_interruptible_timeout(
939 				&blkif->drain_complete, HZ);
940 
941 		if (!atomic_read(&blkif->drain))
942 			break;
943 	} while (!kthread_should_stop());
944 	atomic_set(&blkif->drain, 0);
945 }
946 
947 /*
948  * Completion callback on the bio's. Called as bh->b_end_io()
949  */
950 
951 static void __end_block_io_op(struct pending_req *pending_req, int error)
952 {
953 	/* An error fails the entire request. */
954 	if ((pending_req->operation == BLKIF_OP_FLUSH_DISKCACHE) &&
955 	    (error == -EOPNOTSUPP)) {
956 		pr_debug(DRV_PFX "flush diskcache op failed, not supported\n");
957 		xen_blkbk_flush_diskcache(XBT_NIL, pending_req->blkif->be, 0);
958 		pending_req->status = BLKIF_RSP_EOPNOTSUPP;
959 	} else if ((pending_req->operation == BLKIF_OP_WRITE_BARRIER) &&
960 		    (error == -EOPNOTSUPP)) {
961 		pr_debug(DRV_PFX "write barrier op failed, not supported\n");
962 		xen_blkbk_barrier(XBT_NIL, pending_req->blkif->be, 0);
963 		pending_req->status = BLKIF_RSP_EOPNOTSUPP;
964 	} else if (error) {
965 		pr_debug(DRV_PFX "Buffer not up-to-date at end of operation,"
966 			 " error=%d\n", error);
967 		pending_req->status = BLKIF_RSP_ERROR;
968 	}
969 
970 	/*
971 	 * If all of the bio's have completed it is time to unmap
972 	 * the grant references associated with 'request' and provide
973 	 * the proper response on the ring.
974 	 */
975 	if (atomic_dec_and_test(&pending_req->pendcnt)) {
976 		xen_blkbk_unmap(pending_req->blkif,
977 		                pending_req->segments,
978 		                pending_req->nr_pages);
979 		make_response(pending_req->blkif, pending_req->id,
980 			      pending_req->operation, pending_req->status);
981 		xen_blkif_put(pending_req->blkif);
982 		if (atomic_read(&pending_req->blkif->refcnt) <= 2) {
983 			if (atomic_read(&pending_req->blkif->drain))
984 				complete(&pending_req->blkif->drain_complete);
985 		}
986 		free_req(pending_req->blkif, pending_req);
987 	}
988 }
989 
990 /*
991  * bio callback.
992  */
993 static void end_block_io_op(struct bio *bio, int error)
994 {
995 	__end_block_io_op(bio->bi_private, error);
996 	bio_put(bio);
997 }
1000 
1001 /*
1002  * Function to copy the from the ring buffer the 'struct blkif_request'
1003  * (which has the sectors we want, number of them, grant references, etc),
1004  * and transmute  it to the block API to hand it over to the proper block disk.
1005  */
1006 static int
1007 __do_block_io_op(struct xen_blkif *blkif)
1008 {
1009 	union blkif_back_rings *blk_rings = &blkif->blk_rings;
1010 	struct blkif_request req;
1011 	struct pending_req *pending_req;
1012 	RING_IDX rc, rp;
1013 	int more_to_do = 0;
1014 
1015 	rc = blk_rings->common.req_cons;
1016 	rp = blk_rings->common.sring->req_prod;
1017 	rmb(); /* Ensure we see queued requests up to 'rp'. */
1018 
1019 	if (RING_REQUEST_PROD_OVERFLOW(&blk_rings->common, rp)) {
1020 		rc = blk_rings->common.rsp_prod_pvt;
1021 		pr_warn(DRV_PFX "Frontend provided bogus ring requests (%d - %d = %d). Halting ring processing on dev=%04x\n",
1022 			rp, rc, rp - rc, blkif->vbd.pdevice);
1023 		return -EACCES;
1024 	}
1025 	while (rc != rp) {
1026 
1027 		if (RING_REQUEST_CONS_OVERFLOW(&blk_rings->common, rc))
1028 			break;
1029 
1030 		if (kthread_should_stop()) {
1031 			more_to_do = 1;
1032 			break;
1033 		}
1034 
1035 		pending_req = alloc_req(blkif);
1036 		if (NULL == pending_req) {
1037 			blkif->st_oo_req++;
1038 			more_to_do = 1;
1039 			break;
1040 		}
1041 
1042 		switch (blkif->blk_protocol) {
1043 		case BLKIF_PROTOCOL_NATIVE:
1044 			memcpy(&req, RING_GET_REQUEST(&blk_rings->native, rc), sizeof(req));
1045 			break;
1046 		case BLKIF_PROTOCOL_X86_32:
1047 			blkif_get_x86_32_req(&req, RING_GET_REQUEST(&blk_rings->x86_32, rc));
1048 			break;
1049 		case BLKIF_PROTOCOL_X86_64:
1050 			blkif_get_x86_64_req(&req, RING_GET_REQUEST(&blk_rings->x86_64, rc));
1051 			break;
1052 		default:
1053 			BUG();
1054 		}
1055 		blk_rings->common.req_cons = ++rc; /* before make_response() */
1056 
1057 		/* Apply all sanity checks to /private copy/ of request. */
1058 		barrier();
1059 
1060 		switch (req.operation) {
1061 		case BLKIF_OP_READ:
1062 		case BLKIF_OP_WRITE:
1063 		case BLKIF_OP_WRITE_BARRIER:
1064 		case BLKIF_OP_FLUSH_DISKCACHE:
1065 		case BLKIF_OP_INDIRECT:
1066 			if (dispatch_rw_block_io(blkif, &req, pending_req))
1067 				goto done;
1068 			break;
1069 		case BLKIF_OP_DISCARD:
1070 			free_req(blkif, pending_req);
1071 			if (dispatch_discard_io(blkif, &req))
1072 				goto done;
1073 			break;
1074 		default:
1075 			if (dispatch_other_io(blkif, &req, pending_req))
1076 				goto done;
1077 			break;
1078 		}
1079 
1080 		/* Yield point for this unbounded loop. */
1081 		cond_resched();
1082 	}
1083 done:
1084 	return more_to_do;
1085 }
1086 
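/*
 * Process ring requests until none are pending, using
 * RING_FINAL_CHECK_FOR_REQUESTS to close the race with the frontend
 * queueing a request just as we finish.
 */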
1087 static int
1088 do_block_io_op(struct xen_blkif *blkif)
1089 {
1090 	union blkif_back_rings *blk_rings = &blkif->blk_rings;
1091 	int more_to_do;
1092 
1093 	do {
1094 		more_to_do = __do_block_io_op(blkif);
1095 		if (more_to_do)
1096 			break;
1097 
1098 		RING_FINAL_CHECK_FOR_REQUESTS(&blk_rings->common, more_to_do);
1099 	} while (more_to_do);
1100 
1101 	return more_to_do;
1102 }
1103 /*
1104  * Transmutation of the 'struct blkif_request' to a proper 'struct bio'
1105  * and call the 'submit_bio' to pass it to the underlying storage.
1106  */
1107 static int dispatch_rw_block_io(struct xen_blkif *blkif,
1108 				struct blkif_request *req,
1109 				struct pending_req *pending_req)
1110 {
1111 	struct phys_req preq;
1112 	struct seg_buf *seg = pending_req->seg;
1113 	unsigned int nseg;
1114 	struct bio *bio = NULL;
1115 	struct bio **biolist = pending_req->biolist;
1116 	int i, nbio = 0;
1117 	int operation;
1118 	struct blk_plug plug;
1119 	bool drain = false;
1120 	struct grant_page **pages = pending_req->segments;
1121 	unsigned short req_operation;
1122 
1123 	req_operation = req->operation == BLKIF_OP_INDIRECT ?
1124 			req->u.indirect.indirect_op : req->operation;
1125 	if ((req->operation == BLKIF_OP_INDIRECT) &&
1126 	    (req_operation != BLKIF_OP_READ) &&
1127 	    (req_operation != BLKIF_OP_WRITE)) {
1128 		pr_debug(DRV_PFX "Invalid indirect operation (%u)\n",
1129 			 req_operation);
1130 		goto fail_response;
1131 	}
1132 
1133 	switch (req_operation) {
1134 	case BLKIF_OP_READ:
1135 		blkif->st_rd_req++;
1136 		operation = READ;
1137 		break;
1138 	case BLKIF_OP_WRITE:
1139 		blkif->st_wr_req++;
1140 		operation = WRITE_ODIRECT;
1141 		break;
	case BLKIF_OP_WRITE_BARRIER:
		drain = true;
		/* fall through */
	case BLKIF_OP_FLUSH_DISKCACHE:
1145 		blkif->st_f_req++;
1146 		operation = WRITE_FLUSH;
1147 		break;
	default:
		operation = 0; /* make gcc happy */
		goto fail_response;
1152 	}
1153 
1154 	/* Check that the number of segments is sane. */
1155 	nseg = req->operation == BLKIF_OP_INDIRECT ?
1156 	       req->u.indirect.nr_segments : req->u.rw.nr_segments;
1157 
1158 	if (unlikely(nseg == 0 && operation != WRITE_FLUSH) ||
1159 	    unlikely((req->operation != BLKIF_OP_INDIRECT) &&
1160 		     (nseg > BLKIF_MAX_SEGMENTS_PER_REQUEST)) ||
1161 	    unlikely((req->operation == BLKIF_OP_INDIRECT) &&
1162 		     (nseg > MAX_INDIRECT_SEGMENTS))) {
1163 		pr_debug(DRV_PFX "Bad number of segments in request (%d)\n",
1164 			 nseg);
		/* Haven't submitted any bios yet. */
1166 		goto fail_response;
1167 	}
1168 
1169 	preq.nr_sects      = 0;
1170 
1171 	pending_req->blkif     = blkif;
1172 	pending_req->id        = req->u.rw.id;
1173 	pending_req->operation = req_operation;
1174 	pending_req->status    = BLKIF_RSP_OKAY;
1175 	pending_req->nr_pages  = nseg;
1176 
1177 	if (req->operation != BLKIF_OP_INDIRECT) {
1178 		preq.dev               = req->u.rw.handle;
1179 		preq.sector_number     = req->u.rw.sector_number;
1180 		for (i = 0; i < nseg; i++) {
1181 			pages[i]->gref = req->u.rw.seg[i].gref;
1182 			seg[i].nsec = req->u.rw.seg[i].last_sect -
1183 				req->u.rw.seg[i].first_sect + 1;
1184 			seg[i].offset = (req->u.rw.seg[i].first_sect << 9);
1185 			if ((req->u.rw.seg[i].last_sect >= (PAGE_SIZE >> 9)) ||
1186 			    (req->u.rw.seg[i].last_sect <
1187 			     req->u.rw.seg[i].first_sect))
1188 				goto fail_response;
1189 			preq.nr_sects += seg[i].nsec;
1190 		}
1191 	} else {
1192 		preq.dev               = req->u.indirect.handle;
1193 		preq.sector_number     = req->u.indirect.sector_number;
1194 		if (xen_blkbk_parse_indirect(req, pending_req, seg, &preq))
1195 			goto fail_response;
1196 	}
1197 
1198 	if (xen_vbd_translate(&preq, blkif, operation) != 0) {
1199 		pr_debug(DRV_PFX "access denied: %s of [%llu,%llu] on dev=%04x\n",
1200 			 operation == READ ? "read" : "write",
1201 			 preq.sector_number,
1202 			 preq.sector_number + preq.nr_sects,
1203 			 blkif->vbd.pdevice);
1204 		goto fail_response;
1205 	}
1206 
1207 	/*
1208 	 * This check _MUST_ be done after xen_vbd_translate as the preq.bdev
1209 	 * is set there.
1210 	 */
1211 	for (i = 0; i < nseg; i++) {
1212 		if (((int)preq.sector_number|(int)seg[i].nsec) &
1213 		    ((bdev_logical_block_size(preq.bdev) >> 9) - 1)) {
			pr_debug(DRV_PFX "Misaligned I/O request from domain %d\n",
				 blkif->domid);
1216 			goto fail_response;
1217 		}
1218 	}
1219 
	/*
	 * Wait on all outstanding I/O's and once that has been completed
	 * issue the WRITE_FLUSH.
	 */
1223 	if (drain)
1224 		xen_blk_drain_io(pending_req->blkif);
1225 
1226 	/*
1227 	 * If we have failed at this point, we need to undo the M2P override,
1228 	 * set gnttab_set_unmap_op on all of the grant references and perform
1229 	 * the hypercall to unmap the grants - that is all done in
1230 	 * xen_blkbk_unmap.
1231 	 */
1232 	if (xen_blkbk_map_seg(pending_req))
1233 		goto fail_flush;
1234 
1235 	/*
1236 	 * This corresponding xen_blkif_put is done in __end_block_io_op, or
1237 	 * below (in "!bio") if we are handling a BLKIF_OP_DISCARD.
1238 	 */
1239 	xen_blkif_get(blkif);
1240 
1241 	for (i = 0; i < nseg; i++) {
1242 		while ((bio == NULL) ||
1243 		       (bio_add_page(bio,
1244 				     pages[i]->page,
1245 				     seg[i].nsec << 9,
1246 				     seg[i].offset) == 0)) {
1247 
1248 			int nr_iovecs = min_t(int, (nseg-i), BIO_MAX_PAGES);
1249 			bio = bio_alloc(GFP_KERNEL, nr_iovecs);
1250 			if (unlikely(bio == NULL))
1251 				goto fail_put_bio;
1252 
1253 			biolist[nbio++] = bio;
1254 			bio->bi_bdev    = preq.bdev;
1255 			bio->bi_private = pending_req;
1256 			bio->bi_end_io  = end_block_io_op;
1257 			bio->bi_iter.bi_sector  = preq.sector_number;
1258 		}
1259 
1260 		preq.sector_number += seg[i].nsec;
1261 	}
1262 
	/* This will be hit if the operation was a flush or barrier. */
1264 	if (!bio) {
1265 		BUG_ON(operation != WRITE_FLUSH);
1266 
1267 		bio = bio_alloc(GFP_KERNEL, 0);
1268 		if (unlikely(bio == NULL))
1269 			goto fail_put_bio;
1270 
1271 		biolist[nbio++] = bio;
1272 		bio->bi_bdev    = preq.bdev;
1273 		bio->bi_private = pending_req;
1274 		bio->bi_end_io  = end_block_io_op;
1275 	}
1276 
1277 	atomic_set(&pending_req->pendcnt, nbio);
1278 	blk_start_plug(&plug);
1279 
1280 	for (i = 0; i < nbio; i++)
1281 		submit_bio(operation, biolist[i]);
1282 
1283 	/* Let the I/Os go.. */
1284 	blk_finish_plug(&plug);
1285 
1286 	if (operation == READ)
1287 		blkif->st_rd_sect += preq.nr_sects;
1288 	else if (operation & WRITE)
1289 		blkif->st_wr_sect += preq.nr_sects;
1290 
1291 	return 0;
1292 
1293  fail_flush:
1294 	xen_blkbk_unmap(blkif, pending_req->segments,
1295 	                pending_req->nr_pages);
1296  fail_response:
	/* Haven't submitted any bios yet. */
1298 	make_response(blkif, req->u.rw.id, req_operation, BLKIF_RSP_ERROR);
1299 	free_req(blkif, pending_req);
1300 	msleep(1); /* back off a bit */
1301 	return -EIO;
1302 
1303  fail_put_bio:
1304 	for (i = 0; i < nbio; i++)
1305 		bio_put(biolist[i]);
1306 	atomic_set(&pending_req->pendcnt, 1);
1307 	__end_block_io_op(pending_req, -EINVAL);
1308 	msleep(1); /* back off a bit */
1309 	return -EIO;
1310 }
1313 
1314 /*
1315  * Put a response on the ring on how the operation fared.
1316  */
1317 static void make_response(struct xen_blkif *blkif, u64 id,
1318 			  unsigned short op, int st)
1319 {
1320 	struct blkif_response  resp;
1321 	unsigned long     flags;
1322 	union blkif_back_rings *blk_rings = &blkif->blk_rings;
1323 	int notify;
1324 
1325 	resp.id        = id;
1326 	resp.operation = op;
1327 	resp.status    = st;
1328 
1329 	spin_lock_irqsave(&blkif->blk_ring_lock, flags);
1330 	/* Place on the response ring for the relevant domain. */
1331 	switch (blkif->blk_protocol) {
1332 	case BLKIF_PROTOCOL_NATIVE:
1333 		memcpy(RING_GET_RESPONSE(&blk_rings->native, blk_rings->native.rsp_prod_pvt),
1334 		       &resp, sizeof(resp));
1335 		break;
1336 	case BLKIF_PROTOCOL_X86_32:
1337 		memcpy(RING_GET_RESPONSE(&blk_rings->x86_32, blk_rings->x86_32.rsp_prod_pvt),
1338 		       &resp, sizeof(resp));
1339 		break;
1340 	case BLKIF_PROTOCOL_X86_64:
1341 		memcpy(RING_GET_RESPONSE(&blk_rings->x86_64, blk_rings->x86_64.rsp_prod_pvt),
1342 		       &resp, sizeof(resp));
1343 		break;
1344 	default:
1345 		BUG();
1346 	}
1347 	blk_rings->common.rsp_prod_pvt++;
1348 	RING_PUSH_RESPONSES_AND_CHECK_NOTIFY(&blk_rings->common, notify);
1349 	spin_unlock_irqrestore(&blkif->blk_ring_lock, flags);
1350 	if (notify)
1351 		notify_remote_via_irq(blkif->irq);
1352 }
1353 
1354 static int __init xen_blkif_init(void)
1355 {
1356 	int rc = 0;
1357 
1358 	if (!xen_domain())
1359 		return -ENODEV;
1360 
1361 	rc = xen_blkif_interface_init();
1362 	if (rc)
1363 		goto failed_init;
1364 
1365 	rc = xen_blkif_xenbus_init();
1366 	if (rc)
1367 		goto failed_init;
1368 
1369  failed_init:
1370 	return rc;
1371 }
1372 
1373 module_init(xen_blkif_init);
1374 
1375 MODULE_LICENSE("Dual BSD/GPL");
1376 MODULE_ALIAS("xen-backend:vbd");
1377