1 /******************************************************************************
2  *
3  * Back-end of the driver for virtual block devices. This portion of the
4  * driver exports a 'unified' block-device interface that can be accessed
5  * by any operating system that implements a compatible front end. A
6  * reference front-end implementation can be found in:
7  *  drivers/block/xen-blkfront.c
8  *
9  * Copyright (c) 2003-2004, Keir Fraser & Steve Hand
10  * Copyright (c) 2005, Christopher Clark
11  *
12  * This program is free software; you can redistribute it and/or
13  * modify it under the terms of the GNU General Public License version 2
14  * as published by the Free Software Foundation; or, when distributed
15  * separately from the Linux kernel or incorporated into other
16  * software packages, subject to the following license:
17  *
18  * Permission is hereby granted, free of charge, to any person obtaining a copy
19  * of this source file (the "Software"), to deal in the Software without
20  * restriction, including without limitation the rights to use, copy, modify,
21  * merge, publish, distribute, sublicense, and/or sell copies of the Software,
22  * and to permit persons to whom the Software is furnished to do so, subject to
23  * the following conditions:
24  *
25  * The above copyright notice and this permission notice shall be included in
26  * all copies or substantial portions of the Software.
27  *
28  * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
29  * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
30  * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
31  * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
32  * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
33  * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
34  * IN THE SOFTWARE.
35  */
36 
37 #include <linux/spinlock.h>
38 #include <linux/kthread.h>
39 #include <linux/list.h>
40 #include <linux/delay.h>
41 #include <linux/freezer.h>
42 #include <linux/bitmap.h>
43 
44 #include <xen/events.h>
45 #include <xen/page.h>
46 #include <xen/xen.h>
47 #include <asm/xen/hypervisor.h>
48 #include <asm/xen/hypercall.h>
49 #include <xen/balloon.h>
50 #include "common.h"
51 
/*
 * Maximum number of unused free pages to keep in the internal buffer.
 * Setting this too low reduces the memory used by each backend, but can
 * incur a performance penalty.
 *
 * A sane value is xen_blkif_reqs * BLKIF_MAX_SEGMENTS_PER_REQUEST; it can
 * be set to a lower value, which might degrade performance on some
 * I/O-intensive workloads.
 */
61 
62 static int xen_blkif_max_buffer_pages = 1024;
63 module_param_named(max_buffer_pages, xen_blkif_max_buffer_pages, int, 0644);
64 MODULE_PARM_DESC(max_buffer_pages,
65 "Maximum number of free pages to keep in each block backend buffer");
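/*
 * Example run-time adjustment (sysfs path assuming the module is loaded
 * as xen_blkback):
 *   echo 704 > /sys/module/xen_blkback/parameters/max_buffer_pages
 */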
66 
/*
 * Maximum number of grants to map persistently in blkback. For maximum
 * performance this should be the total number of grants that can be used
 * to fill the ring, but since this might become too high, especially with
 * the use of indirect descriptors, we set it to a value that provides good
 * performance without using too much memory.
 *
 * When the list of persistent grants is full we clean it up using an LRU
 * algorithm.
 */
77 
78 static int xen_blkif_max_pgrants = 1056;
79 module_param_named(max_persistent_grants, xen_blkif_max_pgrants, int, 0644);
80 MODULE_PARM_DESC(max_persistent_grants,
81                  "Maximum number of grants to map persistently");
82 
/*
 * The LRU mechanism to clean the lists of persistent grants needs to
 * be executed periodically. The time interval between consecutive
 * executions of the purge mechanism is expressed in milliseconds.
 */
88 #define LRU_INTERVAL 100
89 
/*
 * When the persistent grants list is full we will remove unused grants
 * from the list. This is the percentage of grants to remove on each
 * LRU execution.
 */
95 #define LRU_PERCENT_CLEAN 5
96 
/* Run-time switchable: /sys/module/xen_blkback/parameters/ */
static unsigned int log_stats;
module_param(log_stats, uint, 0644);
100 
101 #define BLKBACK_INVALID_HANDLE (~0)
102 
103 /* Number of free pages to remove on each call to free_xenballooned_pages */
104 #define NUM_BATCH_FREE_PAGES 10
105 
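/*
 * Pop a page from the per-backend free-page pool, falling back to the
 * balloon allocator when the pool is empty.
 */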
106 static inline int get_free_page(struct xen_blkif *blkif, struct page **page)
107 {
108 	unsigned long flags;
109 
110 	spin_lock_irqsave(&blkif->free_pages_lock, flags);
111 	if (list_empty(&blkif->free_pages)) {
112 		BUG_ON(blkif->free_pages_num != 0);
113 		spin_unlock_irqrestore(&blkif->free_pages_lock, flags);
114 		return alloc_xenballooned_pages(1, page, false);
115 	}
116 	BUG_ON(blkif->free_pages_num == 0);
117 	page[0] = list_first_entry(&blkif->free_pages, struct page, lru);
118 	list_del(&page[0]->lru);
119 	blkif->free_pages_num--;
120 	spin_unlock_irqrestore(&blkif->free_pages_lock, flags);
121 
122 	return 0;
123 }
124 
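/* Return a batch of pages to the per-backend free-page pool. */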
125 static inline void put_free_pages(struct xen_blkif *blkif, struct page **page,
126                                   int num)
127 {
128 	unsigned long flags;
129 	int i;
130 
131 	spin_lock_irqsave(&blkif->free_pages_lock, flags);
132 	for (i = 0; i < num; i++)
133 		list_add(&page[i]->lru, &blkif->free_pages);
134 	blkif->free_pages_num += num;
135 	spin_unlock_irqrestore(&blkif->free_pages_lock, flags);
136 }
137 
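/*
 * Shrink the free-page pool down to 'num' pages, releasing the excess
 * back to the balloon driver.
 */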
138 static inline void shrink_free_pagepool(struct xen_blkif *blkif, int num)
139 {
140 	/* Remove requested pages in batches of NUM_BATCH_FREE_PAGES */
141 	struct page *page[NUM_BATCH_FREE_PAGES];
142 	unsigned int num_pages = 0;
143 	unsigned long flags;
144 
145 	spin_lock_irqsave(&blkif->free_pages_lock, flags);
146 	while (blkif->free_pages_num > num) {
147 		BUG_ON(list_empty(&blkif->free_pages));
148 		page[num_pages] = list_first_entry(&blkif->free_pages,
149 		                                   struct page, lru);
150 		list_del(&page[num_pages]->lru);
151 		blkif->free_pages_num--;
152 		if (++num_pages == NUM_BATCH_FREE_PAGES) {
153 			spin_unlock_irqrestore(&blkif->free_pages_lock, flags);
154 			free_xenballooned_pages(num_pages, page);
155 			spin_lock_irqsave(&blkif->free_pages_lock, flags);
156 			num_pages = 0;
157 		}
158 	}
159 	spin_unlock_irqrestore(&blkif->free_pages_lock, flags);
160 	if (num_pages != 0)
161 		free_xenballooned_pages(num_pages, page);
162 }
163 
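/* Kernel virtual address of a pool page. */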
164 #define vaddr(page) ((unsigned long)pfn_to_kaddr(page_to_pfn(page)))
165 
166 static int do_block_io_op(struct xen_blkif *blkif);
167 static int dispatch_rw_block_io(struct xen_blkif *blkif,
168 				struct blkif_request *req,
169 				struct pending_req *pending_req);
170 static void make_response(struct xen_blkif *blkif, u64 id,
171 			  unsigned short op, int st);
172 
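/*
 * Iterate over the red-black tree of persistent grants; safe against
 * removal of the current node because the next node is fetched before
 * the loop body runs.
 */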
173 #define foreach_grant_safe(pos, n, rbtree, node) \
174 	for ((pos) = container_of(rb_first((rbtree)), typeof(*(pos)), node), \
175 	     (n) = (&(pos)->node != NULL) ? rb_next(&(pos)->node) : NULL; \
176 	     &(pos)->node != NULL; \
177 	     (pos) = container_of(n, typeof(*(pos)), node), \
178 	     (n) = (&(pos)->node != NULL) ? rb_next(&(pos)->node) : NULL)
179 
180 
/*
 * We don't need locking around the persistent grant helpers
 * because blkback uses a single thread for each backend, so we
 * can be sure that these functions will never be called concurrently.
 *
 * The only exception to that is put_persistent_gnt, which can be called
 * from interrupt context (by xen_blkbk_unmap), so we have to use atomic
 * bit operations to modify the flags of a persistent grant and to count
 * the number of used grants.
 */
191 static int add_persistent_gnt(struct xen_blkif *blkif,
192 			       struct persistent_gnt *persistent_gnt)
193 {
194 	struct rb_node **new = NULL, *parent = NULL;
195 	struct persistent_gnt *this;
196 
197 	if (blkif->persistent_gnt_c >= xen_blkif_max_pgrants) {
198 		if (!blkif->vbd.overflow_max_grants)
199 			blkif->vbd.overflow_max_grants = 1;
200 		return -EBUSY;
201 	}
202 	/* Figure out where to put new node */
203 	new = &blkif->persistent_gnts.rb_node;
204 	while (*new) {
205 		this = container_of(*new, struct persistent_gnt, node);
206 
207 		parent = *new;
208 		if (persistent_gnt->gnt < this->gnt)
209 			new = &((*new)->rb_left);
210 		else if (persistent_gnt->gnt > this->gnt)
211 			new = &((*new)->rb_right);
212 		else {
213 			pr_alert_ratelimited(DRV_PFX " trying to add a gref that's already in the tree\n");
214 			return -EINVAL;
215 		}
216 	}
217 
218 	bitmap_zero(persistent_gnt->flags, PERSISTENT_GNT_FLAGS_SIZE);
219 	set_bit(PERSISTENT_GNT_ACTIVE, persistent_gnt->flags);
220 	/* Add new node and rebalance tree. */
221 	rb_link_node(&(persistent_gnt->node), parent, new);
222 	rb_insert_color(&(persistent_gnt->node), &blkif->persistent_gnts);
223 	blkif->persistent_gnt_c++;
224 	atomic_inc(&blkif->persistent_gnt_in_use);
225 	return 0;
226 }
227 
228 static struct persistent_gnt *get_persistent_gnt(struct xen_blkif *blkif,
229 						 grant_ref_t gref)
230 {
231 	struct persistent_gnt *data;
232 	struct rb_node *node = NULL;
233 
234 	node = blkif->persistent_gnts.rb_node;
235 	while (node) {
236 		data = container_of(node, struct persistent_gnt, node);
237 
238 		if (gref < data->gnt)
239 			node = node->rb_left;
240 		else if (gref > data->gnt)
241 			node = node->rb_right;
242 		else {
			if (test_bit(PERSISTENT_GNT_ACTIVE, data->flags)) {
244 				pr_alert_ratelimited(DRV_PFX " requesting a grant already in use\n");
245 				return NULL;
246 			}
247 			set_bit(PERSISTENT_GNT_ACTIVE, data->flags);
248 			atomic_inc(&blkif->persistent_gnt_in_use);
249 			return data;
250 		}
251 	}
252 	return NULL;
253 }
254 
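/*
 * Mark a persistent grant as no longer in use; may be called from
 * interrupt context, hence the atomic bit operations.
 */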
255 static void put_persistent_gnt(struct xen_blkif *blkif,
256                                struct persistent_gnt *persistent_gnt)
257 {
	if (!test_bit(PERSISTENT_GNT_ACTIVE, persistent_gnt->flags))
		pr_alert_ratelimited(DRV_PFX " freeing a grant already unused\n");
260 	set_bit(PERSISTENT_GNT_WAS_ACTIVE, persistent_gnt->flags);
261 	clear_bit(PERSISTENT_GNT_ACTIVE, persistent_gnt->flags);
262 	atomic_dec(&blkif->persistent_gnt_in_use);
263 }
264 
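/*
 * Unmap and free every persistent grant in the tree; used when tearing
 * down the backend.
 */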
265 static void free_persistent_gnts(struct xen_blkif *blkif, struct rb_root *root,
266                                  unsigned int num)
267 {
268 	struct gnttab_unmap_grant_ref unmap[BLKIF_MAX_SEGMENTS_PER_REQUEST];
269 	struct page *pages[BLKIF_MAX_SEGMENTS_PER_REQUEST];
270 	struct persistent_gnt *persistent_gnt;
271 	struct rb_node *n;
272 	int ret = 0;
273 	int segs_to_unmap = 0;
274 
275 	foreach_grant_safe(persistent_gnt, n, root, node) {
276 		BUG_ON(persistent_gnt->handle ==
277 			BLKBACK_INVALID_HANDLE);
278 		gnttab_set_unmap_op(&unmap[segs_to_unmap],
279 			(unsigned long) pfn_to_kaddr(page_to_pfn(
280 				persistent_gnt->page)),
281 			GNTMAP_host_map,
282 			persistent_gnt->handle);
283 
284 		pages[segs_to_unmap] = persistent_gnt->page;
285 
286 		if (++segs_to_unmap == BLKIF_MAX_SEGMENTS_PER_REQUEST ||
287 			!rb_next(&persistent_gnt->node)) {
288 			ret = gnttab_unmap_refs(unmap, NULL, pages,
289 				segs_to_unmap);
290 			BUG_ON(ret);
291 			put_free_pages(blkif, pages, segs_to_unmap);
292 			segs_to_unmap = 0;
293 		}
294 
295 		rb_erase(&persistent_gnt->node, root);
296 		kfree(persistent_gnt);
297 		num--;
298 	}
299 	BUG_ON(num != 0);
300 }
301 
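/*
 * Work-queue callback that unmaps the grants collected on
 * persistent_purge_list by purge_persistent_gnt and returns their pages
 * to the free-page pool.
 */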
302 void xen_blkbk_unmap_purged_grants(struct work_struct *work)
303 {
304 	struct gnttab_unmap_grant_ref unmap[BLKIF_MAX_SEGMENTS_PER_REQUEST];
305 	struct page *pages[BLKIF_MAX_SEGMENTS_PER_REQUEST];
306 	struct persistent_gnt *persistent_gnt;
307 	int ret, segs_to_unmap = 0;
308 	struct xen_blkif *blkif = container_of(work, typeof(*blkif), persistent_purge_work);
309 
	while (!list_empty(&blkif->persistent_purge_list)) {
311 		persistent_gnt = list_first_entry(&blkif->persistent_purge_list,
312 		                                  struct persistent_gnt,
313 		                                  remove_node);
314 		list_del(&persistent_gnt->remove_node);
315 
316 		gnttab_set_unmap_op(&unmap[segs_to_unmap],
317 			vaddr(persistent_gnt->page),
318 			GNTMAP_host_map,
319 			persistent_gnt->handle);
320 
321 		pages[segs_to_unmap] = persistent_gnt->page;
322 
323 		if (++segs_to_unmap == BLKIF_MAX_SEGMENTS_PER_REQUEST) {
324 			ret = gnttab_unmap_refs(unmap, NULL, pages,
325 				segs_to_unmap);
326 			BUG_ON(ret);
327 			put_free_pages(blkif, pages, segs_to_unmap);
328 			segs_to_unmap = 0;
329 		}
330 		kfree(persistent_gnt);
331 	}
332 	if (segs_to_unmap > 0) {
333 		ret = gnttab_unmap_refs(unmap, NULL, pages, segs_to_unmap);
334 		BUG_ON(ret);
335 		put_free_pages(blkif, pages, segs_to_unmap);
336 	}
337 }
338 
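/*
 * LRU purge: move a percentage of the least recently used persistent
 * grants from the red-black tree to persistent_purge_list and schedule
 * xen_blkbk_unmap_purged_grants to unmap them.
 */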
339 static void purge_persistent_gnt(struct xen_blkif *blkif)
340 {
341 	struct persistent_gnt *persistent_gnt;
342 	struct rb_node *n;
343 	unsigned int num_clean, total;
344 	bool scan_used = false, clean_used = false;
345 	struct rb_root *root;
346 
347 	if (blkif->persistent_gnt_c < xen_blkif_max_pgrants ||
348 	    (blkif->persistent_gnt_c == xen_blkif_max_pgrants &&
349 	    !blkif->vbd.overflow_max_grants)) {
350 		return;
351 	}
352 
353 	if (work_pending(&blkif->persistent_purge_work)) {
354 		pr_alert_ratelimited(DRV_PFX "Scheduled work from previous purge is still pending, cannot purge list\n");
355 		return;
356 	}
357 
358 	num_clean = (xen_blkif_max_pgrants / 100) * LRU_PERCENT_CLEAN;
359 	num_clean = blkif->persistent_gnt_c - xen_blkif_max_pgrants + num_clean;
360 	num_clean = min(blkif->persistent_gnt_c, num_clean);
361 	if ((num_clean == 0) ||
362 	    (num_clean > (blkif->persistent_gnt_c - atomic_read(&blkif->persistent_gnt_in_use))))
363 		return;
364 
	/*
	 * At this point, we can assure that there will be no calls
	 * to get_persistent_gnt (because we are executing this code from
	 * xen_blkif_schedule), there can only be calls to put_persistent_gnt,
	 * which means that the number of currently used grants will go down,
	 * but never up, so we will always be able to remove the requested
	 * number of grants.
	 */
373 
374 	total = num_clean;
375 
376 	pr_debug(DRV_PFX "Going to purge %u persistent grants\n", num_clean);
377 
378 	BUG_ON(!list_empty(&blkif->persistent_purge_list));
379 	root = &blkif->persistent_gnts;
380 purge_list:
381 	foreach_grant_safe(persistent_gnt, n, root, node) {
382 		BUG_ON(persistent_gnt->handle ==
383 			BLKBACK_INVALID_HANDLE);
384 
385 		if (clean_used) {
386 			clear_bit(PERSISTENT_GNT_WAS_ACTIVE, persistent_gnt->flags);
387 			continue;
388 		}
389 
390 		if (test_bit(PERSISTENT_GNT_ACTIVE, persistent_gnt->flags))
391 			continue;
392 		if (!scan_used &&
393 		    (test_bit(PERSISTENT_GNT_WAS_ACTIVE, persistent_gnt->flags)))
394 			continue;
395 
396 		rb_erase(&persistent_gnt->node, root);
397 		list_add(&persistent_gnt->remove_node,
398 		         &blkif->persistent_purge_list);
399 		if (--num_clean == 0)
400 			goto finished;
401 	}
402 	/*
403 	 * If we get here it means we also need to start cleaning
404 	 * grants that were used since last purge in order to cope
405 	 * with the requested num
406 	 */
407 	if (!scan_used && !clean_used) {
408 		pr_debug(DRV_PFX "Still missing %u purged frames\n", num_clean);
409 		scan_used = true;
410 		goto purge_list;
411 	}
412 finished:
413 	if (!clean_used) {
414 		pr_debug(DRV_PFX "Finished scanning for grants to clean, removing used flag\n");
415 		clean_used = true;
416 		goto purge_list;
417 	}
418 
419 	blkif->persistent_gnt_c -= (total - num_clean);
420 	blkif->vbd.overflow_max_grants = 0;
421 
422 	/* We can defer this work */
423 	schedule_work(&blkif->persistent_purge_work);
424 	pr_debug(DRV_PFX "Purged %u/%u\n", (total - num_clean), total);
425 	return;
426 }
427 
/*
 * Retrieve a free pending_req structure from the 'pending_free' list.
 */
431 static struct pending_req *alloc_req(struct xen_blkif *blkif)
432 {
433 	struct pending_req *req = NULL;
434 	unsigned long flags;
435 
436 	spin_lock_irqsave(&blkif->pending_free_lock, flags);
437 	if (!list_empty(&blkif->pending_free)) {
438 		req = list_entry(blkif->pending_free.next, struct pending_req,
439 				 free_list);
440 		list_del(&req->free_list);
441 	}
442 	spin_unlock_irqrestore(&blkif->pending_free_lock, flags);
443 	return req;
444 }
445 
/*
 * Return the 'pending_req' structure back to the free pool. We also
 * wake up the thread if it was waiting for a free request.
 */
450 static void free_req(struct xen_blkif *blkif, struct pending_req *req)
451 {
452 	unsigned long flags;
453 	int was_empty;
454 
455 	spin_lock_irqsave(&blkif->pending_free_lock, flags);
456 	was_empty = list_empty(&blkif->pending_free);
457 	list_add(&req->free_list, &blkif->pending_free);
458 	spin_unlock_irqrestore(&blkif->pending_free_lock, flags);
459 	if (was_empty)
460 		wake_up(&blkif->pending_free_wq);
461 }
462 
463 /*
464  * Routines for managing virtual block devices (vbds).
465  */
466 static int xen_vbd_translate(struct phys_req *req, struct xen_blkif *blkif,
467 			     int operation)
468 {
469 	struct xen_vbd *vbd = &blkif->vbd;
470 	int rc = -EACCES;
471 
472 	if ((operation != READ) && vbd->readonly)
473 		goto out;
474 
475 	if (likely(req->nr_sects)) {
476 		blkif_sector_t end = req->sector_number + req->nr_sects;
477 
478 		if (unlikely(end < req->sector_number))
479 			goto out;
480 		if (unlikely(end > vbd_sz(vbd)))
481 			goto out;
482 	}
483 
484 	req->dev  = vbd->pdevice;
485 	req->bdev = vbd->bdev;
486 	rc = 0;
487 
488  out:
489 	return rc;
490 }
491 
492 static void xen_vbd_resize(struct xen_blkif *blkif)
493 {
494 	struct xen_vbd *vbd = &blkif->vbd;
495 	struct xenbus_transaction xbt;
496 	int err;
497 	struct xenbus_device *dev = xen_blkbk_xenbus(blkif->be);
498 	unsigned long long new_size = vbd_sz(vbd);
499 
500 	pr_info(DRV_PFX "VBD Resize: Domid: %d, Device: (%d, %d)\n",
501 		blkif->domid, MAJOR(vbd->pdevice), MINOR(vbd->pdevice));
502 	pr_info(DRV_PFX "VBD Resize: new size %llu\n", new_size);
503 	vbd->size = new_size;
504 again:
505 	err = xenbus_transaction_start(&xbt);
506 	if (err) {
		pr_warn(DRV_PFX "Error starting transaction\n");
508 		return;
509 	}
510 	err = xenbus_printf(xbt, dev->nodename, "sectors", "%llu",
511 			    (unsigned long long)vbd_sz(vbd));
512 	if (err) {
		pr_warn(DRV_PFX "Error writing new size\n");
514 		goto abort;
515 	}
516 	/*
517 	 * Write the current state; we will use this to synchronize
518 	 * the front-end. If the current state is "connected" the
519 	 * front-end will get the new size information online.
520 	 */
521 	err = xenbus_printf(xbt, dev->nodename, "state", "%d", dev->state);
522 	if (err) {
		pr_warn(DRV_PFX "Error writing the state\n");
524 		goto abort;
525 	}
526 
527 	err = xenbus_transaction_end(xbt, 0);
528 	if (err == -EAGAIN)
529 		goto again;
530 	if (err)
		pr_warn(DRV_PFX "Error ending transaction\n");
532 	return;
533 abort:
534 	xenbus_transaction_end(xbt, 1);
535 }
536 
537 /*
538  * Notification from the guest OS.
539  */
540 static void blkif_notify_work(struct xen_blkif *blkif)
541 {
542 	blkif->waiting_reqs = 1;
543 	wake_up(&blkif->wq);
544 }
545 
546 irqreturn_t xen_blkif_be_int(int irq, void *dev_id)
547 {
548 	blkif_notify_work(dev_id);
549 	return IRQ_HANDLED;
550 }
551 
552 /*
553  * SCHEDULER FUNCTIONS
554  */
555 
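/* Periodically log per-backend request statistics (enabled via log_stats). */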
556 static void print_stats(struct xen_blkif *blkif)
557 {
558 	pr_info("xen-blkback (%s): oo %3llu  |  rd %4llu  |  wr %4llu  |  f %4llu"
559 		 "  |  ds %4llu | pg: %4u/%4d\n",
560 		 current->comm, blkif->st_oo_req,
561 		 blkif->st_rd_req, blkif->st_wr_req,
562 		 blkif->st_f_req, blkif->st_ds_req,
563 		 blkif->persistent_gnt_c,
564 		 xen_blkif_max_pgrants);
565 	blkif->st_print = jiffies + msecs_to_jiffies(10 * 1000);
566 	blkif->st_rd_req = 0;
567 	blkif->st_wr_req = 0;
568 	blkif->st_oo_req = 0;
569 	blkif->st_ds_req = 0;
570 }
571 
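/*
 * Main loop of the per-backend kernel thread: waits for requests from the
 * frontend, processes them, purges persistent grants and shrinks the
 * free-page pool on the LRU interval.
 */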
572 int xen_blkif_schedule(void *arg)
573 {
574 	struct xen_blkif *blkif = arg;
575 	struct xen_vbd *vbd = &blkif->vbd;
576 	unsigned long timeout;
577 	int ret;
578 
579 	xen_blkif_get(blkif);
580 
581 	while (!kthread_should_stop()) {
582 		if (try_to_freeze())
583 			continue;
584 		if (unlikely(vbd->size != vbd_sz(vbd)))
585 			xen_vbd_resize(blkif);
586 
587 		timeout = msecs_to_jiffies(LRU_INTERVAL);
588 
589 		timeout = wait_event_interruptible_timeout(
590 			blkif->wq,
591 			blkif->waiting_reqs || kthread_should_stop(),
592 			timeout);
593 		if (timeout == 0)
594 			goto purge_gnt_list;
595 		timeout = wait_event_interruptible_timeout(
596 			blkif->pending_free_wq,
597 			!list_empty(&blkif->pending_free) ||
598 			kthread_should_stop(),
599 			timeout);
600 		if (timeout == 0)
601 			goto purge_gnt_list;
602 
603 		blkif->waiting_reqs = 0;
604 		smp_mb(); /* clear flag *before* checking for work */
605 
606 		ret = do_block_io_op(blkif);
607 		if (ret > 0)
608 			blkif->waiting_reqs = 1;
609 		if (ret == -EACCES)
610 			wait_event_interruptible(blkif->shutdown_wq,
611 						 kthread_should_stop());
612 
613 purge_gnt_list:
614 		if (blkif->vbd.feature_gnt_persistent &&
615 		    time_after(jiffies, blkif->next_lru)) {
616 			purge_persistent_gnt(blkif);
617 			blkif->next_lru = jiffies + msecs_to_jiffies(LRU_INTERVAL);
618 		}
619 
620 		/* Shrink if we have more than xen_blkif_max_buffer_pages */
621 		shrink_free_pagepool(blkif, xen_blkif_max_buffer_pages);
622 
623 		if (log_stats && time_after(jiffies, blkif->st_print))
624 			print_stats(blkif);
625 	}
626 
627 	/* Drain pending purge work */
628 	flush_work(&blkif->persistent_purge_work);
629 
630 	if (log_stats)
631 		print_stats(blkif);
632 
633 	blkif->xenblkd = NULL;
634 	xen_blkif_put(blkif);
635 
636 	return 0;
637 }
638 
639 /*
640  * Remove persistent grants and empty the pool of free pages
641  */
642 void xen_blkbk_free_caches(struct xen_blkif *blkif)
643 {
644 	/* Free all persistent grant pages */
645 	if (!RB_EMPTY_ROOT(&blkif->persistent_gnts))
646 		free_persistent_gnts(blkif, &blkif->persistent_gnts,
647 			blkif->persistent_gnt_c);
648 
649 	BUG_ON(!RB_EMPTY_ROOT(&blkif->persistent_gnts));
650 	blkif->persistent_gnt_c = 0;
651 
652 	/* Since we are shutting down remove all pages from the buffer */
653 	shrink_free_pagepool(blkif, 0 /* All */);
654 }
655 
656 /*
657  * Unmap the grant references, and also remove the M2P over-rides
658  * used in the 'pending_req'.
659  */
660 static void xen_blkbk_unmap(struct xen_blkif *blkif,
661                             struct grant_page *pages[],
662                             int num)
663 {
664 	struct gnttab_unmap_grant_ref unmap[BLKIF_MAX_SEGMENTS_PER_REQUEST];
665 	struct page *unmap_pages[BLKIF_MAX_SEGMENTS_PER_REQUEST];
666 	unsigned int i, invcount = 0;
667 	int ret;
668 
669 	for (i = 0; i < num; i++) {
670 		if (pages[i]->persistent_gnt != NULL) {
671 			put_persistent_gnt(blkif, pages[i]->persistent_gnt);
672 			continue;
673 		}
674 		if (pages[i]->handle == BLKBACK_INVALID_HANDLE)
675 			continue;
676 		unmap_pages[invcount] = pages[i]->page;
677 		gnttab_set_unmap_op(&unmap[invcount], vaddr(pages[i]->page),
678 				    GNTMAP_host_map, pages[i]->handle);
679 		pages[i]->handle = BLKBACK_INVALID_HANDLE;
680 		if (++invcount == BLKIF_MAX_SEGMENTS_PER_REQUEST) {
681 			ret = gnttab_unmap_refs(unmap, NULL, unmap_pages,
682 			                        invcount);
683 			BUG_ON(ret);
684 			put_free_pages(blkif, unmap_pages, invcount);
685 			invcount = 0;
686 		}
687 	}
688 	if (invcount) {
689 		ret = gnttab_unmap_refs(unmap, NULL, unmap_pages, invcount);
690 		BUG_ON(ret);
691 		put_free_pages(blkif, unmap_pages, invcount);
692 	}
693 }
694 
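/*
 * Map the grant references of a request, reusing persistently mapped
 * grants where possible and falling back to regular grant mappings
 * (batched in groups of BLKIF_MAX_SEGMENTS_PER_REQUEST) otherwise.
 */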
695 static int xen_blkbk_map(struct xen_blkif *blkif,
696 			 struct grant_page *pages[],
697 			 int num, bool ro)
698 {
699 	struct gnttab_map_grant_ref map[BLKIF_MAX_SEGMENTS_PER_REQUEST];
700 	struct page *pages_to_gnt[BLKIF_MAX_SEGMENTS_PER_REQUEST];
701 	struct persistent_gnt *persistent_gnt = NULL;
702 	phys_addr_t addr = 0;
703 	int i, seg_idx, new_map_idx;
704 	int segs_to_map = 0;
705 	int ret = 0;
706 	int last_map = 0, map_until = 0;
707 	int use_persistent_gnts;
708 
709 	use_persistent_gnts = (blkif->vbd.feature_gnt_persistent);
710 
	/*
	 * Fill out preq.nr_sects with the proper number of sectors, and set up
	 * map[..] with the PFN of the page in our domain and the corresponding
	 * grant reference for each page.
	 */
716 again:
717 	for (i = map_until; i < num; i++) {
718 		uint32_t flags;
719 
720 		if (use_persistent_gnts)
721 			persistent_gnt = get_persistent_gnt(
722 				blkif,
723 				pages[i]->gref);
724 
725 		if (persistent_gnt) {
726 			/*
727 			 * We are using persistent grants and
728 			 * the grant is already mapped
729 			 */
730 			pages[i]->page = persistent_gnt->page;
731 			pages[i]->persistent_gnt = persistent_gnt;
732 		} else {
733 			if (get_free_page(blkif, &pages[i]->page))
734 				goto out_of_memory;
735 			addr = vaddr(pages[i]->page);
736 			pages_to_gnt[segs_to_map] = pages[i]->page;
737 			pages[i]->persistent_gnt = NULL;
738 			flags = GNTMAP_host_map;
739 			if (!use_persistent_gnts && ro)
740 				flags |= GNTMAP_readonly;
741 			gnttab_set_map_op(&map[segs_to_map++], addr,
742 					  flags, pages[i]->gref,
743 					  blkif->domid);
744 		}
745 		map_until = i + 1;
746 		if (segs_to_map == BLKIF_MAX_SEGMENTS_PER_REQUEST)
747 			break;
748 	}
749 
750 	if (segs_to_map) {
751 		ret = gnttab_map_refs(map, NULL, pages_to_gnt, segs_to_map);
752 		BUG_ON(ret);
753 	}
754 
	/*
	 * Now swizzle the MFN in our domain with the MFN from the other domain
	 * so that when we access the local virtual address of the page it has
	 * the contents of the granted page from the other domain.
	 */
760 	for (seg_idx = last_map, new_map_idx = 0; seg_idx < map_until; seg_idx++) {
761 		if (!pages[seg_idx]->persistent_gnt) {
762 			/* This is a newly mapped grant */
763 			BUG_ON(new_map_idx >= segs_to_map);
764 			if (unlikely(map[new_map_idx].status != 0)) {
765 				pr_debug(DRV_PFX "invalid buffer -- could not remap it\n");
766 				pages[seg_idx]->handle = BLKBACK_INVALID_HANDLE;
767 				ret |= 1;
768 				goto next;
769 			}
770 			pages[seg_idx]->handle = map[new_map_idx].handle;
771 		} else {
772 			continue;
773 		}
774 		if (use_persistent_gnts &&
775 		    blkif->persistent_gnt_c < xen_blkif_max_pgrants) {
			/*
			 * We are using persistent grants; the grant is
			 * not mapped but we might have room for it.
			 */
780 			persistent_gnt = kmalloc(sizeof(struct persistent_gnt),
781 				                 GFP_KERNEL);
782 			if (!persistent_gnt) {
				/*
				 * If we don't have enough memory to
				 * allocate the persistent_gnt struct,
				 * map this grant non-persistently.
				 */
788 				goto next;
789 			}
790 			persistent_gnt->gnt = map[new_map_idx].ref;
791 			persistent_gnt->handle = map[new_map_idx].handle;
792 			persistent_gnt->page = pages[seg_idx]->page;
793 			if (add_persistent_gnt(blkif,
794 			                       persistent_gnt)) {
795 				kfree(persistent_gnt);
796 				persistent_gnt = NULL;
797 				goto next;
798 			}
799 			pages[seg_idx]->persistent_gnt = persistent_gnt;
800 			pr_debug(DRV_PFX " grant %u added to the tree of persistent grants, using %u/%u\n",
801 				 persistent_gnt->gnt, blkif->persistent_gnt_c,
802 				 xen_blkif_max_pgrants);
803 			goto next;
804 		}
805 		if (use_persistent_gnts && !blkif->vbd.overflow_max_grants) {
806 			blkif->vbd.overflow_max_grants = 1;
807 			pr_debug(DRV_PFX " domain %u, device %#x is using maximum number of persistent grants\n",
808 			         blkif->domid, blkif->vbd.handle);
809 		}
810 		/*
811 		 * We could not map this grant persistently, so use it as
812 		 * a non-persistent grant.
813 		 */
814 next:
815 		new_map_idx++;
816 	}
817 	segs_to_map = 0;
818 	last_map = map_until;
819 	if (map_until != num)
820 		goto again;
821 
822 	return ret;
823 
824 out_of_memory:
825 	pr_alert(DRV_PFX "%s: out of memory\n", __func__);
826 	put_free_pages(blkif, pages_to_gnt, segs_to_map);
827 	return -ENOMEM;
828 }
829 
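/*
 * Map the data segments of a request. For writes to the device the guest
 * pages are only read, so they can be mapped read-only.
 */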
830 static int xen_blkbk_map_seg(struct pending_req *pending_req)
831 {
832 	int rc;
833 
834 	rc = xen_blkbk_map(pending_req->blkif, pending_req->segments,
835 			   pending_req->nr_pages,
836 	                   (pending_req->operation != BLKIF_OP_READ));
837 
838 	return rc;
839 }
840 
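/*
 * Map the indirect descriptor pages of a BLKIF_OP_INDIRECT request and
 * copy the embedded segment descriptors into 'seg', validating each one.
 */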
841 static int xen_blkbk_parse_indirect(struct blkif_request *req,
842 				    struct pending_req *pending_req,
843 				    struct seg_buf seg[],
844 				    struct phys_req *preq)
845 {
846 	struct grant_page **pages = pending_req->indirect_pages;
847 	struct xen_blkif *blkif = pending_req->blkif;
848 	int indirect_grefs, rc, n, nseg, i;
849 	struct blkif_request_segment *segments = NULL;
850 
851 	nseg = pending_req->nr_pages;
852 	indirect_grefs = INDIRECT_PAGES(nseg);
853 	BUG_ON(indirect_grefs > BLKIF_MAX_INDIRECT_PAGES_PER_REQUEST);
854 
855 	for (i = 0; i < indirect_grefs; i++)
856 		pages[i]->gref = req->u.indirect.indirect_grefs[i];
857 
858 	rc = xen_blkbk_map(blkif, pages, indirect_grefs, true);
859 	if (rc)
860 		goto unmap;
861 
862 	for (n = 0, i = 0; n < nseg; n++) {
863 		if ((n % SEGS_PER_INDIRECT_FRAME) == 0) {
864 			/* Map indirect segments */
865 			if (segments)
866 				kunmap_atomic(segments);
867 			segments = kmap_atomic(pages[n/SEGS_PER_INDIRECT_FRAME]->page);
868 		}
869 		i = n % SEGS_PER_INDIRECT_FRAME;
870 		pending_req->segments[n]->gref = segments[i].gref;
871 		seg[n].nsec = segments[i].last_sect -
872 			segments[i].first_sect + 1;
873 		seg[n].offset = (segments[i].first_sect << 9);
874 		if ((segments[i].last_sect >= (PAGE_SIZE >> 9)) ||
875 		    (segments[i].last_sect < segments[i].first_sect)) {
876 			rc = -EINVAL;
877 			goto unmap;
878 		}
879 		preq->nr_sects += seg[n].nsec;
880 	}
881 
882 unmap:
883 	if (segments)
884 		kunmap_atomic(segments);
885 	xen_blkbk_unmap(blkif, pages, indirect_grefs);
886 	return rc;
887 }
888 
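/*
 * Handle a BLKIF_OP_DISCARD request by translating it into
 * blkdev_issue_discard on the underlying block device.
 */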
889 static int dispatch_discard_io(struct xen_blkif *blkif,
890 				struct blkif_request *req)
891 {
892 	int err = 0;
893 	int status = BLKIF_RSP_OKAY;
894 	struct block_device *bdev = blkif->vbd.bdev;
895 	unsigned long secure;
896 	struct phys_req preq;
897 
898 	xen_blkif_get(blkif);
899 
900 	preq.sector_number = req->u.discard.sector_number;
901 	preq.nr_sects      = req->u.discard.nr_sectors;
902 
903 	err = xen_vbd_translate(&preq, blkif, WRITE);
904 	if (err) {
905 		pr_warn(DRV_PFX "access denied: DISCARD [%llu->%llu] on dev=%04x\n",
906 			preq.sector_number,
907 			preq.sector_number + preq.nr_sects, blkif->vbd.pdevice);
908 		goto fail_response;
909 	}
910 	blkif->st_ds_req++;
911 
912 	secure = (blkif->vbd.discard_secure &&
913 		 (req->u.discard.flag & BLKIF_DISCARD_SECURE)) ?
914 		 BLKDEV_DISCARD_SECURE : 0;
915 
916 	err = blkdev_issue_discard(bdev, req->u.discard.sector_number,
917 				   req->u.discard.nr_sectors,
918 				   GFP_KERNEL, secure);
919 fail_response:
920 	if (err == -EOPNOTSUPP) {
921 		pr_debug(DRV_PFX "discard op failed, not supported\n");
922 		status = BLKIF_RSP_EOPNOTSUPP;
923 	} else if (err)
924 		status = BLKIF_RSP_ERROR;
925 
926 	make_response(blkif, req->u.discard.id, req->operation, status);
927 	xen_blkif_put(blkif);
928 	return err;
929 }
930 
931 static int dispatch_other_io(struct xen_blkif *blkif,
932 			     struct blkif_request *req,
933 			     struct pending_req *pending_req)
934 {
935 	free_req(blkif, pending_req);
936 	make_response(blkif, req->u.other.id, req->operation,
937 		      BLKIF_RSP_EOPNOTSUPP);
938 	return -EIO;
939 }
940 
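/*
 * Wait for all in-flight I/O on this backend to complete; used before
 * issuing a barrier-style flush.
 */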
941 static void xen_blk_drain_io(struct xen_blkif *blkif)
942 {
943 	atomic_set(&blkif->drain, 1);
944 	do {
945 		if (atomic_read(&blkif->inflight) == 0)
946 			break;
947 		wait_for_completion_interruptible_timeout(
948 				&blkif->drain_complete, HZ);
949 
950 		if (!atomic_read(&blkif->drain))
951 			break;
952 	} while (!kthread_should_stop());
953 	atomic_set(&blkif->drain, 0);
954 }
955 
/*
 * Completion handling for the bio's. The actual bio->bi_end_io() callback
 * is end_block_io_op() below.
 */
959 
960 static void __end_block_io_op(struct pending_req *pending_req, int error)
961 {
962 	/* An error fails the entire request. */
963 	if ((pending_req->operation == BLKIF_OP_FLUSH_DISKCACHE) &&
964 	    (error == -EOPNOTSUPP)) {
965 		pr_debug(DRV_PFX "flush diskcache op failed, not supported\n");
966 		xen_blkbk_flush_diskcache(XBT_NIL, pending_req->blkif->be, 0);
967 		pending_req->status = BLKIF_RSP_EOPNOTSUPP;
968 	} else if ((pending_req->operation == BLKIF_OP_WRITE_BARRIER) &&
969 		    (error == -EOPNOTSUPP)) {
970 		pr_debug(DRV_PFX "write barrier op failed, not supported\n");
971 		xen_blkbk_barrier(XBT_NIL, pending_req->blkif->be, 0);
972 		pending_req->status = BLKIF_RSP_EOPNOTSUPP;
973 	} else if (error) {
974 		pr_debug(DRV_PFX "Buffer not up-to-date at end of operation,"
975 			 " error=%d\n", error);
976 		pending_req->status = BLKIF_RSP_ERROR;
977 	}
978 
979 	/*
980 	 * If all of the bio's have completed it is time to unmap
981 	 * the grant references associated with 'request' and provide
982 	 * the proper response on the ring.
983 	 */
984 	if (atomic_dec_and_test(&pending_req->pendcnt)) {
985 		struct xen_blkif *blkif = pending_req->blkif;
986 
987 		xen_blkbk_unmap(blkif,
988 		                pending_req->segments,
989 		                pending_req->nr_pages);
990 		make_response(blkif, pending_req->id,
991 			      pending_req->operation, pending_req->status);
992 		free_req(blkif, pending_req);
		/*
		 * Make sure the request is freed before releasing blkif,
		 * or there could be a race between free_req and the
		 * cleanup done in xen_blkif_free during shutdown.
		 *
		 * NB: The fact that we might try to wake up pending_free_wq
		 * before drain_complete (in case there's a drain going on)
		 * is not a problem with our current implementation, because
		 * we can be sure there's no thread waiting on
		 * pending_free_wq if there's a drain going on, but it has
		 * to be taken into account if the current model is changed.
		 */
1005 		if (atomic_dec_and_test(&blkif->inflight) && atomic_read(&blkif->drain)) {
1006 			complete(&blkif->drain_complete);
1007 		}
1008 		xen_blkif_put(blkif);
1009 	}
1010 }
1011 
1012 /*
1013  * bio callback.
1014  */
1015 static void end_block_io_op(struct bio *bio, int error)
1016 {
1017 	__end_block_io_op(bio->bi_private, error);
1018 	bio_put(bio);
1019 }
1020 
1021 
1022 
/*
 * Function to copy the 'struct blkif_request' from the ring buffer
 * (which has the sectors we want, number of them, grant references, etc),
 * and transmute it to the block API to hand it over to the proper block disk.
 */
1028 static int
1029 __do_block_io_op(struct xen_blkif *blkif)
1030 {
1031 	union blkif_back_rings *blk_rings = &blkif->blk_rings;
1032 	struct blkif_request req;
1033 	struct pending_req *pending_req;
1034 	RING_IDX rc, rp;
1035 	int more_to_do = 0;
1036 
1037 	rc = blk_rings->common.req_cons;
1038 	rp = blk_rings->common.sring->req_prod;
1039 	rmb(); /* Ensure we see queued requests up to 'rp'. */
1040 
1041 	if (RING_REQUEST_PROD_OVERFLOW(&blk_rings->common, rp)) {
1042 		rc = blk_rings->common.rsp_prod_pvt;
1043 		pr_warn(DRV_PFX "Frontend provided bogus ring requests (%d - %d = %d). Halting ring processing on dev=%04x\n",
1044 			rp, rc, rp - rc, blkif->vbd.pdevice);
1045 		return -EACCES;
1046 	}
1047 	while (rc != rp) {
1048 
1049 		if (RING_REQUEST_CONS_OVERFLOW(&blk_rings->common, rc))
1050 			break;
1051 
1052 		if (kthread_should_stop()) {
1053 			more_to_do = 1;
1054 			break;
1055 		}
1056 
1057 		pending_req = alloc_req(blkif);
1058 		if (NULL == pending_req) {
1059 			blkif->st_oo_req++;
1060 			more_to_do = 1;
1061 			break;
1062 		}
1063 
1064 		switch (blkif->blk_protocol) {
1065 		case BLKIF_PROTOCOL_NATIVE:
1066 			memcpy(&req, RING_GET_REQUEST(&blk_rings->native, rc), sizeof(req));
1067 			break;
1068 		case BLKIF_PROTOCOL_X86_32:
1069 			blkif_get_x86_32_req(&req, RING_GET_REQUEST(&blk_rings->x86_32, rc));
1070 			break;
1071 		case BLKIF_PROTOCOL_X86_64:
1072 			blkif_get_x86_64_req(&req, RING_GET_REQUEST(&blk_rings->x86_64, rc));
1073 			break;
1074 		default:
1075 			BUG();
1076 		}
1077 		blk_rings->common.req_cons = ++rc; /* before make_response() */
1078 
1079 		/* Apply all sanity checks to /private copy/ of request. */
1080 		barrier();
1081 
1082 		switch (req.operation) {
1083 		case BLKIF_OP_READ:
1084 		case BLKIF_OP_WRITE:
1085 		case BLKIF_OP_WRITE_BARRIER:
1086 		case BLKIF_OP_FLUSH_DISKCACHE:
1087 		case BLKIF_OP_INDIRECT:
1088 			if (dispatch_rw_block_io(blkif, &req, pending_req))
1089 				goto done;
1090 			break;
1091 		case BLKIF_OP_DISCARD:
1092 			free_req(blkif, pending_req);
1093 			if (dispatch_discard_io(blkif, &req))
1094 				goto done;
1095 			break;
1096 		default:
1097 			if (dispatch_other_io(blkif, &req, pending_req))
1098 				goto done;
1099 			break;
1100 		}
1101 
1102 		/* Yield point for this unbounded loop. */
1103 		cond_resched();
1104 	}
1105 done:
1106 	return more_to_do;
1107 }
1108 
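/*
 * Drain the ring: keep calling __do_block_io_op until the final check
 * reports no more requests, so that no notification is lost.
 */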
1109 static int
1110 do_block_io_op(struct xen_blkif *blkif)
1111 {
1112 	union blkif_back_rings *blk_rings = &blkif->blk_rings;
1113 	int more_to_do;
1114 
1115 	do {
1116 		more_to_do = __do_block_io_op(blkif);
1117 		if (more_to_do)
1118 			break;
1119 
1120 		RING_FINAL_CHECK_FOR_REQUESTS(&blk_rings->common, more_to_do);
1121 	} while (more_to_do);
1122 
1123 	return more_to_do;
1124 }

/*
 * Transmutation of the 'struct blkif_request' to a proper 'struct bio'
 * and a call to 'submit_bio' to pass it to the underlying storage.
 */
1129 static int dispatch_rw_block_io(struct xen_blkif *blkif,
1130 				struct blkif_request *req,
1131 				struct pending_req *pending_req)
1132 {
1133 	struct phys_req preq;
1134 	struct seg_buf *seg = pending_req->seg;
1135 	unsigned int nseg;
1136 	struct bio *bio = NULL;
1137 	struct bio **biolist = pending_req->biolist;
1138 	int i, nbio = 0;
1139 	int operation;
1140 	struct blk_plug plug;
1141 	bool drain = false;
1142 	struct grant_page **pages = pending_req->segments;
1143 	unsigned short req_operation;
1144 
1145 	req_operation = req->operation == BLKIF_OP_INDIRECT ?
1146 			req->u.indirect.indirect_op : req->operation;
1147 	if ((req->operation == BLKIF_OP_INDIRECT) &&
1148 	    (req_operation != BLKIF_OP_READ) &&
1149 	    (req_operation != BLKIF_OP_WRITE)) {
1150 		pr_debug(DRV_PFX "Invalid indirect operation (%u)\n",
1151 			 req_operation);
1152 		goto fail_response;
1153 	}
1154 
1155 	switch (req_operation) {
1156 	case BLKIF_OP_READ:
1157 		blkif->st_rd_req++;
1158 		operation = READ;
1159 		break;
1160 	case BLKIF_OP_WRITE:
1161 		blkif->st_wr_req++;
1162 		operation = WRITE_ODIRECT;
1163 		break;
1164 	case BLKIF_OP_WRITE_BARRIER:
1165 		drain = true;
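		/* fall through */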
1166 	case BLKIF_OP_FLUSH_DISKCACHE:
1167 		blkif->st_f_req++;
1168 		operation = WRITE_FLUSH;
1169 		break;
1170 	default:
1171 		operation = 0; /* make gcc happy */
1172 		goto fail_response;
1173 		break;
1174 	}
1175 
1176 	/* Check that the number of segments is sane. */
1177 	nseg = req->operation == BLKIF_OP_INDIRECT ?
1178 	       req->u.indirect.nr_segments : req->u.rw.nr_segments;
1179 
1180 	if (unlikely(nseg == 0 && operation != WRITE_FLUSH) ||
1181 	    unlikely((req->operation != BLKIF_OP_INDIRECT) &&
1182 		     (nseg > BLKIF_MAX_SEGMENTS_PER_REQUEST)) ||
1183 	    unlikely((req->operation == BLKIF_OP_INDIRECT) &&
1184 		     (nseg > MAX_INDIRECT_SEGMENTS))) {
1185 		pr_debug(DRV_PFX "Bad number of segments in request (%d)\n",
1186 			 nseg);
1187 		/* Haven't submitted any bio's yet. */
1188 		goto fail_response;
1189 	}
1190 
1191 	preq.nr_sects      = 0;
1192 
1193 	pending_req->blkif     = blkif;
1194 	pending_req->id        = req->u.rw.id;
1195 	pending_req->operation = req_operation;
1196 	pending_req->status    = BLKIF_RSP_OKAY;
1197 	pending_req->nr_pages  = nseg;
1198 
1199 	if (req->operation != BLKIF_OP_INDIRECT) {
1200 		preq.dev               = req->u.rw.handle;
1201 		preq.sector_number     = req->u.rw.sector_number;
1202 		for (i = 0; i < nseg; i++) {
1203 			pages[i]->gref = req->u.rw.seg[i].gref;
1204 			seg[i].nsec = req->u.rw.seg[i].last_sect -
1205 				req->u.rw.seg[i].first_sect + 1;
1206 			seg[i].offset = (req->u.rw.seg[i].first_sect << 9);
1207 			if ((req->u.rw.seg[i].last_sect >= (PAGE_SIZE >> 9)) ||
1208 			    (req->u.rw.seg[i].last_sect <
1209 			     req->u.rw.seg[i].first_sect))
1210 				goto fail_response;
1211 			preq.nr_sects += seg[i].nsec;
1212 		}
1213 	} else {
1214 		preq.dev               = req->u.indirect.handle;
1215 		preq.sector_number     = req->u.indirect.sector_number;
1216 		if (xen_blkbk_parse_indirect(req, pending_req, seg, &preq))
1217 			goto fail_response;
1218 	}
1219 
1220 	if (xen_vbd_translate(&preq, blkif, operation) != 0) {
1221 		pr_debug(DRV_PFX "access denied: %s of [%llu,%llu] on dev=%04x\n",
1222 			 operation == READ ? "read" : "write",
1223 			 preq.sector_number,
1224 			 preq.sector_number + preq.nr_sects,
1225 			 blkif->vbd.pdevice);
1226 		goto fail_response;
1227 	}
1228 
1229 	/*
1230 	 * This check _MUST_ be done after xen_vbd_translate as the preq.bdev
1231 	 * is set there.
1232 	 */
1233 	for (i = 0; i < nseg; i++) {
1234 		if (((int)preq.sector_number|(int)seg[i].nsec) &
1235 		    ((bdev_logical_block_size(preq.bdev) >> 9) - 1)) {
			pr_debug(DRV_PFX "Misaligned I/O request from domain %d\n",
				 blkif->domid);
1238 			goto fail_response;
1239 		}
1240 	}
1241 
	/*
	 * Wait on all outstanding I/O's and once that has been completed
	 * issue the WRITE_FLUSH.
	 */
1245 	if (drain)
1246 		xen_blk_drain_io(pending_req->blkif);
1247 
1248 	/*
1249 	 * If we have failed at this point, we need to undo the M2P override,
1250 	 * set gnttab_set_unmap_op on all of the grant references and perform
1251 	 * the hypercall to unmap the grants - that is all done in
1252 	 * xen_blkbk_unmap.
1253 	 */
1254 	if (xen_blkbk_map_seg(pending_req))
1255 		goto fail_flush;
1256 
	/*
	 * The corresponding xen_blkif_put is done in __end_block_io_op, which
	 * is also reached via fail_put_bio below if we fail to allocate the
	 * bios.
	 */
1261 	xen_blkif_get(blkif);
1262 	atomic_inc(&blkif->inflight);
1263 
1264 	for (i = 0; i < nseg; i++) {
1265 		while ((bio == NULL) ||
1266 		       (bio_add_page(bio,
1267 				     pages[i]->page,
1268 				     seg[i].nsec << 9,
1269 				     seg[i].offset) == 0)) {
1270 
1271 			int nr_iovecs = min_t(int, (nseg-i), BIO_MAX_PAGES);
1272 			bio = bio_alloc(GFP_KERNEL, nr_iovecs);
1273 			if (unlikely(bio == NULL))
1274 				goto fail_put_bio;
1275 
1276 			biolist[nbio++] = bio;
1277 			bio->bi_bdev    = preq.bdev;
1278 			bio->bi_private = pending_req;
1279 			bio->bi_end_io  = end_block_io_op;
1280 			bio->bi_iter.bi_sector  = preq.sector_number;
1281 		}
1282 
1283 		preq.sector_number += seg[i].nsec;
1284 	}
1285 
	/* This will be hit if the operation was a flush. */
1287 	if (!bio) {
1288 		BUG_ON(operation != WRITE_FLUSH);
1289 
1290 		bio = bio_alloc(GFP_KERNEL, 0);
1291 		if (unlikely(bio == NULL))
1292 			goto fail_put_bio;
1293 
1294 		biolist[nbio++] = bio;
1295 		bio->bi_bdev    = preq.bdev;
1296 		bio->bi_private = pending_req;
1297 		bio->bi_end_io  = end_block_io_op;
1298 	}
1299 
1300 	atomic_set(&pending_req->pendcnt, nbio);
1301 	blk_start_plug(&plug);
1302 
1303 	for (i = 0; i < nbio; i++)
1304 		submit_bio(operation, biolist[i]);
1305 
1306 	/* Let the I/Os go.. */
1307 	blk_finish_plug(&plug);
1308 
1309 	if (operation == READ)
1310 		blkif->st_rd_sect += preq.nr_sects;
1311 	else if (operation & WRITE)
1312 		blkif->st_wr_sect += preq.nr_sects;
1313 
1314 	return 0;
1315 
1316  fail_flush:
1317 	xen_blkbk_unmap(blkif, pending_req->segments,
1318 	                pending_req->nr_pages);
1319  fail_response:
1320 	/* Haven't submitted any bio's yet. */
1321 	make_response(blkif, req->u.rw.id, req_operation, BLKIF_RSP_ERROR);
1322 	free_req(blkif, pending_req);
1323 	msleep(1); /* back off a bit */
1324 	return -EIO;
1325 
1326  fail_put_bio:
1327 	for (i = 0; i < nbio; i++)
1328 		bio_put(biolist[i]);
1329 	atomic_set(&pending_req->pendcnt, 1);
1330 	__end_block_io_op(pending_req, -EINVAL);
1331 	msleep(1); /* back off a bit */
1332 	return -EIO;
1333 }
1334 
1335 
1336 
1337 /*
1338  * Put a response on the ring on how the operation fared.
1339  */
1340 static void make_response(struct xen_blkif *blkif, u64 id,
1341 			  unsigned short op, int st)
1342 {
1343 	struct blkif_response  resp;
1344 	unsigned long     flags;
1345 	union blkif_back_rings *blk_rings = &blkif->blk_rings;
1346 	int notify;
1347 
1348 	resp.id        = id;
1349 	resp.operation = op;
1350 	resp.status    = st;
1351 
1352 	spin_lock_irqsave(&blkif->blk_ring_lock, flags);
1353 	/* Place on the response ring for the relevant domain. */
1354 	switch (blkif->blk_protocol) {
1355 	case BLKIF_PROTOCOL_NATIVE:
1356 		memcpy(RING_GET_RESPONSE(&blk_rings->native, blk_rings->native.rsp_prod_pvt),
1357 		       &resp, sizeof(resp));
1358 		break;
1359 	case BLKIF_PROTOCOL_X86_32:
1360 		memcpy(RING_GET_RESPONSE(&blk_rings->x86_32, blk_rings->x86_32.rsp_prod_pvt),
1361 		       &resp, sizeof(resp));
1362 		break;
1363 	case BLKIF_PROTOCOL_X86_64:
1364 		memcpy(RING_GET_RESPONSE(&blk_rings->x86_64, blk_rings->x86_64.rsp_prod_pvt),
1365 		       &resp, sizeof(resp));
1366 		break;
1367 	default:
1368 		BUG();
1369 	}
1370 	blk_rings->common.rsp_prod_pvt++;
1371 	RING_PUSH_RESPONSES_AND_CHECK_NOTIFY(&blk_rings->common, notify);
1372 	spin_unlock_irqrestore(&blkif->blk_ring_lock, flags);
1373 	if (notify)
1374 		notify_remote_via_irq(blkif->irq);
1375 }
1376 
1377 static int __init xen_blkif_init(void)
1378 {
1379 	int rc = 0;
1380 
1381 	if (!xen_domain())
1382 		return -ENODEV;
1383 
1384 	rc = xen_blkif_interface_init();
1385 	if (rc)
1386 		goto failed_init;
1387 
1388 	rc = xen_blkif_xenbus_init();
1389 	if (rc)
1390 		goto failed_init;
1391 
1392  failed_init:
1393 	return rc;
1394 }
1395 
1396 module_init(xen_blkif_init);
1397 
1398 MODULE_LICENSE("Dual BSD/GPL");
1399 MODULE_ALIAS("xen-backend:vbd");
1400