/******************************************************************************
 *
 * Back-end of the driver for virtual block devices. This portion of the
 * driver exports a 'unified' block-device interface that can be accessed
 * by any operating system that implements a compatible front end. A
 * reference front-end implementation can be found in:
 *  drivers/block/xen-blkfront.c
 *
 * Copyright (c) 2003-2004, Keir Fraser & Steve Hand
 * Copyright (c) 2005, Christopher Clark
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License version 2
 * as published by the Free Software Foundation; or, when distributed
 * separately from the Linux kernel or incorporated into other
 * software packages, subject to the following license:
 *
 * Permission is hereby granted, free of charge, to any person obtaining a copy
 * of this source file (the "Software"), to deal in the Software without
 * restriction, including without limitation the rights to use, copy, modify,
 * merge, publish, distribute, sublicense, and/or sell copies of the Software,
 * and to permit persons to whom the Software is furnished to do so, subject to
 * the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
 * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
 * IN THE SOFTWARE.
 */

#define pr_fmt(fmt) "xen-blkback: " fmt

#include <linux/spinlock.h>
#include <linux/kthread.h>
#include <linux/list.h>
#include <linux/delay.h>
#include <linux/freezer.h>
#include <linux/bitmap.h>

#include <xen/events.h>
#include <xen/page.h>
#include <xen/xen.h>
#include <asm/xen/hypervisor.h>
#include <asm/xen/hypercall.h>
#include <xen/balloon.h>
#include <xen/grant_table.h>
#include "common.h"

/*
 * Maximum number of unused free pages to keep in the internal buffer.
 * Setting this to a value too low will reduce memory used in each backend,
 * but can have a performance penalty.
 *
 * A sane value is xen_blkif_reqs * BLKIF_MAX_SEGMENTS_PER_REQUEST, but it
 * can be set to a lower value that might degrade performance on some
 * intensive IO workloads.
 */

static int xen_blkif_max_buffer_pages = 1024;
module_param_named(max_buffer_pages, xen_blkif_max_buffer_pages, int, 0644);
MODULE_PARM_DESC(max_buffer_pages,
"Maximum number of free pages to keep in each block backend buffer");

/*
 * Maximum number of grants to map persistently in blkback. For maximum
 * performance this should be the total number of grants that can be used
 * to fill the ring, but since this might become too high, especially with
 * the use of indirect descriptors, we set it to a value that provides good
 * performance without using too much memory.
 *
 * When the list of persistent grants is full we clean it up using an LRU
 * algorithm.
 */

static int xen_blkif_max_pgrants = 1056;
module_param_named(max_persistent_grants, xen_blkif_max_pgrants, int, 0644);
MODULE_PARM_DESC(max_persistent_grants,
                 "Maximum number of grants to map persistently");

/*
 * Maximum order of pages to be used for the shared ring between front and
 * backend, 4KB page granularity is used.
 */
unsigned int xen_blkif_max_ring_order = XENBUS_MAX_RING_GRANT_ORDER;
module_param_named(max_ring_page_order, xen_blkif_max_ring_order, int, S_IRUGO);
MODULE_PARM_DESC(max_ring_page_order, "Maximum order of pages to be used for the shared ring");
/*
 * The LRU mechanism to clean the lists of persistent grants needs to
 * be executed periodically. The time interval between consecutive executions
 * of the purge mechanism is set in ms.
 */
#define LRU_INTERVAL 100

/*
 * When the persistent grants list is full we will remove unused grants
 * from the list. This is the percentage of grants to be removed at each
 * LRU execution.
 */
#define LRU_PERCENT_CLEAN 5

/* Run-time switchable: /sys/module/xen_blkback/parameters/ */
static int log_stats;
module_param(log_stats, int, 0644);

#define BLKBACK_INVALID_HANDLE (~0)

/* Number of free pages to remove on each call to gnttab_free_pages */
#define NUM_BATCH_FREE_PAGES 10

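/*
 * Pop one page from the per-backend free page pool, or allocate a fresh
 * page from the grant-table subsystem when the pool is empty. Returns 0
 * on success, with the page stored in page[0].
 */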
static inline int get_free_page(struct xen_blkif *blkif, struct page **page)
{
	unsigned long flags;

	spin_lock_irqsave(&blkif->free_pages_lock, flags);
	if (list_empty(&blkif->free_pages)) {
		BUG_ON(blkif->free_pages_num != 0);
		spin_unlock_irqrestore(&blkif->free_pages_lock, flags);
		return gnttab_alloc_pages(1, page);
	}
	BUG_ON(blkif->free_pages_num == 0);
	page[0] = list_first_entry(&blkif->free_pages, struct page, lru);
	list_del(&page[0]->lru);
	blkif->free_pages_num--;
	spin_unlock_irqrestore(&blkif->free_pages_lock, flags);

	return 0;
}

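/* Return 'num' pages to the per-backend free page pool. */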
static inline void put_free_pages(struct xen_blkif *blkif, struct page **page,
                                  int num)
{
	unsigned long flags;
	int i;

	spin_lock_irqsave(&blkif->free_pages_lock, flags);
	for (i = 0; i < num; i++)
		list_add(&page[i]->lru, &blkif->free_pages);
	blkif->free_pages_num += num;
	spin_unlock_irqrestore(&blkif->free_pages_lock, flags);
}

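/*
 * Release pages from the free pool back to the grant-table subsystem until
 * at most 'num' pages remain, freeing in batches of NUM_BATCH_FREE_PAGES
 * and dropping the pool lock around each gnttab_free_pages() call.
 */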
static inline void shrink_free_pagepool(struct xen_blkif *blkif, int num)
{
	/* Remove requested pages in batches of NUM_BATCH_FREE_PAGES */
	struct page *page[NUM_BATCH_FREE_PAGES];
	unsigned int num_pages = 0;
	unsigned long flags;

	spin_lock_irqsave(&blkif->free_pages_lock, flags);
	while (blkif->free_pages_num > num) {
		BUG_ON(list_empty(&blkif->free_pages));
		page[num_pages] = list_first_entry(&blkif->free_pages,
		                                   struct page, lru);
		list_del(&page[num_pages]->lru);
		blkif->free_pages_num--;
		if (++num_pages == NUM_BATCH_FREE_PAGES) {
			spin_unlock_irqrestore(&blkif->free_pages_lock, flags);
			gnttab_free_pages(num_pages, page);
			spin_lock_irqsave(&blkif->free_pages_lock, flags);
			num_pages = 0;
		}
	}
	spin_unlock_irqrestore(&blkif->free_pages_lock, flags);
	if (num_pages != 0)
		gnttab_free_pages(num_pages, page);
}

#define vaddr(page) ((unsigned long)pfn_to_kaddr(page_to_pfn(page)))

static int do_block_io_op(struct xen_blkif *blkif);
static int dispatch_rw_block_io(struct xen_blkif *blkif,
				struct blkif_request *req,
				struct pending_req *pending_req);
static void make_response(struct xen_blkif *blkif, u64 id,
			  unsigned short op, int st);

#define foreach_grant_safe(pos, n, rbtree, node) \
	for ((pos) = container_of(rb_first((rbtree)), typeof(*(pos)), node), \
	     (n) = (&(pos)->node != NULL) ? rb_next(&(pos)->node) : NULL; \
	     &(pos)->node != NULL; \
	     (pos) = container_of(n, typeof(*(pos)), node), \
	     (n) = (&(pos)->node != NULL) ? rb_next(&(pos)->node) : NULL)

/*
 * We don't need locking around the persistent grant helpers
 * because blkback uses a single thread for each backend, so we
 * can be sure that these functions will never be called recursively.
 *
 * The only exception to that is put_persistent_gnt, which can be called
 * from interrupt context (by xen_blkbk_unmap), so we have to use atomic
 * bit operations to modify the flags of a persistent grant and to count
 * the number of used grants.
 */
static int add_persistent_gnt(struct xen_blkif *blkif,
			       struct persistent_gnt *persistent_gnt)
{
	struct rb_node **new = NULL, *parent = NULL;
	struct persistent_gnt *this;

	if (blkif->persistent_gnt_c >= xen_blkif_max_pgrants) {
		if (!blkif->vbd.overflow_max_grants)
			blkif->vbd.overflow_max_grants = 1;
		return -EBUSY;
	}
	/* Figure out where to put new node */
	new = &blkif->persistent_gnts.rb_node;
	while (*new) {
		this = container_of(*new, struct persistent_gnt, node);

		parent = *new;
		if (persistent_gnt->gnt < this->gnt)
			new = &((*new)->rb_left);
		else if (persistent_gnt->gnt > this->gnt)
			new = &((*new)->rb_right);
		else {
			pr_alert_ratelimited("trying to add a gref that's already in the tree\n");
			return -EINVAL;
		}
	}

	bitmap_zero(persistent_gnt->flags, PERSISTENT_GNT_FLAGS_SIZE);
	set_bit(PERSISTENT_GNT_ACTIVE, persistent_gnt->flags);
	/* Add new node and rebalance tree. */
	rb_link_node(&(persistent_gnt->node), parent, new);
	rb_insert_color(&(persistent_gnt->node), &blkif->persistent_gnts);
	blkif->persistent_gnt_c++;
	atomic_inc(&blkif->persistent_gnt_in_use);
	return 0;
}

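/*
 * Look up a grant reference in the red-black tree of persistent grants.
 * On a hit the grant is marked active and the in-use counter is bumped;
 * a grant that is already active cannot be handed out twice.
 */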
static struct persistent_gnt *get_persistent_gnt(struct xen_blkif *blkif,
						 grant_ref_t gref)
{
	struct persistent_gnt *data;
	struct rb_node *node = NULL;

	node = blkif->persistent_gnts.rb_node;
	while (node) {
		data = container_of(node, struct persistent_gnt, node);

		if (gref < data->gnt)
			node = node->rb_left;
		else if (gref > data->gnt)
			node = node->rb_right;
		else {
			if (test_bit(PERSISTENT_GNT_ACTIVE, data->flags)) {
				pr_alert_ratelimited("requesting a grant already in use\n");
				return NULL;
			}
			set_bit(PERSISTENT_GNT_ACTIVE, data->flags);
			atomic_inc(&blkif->persistent_gnt_in_use);
			return data;
		}
	}
	return NULL;
}

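/*
 * Drop the active bit of a persistent grant and remember that it was used
 * (PERSISTENT_GNT_WAS_ACTIVE) so the LRU purge can prefer grants that
 * stayed idle. Safe to call from interrupt context.
 */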
static void put_persistent_gnt(struct xen_blkif *blkif,
                               struct persistent_gnt *persistent_gnt)
{
	if (!test_bit(PERSISTENT_GNT_ACTIVE, persistent_gnt->flags))
		pr_alert_ratelimited("freeing a grant already unused\n");
	set_bit(PERSISTENT_GNT_WAS_ACTIVE, persistent_gnt->flags);
	clear_bit(PERSISTENT_GNT_ACTIVE, persistent_gnt->flags);
	atomic_dec(&blkif->persistent_gnt_in_use);
}

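/*
 * Unmap and free every grant in the tree, batching the unmap operations in
 * groups of up to BLKIF_MAX_SEGMENTS_PER_REQUEST. Used when tearing down
 * the backend caches.
 */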
static void free_persistent_gnts(struct xen_blkif *blkif, struct rb_root *root,
                                 unsigned int num)
{
	struct gnttab_unmap_grant_ref unmap[BLKIF_MAX_SEGMENTS_PER_REQUEST];
	struct page *pages[BLKIF_MAX_SEGMENTS_PER_REQUEST];
	struct persistent_gnt *persistent_gnt;
	struct rb_node *n;
	int segs_to_unmap = 0;
	struct gntab_unmap_queue_data unmap_data;

	unmap_data.pages = pages;
	unmap_data.unmap_ops = unmap;
	unmap_data.kunmap_ops = NULL;

	foreach_grant_safe(persistent_gnt, n, root, node) {
		BUG_ON(persistent_gnt->handle ==
			BLKBACK_INVALID_HANDLE);
		gnttab_set_unmap_op(&unmap[segs_to_unmap],
			(unsigned long) pfn_to_kaddr(page_to_pfn(
				persistent_gnt->page)),
			GNTMAP_host_map,
			persistent_gnt->handle);

		pages[segs_to_unmap] = persistent_gnt->page;

		if (++segs_to_unmap == BLKIF_MAX_SEGMENTS_PER_REQUEST ||
			!rb_next(&persistent_gnt->node)) {

			unmap_data.count = segs_to_unmap;
			BUG_ON(gnttab_unmap_refs_sync(&unmap_data));

			put_free_pages(blkif, pages, segs_to_unmap);
			segs_to_unmap = 0;
		}

		rb_erase(&persistent_gnt->node, root);
		kfree(persistent_gnt);
		num--;
	}
	BUG_ON(num != 0);
}

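/*
 * Deferred work item: unmap and free the grants that purge_persistent_gnt
 * moved onto persistent_purge_list, returning their pages to the free pool.
 */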
void xen_blkbk_unmap_purged_grants(struct work_struct *work)
{
	struct gnttab_unmap_grant_ref unmap[BLKIF_MAX_SEGMENTS_PER_REQUEST];
	struct page *pages[BLKIF_MAX_SEGMENTS_PER_REQUEST];
	struct persistent_gnt *persistent_gnt;
	int segs_to_unmap = 0;
	struct xen_blkif *blkif = container_of(work, typeof(*blkif), persistent_purge_work);
	struct gntab_unmap_queue_data unmap_data;

	unmap_data.pages = pages;
	unmap_data.unmap_ops = unmap;
	unmap_data.kunmap_ops = NULL;

	while (!list_empty(&blkif->persistent_purge_list)) {
		persistent_gnt = list_first_entry(&blkif->persistent_purge_list,
		                                  struct persistent_gnt,
		                                  remove_node);
		list_del(&persistent_gnt->remove_node);

		gnttab_set_unmap_op(&unmap[segs_to_unmap],
			vaddr(persistent_gnt->page),
			GNTMAP_host_map,
			persistent_gnt->handle);

		pages[segs_to_unmap] = persistent_gnt->page;

		if (++segs_to_unmap == BLKIF_MAX_SEGMENTS_PER_REQUEST) {
			unmap_data.count = segs_to_unmap;
			BUG_ON(gnttab_unmap_refs_sync(&unmap_data));
			put_free_pages(blkif, pages, segs_to_unmap);
			segs_to_unmap = 0;
		}
		kfree(persistent_gnt);
	}
	if (segs_to_unmap > 0) {
		unmap_data.count = segs_to_unmap;
		BUG_ON(gnttab_unmap_refs_sync(&unmap_data));
		put_free_pages(blkif, pages, segs_to_unmap);
	}
}

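/*
 * LRU pass over the tree of persistent grants: detach a batch of unused
 * grants (preferring ones not used since the last pass) onto
 * persistent_purge_list and schedule the actual unmap work. Runs only from
 * xen_blkif_schedule, so it cannot race with get_persistent_gnt.
 */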
static void purge_persistent_gnt(struct xen_blkif *blkif)
{
	struct persistent_gnt *persistent_gnt;
	struct rb_node *n;
	unsigned int num_clean, total;
	bool scan_used = false, clean_used = false;
	struct rb_root *root;

	if (blkif->persistent_gnt_c < xen_blkif_max_pgrants ||
	    (blkif->persistent_gnt_c == xen_blkif_max_pgrants &&
	    !blkif->vbd.overflow_max_grants)) {
		return;
	}

	if (work_busy(&blkif->persistent_purge_work)) {
		pr_alert_ratelimited("Scheduled work from previous purge is still busy, cannot purge list\n");
		return;
	}

	num_clean = (xen_blkif_max_pgrants / 100) * LRU_PERCENT_CLEAN;
	num_clean = blkif->persistent_gnt_c - xen_blkif_max_pgrants + num_clean;
	num_clean = min(blkif->persistent_gnt_c, num_clean);
	if ((num_clean == 0) ||
	    (num_clean > (blkif->persistent_gnt_c - atomic_read(&blkif->persistent_gnt_in_use))))
		return;
	/*
	 * At this point, we can be sure that there will be no calls
	 * to get_persistent_gnt (because we are executing this code from
	 * xen_blkif_schedule), there can only be calls to put_persistent_gnt,
	 * which means that the number of currently used grants will go down,
	 * but never up, so we will always be able to remove the requested
	 * number of grants.
	 */

	total = num_clean;

	pr_debug("Going to purge %u persistent grants\n", num_clean);

	BUG_ON(!list_empty(&blkif->persistent_purge_list));
	root = &blkif->persistent_gnts;
purge_list:
	foreach_grant_safe(persistent_gnt, n, root, node) {
		BUG_ON(persistent_gnt->handle ==
			BLKBACK_INVALID_HANDLE);

		if (clean_used) {
			clear_bit(PERSISTENT_GNT_WAS_ACTIVE, persistent_gnt->flags);
			continue;
		}

		if (test_bit(PERSISTENT_GNT_ACTIVE, persistent_gnt->flags))
			continue;
		if (!scan_used &&
		    (test_bit(PERSISTENT_GNT_WAS_ACTIVE, persistent_gnt->flags)))
			continue;

		rb_erase(&persistent_gnt->node, root);
		list_add(&persistent_gnt->remove_node,
		         &blkif->persistent_purge_list);
		if (--num_clean == 0)
			goto finished;
	}
	/*
	 * If we get here it means we also need to start cleaning
	 * grants that were used since last purge in order to cope
	 * with the requested num
	 */
	if (!scan_used && !clean_used) {
		pr_debug("Still missing %u purged frames\n", num_clean);
		scan_used = true;
		goto purge_list;
	}
finished:
	if (!clean_used) {
		pr_debug("Finished scanning for grants to clean, removing used flag\n");
		clean_used = true;
		goto purge_list;
	}

	blkif->persistent_gnt_c -= (total - num_clean);
	blkif->vbd.overflow_max_grants = 0;

	/* We can defer this work */
	schedule_work(&blkif->persistent_purge_work);
	pr_debug("Purged %u/%u\n", (total - num_clean), total);
	return;
}


/*
 * Retrieve a free pending_req structure from the 'pending_free' list,
 * or NULL if none is available.
 */
static struct pending_req *alloc_req(struct xen_blkif *blkif)
{
	struct pending_req *req = NULL;
	unsigned long flags;

	spin_lock_irqsave(&blkif->pending_free_lock, flags);
	if (!list_empty(&blkif->pending_free)) {
		req = list_entry(blkif->pending_free.next, struct pending_req,
				 free_list);
		list_del(&req->free_list);
	}
	spin_unlock_irqrestore(&blkif->pending_free_lock, flags);
	return req;
}

/*
 * Return the 'pending_req' structure back to the freepool. We also
 * wake up the thread if it was waiting for a free request.
 */
static void free_req(struct xen_blkif *blkif, struct pending_req *req)
{
	unsigned long flags;
	int was_empty;

	spin_lock_irqsave(&blkif->pending_free_lock, flags);
	was_empty = list_empty(&blkif->pending_free);
	list_add(&req->free_list, &blkif->pending_free);
	spin_unlock_irqrestore(&blkif->pending_free_lock, flags);
	if (was_empty)
		wake_up(&blkif->pending_free_wq);
}

/*
 * Routines for managing virtual block devices (vbds).
 */
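/*
 * Validate a request against the backing vbd: enforce read-only mode and
 * reject ranges that wrap around or extend past the end of the device. On
 * success, fill in the physical device and block_device for the request.
 */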
static int xen_vbd_translate(struct phys_req *req, struct xen_blkif *blkif,
			     int operation)
{
	struct xen_vbd *vbd = &blkif->vbd;
	int rc = -EACCES;

	if ((operation != READ) && vbd->readonly)
		goto out;

	if (likely(req->nr_sects)) {
		blkif_sector_t end = req->sector_number + req->nr_sects;

		if (unlikely(end < req->sector_number))
			goto out;
		if (unlikely(end > vbd_sz(vbd)))
			goto out;
	}

	req->dev  = vbd->pdevice;
	req->bdev = vbd->bdev;
	rc = 0;

 out:
	return rc;
}

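/*
 * Propagate a backend size change to the frontend: update vbd->size and
 * rewrite the 'sectors' node (plus the current state) in xenstore inside a
 * transaction, retrying on -EAGAIN.
 */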
static void xen_vbd_resize(struct xen_blkif *blkif)
{
	struct xen_vbd *vbd = &blkif->vbd;
	struct xenbus_transaction xbt;
	int err;
	struct xenbus_device *dev = xen_blkbk_xenbus(blkif->be);
	unsigned long long new_size = vbd_sz(vbd);

	pr_info("VBD Resize: Domid: %d, Device: (%d, %d)\n",
		blkif->domid, MAJOR(vbd->pdevice), MINOR(vbd->pdevice));
	pr_info("VBD Resize: new size %llu\n", new_size);
	vbd->size = new_size;
again:
	err = xenbus_transaction_start(&xbt);
	if (err) {
		pr_warn("Error starting transaction\n");
		return;
	}
	err = xenbus_printf(xbt, dev->nodename, "sectors", "%llu",
			    (unsigned long long)vbd_sz(vbd));
	if (err) {
		pr_warn("Error writing new size\n");
		goto abort;
	}
	/*
	 * Write the current state; we will use this to synchronize
	 * the front-end. If the current state is "connected" the
	 * front-end will get the new size information online.
	 */
	err = xenbus_printf(xbt, dev->nodename, "state", "%d", dev->state);
	if (err) {
		pr_warn("Error writing the state\n");
		goto abort;
	}

	err = xenbus_transaction_end(xbt, 0);
	if (err == -EAGAIN)
		goto again;
	if (err)
		pr_warn("Error ending transaction\n");
	return;
abort:
	xenbus_transaction_end(xbt, 1);
}


/*
 * Notification from the guest OS.
 */
static void blkif_notify_work(struct xen_blkif *blkif)
{
	blkif->waiting_reqs = 1;
	wake_up(&blkif->wq);
}

irqreturn_t xen_blkif_be_int(int irq, void *dev_id)
{
	blkif_notify_work(dev_id);
	return IRQ_HANDLED;
}

/*
 * SCHEDULER FUNCTIONS
 */

static void print_stats(struct xen_blkif *blkif)
{
	pr_info("(%s): oo %3llu  |  rd %4llu  |  wr %4llu  |  f %4llu"
		 "  |  ds %4llu | pg: %4u/%4d\n",
		 current->comm, blkif->st_oo_req,
		 blkif->st_rd_req, blkif->st_wr_req,
		 blkif->st_f_req, blkif->st_ds_req,
		 blkif->persistent_gnt_c,
		 xen_blkif_max_pgrants);
	blkif->st_print = jiffies + msecs_to_jiffies(10 * 1000);
	blkif->st_rd_req = 0;
	blkif->st_wr_req = 0;
	blkif->st_oo_req = 0;
	blkif->st_ds_req = 0;
}

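/*
 * Main loop of the per-backend kernel thread: wait for ring kicks and for
 * free pending_reqs, process ring requests, and periodically run the
 * persistent-grant LRU purge and shrink the free page pool.
 */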
int xen_blkif_schedule(void *arg)
{
	struct xen_blkif *blkif = arg;
	struct xen_vbd *vbd = &blkif->vbd;
	unsigned long timeout;
	int ret;

	xen_blkif_get(blkif);

	while (!kthread_should_stop()) {
		if (try_to_freeze())
			continue;
		if (unlikely(vbd->size != vbd_sz(vbd)))
			xen_vbd_resize(blkif);

		timeout = msecs_to_jiffies(LRU_INTERVAL);

		timeout = wait_event_interruptible_timeout(
			blkif->wq,
			blkif->waiting_reqs || kthread_should_stop(),
			timeout);
		if (timeout == 0)
			goto purge_gnt_list;
		timeout = wait_event_interruptible_timeout(
			blkif->pending_free_wq,
			!list_empty(&blkif->pending_free) ||
			kthread_should_stop(),
			timeout);
		if (timeout == 0)
			goto purge_gnt_list;

		blkif->waiting_reqs = 0;
		smp_mb(); /* clear flag *before* checking for work */

		ret = do_block_io_op(blkif);
		if (ret > 0)
			blkif->waiting_reqs = 1;
		if (ret == -EACCES)
			wait_event_interruptible(blkif->shutdown_wq,
						 kthread_should_stop());

purge_gnt_list:
		if (blkif->vbd.feature_gnt_persistent &&
		    time_after(jiffies, blkif->next_lru)) {
			purge_persistent_gnt(blkif);
			blkif->next_lru = jiffies + msecs_to_jiffies(LRU_INTERVAL);
		}

		/* Shrink if we have more than xen_blkif_max_buffer_pages */
		shrink_free_pagepool(blkif, xen_blkif_max_buffer_pages);

		if (log_stats && time_after(jiffies, blkif->st_print))
			print_stats(blkif);
	}

	/* Drain pending purge work */
	flush_work(&blkif->persistent_purge_work);

	if (log_stats)
		print_stats(blkif);

	blkif->xenblkd = NULL;
	xen_blkif_put(blkif);

	return 0;
}


/*
 * Remove persistent grants and empty the pool of free pages
 */
void xen_blkbk_free_caches(struct xen_blkif *blkif)
{
	/* Free all persistent grant pages */
	if (!RB_EMPTY_ROOT(&blkif->persistent_gnts))
		free_persistent_gnts(blkif, &blkif->persistent_gnts,
			blkif->persistent_gnt_c);

	BUG_ON(!RB_EMPTY_ROOT(&blkif->persistent_gnts));
	blkif->persistent_gnt_c = 0;

	/* Since we are shutting down remove all pages from the buffer */
	shrink_free_pagepool(blkif, 0 /* All */);
}

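/*
 * Convert up to 'num' grant_page entries into unmap operations. Persistent
 * grants are simply released back to the tree (no unmap needed), grants
 * without a valid handle are skipped, and everything else is queued in
 * unmap_ops. Returns the number of unmap operations prepared.
 */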
static unsigned int xen_blkbk_unmap_prepare(
	struct xen_blkif *blkif,
	struct grant_page **pages,
	unsigned int num,
	struct gnttab_unmap_grant_ref *unmap_ops,
	struct page **unmap_pages)
{
	unsigned int i, invcount = 0;

	for (i = 0; i < num; i++) {
		if (pages[i]->persistent_gnt != NULL) {
			put_persistent_gnt(blkif, pages[i]->persistent_gnt);
			continue;
		}
		if (pages[i]->handle == BLKBACK_INVALID_HANDLE)
			continue;
		unmap_pages[invcount] = pages[i]->page;
		gnttab_set_unmap_op(&unmap_ops[invcount], vaddr(pages[i]->page),
				    GNTMAP_host_map, pages[i]->handle);
		pages[i]->handle = BLKBACK_INVALID_HANDLE;
		invcount++;
	}

	return invcount;
}

static void xen_blkbk_unmap_and_respond_callback(int result, struct gntab_unmap_queue_data *data)
{
	struct pending_req *pending_req = (struct pending_req *)(data->data);
	struct xen_blkif *blkif = pending_req->blkif;

	/*
	 * BUG_ON used to reproduce existing behaviour,
	 * but is this the best way to deal with this?
	 */
	BUG_ON(result);

	put_free_pages(blkif, data->pages, data->count);
	make_response(blkif, pending_req->id,
		      pending_req->operation, pending_req->status);
	free_req(blkif, pending_req);
	/*
	 * Make sure the request is freed before releasing blkif,
	 * or there could be a race between free_req and the
	 * cleanup done in xen_blkif_free during shutdown.
	 *
	 * NB: The fact that we might try to wake up pending_free_wq
	 * before drain_complete (in case there's a drain going on)
	 * is not a problem with our current implementation, because
	 * we can be sure there's no thread waiting on pending_free_wq
	 * if there's a drain going on, but it has to be taken into
	 * account if the current model is changed.
	 */
	if (atomic_dec_and_test(&blkif->inflight) && atomic_read(&blkif->drain)) {
		complete(&blkif->drain_complete);
	}
	xen_blkif_put(blkif);
}

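/*
 * Kick off an asynchronous unmap of all grants of a finished request; the
 * response to the frontend is sent from the completion callback above.
 */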
static void xen_blkbk_unmap_and_respond(struct pending_req *req)
{
	struct gntab_unmap_queue_data *work = &req->gnttab_unmap_data;
	struct xen_blkif *blkif = req->blkif;
	struct grant_page **pages = req->segments;
	unsigned int invcount;

	invcount = xen_blkbk_unmap_prepare(blkif, pages, req->nr_segs,
					   req->unmap, req->unmap_pages);

	work->data = req;
	work->done = xen_blkbk_unmap_and_respond_callback;
	work->unmap_ops = req->unmap;
	work->kunmap_ops = NULL;
	work->pages = req->unmap_pages;
	work->count = invcount;

	gnttab_unmap_refs_async(&req->gnttab_unmap_data);
}

/*
 * Unmap the grant references.
 *
 * This could accumulate ops up to the batch size to reduce the number
 * of hypercalls, but since this is only used in error paths there's
 * no real need.
 */
static void xen_blkbk_unmap(struct xen_blkif *blkif,
                            struct grant_page *pages[],
                            int num)
{
	struct gnttab_unmap_grant_ref unmap[BLKIF_MAX_SEGMENTS_PER_REQUEST];
	struct page *unmap_pages[BLKIF_MAX_SEGMENTS_PER_REQUEST];
	unsigned int invcount = 0;
	int ret;

	while (num) {
		unsigned int batch = min(num, BLKIF_MAX_SEGMENTS_PER_REQUEST);

		invcount = xen_blkbk_unmap_prepare(blkif, pages, batch,
						   unmap, unmap_pages);
		if (invcount) {
			ret = gnttab_unmap_refs(unmap, NULL, unmap_pages, invcount);
			BUG_ON(ret);
			put_free_pages(blkif, unmap_pages, invcount);
		}
		pages += batch;
		num -= batch;
	}
}

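/*
 * Map the grant references of a request into the backend address space.
 * Grants that are already persistently mapped are reused; the rest are
 * mapped in batches of BLKIF_MAX_SEGMENTS_PER_REQUEST and, when there is
 * room, promoted to persistent grants. Returns non-zero if any grant
 * failed to map.
 */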
static int xen_blkbk_map(struct xen_blkif *blkif,
			 struct grant_page *pages[],
			 int num, bool ro)
{
	struct gnttab_map_grant_ref map[BLKIF_MAX_SEGMENTS_PER_REQUEST];
	struct page *pages_to_gnt[BLKIF_MAX_SEGMENTS_PER_REQUEST];
	struct persistent_gnt *persistent_gnt = NULL;
	phys_addr_t addr = 0;
	int i, seg_idx, new_map_idx;
	int segs_to_map = 0;
	int ret = 0;
	int last_map = 0, map_until = 0;
	int use_persistent_gnts;

	use_persistent_gnts = (blkif->vbd.feature_gnt_persistent);

	/*
	 * Fill out preq.nr_sects with the proper number of sectors, and set
	 * up map[..] with the PFN of the page in our domain with the
	 * corresponding grant reference for each page.
	 */
again:
	for (i = map_until; i < num; i++) {
		uint32_t flags;

		if (use_persistent_gnts)
			persistent_gnt = get_persistent_gnt(
				blkif,
				pages[i]->gref);

		if (persistent_gnt) {
			/*
			 * We are using persistent grants and
			 * the grant is already mapped
			 */
			pages[i]->page = persistent_gnt->page;
			pages[i]->persistent_gnt = persistent_gnt;
		} else {
			if (get_free_page(blkif, &pages[i]->page))
				goto out_of_memory;
			addr = vaddr(pages[i]->page);
			pages_to_gnt[segs_to_map] = pages[i]->page;
			pages[i]->persistent_gnt = NULL;
			flags = GNTMAP_host_map;
			if (!use_persistent_gnts && ro)
				flags |= GNTMAP_readonly;
			gnttab_set_map_op(&map[segs_to_map++], addr,
					  flags, pages[i]->gref,
					  blkif->domid);
		}
		map_until = i + 1;
		if (segs_to_map == BLKIF_MAX_SEGMENTS_PER_REQUEST)
			break;
	}

	if (segs_to_map) {
		ret = gnttab_map_refs(map, NULL, pages_to_gnt, segs_to_map);
		BUG_ON(ret);
	}

	/*
	 * Now swizzle the MFN in our domain with the MFN from the other domain
	 * so that when we access vaddr(pending_req,i) it has the contents of
	 * the page from the other domain.
	 */
	for (seg_idx = last_map, new_map_idx = 0; seg_idx < map_until; seg_idx++) {
		if (!pages[seg_idx]->persistent_gnt) {
			/* This is a newly mapped grant */
			BUG_ON(new_map_idx >= segs_to_map);
			if (unlikely(map[new_map_idx].status != 0)) {
				pr_debug("invalid buffer -- could not remap it\n");
				put_free_pages(blkif, &pages[seg_idx]->page, 1);
				pages[seg_idx]->handle = BLKBACK_INVALID_HANDLE;
				ret |= 1;
				goto next;
			}
			pages[seg_idx]->handle = map[new_map_idx].handle;
		} else {
			continue;
		}
		if (use_persistent_gnts &&
		    blkif->persistent_gnt_c < xen_blkif_max_pgrants) {
			/*
			 * We are using persistent grants, the grant is
			 * not mapped but we might have room for it.
			 */
			persistent_gnt = kmalloc(sizeof(struct persistent_gnt),
				                 GFP_KERNEL);
			if (!persistent_gnt) {
				/*
				 * If we don't have enough memory to
				 * allocate the persistent_gnt struct
				 * map this grant non-persistently
				 */
				goto next;
			}
			persistent_gnt->gnt = map[new_map_idx].ref;
			persistent_gnt->handle = map[new_map_idx].handle;
			persistent_gnt->page = pages[seg_idx]->page;
			if (add_persistent_gnt(blkif,
			                       persistent_gnt)) {
				kfree(persistent_gnt);
				persistent_gnt = NULL;
				goto next;
			}
			pages[seg_idx]->persistent_gnt = persistent_gnt;
			pr_debug("grant %u added to the tree of persistent grants, using %u/%u\n",
				 persistent_gnt->gnt, blkif->persistent_gnt_c,
				 xen_blkif_max_pgrants);
			goto next;
		}
		if (use_persistent_gnts && !blkif->vbd.overflow_max_grants) {
			blkif->vbd.overflow_max_grants = 1;
			pr_debug("domain %u, device %#x is using maximum number of persistent grants\n",
			         blkif->domid, blkif->vbd.handle);
		}
		/*
		 * We could not map this grant persistently, so use it as
		 * a non-persistent grant.
		 */
next:
		new_map_idx++;
	}
	segs_to_map = 0;
	last_map = map_until;
	if (map_until != num)
		goto again;

	return ret;

out_of_memory:
	pr_alert("%s: out of memory\n", __func__);
	put_free_pages(blkif, pages_to_gnt, segs_to_map);
	return -ENOMEM;
}

static int xen_blkbk_map_seg(struct pending_req *pending_req)
{
	int rc;

	rc = xen_blkbk_map(pending_req->blkif, pending_req->segments,
			   pending_req->nr_segs,
	                   (pending_req->operation != BLKIF_OP_READ));

	return rc;
}

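/*
 * For a BLKIF_OP_INDIRECT request: map the indirect descriptor pages, copy
 * out the grant reference, offset and length of every data segment,
 * validate the sector bounds, and unmap the descriptor pages again.
 */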
static int xen_blkbk_parse_indirect(struct blkif_request *req,
				    struct pending_req *pending_req,
				    struct seg_buf seg[],
				    struct phys_req *preq)
{
	struct grant_page **pages = pending_req->indirect_pages;
	struct xen_blkif *blkif = pending_req->blkif;
	int indirect_grefs, rc, n, nseg, i;
	struct blkif_request_segment *segments = NULL;

	nseg = pending_req->nr_segs;
	indirect_grefs = INDIRECT_PAGES(nseg);
	BUG_ON(indirect_grefs > BLKIF_MAX_INDIRECT_PAGES_PER_REQUEST);

	for (i = 0; i < indirect_grefs; i++)
		pages[i]->gref = req->u.indirect.indirect_grefs[i];

	rc = xen_blkbk_map(blkif, pages, indirect_grefs, true);
	if (rc)
		goto unmap;

	for (n = 0, i = 0; n < nseg; n++) {
		if ((n % SEGS_PER_INDIRECT_FRAME) == 0) {
			/* Map indirect segments */
			if (segments)
				kunmap_atomic(segments);
			segments = kmap_atomic(pages[n/SEGS_PER_INDIRECT_FRAME]->page);
		}
		i = n % SEGS_PER_INDIRECT_FRAME;
		pending_req->segments[n]->gref = segments[i].gref;
		seg[n].nsec = segments[i].last_sect -
			segments[i].first_sect + 1;
		seg[n].offset = (segments[i].first_sect << 9);
		if ((segments[i].last_sect >= (XEN_PAGE_SIZE >> 9)) ||
		    (segments[i].last_sect < segments[i].first_sect)) {
			rc = -EINVAL;
			goto unmap;
		}
		preq->nr_sects += seg[n].nsec;
	}

unmap:
	if (segments)
		kunmap_atomic(segments);
	xen_blkbk_unmap(blkif, pages, indirect_grefs);
	return rc;
}

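/*
 * Handle a BLKIF_OP_DISCARD request: translate and bound-check the range,
 * then forward it to the underlying device via blkdev_issue_discard,
 * honouring the secure-discard flag when the device supports it.
 */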
static int dispatch_discard_io(struct xen_blkif *blkif,
				struct blkif_request *req)
{
	int err = 0;
	int status = BLKIF_RSP_OKAY;
	struct block_device *bdev = blkif->vbd.bdev;
	unsigned long secure;
	struct phys_req preq;

	xen_blkif_get(blkif);

	preq.sector_number = req->u.discard.sector_number;
	preq.nr_sects      = req->u.discard.nr_sectors;

	err = xen_vbd_translate(&preq, blkif, WRITE);
	if (err) {
		pr_warn("access denied: DISCARD [%llu->%llu] on dev=%04x\n",
			preq.sector_number,
			preq.sector_number + preq.nr_sects, blkif->vbd.pdevice);
		goto fail_response;
	}
	blkif->st_ds_req++;

	secure = (blkif->vbd.discard_secure &&
		 (req->u.discard.flag & BLKIF_DISCARD_SECURE)) ?
		 BLKDEV_DISCARD_SECURE : 0;

	err = blkdev_issue_discard(bdev, req->u.discard.sector_number,
				   req->u.discard.nr_sectors,
				   GFP_KERNEL, secure);
fail_response:
	if (err == -EOPNOTSUPP) {
		pr_debug("discard op failed, not supported\n");
		status = BLKIF_RSP_EOPNOTSUPP;
	} else if (err)
		status = BLKIF_RSP_ERROR;

	make_response(blkif, req->u.discard.id, req->operation, status);
	xen_blkif_put(blkif);
	return err;
}

static int dispatch_other_io(struct xen_blkif *blkif,
			     struct blkif_request *req,
			     struct pending_req *pending_req)
{
	free_req(blkif, pending_req);
	make_response(blkif, req->u.other.id, req->operation,
		      BLKIF_RSP_EOPNOTSUPP);
	return -EIO;
}

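/*
 * Wait (up to kthread stop) for all in-flight requests to drain; used to
 * order a WRITE_BARRIER behind the I/O that preceded it.
 */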
static void xen_blk_drain_io(struct xen_blkif *blkif)
{
	atomic_set(&blkif->drain, 1);
	do {
		if (atomic_read(&blkif->inflight) == 0)
			break;
		wait_for_completion_interruptible_timeout(
				&blkif->drain_complete, HZ);

		if (!atomic_read(&blkif->drain))
			break;
	} while (!kthread_should_stop());
	atomic_set(&blkif->drain, 0);
}

/*
 * Completion callback on the bio's. Called as bio->bi_end_io().
 */

static void __end_block_io_op(struct pending_req *pending_req, int error)
{
	/* An error fails the entire request. */
	if ((pending_req->operation == BLKIF_OP_FLUSH_DISKCACHE) &&
	    (error == -EOPNOTSUPP)) {
		pr_debug("flush diskcache op failed, not supported\n");
		xen_blkbk_flush_diskcache(XBT_NIL, pending_req->blkif->be, 0);
		pending_req->status = BLKIF_RSP_EOPNOTSUPP;
	} else if ((pending_req->operation == BLKIF_OP_WRITE_BARRIER) &&
		    (error == -EOPNOTSUPP)) {
		pr_debug("write barrier op failed, not supported\n");
		xen_blkbk_barrier(XBT_NIL, pending_req->blkif->be, 0);
		pending_req->status = BLKIF_RSP_EOPNOTSUPP;
	} else if (error) {
		pr_debug("Buffer not up-to-date at end of operation, error=%d\n",
			 error);
		pending_req->status = BLKIF_RSP_ERROR;
	}

	/*
	 * If all of the bio's have completed it is time to unmap
	 * the grant references associated with 'request' and provide
	 * the proper response on the ring.
	 */
	if (atomic_dec_and_test(&pending_req->pendcnt))
		xen_blkbk_unmap_and_respond(pending_req);
}

/*
 * bio callback.
 */
static void end_block_io_op(struct bio *bio)
{
	__end_block_io_op(bio->bi_private, bio->bi_error);
	bio_put(bio);
}

/*
 * Function to copy the 'struct blkif_request' from the ring buffer
 * (which has the sectors we want, number of them, grant references, etc),
 * and transmute it to the block API to hand it over to the proper block disk.
 */
static int
__do_block_io_op(struct xen_blkif *blkif)
{
	union blkif_back_rings *blk_rings = &blkif->blk_rings;
	struct blkif_request req;
	struct pending_req *pending_req;
	RING_IDX rc, rp;
	int more_to_do = 0;

	rc = blk_rings->common.req_cons;
	rp = blk_rings->common.sring->req_prod;
	rmb(); /* Ensure we see queued requests up to 'rp'. */

	if (RING_REQUEST_PROD_OVERFLOW(&blk_rings->common, rp)) {
		rc = blk_rings->common.rsp_prod_pvt;
		pr_warn("Frontend provided bogus ring requests (%d - %d = %d). Halting ring processing on dev=%04x\n",
			rp, rc, rp - rc, blkif->vbd.pdevice);
		return -EACCES;
	}
	while (rc != rp) {

		if (RING_REQUEST_CONS_OVERFLOW(&blk_rings->common, rc))
			break;

		if (kthread_should_stop()) {
			more_to_do = 1;
			break;
		}

		pending_req = alloc_req(blkif);
		if (NULL == pending_req) {
			blkif->st_oo_req++;
			more_to_do = 1;
			break;
		}

		switch (blkif->blk_protocol) {
		case BLKIF_PROTOCOL_NATIVE:
			memcpy(&req, RING_GET_REQUEST(&blk_rings->native, rc), sizeof(req));
			break;
		case BLKIF_PROTOCOL_X86_32:
			blkif_get_x86_32_req(&req, RING_GET_REQUEST(&blk_rings->x86_32, rc));
			break;
		case BLKIF_PROTOCOL_X86_64:
			blkif_get_x86_64_req(&req, RING_GET_REQUEST(&blk_rings->x86_64, rc));
			break;
		default:
			BUG();
		}
		blk_rings->common.req_cons = ++rc; /* before make_response() */

		/* Apply all sanity checks to /private copy/ of request. */
		barrier();

		switch (req.operation) {
		case BLKIF_OP_READ:
		case BLKIF_OP_WRITE:
		case BLKIF_OP_WRITE_BARRIER:
		case BLKIF_OP_FLUSH_DISKCACHE:
		case BLKIF_OP_INDIRECT:
			if (dispatch_rw_block_io(blkif, &req, pending_req))
				goto done;
			break;
		case BLKIF_OP_DISCARD:
			free_req(blkif, pending_req);
			if (dispatch_discard_io(blkif, &req))
				goto done;
			break;
		default:
			if (dispatch_other_io(blkif, &req, pending_req))
				goto done;
			break;
		}

		/* Yield point for this unbounded loop. */
		cond_resched();
	}
done:
	return more_to_do;
}

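/*
 * Drain the ring, re-checking for late-arriving requests with
 * RING_FINAL_CHECK_FOR_REQUESTS before going back to sleep.
 */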
static int
do_block_io_op(struct xen_blkif *blkif)
{
	union blkif_back_rings *blk_rings = &blkif->blk_rings;
	int more_to_do;

	do {
		more_to_do = __do_block_io_op(blkif);
		if (more_to_do)
			break;

		RING_FINAL_CHECK_FOR_REQUESTS(&blk_rings->common, more_to_do);
	} while (more_to_do);

	return more_to_do;
}
/*
 * Transmute the 'struct blkif_request' to a proper 'struct bio'
 * and call 'submit_bio' to pass it to the underlying storage.
 */
static int dispatch_rw_block_io(struct xen_blkif *blkif,
				struct blkif_request *req,
				struct pending_req *pending_req)
{
	struct phys_req preq;
	struct seg_buf *seg = pending_req->seg;
	unsigned int nseg;
	struct bio *bio = NULL;
	struct bio **biolist = pending_req->biolist;
	int i, nbio = 0;
	int operation;
	struct blk_plug plug;
	bool drain = false;
	struct grant_page **pages = pending_req->segments;
	unsigned short req_operation;

	req_operation = req->operation == BLKIF_OP_INDIRECT ?
			req->u.indirect.indirect_op : req->operation;

	if ((req->operation == BLKIF_OP_INDIRECT) &&
	    (req_operation != BLKIF_OP_READ) &&
	    (req_operation != BLKIF_OP_WRITE)) {
		pr_debug("Invalid indirect operation (%u)\n", req_operation);
		goto fail_response;
	}

	switch (req_operation) {
	case BLKIF_OP_READ:
		blkif->st_rd_req++;
		operation = READ;
		break;
	case BLKIF_OP_WRITE:
		blkif->st_wr_req++;
		operation = WRITE_ODIRECT;
		break;
	case BLKIF_OP_WRITE_BARRIER:
		drain = true;
		/* fall through */
	case BLKIF_OP_FLUSH_DISKCACHE:
		blkif->st_f_req++;
		operation = WRITE_FLUSH;
		break;
	default:
		operation = 0; /* make gcc happy */
		goto fail_response;
		break;
	}

	/* Check that the number of segments is sane. */
	nseg = req->operation == BLKIF_OP_INDIRECT ?
	       req->u.indirect.nr_segments : req->u.rw.nr_segments;

	if (unlikely(nseg == 0 && operation != WRITE_FLUSH) ||
	    unlikely((req->operation != BLKIF_OP_INDIRECT) &&
		     (nseg > BLKIF_MAX_SEGMENTS_PER_REQUEST)) ||
	    unlikely((req->operation == BLKIF_OP_INDIRECT) &&
		     (nseg > MAX_INDIRECT_SEGMENTS))) {
		pr_debug("Bad number of segments in request (%d)\n", nseg);
		/* Haven't submitted any bio's yet. */
		goto fail_response;
	}

	preq.nr_sects      = 0;

	pending_req->blkif     = blkif;
	pending_req->id        = req->u.rw.id;
	pending_req->operation = req_operation;
	pending_req->status    = BLKIF_RSP_OKAY;
	pending_req->nr_segs   = nseg;

	if (req->operation != BLKIF_OP_INDIRECT) {
		preq.dev               = req->u.rw.handle;
		preq.sector_number     = req->u.rw.sector_number;
		for (i = 0; i < nseg; i++) {
			pages[i]->gref = req->u.rw.seg[i].gref;
			seg[i].nsec = req->u.rw.seg[i].last_sect -
				req->u.rw.seg[i].first_sect + 1;
			seg[i].offset = (req->u.rw.seg[i].first_sect << 9);
			if ((req->u.rw.seg[i].last_sect >= (XEN_PAGE_SIZE >> 9)) ||
			    (req->u.rw.seg[i].last_sect <
			     req->u.rw.seg[i].first_sect))
				goto fail_response;
			preq.nr_sects += seg[i].nsec;
		}
	} else {
		preq.dev               = req->u.indirect.handle;
		preq.sector_number     = req->u.indirect.sector_number;
		if (xen_blkbk_parse_indirect(req, pending_req, seg, &preq))
			goto fail_response;
	}

	if (xen_vbd_translate(&preq, blkif, operation) != 0) {
		pr_debug("access denied: %s of [%llu,%llu] on dev=%04x\n",
			 operation == READ ? "read" : "write",
			 preq.sector_number,
			 preq.sector_number + preq.nr_sects,
			 blkif->vbd.pdevice);
		goto fail_response;
	}

	/*
	 * This check _MUST_ be done after xen_vbd_translate as the preq.bdev
	 * is set there.
	 */
	for (i = 0; i < nseg; i++) {
		if (((int)preq.sector_number|(int)seg[i].nsec) &
		    ((bdev_logical_block_size(preq.bdev) >> 9) - 1)) {
			pr_debug("Misaligned I/O request from domain %d\n",
				 blkif->domid);
			goto fail_response;
		}
	}

	/*
	 * Wait on all outstanding I/O's and once that has been completed
	 * issue the WRITE_FLUSH.
	 */
	if (drain)
		xen_blk_drain_io(pending_req->blkif);

	/*
	 * If we have failed at this point, we need to undo the M2P override,
	 * set gnttab_set_unmap_op on all of the grant references and perform
	 * the hypercall to unmap the grants - that is all done in
	 * xen_blkbk_unmap.
	 */
	if (xen_blkbk_map_seg(pending_req))
		goto fail_flush;

	/*
	 * The corresponding xen_blkif_put is done in
	 * xen_blkbk_unmap_and_respond_callback, once all bio's for this
	 * request have completed and been unmapped.
	 */
	xen_blkif_get(blkif);
	atomic_inc(&blkif->inflight);

	for (i = 0; i < nseg; i++) {
		while ((bio == NULL) ||
		       (bio_add_page(bio,
				     pages[i]->page,
				     seg[i].nsec << 9,
				     seg[i].offset) == 0)) {

			int nr_iovecs = min_t(int, (nseg-i), BIO_MAX_PAGES);
			bio = bio_alloc(GFP_KERNEL, nr_iovecs);
			if (unlikely(bio == NULL))
				goto fail_put_bio;

			biolist[nbio++] = bio;
			bio->bi_bdev    = preq.bdev;
			bio->bi_private = pending_req;
			bio->bi_end_io  = end_block_io_op;
			bio->bi_iter.bi_sector  = preq.sector_number;
		}

		preq.sector_number += seg[i].nsec;
	}

	/* This will be hit if the operation was a flush. */
	if (!bio) {
		BUG_ON(operation != WRITE_FLUSH);

		bio = bio_alloc(GFP_KERNEL, 0);
		if (unlikely(bio == NULL))
			goto fail_put_bio;

		biolist[nbio++] = bio;
		bio->bi_bdev    = preq.bdev;
		bio->bi_private = pending_req;
		bio->bi_end_io  = end_block_io_op;
	}

	atomic_set(&pending_req->pendcnt, nbio);
	blk_start_plug(&plug);

	for (i = 0; i < nbio; i++)
		submit_bio(operation, biolist[i]);

	/* Let the I/Os go.. */
	blk_finish_plug(&plug);

	if (operation == READ)
		blkif->st_rd_sect += preq.nr_sects;
	else if (operation & WRITE)
		blkif->st_wr_sect += preq.nr_sects;

	return 0;

 fail_flush:
	xen_blkbk_unmap(blkif, pending_req->segments,
	                pending_req->nr_segs);
 fail_response:
	/* Haven't submitted any bio's yet. */
	make_response(blkif, req->u.rw.id, req_operation, BLKIF_RSP_ERROR);
	free_req(blkif, pending_req);
	msleep(1); /* back off a bit */
	return -EIO;

 fail_put_bio:
	for (i = 0; i < nbio; i++)
		bio_put(biolist[i]);
	atomic_set(&pending_req->pendcnt, 1);
	__end_block_io_op(pending_req, -EINVAL);
	msleep(1); /* back off a bit */
	return -EIO;
}

/*
 * Put a response on the ring on how the operation fared.
 */
static void make_response(struct xen_blkif *blkif, u64 id,
			  unsigned short op, int st)
{
	struct blkif_response  resp;
	unsigned long     flags;
	union blkif_back_rings *blk_rings = &blkif->blk_rings;
	int notify;

	resp.id        = id;
	resp.operation = op;
	resp.status    = st;

	spin_lock_irqsave(&blkif->blk_ring_lock, flags);
	/* Place on the response ring for the relevant domain. */
	switch (blkif->blk_protocol) {
	case BLKIF_PROTOCOL_NATIVE:
		memcpy(RING_GET_RESPONSE(&blk_rings->native, blk_rings->native.rsp_prod_pvt),
		       &resp, sizeof(resp));
		break;
	case BLKIF_PROTOCOL_X86_32:
		memcpy(RING_GET_RESPONSE(&blk_rings->x86_32, blk_rings->x86_32.rsp_prod_pvt),
		       &resp, sizeof(resp));
		break;
	case BLKIF_PROTOCOL_X86_64:
		memcpy(RING_GET_RESPONSE(&blk_rings->x86_64, blk_rings->x86_64.rsp_prod_pvt),
		       &resp, sizeof(resp));
		break;
	default:
		BUG();
	}
	blk_rings->common.rsp_prod_pvt++;
	RING_PUSH_RESPONSES_AND_CHECK_NOTIFY(&blk_rings->common, notify);
	spin_unlock_irqrestore(&blkif->blk_ring_lock, flags);
	if (notify)
		notify_remote_via_irq(blkif->irq);
}

static int __init xen_blkif_init(void)
{
	int rc = 0;

	if (!xen_domain())
		return -ENODEV;

	if (xen_blkif_max_ring_order > XENBUS_MAX_RING_GRANT_ORDER) {
		pr_info("Invalid max_ring_order (%d), will use default max: %d.\n",
			xen_blkif_max_ring_order, XENBUS_MAX_RING_GRANT_ORDER);
		xen_blkif_max_ring_order = XENBUS_MAX_RING_GRANT_ORDER;
	}

	rc = xen_blkif_interface_init();
	if (rc)
		goto failed_init;

	rc = xen_blkif_xenbus_init();
	if (rc)
		goto failed_init;

 failed_init:
	return rc;
}

module_init(xen_blkif_init);

MODULE_LICENSE("Dual BSD/GPL");
MODULE_ALIAS("xen-backend:vbd");