1 /******************************************************************************
2  *
3  * Back-end of the driver for virtual block devices. This portion of the
4  * driver exports a 'unified' block-device interface that can be accessed
5  * by any operating system that implements a compatible front end. A
6  * reference front-end implementation can be found in:
7  *  drivers/block/xen-blkfront.c
8  *
9  * Copyright (c) 2003-2004, Keir Fraser & Steve Hand
10  * Copyright (c) 2005, Christopher Clark
11  *
12  * This program is free software; you can redistribute it and/or
13  * modify it under the terms of the GNU General Public License version 2
14  * as published by the Free Software Foundation; or, when distributed
15  * separately from the Linux kernel or incorporated into other
16  * software packages, subject to the following license:
17  *
18  * Permission is hereby granted, free of charge, to any person obtaining a copy
19  * of this source file (the "Software"), to deal in the Software without
20  * restriction, including without limitation the rights to use, copy, modify,
21  * merge, publish, distribute, sublicense, and/or sell copies of the Software,
22  * and to permit persons to whom the Software is furnished to do so, subject to
23  * the following conditions:
24  *
25  * The above copyright notice and this permission notice shall be included in
26  * all copies or substantial portions of the Software.
27  *
28  * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
29  * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
30  * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
31  * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
32  * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
33  * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
34  * IN THE SOFTWARE.
35  */
36 
37 #define pr_fmt(fmt) "xen-blkback: " fmt
38 
39 #include <linux/spinlock.h>
40 #include <linux/kthread.h>
41 #include <linux/list.h>
42 #include <linux/delay.h>
43 #include <linux/freezer.h>
44 #include <linux/bitmap.h>
45 
46 #include <xen/events.h>
47 #include <xen/page.h>
48 #include <xen/xen.h>
49 #include <asm/xen/hypervisor.h>
50 #include <asm/xen/hypercall.h>
51 #include <xen/balloon.h>
52 #include <xen/grant_table.h>
53 #include "common.h"
54 
/*
 * Maximum number of unused free pages to keep in the internal buffer.
 * Setting this to a low value reduces the memory used by each backend,
 * but can incur a performance penalty.
 *
 * A sane value is xen_blkif_reqs * BLKIF_MAX_SEGMENTS_PER_REQUEST, but it
 * can be set to a lower value at the cost of degraded performance on some
 * intensive IO workloads.
 */
64 
65 static int xen_blkif_max_buffer_pages = 1024;
66 module_param_named(max_buffer_pages, xen_blkif_max_buffer_pages, int, 0644);
67 MODULE_PARM_DESC(max_buffer_pages,
68 "Maximum number of free pages to keep in each block backend buffer");
69 
/*
 * Maximum number of grants to map persistently in blkback. For maximum
 * performance this should be the total number of grants that can be used
 * to fill the ring, but since this might become too high, especially with
 * the use of indirect descriptors, we set it to a value that provides good
 * performance without using too much memory.
 *
 * When the list of persistent grants is full we clean it up using an LRU
 * algorithm.
 */
80 
81 static int xen_blkif_max_pgrants = 1056;
82 module_param_named(max_persistent_grants, xen_blkif_max_pgrants, int, 0644);
83 MODULE_PARM_DESC(max_persistent_grants,
84                  "Maximum number of grants to map persistently");
85 
86 /*
87  * The LRU mechanism to clean the lists of persistent grants needs to
88  * be executed periodically. The time interval between consecutive executions
89  * of the purge mechanism is set in ms.
90  */
91 #define LRU_INTERVAL 100
92 
/*
 * When the persistent grants list is full we will remove unused grants
 * from the list. This is the percentage of grants to remove on each LRU
 * execution.
 */
98 #define LRU_PERCENT_CLEAN 5
99 
/* Run-time switchable: /sys/module/xen_blkback/parameters/ */
101 static unsigned int log_stats;
102 module_param(log_stats, int, 0644);
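
/*
 * The parameters above can also be changed at run time through sysfs, e.g.
 * (assuming the module appears under sysfs as "xen_blkback"):
 *
 *   echo 512 > /sys/module/xen_blkback/parameters/max_buffer_pages
 *   echo 1   > /sys/module/xen_blkback/parameters/log_stats
 */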
103 
104 #define BLKBACK_INVALID_HANDLE (~0)
105 
106 /* Number of free pages to remove on each call to gnttab_free_pages */
107 #define NUM_BATCH_FREE_PAGES 10
108 
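/*
 * Pool of pages used to map grants: get_free_page() hands out a page, taking
 * it from the per-backend cache of unused pages when possible and falling
 * back to gnttab_alloc_pages() when the cache is empty; put_free_pages()
 * returns pages to the cache; shrink_free_pagepool() trims the cache down to
 * 'num' pages and releases the surplus via gnttab_free_pages().
 */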
109 static inline int get_free_page(struct xen_blkif *blkif, struct page **page)
110 {
111 	unsigned long flags;
112 
113 	spin_lock_irqsave(&blkif->free_pages_lock, flags);
114 	if (list_empty(&blkif->free_pages)) {
115 		BUG_ON(blkif->free_pages_num != 0);
116 		spin_unlock_irqrestore(&blkif->free_pages_lock, flags);
117 		return gnttab_alloc_pages(1, page);
118 	}
119 	BUG_ON(blkif->free_pages_num == 0);
120 	page[0] = list_first_entry(&blkif->free_pages, struct page, lru);
121 	list_del(&page[0]->lru);
122 	blkif->free_pages_num--;
123 	spin_unlock_irqrestore(&blkif->free_pages_lock, flags);
124 
125 	return 0;
126 }
127 
128 static inline void put_free_pages(struct xen_blkif *blkif, struct page **page,
129                                   int num)
130 {
131 	unsigned long flags;
132 	int i;
133 
134 	spin_lock_irqsave(&blkif->free_pages_lock, flags);
135 	for (i = 0; i < num; i++)
136 		list_add(&page[i]->lru, &blkif->free_pages);
137 	blkif->free_pages_num += num;
138 	spin_unlock_irqrestore(&blkif->free_pages_lock, flags);
139 }
140 
141 static inline void shrink_free_pagepool(struct xen_blkif *blkif, int num)
142 {
143 	/* Remove requested pages in batches of NUM_BATCH_FREE_PAGES */
144 	struct page *page[NUM_BATCH_FREE_PAGES];
145 	unsigned int num_pages = 0;
146 	unsigned long flags;
147 
148 	spin_lock_irqsave(&blkif->free_pages_lock, flags);
149 	while (blkif->free_pages_num > num) {
150 		BUG_ON(list_empty(&blkif->free_pages));
151 		page[num_pages] = list_first_entry(&blkif->free_pages,
152 		                                   struct page, lru);
153 		list_del(&page[num_pages]->lru);
154 		blkif->free_pages_num--;
155 		if (++num_pages == NUM_BATCH_FREE_PAGES) {
156 			spin_unlock_irqrestore(&blkif->free_pages_lock, flags);
157 			gnttab_free_pages(num_pages, page);
158 			spin_lock_irqsave(&blkif->free_pages_lock, flags);
159 			num_pages = 0;
160 		}
161 	}
162 	spin_unlock_irqrestore(&blkif->free_pages_lock, flags);
163 	if (num_pages != 0)
164 		gnttab_free_pages(num_pages, page);
165 }
166 
167 #define vaddr(page) ((unsigned long)pfn_to_kaddr(page_to_pfn(page)))
168 
169 static int do_block_io_op(struct xen_blkif *blkif);
170 static int dispatch_rw_block_io(struct xen_blkif *blkif,
171 				struct blkif_request *req,
172 				struct pending_req *pending_req);
173 static void make_response(struct xen_blkif *blkif, u64 id,
174 			  unsigned short op, int st);
175 
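/*
 * Iterate over a rb tree of grants in a way that is safe against removal:
 * 'n' caches the next node before the loop body runs, so the body may
 * rb_erase() and free 'pos' without breaking the walk.
 */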
176 #define foreach_grant_safe(pos, n, rbtree, node) \
177 	for ((pos) = container_of(rb_first((rbtree)), typeof(*(pos)), node), \
178 	     (n) = (&(pos)->node != NULL) ? rb_next(&(pos)->node) : NULL; \
179 	     &(pos)->node != NULL; \
180 	     (pos) = container_of(n, typeof(*(pos)), node), \
181 	     (n) = (&(pos)->node != NULL) ? rb_next(&(pos)->node) : NULL)
182 
183 
/*
 * We don't need locking around the persistent grant helpers
 * because blkback uses a single thread for each backend, so we
 * can be sure that these functions will never be called concurrently.
 *
 * The only exception to that is put_persistent_gnt, which can be called
 * from interrupt context (by xen_blkbk_unmap), so we have to use atomic
 * bit operations to modify the flags of a persistent grant and to count
 * the number of used grants.
 */
194 static int add_persistent_gnt(struct xen_blkif *blkif,
195 			       struct persistent_gnt *persistent_gnt)
196 {
197 	struct rb_node **new = NULL, *parent = NULL;
198 	struct persistent_gnt *this;
199 
200 	if (blkif->persistent_gnt_c >= xen_blkif_max_pgrants) {
201 		if (!blkif->vbd.overflow_max_grants)
202 			blkif->vbd.overflow_max_grants = 1;
203 		return -EBUSY;
204 	}
205 	/* Figure out where to put new node */
206 	new = &blkif->persistent_gnts.rb_node;
207 	while (*new) {
208 		this = container_of(*new, struct persistent_gnt, node);
209 
210 		parent = *new;
211 		if (persistent_gnt->gnt < this->gnt)
212 			new = &((*new)->rb_left);
213 		else if (persistent_gnt->gnt > this->gnt)
214 			new = &((*new)->rb_right);
215 		else {
216 			pr_alert_ratelimited("trying to add a gref that's already in the tree\n");
217 			return -EINVAL;
218 		}
219 	}
220 
221 	bitmap_zero(persistent_gnt->flags, PERSISTENT_GNT_FLAGS_SIZE);
222 	set_bit(PERSISTENT_GNT_ACTIVE, persistent_gnt->flags);
223 	/* Add new node and rebalance tree. */
224 	rb_link_node(&(persistent_gnt->node), parent, new);
225 	rb_insert_color(&(persistent_gnt->node), &blkif->persistent_gnts);
226 	blkif->persistent_gnt_c++;
227 	atomic_inc(&blkif->persistent_gnt_in_use);
228 	return 0;
229 }
230 
231 static struct persistent_gnt *get_persistent_gnt(struct xen_blkif *blkif,
232 						 grant_ref_t gref)
233 {
234 	struct persistent_gnt *data;
235 	struct rb_node *node = NULL;
236 
237 	node = blkif->persistent_gnts.rb_node;
238 	while (node) {
239 		data = container_of(node, struct persistent_gnt, node);
240 
241 		if (gref < data->gnt)
242 			node = node->rb_left;
243 		else if (gref > data->gnt)
244 			node = node->rb_right;
245 		else {
			if (test_bit(PERSISTENT_GNT_ACTIVE, data->flags)) {
247 				pr_alert_ratelimited("requesting a grant already in use\n");
248 				return NULL;
249 			}
250 			set_bit(PERSISTENT_GNT_ACTIVE, data->flags);
251 			atomic_inc(&blkif->persistent_gnt_in_use);
252 			return data;
253 		}
254 	}
255 	return NULL;
256 }
257 
258 static void put_persistent_gnt(struct xen_blkif *blkif,
259                                struct persistent_gnt *persistent_gnt)
260 {
	if (!test_bit(PERSISTENT_GNT_ACTIVE, persistent_gnt->flags))
262 		pr_alert_ratelimited("freeing a grant already unused\n");
263 	set_bit(PERSISTENT_GNT_WAS_ACTIVE, persistent_gnt->flags);
264 	clear_bit(PERSISTENT_GNT_ACTIVE, persistent_gnt->flags);
265 	atomic_dec(&blkif->persistent_gnt_in_use);
266 }
267 
268 static void free_persistent_gnts_unmap_callback(int result,
269 						struct gntab_unmap_queue_data *data)
270 {
271 	struct completion *c = data->data;
272 
	/*
	 * BUG_ON used to reproduce existing behaviour,
	 * but is this the best way to deal with this?
	 */
275 	BUG_ON(result);
276 	complete(c);
277 }
278 
279 static void free_persistent_gnts(struct xen_blkif *blkif, struct rb_root *root,
280                                  unsigned int num)
281 {
282 	struct gnttab_unmap_grant_ref unmap[BLKIF_MAX_SEGMENTS_PER_REQUEST];
283 	struct page *pages[BLKIF_MAX_SEGMENTS_PER_REQUEST];
284 	struct persistent_gnt *persistent_gnt;
285 	struct rb_node *n;
286 	int segs_to_unmap = 0;
287 	struct gntab_unmap_queue_data unmap_data;
288 	struct completion unmap_completion;
289 
290 	init_completion(&unmap_completion);
291 
292 	unmap_data.data = &unmap_completion;
293 	unmap_data.done = &free_persistent_gnts_unmap_callback;
294 	unmap_data.pages = pages;
295 	unmap_data.unmap_ops = unmap;
296 	unmap_data.kunmap_ops = NULL;
297 
298 	foreach_grant_safe(persistent_gnt, n, root, node) {
299 		BUG_ON(persistent_gnt->handle ==
300 			BLKBACK_INVALID_HANDLE);
301 		gnttab_set_unmap_op(&unmap[segs_to_unmap],
302 			(unsigned long) pfn_to_kaddr(page_to_pfn(
303 				persistent_gnt->page)),
304 			GNTMAP_host_map,
305 			persistent_gnt->handle);
306 
307 		pages[segs_to_unmap] = persistent_gnt->page;
308 
309 		if (++segs_to_unmap == BLKIF_MAX_SEGMENTS_PER_REQUEST ||
310 			!rb_next(&persistent_gnt->node)) {
311 
312 			unmap_data.count = segs_to_unmap;
313 			gnttab_unmap_refs_async(&unmap_data);
314 			wait_for_completion(&unmap_completion);
315 
316 			put_free_pages(blkif, pages, segs_to_unmap);
317 			segs_to_unmap = 0;
318 		}
319 
320 		rb_erase(&persistent_gnt->node, root);
321 		kfree(persistent_gnt);
322 		num--;
323 	}
324 	BUG_ON(num != 0);
325 }
326 
327 void xen_blkbk_unmap_purged_grants(struct work_struct *work)
328 {
329 	struct gnttab_unmap_grant_ref unmap[BLKIF_MAX_SEGMENTS_PER_REQUEST];
330 	struct page *pages[BLKIF_MAX_SEGMENTS_PER_REQUEST];
331 	struct persistent_gnt *persistent_gnt;
332 	int ret, segs_to_unmap = 0;
333 	struct xen_blkif *blkif = container_of(work, typeof(*blkif), persistent_purge_work);
334 
	while (!list_empty(&blkif->persistent_purge_list)) {
336 		persistent_gnt = list_first_entry(&blkif->persistent_purge_list,
337 		                                  struct persistent_gnt,
338 		                                  remove_node);
339 		list_del(&persistent_gnt->remove_node);
340 
341 		gnttab_set_unmap_op(&unmap[segs_to_unmap],
342 			vaddr(persistent_gnt->page),
343 			GNTMAP_host_map,
344 			persistent_gnt->handle);
345 
346 		pages[segs_to_unmap] = persistent_gnt->page;
347 
348 		if (++segs_to_unmap == BLKIF_MAX_SEGMENTS_PER_REQUEST) {
349 			ret = gnttab_unmap_refs(unmap, NULL, pages,
350 				segs_to_unmap);
351 			BUG_ON(ret);
352 			put_free_pages(blkif, pages, segs_to_unmap);
353 			segs_to_unmap = 0;
354 		}
355 		kfree(persistent_gnt);
356 	}
357 	if (segs_to_unmap > 0) {
358 		ret = gnttab_unmap_refs(unmap, NULL, pages, segs_to_unmap);
359 		BUG_ON(ret);
360 		put_free_pages(blkif, pages, segs_to_unmap);
361 	}
362 }
363 
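/*
 * Scan the tree of persistent grants and move the least recently used ones
 * onto persistent_purge_list; the actual unmapping is deferred to
 * xen_blkbk_unmap_purged_grants() via persistent_purge_work. Called from
 * xen_blkif_schedule() roughly every LRU_INTERVAL ms.
 */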
364 static void purge_persistent_gnt(struct xen_blkif *blkif)
365 {
366 	struct persistent_gnt *persistent_gnt;
367 	struct rb_node *n;
368 	unsigned int num_clean, total;
369 	bool scan_used = false, clean_used = false;
370 	struct rb_root *root;
371 
372 	if (blkif->persistent_gnt_c < xen_blkif_max_pgrants ||
373 	    (blkif->persistent_gnt_c == xen_blkif_max_pgrants &&
374 	    !blkif->vbd.overflow_max_grants)) {
375 		return;
376 	}
377 
378 	if (work_pending(&blkif->persistent_purge_work)) {
379 		pr_alert_ratelimited("Scheduled work from previous purge is still pending, cannot purge list\n");
380 		return;
381 	}
382 
383 	num_clean = (xen_blkif_max_pgrants / 100) * LRU_PERCENT_CLEAN;
384 	num_clean = blkif->persistent_gnt_c - xen_blkif_max_pgrants + num_clean;
385 	num_clean = min(blkif->persistent_gnt_c, num_clean);
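	/*
	 * Worked example with the defaults (xen_blkif_max_pgrants = 1056,
	 * LRU_PERCENT_CLEAN = 5): (1056 / 100) * 5 = 50 grants are purged on
	 * top of whatever amount persistent_gnt_c currently exceeds the
	 * limit by, capped at persistent_gnt_c.
	 */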
386 	if ((num_clean == 0) ||
387 	    (num_clean > (blkif->persistent_gnt_c - atomic_read(&blkif->persistent_gnt_in_use))))
388 		return;
389 
	/*
	 * At this point, we can assure that there will be no calls
	 * to get_persistent_gnt (because we are executing this code from
	 * xen_blkif_schedule), there can only be calls to put_persistent_gnt,
	 * which means that the number of currently used grants will go down,
	 * but never up, so we will always be able to remove the requested
	 * number of grants.
	 */
398 
399 	total = num_clean;
400 
401 	pr_debug("Going to purge %u persistent grants\n", num_clean);
402 
403 	BUG_ON(!list_empty(&blkif->persistent_purge_list));
404 	root = &blkif->persistent_gnts;
405 purge_list:
406 	foreach_grant_safe(persistent_gnt, n, root, node) {
407 		BUG_ON(persistent_gnt->handle ==
408 			BLKBACK_INVALID_HANDLE);
409 
410 		if (clean_used) {
411 			clear_bit(PERSISTENT_GNT_WAS_ACTIVE, persistent_gnt->flags);
412 			continue;
413 		}
414 
415 		if (test_bit(PERSISTENT_GNT_ACTIVE, persistent_gnt->flags))
416 			continue;
417 		if (!scan_used &&
418 		    (test_bit(PERSISTENT_GNT_WAS_ACTIVE, persistent_gnt->flags)))
419 			continue;
420 
421 		rb_erase(&persistent_gnt->node, root);
422 		list_add(&persistent_gnt->remove_node,
423 		         &blkif->persistent_purge_list);
424 		if (--num_clean == 0)
425 			goto finished;
426 	}
427 	/*
428 	 * If we get here it means we also need to start cleaning
429 	 * grants that were used since last purge in order to cope
430 	 * with the requested num
431 	 */
432 	if (!scan_used && !clean_used) {
433 		pr_debug("Still missing %u purged frames\n", num_clean);
434 		scan_used = true;
435 		goto purge_list;
436 	}
437 finished:
438 	if (!clean_used) {
439 		pr_debug("Finished scanning for grants to clean, removing used flag\n");
440 		clean_used = true;
441 		goto purge_list;
442 	}
443 
444 	blkif->persistent_gnt_c -= (total - num_clean);
445 	blkif->vbd.overflow_max_grants = 0;
446 
447 	/* We can defer this work */
448 	schedule_work(&blkif->persistent_purge_work);
449 	pr_debug("Purged %u/%u\n", (total - num_clean), total);
450 	return;
451 }
452 
453 /*
 * Retrieve a free pending_req structure from the 'pending_free' list.
455  */
456 static struct pending_req *alloc_req(struct xen_blkif *blkif)
457 {
458 	struct pending_req *req = NULL;
459 	unsigned long flags;
460 
461 	spin_lock_irqsave(&blkif->pending_free_lock, flags);
462 	if (!list_empty(&blkif->pending_free)) {
463 		req = list_entry(blkif->pending_free.next, struct pending_req,
464 				 free_list);
465 		list_del(&req->free_list);
466 	}
467 	spin_unlock_irqrestore(&blkif->pending_free_lock, flags);
468 	return req;
469 }
470 
471 /*
 * Return the 'pending_req' structure back to the free pool. We also
 * wake up the thread if it was waiting for a free pending_req.
474  */
475 static void free_req(struct xen_blkif *blkif, struct pending_req *req)
476 {
477 	unsigned long flags;
478 	int was_empty;
479 
480 	spin_lock_irqsave(&blkif->pending_free_lock, flags);
481 	was_empty = list_empty(&blkif->pending_free);
482 	list_add(&req->free_list, &blkif->pending_free);
483 	spin_unlock_irqrestore(&blkif->pending_free_lock, flags);
484 	if (was_empty)
485 		wake_up(&blkif->pending_free_wq);
486 }
487 
488 /*
489  * Routines for managing virtual block devices (vbds).
490  */
491 static int xen_vbd_translate(struct phys_req *req, struct xen_blkif *blkif,
492 			     int operation)
493 {
494 	struct xen_vbd *vbd = &blkif->vbd;
495 	int rc = -EACCES;
496 
497 	if ((operation != READ) && vbd->readonly)
498 		goto out;
499 
500 	if (likely(req->nr_sects)) {
501 		blkif_sector_t end = req->sector_number + req->nr_sects;
502 
503 		if (unlikely(end < req->sector_number))
504 			goto out;
505 		if (unlikely(end > vbd_sz(vbd)))
506 			goto out;
507 	}
508 
509 	req->dev  = vbd->pdevice;
510 	req->bdev = vbd->bdev;
511 	rc = 0;
512 
513  out:
514 	return rc;
515 }
516 
517 static void xen_vbd_resize(struct xen_blkif *blkif)
518 {
519 	struct xen_vbd *vbd = &blkif->vbd;
520 	struct xenbus_transaction xbt;
521 	int err;
522 	struct xenbus_device *dev = xen_blkbk_xenbus(blkif->be);
523 	unsigned long long new_size = vbd_sz(vbd);
524 
525 	pr_info("VBD Resize: Domid: %d, Device: (%d, %d)\n",
526 		blkif->domid, MAJOR(vbd->pdevice), MINOR(vbd->pdevice));
527 	pr_info("VBD Resize: new size %llu\n", new_size);
528 	vbd->size = new_size;
529 again:
530 	err = xenbus_transaction_start(&xbt);
531 	if (err) {
532 		pr_warn("Error starting transaction\n");
533 		return;
534 	}
535 	err = xenbus_printf(xbt, dev->nodename, "sectors", "%llu",
536 			    (unsigned long long)vbd_sz(vbd));
537 	if (err) {
538 		pr_warn("Error writing new size\n");
539 		goto abort;
540 	}
541 	/*
542 	 * Write the current state; we will use this to synchronize
543 	 * the front-end. If the current state is "connected" the
544 	 * front-end will get the new size information online.
545 	 */
546 	err = xenbus_printf(xbt, dev->nodename, "state", "%d", dev->state);
547 	if (err) {
548 		pr_warn("Error writing the state\n");
549 		goto abort;
550 	}
551 
552 	err = xenbus_transaction_end(xbt, 0);
553 	if (err == -EAGAIN)
554 		goto again;
555 	if (err)
556 		pr_warn("Error ending transaction\n");
557 	return;
558 abort:
559 	xenbus_transaction_end(xbt, 1);
560 }
561 
562 /*
563  * Notification from the guest OS.
564  */
565 static void blkif_notify_work(struct xen_blkif *blkif)
566 {
567 	blkif->waiting_reqs = 1;
568 	wake_up(&blkif->wq);
569 }
570 
571 irqreturn_t xen_blkif_be_int(int irq, void *dev_id)
572 {
573 	blkif_notify_work(dev_id);
574 	return IRQ_HANDLED;
575 }
576 
577 /*
578  * SCHEDULER FUNCTIONS
579  */
580 
581 static void print_stats(struct xen_blkif *blkif)
582 {
583 	pr_info("(%s): oo %3llu  |  rd %4llu  |  wr %4llu  |  f %4llu"
584 		 "  |  ds %4llu | pg: %4u/%4d\n",
585 		 current->comm, blkif->st_oo_req,
586 		 blkif->st_rd_req, blkif->st_wr_req,
587 		 blkif->st_f_req, blkif->st_ds_req,
588 		 blkif->persistent_gnt_c,
589 		 xen_blkif_max_pgrants);
590 	blkif->st_print = jiffies + msecs_to_jiffies(10 * 1000);
591 	blkif->st_rd_req = 0;
592 	blkif->st_wr_req = 0;
593 	blkif->st_oo_req = 0;
594 	blkif->st_ds_req = 0;
595 }
596 
597 int xen_blkif_schedule(void *arg)
598 {
599 	struct xen_blkif *blkif = arg;
600 	struct xen_vbd *vbd = &blkif->vbd;
601 	unsigned long timeout;
602 	int ret;
603 
604 	xen_blkif_get(blkif);
605 
606 	while (!kthread_should_stop()) {
607 		if (try_to_freeze())
608 			continue;
609 		if (unlikely(vbd->size != vbd_sz(vbd)))
610 			xen_vbd_resize(blkif);
611 
612 		timeout = msecs_to_jiffies(LRU_INTERVAL);
613 
614 		timeout = wait_event_interruptible_timeout(
615 			blkif->wq,
616 			blkif->waiting_reqs || kthread_should_stop(),
617 			timeout);
618 		if (timeout == 0)
619 			goto purge_gnt_list;
620 		timeout = wait_event_interruptible_timeout(
621 			blkif->pending_free_wq,
622 			!list_empty(&blkif->pending_free) ||
623 			kthread_should_stop(),
624 			timeout);
625 		if (timeout == 0)
626 			goto purge_gnt_list;
627 
628 		blkif->waiting_reqs = 0;
629 		smp_mb(); /* clear flag *before* checking for work */
630 
631 		ret = do_block_io_op(blkif);
632 		if (ret > 0)
633 			blkif->waiting_reqs = 1;
634 		if (ret == -EACCES)
635 			wait_event_interruptible(blkif->shutdown_wq,
636 						 kthread_should_stop());
637 
638 purge_gnt_list:
639 		if (blkif->vbd.feature_gnt_persistent &&
640 		    time_after(jiffies, blkif->next_lru)) {
641 			purge_persistent_gnt(blkif);
642 			blkif->next_lru = jiffies + msecs_to_jiffies(LRU_INTERVAL);
643 		}
644 
645 		/* Shrink if we have more than xen_blkif_max_buffer_pages */
646 		shrink_free_pagepool(blkif, xen_blkif_max_buffer_pages);
647 
648 		if (log_stats && time_after(jiffies, blkif->st_print))
649 			print_stats(blkif);
650 	}
651 
652 	/* Drain pending purge work */
653 	flush_work(&blkif->persistent_purge_work);
654 
655 	if (log_stats)
656 		print_stats(blkif);
657 
658 	blkif->xenblkd = NULL;
659 	xen_blkif_put(blkif);
660 
661 	return 0;
662 }
663 
664 /*
665  * Remove persistent grants and empty the pool of free pages
666  */
667 void xen_blkbk_free_caches(struct xen_blkif *blkif)
668 {
669 	/* Free all persistent grant pages */
670 	if (!RB_EMPTY_ROOT(&blkif->persistent_gnts))
671 		free_persistent_gnts(blkif, &blkif->persistent_gnts,
672 			blkif->persistent_gnt_c);
673 
674 	BUG_ON(!RB_EMPTY_ROOT(&blkif->persistent_gnts));
675 	blkif->persistent_gnt_c = 0;
676 
677 	/* Since we are shutting down remove all pages from the buffer */
678 	shrink_free_pagepool(blkif, 0 /* All */);
679 }
680 
681 static unsigned int xen_blkbk_unmap_prepare(
682 	struct xen_blkif *blkif,
683 	struct grant_page **pages,
684 	unsigned int num,
685 	struct gnttab_unmap_grant_ref *unmap_ops,
686 	struct page **unmap_pages)
687 {
688 	unsigned int i, invcount = 0;
689 
690 	for (i = 0; i < num; i++) {
691 		if (pages[i]->persistent_gnt != NULL) {
692 			put_persistent_gnt(blkif, pages[i]->persistent_gnt);
693 			continue;
694 		}
695 		if (pages[i]->handle == BLKBACK_INVALID_HANDLE)
696 			continue;
697 		unmap_pages[invcount] = pages[i]->page;
698 		gnttab_set_unmap_op(&unmap_ops[invcount], vaddr(pages[i]->page),
699 				    GNTMAP_host_map, pages[i]->handle);
700 		pages[i]->handle = BLKBACK_INVALID_HANDLE;
701 		invcount++;
	}

	return invcount;
705 }
706 
707 static void xen_blkbk_unmap_and_respond_callback(int result, struct gntab_unmap_queue_data *data)
708 {
	struct pending_req *pending_req = (struct pending_req *)(data->data);
710 	struct xen_blkif *blkif = pending_req->blkif;
711 
	/*
	 * BUG_ON used to reproduce existing behaviour,
	 * but is this the best way to deal with this?
	 */
714 	BUG_ON(result);
715 
716 	put_free_pages(blkif, data->pages, data->count);
717 	make_response(blkif, pending_req->id,
718 		      pending_req->operation, pending_req->status);
719 	free_req(blkif, pending_req);
720 	/*
721 	 * Make sure the request is freed before releasing blkif,
722 	 * or there could be a race between free_req and the
723 	 * cleanup done in xen_blkif_free during shutdown.
724 	 *
725 	 * NB: The fact that we might try to wake up pending_free_wq
726 	 * before drain_complete (in case there's a drain going on)
727 	 * it's not a problem with our current implementation
728 	 * because we can assure there's no thread waiting on
729 	 * pending_free_wq if there's a drain going on, but it has
730 	 * to be taken into account if the current model is changed.
731 	 */
732 	if (atomic_dec_and_test(&blkif->inflight) && atomic_read(&blkif->drain)) {
733 		complete(&blkif->drain_complete);
734 	}
735 	xen_blkif_put(blkif);
736 }
737 
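/*
 * Unmap all grants used by a finished request asynchronously; the response is
 * put on the ring and the request freed from the completion callback above
 * once the unmap operation has been processed.
 */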
738 static void xen_blkbk_unmap_and_respond(struct pending_req *req)
739 {
	struct gntab_unmap_queue_data *work = &req->gnttab_unmap_data;
741 	struct xen_blkif *blkif = req->blkif;
742 	struct grant_page **pages = req->segments;
743 	unsigned int invcount;
744 
745 	invcount = xen_blkbk_unmap_prepare(blkif, pages, req->nr_pages,
746 					   req->unmap, req->unmap_pages);
747 
748 	work->data = req;
749 	work->done = xen_blkbk_unmap_and_respond_callback;
750 	work->unmap_ops = req->unmap;
751 	work->kunmap_ops = NULL;
752 	work->pages = req->unmap_pages;
753 	work->count = invcount;
754 
755 	gnttab_unmap_refs_async(&req->gnttab_unmap_data);
756 }
757 
758 
759 /*
760  * Unmap the grant references.
761  *
762  * This could accumulate ops up to the batch size to reduce the number
763  * of hypercalls, but since this is only used in error paths there's
764  * no real need.
765  */
766 static void xen_blkbk_unmap(struct xen_blkif *blkif,
767                             struct grant_page *pages[],
768                             int num)
769 {
770 	struct gnttab_unmap_grant_ref unmap[BLKIF_MAX_SEGMENTS_PER_REQUEST];
771 	struct page *unmap_pages[BLKIF_MAX_SEGMENTS_PER_REQUEST];
772 	unsigned int invcount = 0;
773 	int ret;
774 
775 	while (num) {
776 		unsigned int batch = min(num, BLKIF_MAX_SEGMENTS_PER_REQUEST);
777 
778 		invcount = xen_blkbk_unmap_prepare(blkif, pages, batch,
779 						   unmap, unmap_pages);
780 		if (invcount) {
781 			ret = gnttab_unmap_refs(unmap, NULL, unmap_pages, invcount);
782 			BUG_ON(ret);
783 			put_free_pages(blkif, unmap_pages, invcount);
784 		}
785 		pages += batch;
786 		num -= batch;
787 	}
788 }
789 
790 static int xen_blkbk_map(struct xen_blkif *blkif,
791 			 struct grant_page *pages[],
792 			 int num, bool ro)
793 {
794 	struct gnttab_map_grant_ref map[BLKIF_MAX_SEGMENTS_PER_REQUEST];
795 	struct page *pages_to_gnt[BLKIF_MAX_SEGMENTS_PER_REQUEST];
796 	struct persistent_gnt *persistent_gnt = NULL;
797 	phys_addr_t addr = 0;
798 	int i, seg_idx, new_map_idx;
799 	int segs_to_map = 0;
800 	int ret = 0;
801 	int last_map = 0, map_until = 0;
802 	int use_persistent_gnts;
803 
804 	use_persistent_gnts = (blkif->vbd.feature_gnt_persistent);
805 
	/*
	 * Fill out preq.nr_sects with the proper number of sectors, and set up
	 * map[..] with the PFN of the page in our domain with the
	 * corresponding grant reference for each page.
	 */
811 again:
812 	for (i = map_until; i < num; i++) {
813 		uint32_t flags;
814 
815 		if (use_persistent_gnts)
816 			persistent_gnt = get_persistent_gnt(
817 				blkif,
818 				pages[i]->gref);
819 
820 		if (persistent_gnt) {
821 			/*
822 			 * We are using persistent grants and
823 			 * the grant is already mapped
824 			 */
825 			pages[i]->page = persistent_gnt->page;
826 			pages[i]->persistent_gnt = persistent_gnt;
827 		} else {
828 			if (get_free_page(blkif, &pages[i]->page))
829 				goto out_of_memory;
830 			addr = vaddr(pages[i]->page);
831 			pages_to_gnt[segs_to_map] = pages[i]->page;
832 			pages[i]->persistent_gnt = NULL;
833 			flags = GNTMAP_host_map;
834 			if (!use_persistent_gnts && ro)
835 				flags |= GNTMAP_readonly;
836 			gnttab_set_map_op(&map[segs_to_map++], addr,
837 					  flags, pages[i]->gref,
838 					  blkif->domid);
839 		}
840 		map_until = i + 1;
841 		if (segs_to_map == BLKIF_MAX_SEGMENTS_PER_REQUEST)
842 			break;
843 	}
844 
845 	if (segs_to_map) {
846 		ret = gnttab_map_refs(map, NULL, pages_to_gnt, segs_to_map);
847 		BUG_ON(ret);
848 	}
849 
850 	/*
851 	 * Now swizzle the MFN in our domain with the MFN from the other domain
	 * so that when we access vaddr(pages[i]->page) it has the contents of
853 	 * the page from the other domain.
854 	 */
855 	for (seg_idx = last_map, new_map_idx = 0; seg_idx < map_until; seg_idx++) {
856 		if (!pages[seg_idx]->persistent_gnt) {
857 			/* This is a newly mapped grant */
858 			BUG_ON(new_map_idx >= segs_to_map);
859 			if (unlikely(map[new_map_idx].status != 0)) {
860 				pr_debug("invalid buffer -- could not remap it\n");
861 				put_free_pages(blkif, &pages[seg_idx]->page, 1);
862 				pages[seg_idx]->handle = BLKBACK_INVALID_HANDLE;
863 				ret |= 1;
864 				goto next;
865 			}
866 			pages[seg_idx]->handle = map[new_map_idx].handle;
867 		} else {
868 			continue;
869 		}
870 		if (use_persistent_gnts &&
871 		    blkif->persistent_gnt_c < xen_blkif_max_pgrants) {
872 			/*
873 			 * We are using persistent grants, the grant is
874 			 * not mapped but we might have room for it.
875 			 */
876 			persistent_gnt = kmalloc(sizeof(struct persistent_gnt),
877 				                 GFP_KERNEL);
878 			if (!persistent_gnt) {
879 				/*
880 				 * If we don't have enough memory to
881 				 * allocate the persistent_gnt struct
				 * map this grant non-persistently
883 				 */
884 				goto next;
885 			}
886 			persistent_gnt->gnt = map[new_map_idx].ref;
887 			persistent_gnt->handle = map[new_map_idx].handle;
888 			persistent_gnt->page = pages[seg_idx]->page;
889 			if (add_persistent_gnt(blkif,
890 			                       persistent_gnt)) {
891 				kfree(persistent_gnt);
892 				persistent_gnt = NULL;
893 				goto next;
894 			}
895 			pages[seg_idx]->persistent_gnt = persistent_gnt;
896 			pr_debug("grant %u added to the tree of persistent grants, using %u/%u\n",
897 				 persistent_gnt->gnt, blkif->persistent_gnt_c,
898 				 xen_blkif_max_pgrants);
899 			goto next;
900 		}
901 		if (use_persistent_gnts && !blkif->vbd.overflow_max_grants) {
902 			blkif->vbd.overflow_max_grants = 1;
903 			pr_debug("domain %u, device %#x is using maximum number of persistent grants\n",
904 			         blkif->domid, blkif->vbd.handle);
905 		}
906 		/*
907 		 * We could not map this grant persistently, so use it as
908 		 * a non-persistent grant.
909 		 */
910 next:
911 		new_map_idx++;
912 	}
913 	segs_to_map = 0;
914 	last_map = map_until;
915 	if (map_until != num)
916 		goto again;
917 
918 	return ret;
919 
920 out_of_memory:
921 	pr_alert("%s: out of memory\n", __func__);
922 	put_free_pages(blkif, pages_to_gnt, segs_to_map);
923 	return -ENOMEM;
924 }
925 
926 static int xen_blkbk_map_seg(struct pending_req *pending_req)
927 {
928 	int rc;
929 
930 	rc = xen_blkbk_map(pending_req->blkif, pending_req->segments,
931 			   pending_req->nr_pages,
932 	                   (pending_req->operation != BLKIF_OP_READ));
933 
934 	return rc;
935 }
936 
937 static int xen_blkbk_parse_indirect(struct blkif_request *req,
938 				    struct pending_req *pending_req,
939 				    struct seg_buf seg[],
940 				    struct phys_req *preq)
941 {
942 	struct grant_page **pages = pending_req->indirect_pages;
943 	struct xen_blkif *blkif = pending_req->blkif;
944 	int indirect_grefs, rc, n, nseg, i;
945 	struct blkif_request_segment *segments = NULL;
946 
947 	nseg = pending_req->nr_pages;
948 	indirect_grefs = INDIRECT_PAGES(nseg);
949 	BUG_ON(indirect_grefs > BLKIF_MAX_INDIRECT_PAGES_PER_REQUEST);
950 
951 	for (i = 0; i < indirect_grefs; i++)
952 		pages[i]->gref = req->u.indirect.indirect_grefs[i];
953 
954 	rc = xen_blkbk_map(blkif, pages, indirect_grefs, true);
955 	if (rc)
956 		goto unmap;
957 
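	/*
	 * Walk the segment entries stored in the (now mapped) indirect pages:
	 * entry i of indirect frame f describes segment n = f *
	 * SEGS_PER_INDIRECT_FRAME + i as an inclusive [first_sect, last_sect]
	 * range of 512-byte sectors within the granted data page.
	 */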
958 	for (n = 0, i = 0; n < nseg; n++) {
959 		if ((n % SEGS_PER_INDIRECT_FRAME) == 0) {
960 			/* Map indirect segments */
961 			if (segments)
962 				kunmap_atomic(segments);
963 			segments = kmap_atomic(pages[n/SEGS_PER_INDIRECT_FRAME]->page);
964 		}
965 		i = n % SEGS_PER_INDIRECT_FRAME;
966 		pending_req->segments[n]->gref = segments[i].gref;
967 		seg[n].nsec = segments[i].last_sect -
968 			segments[i].first_sect + 1;
969 		seg[n].offset = (segments[i].first_sect << 9);
970 		if ((segments[i].last_sect >= (PAGE_SIZE >> 9)) ||
971 		    (segments[i].last_sect < segments[i].first_sect)) {
972 			rc = -EINVAL;
973 			goto unmap;
974 		}
975 		preq->nr_sects += seg[n].nsec;
976 	}
977 
978 unmap:
979 	if (segments)
980 		kunmap_atomic(segments);
981 	xen_blkbk_unmap(blkif, pages, indirect_grefs);
982 	return rc;
983 }
984 
985 static int dispatch_discard_io(struct xen_blkif *blkif,
986 				struct blkif_request *req)
987 {
988 	int err = 0;
989 	int status = BLKIF_RSP_OKAY;
990 	struct block_device *bdev = blkif->vbd.bdev;
991 	unsigned long secure;
992 	struct phys_req preq;
993 
994 	xen_blkif_get(blkif);
995 
996 	preq.sector_number = req->u.discard.sector_number;
997 	preq.nr_sects      = req->u.discard.nr_sectors;
998 
999 	err = xen_vbd_translate(&preq, blkif, WRITE);
1000 	if (err) {
1001 		pr_warn("access denied: DISCARD [%llu->%llu] on dev=%04x\n",
1002 			preq.sector_number,
1003 			preq.sector_number + preq.nr_sects, blkif->vbd.pdevice);
1004 		goto fail_response;
1005 	}
1006 	blkif->st_ds_req++;
1007 
1008 	secure = (blkif->vbd.discard_secure &&
1009 		 (req->u.discard.flag & BLKIF_DISCARD_SECURE)) ?
1010 		 BLKDEV_DISCARD_SECURE : 0;
1011 
1012 	err = blkdev_issue_discard(bdev, req->u.discard.sector_number,
1013 				   req->u.discard.nr_sectors,
1014 				   GFP_KERNEL, secure);
1015 fail_response:
1016 	if (err == -EOPNOTSUPP) {
1017 		pr_debug("discard op failed, not supported\n");
1018 		status = BLKIF_RSP_EOPNOTSUPP;
1019 	} else if (err)
1020 		status = BLKIF_RSP_ERROR;
1021 
1022 	make_response(blkif, req->u.discard.id, req->operation, status);
1023 	xen_blkif_put(blkif);
1024 	return err;
1025 }
1026 
1027 static int dispatch_other_io(struct xen_blkif *blkif,
1028 			     struct blkif_request *req,
1029 			     struct pending_req *pending_req)
1030 {
1031 	free_req(blkif, pending_req);
1032 	make_response(blkif, req->u.other.id, req->operation,
1033 		      BLKIF_RSP_EOPNOTSUPP);
1034 	return -EIO;
1035 }
1036 
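/*
 * Wait for all in-flight requests on this backend to complete. Used to give
 * BLKIF_OP_WRITE_BARRIER requests barrier semantics: the flush is only
 * submitted once the outstanding I/O has drained.
 */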
1037 static void xen_blk_drain_io(struct xen_blkif *blkif)
1038 {
1039 	atomic_set(&blkif->drain, 1);
1040 	do {
1041 		if (atomic_read(&blkif->inflight) == 0)
1042 			break;
1043 		wait_for_completion_interruptible_timeout(
1044 				&blkif->drain_complete, HZ);
1045 
1046 		if (!atomic_read(&blkif->drain))
1047 			break;
1048 	} while (!kthread_should_stop());
1049 	atomic_set(&blkif->drain, 0);
1050 }
1051 
1052 /*
 * Completion callback on the bio's. Called as bio->bi_end_io().
1054  */
1055 
1056 static void __end_block_io_op(struct pending_req *pending_req, int error)
1057 {
1058 	/* An error fails the entire request. */
1059 	if ((pending_req->operation == BLKIF_OP_FLUSH_DISKCACHE) &&
1060 	    (error == -EOPNOTSUPP)) {
1061 		pr_debug("flush diskcache op failed, not supported\n");
1062 		xen_blkbk_flush_diskcache(XBT_NIL, pending_req->blkif->be, 0);
1063 		pending_req->status = BLKIF_RSP_EOPNOTSUPP;
1064 	} else if ((pending_req->operation == BLKIF_OP_WRITE_BARRIER) &&
1065 		    (error == -EOPNOTSUPP)) {
1066 		pr_debug("write barrier op failed, not supported\n");
1067 		xen_blkbk_barrier(XBT_NIL, pending_req->blkif->be, 0);
1068 		pending_req->status = BLKIF_RSP_EOPNOTSUPP;
1069 	} else if (error) {
		pr_debug("Buffer not up-to-date at end of operation, error=%d\n",
			 error);
1072 		pending_req->status = BLKIF_RSP_ERROR;
1073 	}
1074 
1075 	/*
1076 	 * If all of the bio's have completed it is time to unmap
1077 	 * the grant references associated with 'request' and provide
1078 	 * the proper response on the ring.
1079 	 */
1080 	if (atomic_dec_and_test(&pending_req->pendcnt))
1081 		xen_blkbk_unmap_and_respond(pending_req);
1082 }
1083 
1084 /*
1085  * bio callback.
1086  */
1087 static void end_block_io_op(struct bio *bio, int error)
1088 {
1089 	__end_block_io_op(bio->bi_private, error);
1090 	bio_put(bio);
1091 }
1092 
1093 
1094 
/*
 * Function to copy the 'struct blkif_request' from the ring buffer
 * (which has the sectors we want, number of them, grant references, etc),
 * and transmute it to the block API to hand it over to the proper block disk.
 */
1100 static int
1101 __do_block_io_op(struct xen_blkif *blkif)
1102 {
1103 	union blkif_back_rings *blk_rings = &blkif->blk_rings;
1104 	struct blkif_request req;
1105 	struct pending_req *pending_req;
1106 	RING_IDX rc, rp;
1107 	int more_to_do = 0;
1108 
1109 	rc = blk_rings->common.req_cons;
1110 	rp = blk_rings->common.sring->req_prod;
1111 	rmb(); /* Ensure we see queued requests up to 'rp'. */
1112 
1113 	if (RING_REQUEST_PROD_OVERFLOW(&blk_rings->common, rp)) {
1114 		rc = blk_rings->common.rsp_prod_pvt;
1115 		pr_warn("Frontend provided bogus ring requests (%d - %d = %d). Halting ring processing on dev=%04x\n",
1116 			rp, rc, rp - rc, blkif->vbd.pdevice);
1117 		return -EACCES;
1118 	}
1119 	while (rc != rp) {
1120 
1121 		if (RING_REQUEST_CONS_OVERFLOW(&blk_rings->common, rc))
1122 			break;
1123 
1124 		if (kthread_should_stop()) {
1125 			more_to_do = 1;
1126 			break;
1127 		}
1128 
1129 		pending_req = alloc_req(blkif);
1130 		if (NULL == pending_req) {
1131 			blkif->st_oo_req++;
1132 			more_to_do = 1;
1133 			break;
1134 		}
1135 
1136 		switch (blkif->blk_protocol) {
1137 		case BLKIF_PROTOCOL_NATIVE:
1138 			memcpy(&req, RING_GET_REQUEST(&blk_rings->native, rc), sizeof(req));
1139 			break;
1140 		case BLKIF_PROTOCOL_X86_32:
1141 			blkif_get_x86_32_req(&req, RING_GET_REQUEST(&blk_rings->x86_32, rc));
1142 			break;
1143 		case BLKIF_PROTOCOL_X86_64:
1144 			blkif_get_x86_64_req(&req, RING_GET_REQUEST(&blk_rings->x86_64, rc));
1145 			break;
1146 		default:
1147 			BUG();
1148 		}
1149 		blk_rings->common.req_cons = ++rc; /* before make_response() */
1150 
1151 		/* Apply all sanity checks to /private copy/ of request. */
1152 		barrier();
1153 
1154 		switch (req.operation) {
1155 		case BLKIF_OP_READ:
1156 		case BLKIF_OP_WRITE:
1157 		case BLKIF_OP_WRITE_BARRIER:
1158 		case BLKIF_OP_FLUSH_DISKCACHE:
1159 		case BLKIF_OP_INDIRECT:
1160 			if (dispatch_rw_block_io(blkif, &req, pending_req))
1161 				goto done;
1162 			break;
1163 		case BLKIF_OP_DISCARD:
1164 			free_req(blkif, pending_req);
1165 			if (dispatch_discard_io(blkif, &req))
1166 				goto done;
1167 			break;
1168 		default:
1169 			if (dispatch_other_io(blkif, &req, pending_req))
1170 				goto done;
1171 			break;
1172 		}
1173 
1174 		/* Yield point for this unbounded loop. */
1175 		cond_resched();
1176 	}
1177 done:
1178 	return more_to_do;
1179 }
1180 
1181 static int
1182 do_block_io_op(struct xen_blkif *blkif)
1183 {
1184 	union blkif_back_rings *blk_rings = &blkif->blk_rings;
1185 	int more_to_do;
1186 
1187 	do {
1188 		more_to_do = __do_block_io_op(blkif);
1189 		if (more_to_do)
1190 			break;
1191 
1192 		RING_FINAL_CHECK_FOR_REQUESTS(&blk_rings->common, more_to_do);
1193 	} while (more_to_do);
1194 
1195 	return more_to_do;
1196 }
1197 /*
1198  * Transmutation of the 'struct blkif_request' to a proper 'struct bio'
1199  * and call the 'submit_bio' to pass it to the underlying storage.
1200  */
1201 static int dispatch_rw_block_io(struct xen_blkif *blkif,
1202 				struct blkif_request *req,
1203 				struct pending_req *pending_req)
1204 {
1205 	struct phys_req preq;
1206 	struct seg_buf *seg = pending_req->seg;
1207 	unsigned int nseg;
1208 	struct bio *bio = NULL;
1209 	struct bio **biolist = pending_req->biolist;
1210 	int i, nbio = 0;
1211 	int operation;
1212 	struct blk_plug plug;
1213 	bool drain = false;
1214 	struct grant_page **pages = pending_req->segments;
1215 	unsigned short req_operation;
1216 
1217 	req_operation = req->operation == BLKIF_OP_INDIRECT ?
1218 			req->u.indirect.indirect_op : req->operation;
1219 	if ((req->operation == BLKIF_OP_INDIRECT) &&
1220 	    (req_operation != BLKIF_OP_READ) &&
1221 	    (req_operation != BLKIF_OP_WRITE)) {
1222 		pr_debug("Invalid indirect operation (%u)\n", req_operation);
1223 		goto fail_response;
1224 	}
1225 
1226 	switch (req_operation) {
1227 	case BLKIF_OP_READ:
1228 		blkif->st_rd_req++;
1229 		operation = READ;
1230 		break;
1231 	case BLKIF_OP_WRITE:
1232 		blkif->st_wr_req++;
1233 		operation = WRITE_ODIRECT;
1234 		break;
1235 	case BLKIF_OP_WRITE_BARRIER:
1236 		drain = true;
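		/* fall through */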
1237 	case BLKIF_OP_FLUSH_DISKCACHE:
1238 		blkif->st_f_req++;
1239 		operation = WRITE_FLUSH;
1240 		break;
1241 	default:
1242 		operation = 0; /* make gcc happy */
1243 		goto fail_response;
1244 		break;
1245 	}
1246 
1247 	/* Check that the number of segments is sane. */
1248 	nseg = req->operation == BLKIF_OP_INDIRECT ?
1249 	       req->u.indirect.nr_segments : req->u.rw.nr_segments;
1250 
1251 	if (unlikely(nseg == 0 && operation != WRITE_FLUSH) ||
1252 	    unlikely((req->operation != BLKIF_OP_INDIRECT) &&
1253 		     (nseg > BLKIF_MAX_SEGMENTS_PER_REQUEST)) ||
1254 	    unlikely((req->operation == BLKIF_OP_INDIRECT) &&
1255 		     (nseg > MAX_INDIRECT_SEGMENTS))) {
1256 		pr_debug("Bad number of segments in request (%d)\n", nseg);
1257 		/* Haven't submitted any bio's yet. */
1258 		goto fail_response;
1259 	}
1260 
1261 	preq.nr_sects      = 0;
1262 
1263 	pending_req->blkif     = blkif;
1264 	pending_req->id        = req->u.rw.id;
1265 	pending_req->operation = req_operation;
1266 	pending_req->status    = BLKIF_RSP_OKAY;
1267 	pending_req->nr_pages  = nseg;
1268 
1269 	if (req->operation != BLKIF_OP_INDIRECT) {
1270 		preq.dev               = req->u.rw.handle;
1271 		preq.sector_number     = req->u.rw.sector_number;
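		/*
		 * Each segment is an inclusive range of 512-byte sectors inside
		 * one granted page; e.g. first_sect = 0, last_sect = 7 covers
		 * 8 sectors, i.e. the first 4096 bytes of the page.
		 */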
1272 		for (i = 0; i < nseg; i++) {
1273 			pages[i]->gref = req->u.rw.seg[i].gref;
1274 			seg[i].nsec = req->u.rw.seg[i].last_sect -
1275 				req->u.rw.seg[i].first_sect + 1;
1276 			seg[i].offset = (req->u.rw.seg[i].first_sect << 9);
1277 			if ((req->u.rw.seg[i].last_sect >= (PAGE_SIZE >> 9)) ||
1278 			    (req->u.rw.seg[i].last_sect <
1279 			     req->u.rw.seg[i].first_sect))
1280 				goto fail_response;
1281 			preq.nr_sects += seg[i].nsec;
1282 		}
1283 	} else {
1284 		preq.dev               = req->u.indirect.handle;
1285 		preq.sector_number     = req->u.indirect.sector_number;
1286 		if (xen_blkbk_parse_indirect(req, pending_req, seg, &preq))
1287 			goto fail_response;
1288 	}
1289 
1290 	if (xen_vbd_translate(&preq, blkif, operation) != 0) {
1291 		pr_debug("access denied: %s of [%llu,%llu] on dev=%04x\n",
1292 			 operation == READ ? "read" : "write",
1293 			 preq.sector_number,
1294 			 preq.sector_number + preq.nr_sects,
1295 			 blkif->vbd.pdevice);
1296 		goto fail_response;
1297 	}
1298 
1299 	/*
1300 	 * This check _MUST_ be done after xen_vbd_translate as the preq.bdev
1301 	 * is set there.
1302 	 */
1303 	for (i = 0; i < nseg; i++) {
1304 		if (((int)preq.sector_number|(int)seg[i].nsec) &
1305 		    ((bdev_logical_block_size(preq.bdev) >> 9) - 1)) {
1306 			pr_debug("Misaligned I/O request from domain %d\n",
1307 				 blkif->domid);
1308 			goto fail_response;
1309 		}
1310 	}
1311 
	/*
	 * Wait on all outstanding I/O's and once that has been completed
	 * issue the WRITE_FLUSH.
	 */
1315 	if (drain)
1316 		xen_blk_drain_io(pending_req->blkif);
1317 
	/*
	 * If we have failed at this point, we need to set gnttab_set_unmap_op
	 * on all of the grant references and perform the hypercall to unmap
	 * the grants - that is all done in xen_blkbk_unmap.
	 */
1324 	if (xen_blkbk_map_seg(pending_req))
1325 		goto fail_flush;
1326 
	/*
	 * The corresponding xen_blkif_put is done in
	 * xen_blkbk_unmap_and_respond_callback, once all bios of this
	 * request have completed.
	 */
1331 	xen_blkif_get(blkif);
1332 	atomic_inc(&blkif->inflight);
1333 
1334 	for (i = 0; i < nseg; i++) {
1335 		while ((bio == NULL) ||
1336 		       (bio_add_page(bio,
1337 				     pages[i]->page,
1338 				     seg[i].nsec << 9,
1339 				     seg[i].offset) == 0)) {
1340 
1341 			int nr_iovecs = min_t(int, (nseg-i), BIO_MAX_PAGES);
1342 			bio = bio_alloc(GFP_KERNEL, nr_iovecs);
1343 			if (unlikely(bio == NULL))
1344 				goto fail_put_bio;
1345 
1346 			biolist[nbio++] = bio;
1347 			bio->bi_bdev    = preq.bdev;
1348 			bio->bi_private = pending_req;
1349 			bio->bi_end_io  = end_block_io_op;
1350 			bio->bi_iter.bi_sector  = preq.sector_number;
1351 		}
1352 
1353 		preq.sector_number += seg[i].nsec;
1354 	}
1355 
1356 	/* This will be hit if the operation was a flush or discard. */
1357 	if (!bio) {
1358 		BUG_ON(operation != WRITE_FLUSH);
1359 
1360 		bio = bio_alloc(GFP_KERNEL, 0);
1361 		if (unlikely(bio == NULL))
1362 			goto fail_put_bio;
1363 
1364 		biolist[nbio++] = bio;
1365 		bio->bi_bdev    = preq.bdev;
1366 		bio->bi_private = pending_req;
1367 		bio->bi_end_io  = end_block_io_op;
1368 	}
1369 
1370 	atomic_set(&pending_req->pendcnt, nbio);
1371 	blk_start_plug(&plug);
1372 
1373 	for (i = 0; i < nbio; i++)
1374 		submit_bio(operation, biolist[i]);
1375 
1376 	/* Let the I/Os go.. */
1377 	blk_finish_plug(&plug);
1378 
1379 	if (operation == READ)
1380 		blkif->st_rd_sect += preq.nr_sects;
1381 	else if (operation & WRITE)
1382 		blkif->st_wr_sect += preq.nr_sects;
1383 
1384 	return 0;
1385 
1386  fail_flush:
1387 	xen_blkbk_unmap(blkif, pending_req->segments,
1388 	                pending_req->nr_pages);
1389  fail_response:
1390 	/* Haven't submitted any bio's yet. */
1391 	make_response(blkif, req->u.rw.id, req_operation, BLKIF_RSP_ERROR);
1392 	free_req(blkif, pending_req);
1393 	msleep(1); /* back off a bit */
1394 	return -EIO;
1395 
1396  fail_put_bio:
1397 	for (i = 0; i < nbio; i++)
1398 		bio_put(biolist[i]);
1399 	atomic_set(&pending_req->pendcnt, 1);
1400 	__end_block_io_op(pending_req, -EINVAL);
1401 	msleep(1); /* back off a bit */
1402 	return -EIO;
1403 }
1404 
1405 
1406 
1407 /*
1408  * Put a response on the ring on how the operation fared.
1409  */
1410 static void make_response(struct xen_blkif *blkif, u64 id,
1411 			  unsigned short op, int st)
1412 {
1413 	struct blkif_response  resp;
1414 	unsigned long     flags;
1415 	union blkif_back_rings *blk_rings = &blkif->blk_rings;
1416 	int notify;
1417 
1418 	resp.id        = id;
1419 	resp.operation = op;
1420 	resp.status    = st;
1421 
1422 	spin_lock_irqsave(&blkif->blk_ring_lock, flags);
1423 	/* Place on the response ring for the relevant domain. */
1424 	switch (blkif->blk_protocol) {
1425 	case BLKIF_PROTOCOL_NATIVE:
1426 		memcpy(RING_GET_RESPONSE(&blk_rings->native, blk_rings->native.rsp_prod_pvt),
1427 		       &resp, sizeof(resp));
1428 		break;
1429 	case BLKIF_PROTOCOL_X86_32:
1430 		memcpy(RING_GET_RESPONSE(&blk_rings->x86_32, blk_rings->x86_32.rsp_prod_pvt),
1431 		       &resp, sizeof(resp));
1432 		break;
1433 	case BLKIF_PROTOCOL_X86_64:
1434 		memcpy(RING_GET_RESPONSE(&blk_rings->x86_64, blk_rings->x86_64.rsp_prod_pvt),
1435 		       &resp, sizeof(resp));
1436 		break;
1437 	default:
1438 		BUG();
1439 	}
1440 	blk_rings->common.rsp_prod_pvt++;
1441 	RING_PUSH_RESPONSES_AND_CHECK_NOTIFY(&blk_rings->common, notify);
1442 	spin_unlock_irqrestore(&blkif->blk_ring_lock, flags);
1443 	if (notify)
1444 		notify_remote_via_irq(blkif->irq);
1445 }
1446 
1447 static int __init xen_blkif_init(void)
1448 {
1449 	int rc = 0;
1450 
1451 	if (!xen_domain())
1452 		return -ENODEV;
1453 
1454 	rc = xen_blkif_interface_init();
1455 	if (rc)
1456 		goto failed_init;
1457 
1458 	rc = xen_blkif_xenbus_init();
1459 	if (rc)
1460 		goto failed_init;
1461 
1462  failed_init:
1463 	return rc;
1464 }
1465 
1466 module_init(xen_blkif_init);
1467 
1468 MODULE_LICENSE("Dual BSD/GPL");
1469 MODULE_ALIAS("xen-backend:vbd");
1470