/******************************************************************************
 *
 * Back-end of the driver for virtual block devices. This portion of the
 * driver exports a 'unified' block-device interface that can be accessed
 * by any operating system that implements a compatible front end. A
 * reference front-end implementation can be found in:
 *  drivers/block/xen-blkfront.c
 *
 * Copyright (c) 2003-2004, Keir Fraser & Steve Hand
 * Copyright (c) 2005, Christopher Clark
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License version 2
 * as published by the Free Software Foundation; or, when distributed
 * separately from the Linux kernel or incorporated into other
 * software packages, subject to the following license:
 *
 * Permission is hereby granted, free of charge, to any person obtaining a copy
 * of this source file (the "Software"), to deal in the Software without
 * restriction, including without limitation the rights to use, copy, modify,
 * merge, publish, distribute, sublicense, and/or sell copies of the Software,
 * and to permit persons to whom the Software is furnished to do so, subject to
 * the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
 * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
 * IN THE SOFTWARE.
 */

#define pr_fmt(fmt) "xen-blkback: " fmt

#include <linux/spinlock.h>
#include <linux/kthread.h>
#include <linux/list.h>
#include <linux/delay.h>
#include <linux/freezer.h>
#include <linux/bitmap.h>

#include <xen/events.h>
#include <xen/page.h>
#include <xen/xen.h>
#include <asm/xen/hypervisor.h>
#include <asm/xen/hypercall.h>
#include <xen/balloon.h>
#include <xen/grant_table.h>
#include "common.h"
/*
 * Maximum number of unused free pages to keep in the internal buffer.
 * Setting this too low reduces the memory used by each backend, but can
 * incur a performance penalty.
 *
 * A sane value is xen_blkif_reqs * BLKIF_MAX_SEGMENTS_PER_REQUEST; it can
 * be set to a lower value, which might degrade performance on some
 * I/O-intensive workloads.
 */

static int max_buffer_pages = 1024;
module_param_named(max_buffer_pages, max_buffer_pages, int, 0644);
MODULE_PARM_DESC(max_buffer_pages,
		 "Maximum number of free pages to keep in each block backend buffer");

/*
 * Maximum number of grants to map persistently in blkback. For maximum
 * performance this should be the total number of grants that can be used
 * to fill the ring, but since this might become too high, especially with
 * the use of indirect descriptors, we set it to a value that provides good
 * performance without using too much memory.
 *
 * When the list of persistent grants is full we clean it up using an LRU
 * algorithm.
 */

static int max_pgrants = 1056;
module_param_named(max_persistent_grants, max_pgrants, int, 0644);
MODULE_PARM_DESC(max_persistent_grants,
		 "Maximum number of grants to map persistently");

/*
 * How long a persistent grant is allowed to remain allocated without being in
 * use. The time is in seconds; 0 means unlimited.
 */

static unsigned int pgrant_timeout = 60;
module_param_named(persistent_grant_unused_seconds, pgrant_timeout,
		   uint, 0644);
MODULE_PARM_DESC(persistent_grant_unused_seconds,
		 "Time in seconds an unused persistent grant is allowed to "
		 "remain allocated. Default is 60, 0 means unlimited.");

/*
 * Maximum number of rings/queues blkback supports; if the user has not
 * specified a value, allow as many queues as there are CPUs.
 */
unsigned int xenblk_max_queues;
module_param_named(max_queues, xenblk_max_queues, uint, 0644);
MODULE_PARM_DESC(max_queues,
		 "Maximum number of hardware queues per virtual disk. "
		 "By default it is the number of online CPUs.");

/*
 * Maximum order of pages to be used for the shared ring between front and
 * backend; 4KB page granularity is used.
 */
unsigned int xen_blkif_max_ring_order = XENBUS_MAX_RING_GRANT_ORDER;
module_param_named(max_ring_page_order, xen_blkif_max_ring_order, uint, 0444);
MODULE_PARM_DESC(max_ring_page_order, "Maximum order of pages to be used for the shared ring");
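
/*
 * Worked example: an order-N ring spans 2^N pages of 4KB each, so e.g.
 * max_ring_page_order = 2 permits a shared ring of up to 4 pages (16KB);
 * the actual order is negotiated with the frontend via the
 * "max-ring-page-order" xenbus node.
 */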

/*
 * The LRU mechanism that cleans the lists of persistent grants needs to
 * run periodically. This is the time interval, in milliseconds, between
 * consecutive executions of the purge mechanism.
 */
#define LRU_INTERVAL 100

/*
 * When the list of persistent grants is full we remove unused grants
 * from it. This is the percentage of grants removed on each LRU
 * execution.
 */
#define LRU_PERCENT_CLEAN 5
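
/*
 * Worked example (see purge_persistent_gnt()): with the default
 * max_pgrants of 1056, a purge pass that finds the list overflowed
 * removes at least (1056 / 100) * 5 = 50 grants on top of the excess
 * over max_pgrants.
 */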

/* Run-time switchable: /sys/module/xen_blkback/parameters/ */
static unsigned int log_stats;
module_param(log_stats, uint, 0644);

#define BLKBACK_INVALID_HANDLE (~0)

/* Number of free pages to remove on each call to gnttab_free_pages() */
#define NUM_BATCH_FREE_PAGES 10

static inline bool persistent_gnt_timeout(struct persistent_gnt *persistent_gnt)
{
	return pgrant_timeout && (jiffies - persistent_gnt->last_used >=
			HZ * pgrant_timeout);
}
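
/*
 * Example: with the default pgrant_timeout of 60, a grant whose last_used
 * stamp lies more than 60 * HZ jiffies in the past is considered expired
 * and becomes a candidate for the LRU purge; pgrant_timeout == 0 disables
 * the check entirely.
 */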

static inline int get_free_page(struct xen_blkif_ring *ring, struct page **page)
{
	unsigned long flags;

	spin_lock_irqsave(&ring->free_pages_lock, flags);
	if (list_empty(&ring->free_pages)) {
		BUG_ON(ring->free_pages_num != 0);
		spin_unlock_irqrestore(&ring->free_pages_lock, flags);
		return gnttab_alloc_pages(1, page);
	}
	BUG_ON(ring->free_pages_num == 0);
	page[0] = list_first_entry(&ring->free_pages, struct page, lru);
	list_del(&page[0]->lru);
	ring->free_pages_num--;
	spin_unlock_irqrestore(&ring->free_pages_lock, flags);

	return 0;
}

static inline void put_free_pages(struct xen_blkif_ring *ring, struct page **page,
				  int num)
{
	unsigned long flags;
	int i;

	spin_lock_irqsave(&ring->free_pages_lock, flags);
	for (i = 0; i < num; i++)
		list_add(&page[i]->lru, &ring->free_pages);
	ring->free_pages_num += num;
	spin_unlock_irqrestore(&ring->free_pages_lock, flags);
}

static inline void shrink_free_pagepool(struct xen_blkif_ring *ring, int num)
{
	/* Remove requested pages in batches of NUM_BATCH_FREE_PAGES */
	struct page *page[NUM_BATCH_FREE_PAGES];
	unsigned int num_pages = 0;
	unsigned long flags;

	spin_lock_irqsave(&ring->free_pages_lock, flags);
	while (ring->free_pages_num > num) {
		BUG_ON(list_empty(&ring->free_pages));
		page[num_pages] = list_first_entry(&ring->free_pages,
						   struct page, lru);
		list_del(&page[num_pages]->lru);
		ring->free_pages_num--;
		if (++num_pages == NUM_BATCH_FREE_PAGES) {
			spin_unlock_irqrestore(&ring->free_pages_lock, flags);
			gnttab_free_pages(num_pages, page);
			spin_lock_irqsave(&ring->free_pages_lock, flags);
			num_pages = 0;
		}
	}
	spin_unlock_irqrestore(&ring->free_pages_lock, flags);
	if (num_pages != 0)
		gnttab_free_pages(num_pages, page);
}

#define vaddr(page) ((unsigned long)pfn_to_kaddr(page_to_pfn(page)))

static int do_block_io_op(struct xen_blkif_ring *ring, unsigned int *eoi_flags);
static int dispatch_rw_block_io(struct xen_blkif_ring *ring,
				struct blkif_request *req,
				struct pending_req *pending_req);
static void make_response(struct xen_blkif_ring *ring, u64 id,
			  unsigned short op, int st);

#define foreach_grant_safe(pos, n, rbtree, node) \
	for ((pos) = container_of(rb_first((rbtree)), typeof(*(pos)), node), \
	     (n) = (&(pos)->node != NULL) ? rb_next(&(pos)->node) : NULL; \
	     &(pos)->node != NULL; \
	     (pos) = container_of(n, typeof(*(pos)), node), \
	     (n) = (&(pos)->node != NULL) ? rb_next(&(pos)->node) : NULL)
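
/*
 * Usage sketch (simplified from free_persistent_gnts() below): 'n'
 * caches the next node up front, so the loop body may safely erase
 * 'pos' from the tree while iterating:
 *
 *	foreach_grant_safe(persistent_gnt, n, root, node) {
 *		rb_erase(&persistent_gnt->node, root);
 *		kfree(persistent_gnt);
 *	}
 */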

/*
 * We don't need locking around the persistent grant helpers because
 * blkback uses a single thread for each backend, so we can be sure
 * that these functions will never be called recursively.
 *
 * The only exception to that is put_persistent_gnt, which can be called
 * from interrupt context (by xen_blkbk_unmap), so we have to use atomic
 * bit operations to modify the flags of a persistent grant and to count
 * the number of used grants.
 */
static int add_persistent_gnt(struct xen_blkif_ring *ring,
			      struct persistent_gnt *persistent_gnt)
{
	struct rb_node **new = NULL, *parent = NULL;
	struct persistent_gnt *this;
	struct xen_blkif *blkif = ring->blkif;

	if (ring->persistent_gnt_c >= max_pgrants) {
		if (!blkif->vbd.overflow_max_grants)
			blkif->vbd.overflow_max_grants = 1;
		return -EBUSY;
	}
	/* Figure out where to put the new node */
	new = &ring->persistent_gnts.rb_node;
	while (*new) {
		this = container_of(*new, struct persistent_gnt, node);

		parent = *new;
		if (persistent_gnt->gnt < this->gnt)
			new = &((*new)->rb_left);
		else if (persistent_gnt->gnt > this->gnt)
			new = &((*new)->rb_right);
		else {
			pr_alert_ratelimited("trying to add a gref that's already in the tree\n");
			return -EINVAL;
		}
	}

	persistent_gnt->active = true;
	/* Add new node and rebalance tree. */
	rb_link_node(&(persistent_gnt->node), parent, new);
	rb_insert_color(&(persistent_gnt->node), &ring->persistent_gnts);
	ring->persistent_gnt_c++;
	atomic_inc(&ring->persistent_gnt_in_use);
	return 0;
}

static struct persistent_gnt *get_persistent_gnt(struct xen_blkif_ring *ring,
						 grant_ref_t gref)
{
	struct persistent_gnt *data;
	struct rb_node *node = NULL;

	node = ring->persistent_gnts.rb_node;
	while (node) {
		data = container_of(node, struct persistent_gnt, node);

		if (gref < data->gnt)
			node = node->rb_left;
		else if (gref > data->gnt)
			node = node->rb_right;
		else {
			if (data->active) {
				pr_alert_ratelimited("requesting a grant already in use\n");
				return NULL;
			}
			data->active = true;
			atomic_inc(&ring->persistent_gnt_in_use);
			return data;
		}
	}
	return NULL;
}

static void put_persistent_gnt(struct xen_blkif_ring *ring,
			       struct persistent_gnt *persistent_gnt)
{
	if (!persistent_gnt->active)
		pr_alert_ratelimited("freeing a grant already unused\n");
	persistent_gnt->last_used = jiffies;
	persistent_gnt->active = false;
	atomic_dec(&ring->persistent_gnt_in_use);
}

static void free_persistent_gnts(struct xen_blkif_ring *ring, struct rb_root *root,
				 unsigned int num)
{
	struct gnttab_unmap_grant_ref unmap[BLKIF_MAX_SEGMENTS_PER_REQUEST];
	struct page *pages[BLKIF_MAX_SEGMENTS_PER_REQUEST];
	struct persistent_gnt *persistent_gnt;
	struct rb_node *n;
	int segs_to_unmap = 0;
	struct gntab_unmap_queue_data unmap_data;

	unmap_data.pages = pages;
	unmap_data.unmap_ops = unmap;
	unmap_data.kunmap_ops = NULL;

	foreach_grant_safe(persistent_gnt, n, root, node) {
		BUG_ON(persistent_gnt->handle ==
			BLKBACK_INVALID_HANDLE);
		gnttab_set_unmap_op(&unmap[segs_to_unmap],
			(unsigned long) pfn_to_kaddr(page_to_pfn(
				persistent_gnt->page)),
			GNTMAP_host_map,
			persistent_gnt->handle);

		pages[segs_to_unmap] = persistent_gnt->page;

		if (++segs_to_unmap == BLKIF_MAX_SEGMENTS_PER_REQUEST ||
			!rb_next(&persistent_gnt->node)) {

			unmap_data.count = segs_to_unmap;
			BUG_ON(gnttab_unmap_refs_sync(&unmap_data));

			put_free_pages(ring, pages, segs_to_unmap);
			segs_to_unmap = 0;
		}

		rb_erase(&persistent_gnt->node, root);
		kfree(persistent_gnt);
		num--;
	}
	BUG_ON(num != 0);
}

void xen_blkbk_unmap_purged_grants(struct work_struct *work)
{
	struct gnttab_unmap_grant_ref unmap[BLKIF_MAX_SEGMENTS_PER_REQUEST];
	struct page *pages[BLKIF_MAX_SEGMENTS_PER_REQUEST];
	struct persistent_gnt *persistent_gnt;
	int segs_to_unmap = 0;
	struct xen_blkif_ring *ring = container_of(work, typeof(*ring), persistent_purge_work);
	struct gntab_unmap_queue_data unmap_data;

	unmap_data.pages = pages;
	unmap_data.unmap_ops = unmap;
	unmap_data.kunmap_ops = NULL;

	while (!list_empty(&ring->persistent_purge_list)) {
		persistent_gnt = list_first_entry(&ring->persistent_purge_list,
						  struct persistent_gnt,
						  remove_node);
		list_del(&persistent_gnt->remove_node);

		gnttab_set_unmap_op(&unmap[segs_to_unmap],
			vaddr(persistent_gnt->page),
			GNTMAP_host_map,
			persistent_gnt->handle);

		pages[segs_to_unmap] = persistent_gnt->page;

		if (++segs_to_unmap == BLKIF_MAX_SEGMENTS_PER_REQUEST) {
			unmap_data.count = segs_to_unmap;
			BUG_ON(gnttab_unmap_refs_sync(&unmap_data));
			put_free_pages(ring, pages, segs_to_unmap);
			segs_to_unmap = 0;
		}
		kfree(persistent_gnt);
	}
	if (segs_to_unmap > 0) {
		unmap_data.count = segs_to_unmap;
		BUG_ON(gnttab_unmap_refs_sync(&unmap_data));
		put_free_pages(ring, pages, segs_to_unmap);
	}
}

static void purge_persistent_gnt(struct xen_blkif_ring *ring)
{
	struct persistent_gnt *persistent_gnt;
	struct rb_node *n;
	unsigned int num_clean, total;
	bool scan_used = false;
	struct rb_root *root;

	if (work_busy(&ring->persistent_purge_work)) {
		pr_alert_ratelimited("Scheduled work from previous purge is still busy, cannot purge list\n");
		goto out;
	}

	if (ring->persistent_gnt_c < max_pgrants ||
	    (ring->persistent_gnt_c == max_pgrants &&
	    !ring->blkif->vbd.overflow_max_grants)) {
		num_clean = 0;
	} else {
		num_clean = (max_pgrants / 100) * LRU_PERCENT_CLEAN;
		num_clean = ring->persistent_gnt_c - max_pgrants + num_clean;
		num_clean = min(ring->persistent_gnt_c, num_clean);
		pr_debug("Going to purge at least %u persistent grants\n",
			 num_clean);
	}

	/*
	 * At this point, we can be sure that there will be no calls
	 * to get_persistent_gnt (because we are executing this code from
	 * xen_blkif_schedule), there can only be calls to put_persistent_gnt,
	 * which means that the number of currently used grants will go down,
	 * but never up, so we will always be able to remove the requested
	 * number of grants.
	 */

	total = 0;

	BUG_ON(!list_empty(&ring->persistent_purge_list));
	root = &ring->persistent_gnts;
purge_list:
	foreach_grant_safe(persistent_gnt, n, root, node) {
		BUG_ON(persistent_gnt->handle ==
			BLKBACK_INVALID_HANDLE);

		if (persistent_gnt->active)
			continue;
		if (!scan_used && !persistent_gnt_timeout(persistent_gnt))
			continue;
		if (scan_used && total >= num_clean)
			continue;

		rb_erase(&persistent_gnt->node, root);
		list_add(&persistent_gnt->remove_node,
			 &ring->persistent_purge_list);
		total++;
	}
	/*
	 * Check whether we also need to start cleaning grants that were
	 * used since the last purge in order to cope with the requested
	 * number.
	 */
	if (!scan_used && total < num_clean) {
		pr_debug("Still missing %u purged frames\n", num_clean - total);
		scan_used = true;
		goto purge_list;
	}

	if (total) {
		ring->persistent_gnt_c -= total;
		ring->blkif->vbd.overflow_max_grants = 0;

		/* We can defer this work */
		schedule_work(&ring->persistent_purge_work);
		pr_debug("Purged %u/%u\n", num_clean, total);
	}

out:
	return;
}

/*
 * Retrieve a free pending_req structure from the 'pending_free' list to
 * be used.
 */
static struct pending_req *alloc_req(struct xen_blkif_ring *ring)
{
	struct pending_req *req = NULL;
	unsigned long flags;

	spin_lock_irqsave(&ring->pending_free_lock, flags);
	if (!list_empty(&ring->pending_free)) {
		req = list_entry(ring->pending_free.next, struct pending_req,
				 free_list);
		list_del(&req->free_list);
	}
	spin_unlock_irqrestore(&ring->pending_free_lock, flags);
	return req;
}

/*
 * Return the 'pending_req' structure back to the freepool. We also
 * wake up the thread if it was waiting for a free structure.
 */
static void free_req(struct xen_blkif_ring *ring, struct pending_req *req)
{
	unsigned long flags;
	int was_empty;

	spin_lock_irqsave(&ring->pending_free_lock, flags);
	was_empty = list_empty(&ring->pending_free);
	list_add(&req->free_list, &ring->pending_free);
	spin_unlock_irqrestore(&ring->pending_free_lock, flags);
	if (was_empty)
		wake_up(&ring->pending_free_wq);
}

/*
 * Routines for managing virtual block devices (vbds).
 */
static int xen_vbd_translate(struct phys_req *req, struct xen_blkif *blkif,
			     int operation)
{
	struct xen_vbd *vbd = &blkif->vbd;
	int rc = -EACCES;

	if ((operation != REQ_OP_READ) && vbd->readonly)
		goto out;

	if (likely(req->nr_sects)) {
		blkif_sector_t end = req->sector_number + req->nr_sects;

		if (unlikely(end < req->sector_number))
			goto out;
		if (unlikely(end > vbd_sz(vbd)))
			goto out;
	}

	req->dev  = vbd->pdevice;
	req->bdev = vbd->bdev;
	rc = 0;

 out:
	return rc;
}

static void xen_vbd_resize(struct xen_blkif *blkif)
{
	struct xen_vbd *vbd = &blkif->vbd;
	struct xenbus_transaction xbt;
	int err;
	struct xenbus_device *dev = xen_blkbk_xenbus(blkif->be);
	unsigned long long new_size = vbd_sz(vbd);

	pr_info("VBD Resize: Domid: %d, Device: (%d, %d)\n",
		blkif->domid, MAJOR(vbd->pdevice), MINOR(vbd->pdevice));
	pr_info("VBD Resize: new size %llu\n", new_size);
	vbd->size = new_size;
again:
	err = xenbus_transaction_start(&xbt);
	if (err) {
		pr_warn("Error starting transaction\n");
		return;
	}
	err = xenbus_printf(xbt, dev->nodename, "sectors", "%llu",
			    (unsigned long long)vbd_sz(vbd));
	if (err) {
		pr_warn("Error writing new size\n");
		goto abort;
	}
	/*
	 * Write the current state; we will use this to synchronize
	 * the front-end. If the current state is "connected" the
	 * front-end will get the new size information online.
	 */
	err = xenbus_printf(xbt, dev->nodename, "state", "%d", dev->state);
	if (err) {
		pr_warn("Error writing the state\n");
		goto abort;
	}

	err = xenbus_transaction_end(xbt, 0);
	if (err == -EAGAIN)
		goto again;
	if (err)
		pr_warn("Error ending transaction\n");
	return;
abort:
	xenbus_transaction_end(xbt, 1);
}
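
/*
 * Illustrative xenstore layout (exact paths depend on the toolstack):
 * dev->nodename above typically points at the backend directory, e.g.
 *
 *	/local/domain/0/backend/vbd/<frontend-domid>/<devid>/sectors
 *	/local/domain/0/backend/vbd/<frontend-domid>/<devid>/state
 */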

/*
 * Notification from the guest OS.
 */
static void blkif_notify_work(struct xen_blkif_ring *ring)
{
	ring->waiting_reqs = 1;
	wake_up(&ring->wq);
}

irqreturn_t xen_blkif_be_int(int irq, void *dev_id)
{
	blkif_notify_work(dev_id);
	return IRQ_HANDLED;
}

/*
 * SCHEDULER FUNCTIONS
 */

static void print_stats(struct xen_blkif_ring *ring)
{
	pr_info("(%s): oo %3llu  |  rd %4llu  |  wr %4llu  |  f %4llu"
		 "  |  ds %4llu | pg: %4u/%4d\n",
		 current->comm, ring->st_oo_req,
		 ring->st_rd_req, ring->st_wr_req,
		 ring->st_f_req, ring->st_ds_req,
		 ring->persistent_gnt_c, max_pgrants);
	ring->st_print = jiffies + msecs_to_jiffies(10 * 1000);
	ring->st_rd_req = 0;
	ring->st_wr_req = 0;
	ring->st_oo_req = 0;
	ring->st_ds_req = 0;
}

int xen_blkif_schedule(void *arg)
{
	struct xen_blkif_ring *ring = arg;
	struct xen_blkif *blkif = ring->blkif;
	struct xen_vbd *vbd = &blkif->vbd;
	unsigned long timeout;
	int ret;
	bool do_eoi;
	unsigned int eoi_flags = XEN_EOI_FLAG_SPURIOUS;

	set_freezable();
	while (!kthread_should_stop()) {
		if (try_to_freeze())
			continue;
		if (unlikely(vbd->size != vbd_sz(vbd)))
			xen_vbd_resize(blkif);

		timeout = msecs_to_jiffies(LRU_INTERVAL);

		timeout = wait_event_interruptible_timeout(
			ring->wq,
			ring->waiting_reqs || kthread_should_stop(),
			timeout);
		if (timeout == 0)
			goto purge_gnt_list;
		timeout = wait_event_interruptible_timeout(
			ring->pending_free_wq,
			!list_empty(&ring->pending_free) ||
			kthread_should_stop(),
			timeout);
		if (timeout == 0)
			goto purge_gnt_list;

		do_eoi = ring->waiting_reqs;

		ring->waiting_reqs = 0;
		smp_mb(); /* clear flag *before* checking for work */

		ret = do_block_io_op(ring, &eoi_flags);
		if (ret > 0)
			ring->waiting_reqs = 1;
		if (ret == -EACCES)
			wait_event_interruptible(ring->shutdown_wq,
						 kthread_should_stop());

		if (do_eoi && !ring->waiting_reqs) {
			xen_irq_lateeoi(ring->irq, eoi_flags);
			eoi_flags |= XEN_EOI_FLAG_SPURIOUS;
		}

purge_gnt_list:
		if (blkif->vbd.feature_gnt_persistent &&
		    time_after(jiffies, ring->next_lru)) {
			purge_persistent_gnt(ring);
			ring->next_lru = jiffies + msecs_to_jiffies(LRU_INTERVAL);
		}

		/* Shrink the free pages pool if it is too large. */
		if (time_before(jiffies, blkif->buffer_squeeze_end))
			shrink_free_pagepool(ring, 0);
		else
			shrink_free_pagepool(ring, max_buffer_pages);

		if (log_stats && time_after(jiffies, ring->st_print))
			print_stats(ring);
	}

	/* Drain pending purge work */
	flush_work(&ring->persistent_purge_work);

	if (log_stats)
		print_stats(ring);

	ring->xenblkd = NULL;

	return 0;
}

/*
 * Remove persistent grants and empty the pool of free pages
 */
void xen_blkbk_free_caches(struct xen_blkif_ring *ring)
{
	/* Free all persistent grant pages */
	if (!RB_EMPTY_ROOT(&ring->persistent_gnts))
		free_persistent_gnts(ring, &ring->persistent_gnts,
			ring->persistent_gnt_c);

	BUG_ON(!RB_EMPTY_ROOT(&ring->persistent_gnts));
	ring->persistent_gnt_c = 0;

	/* Since we are shutting down, remove all pages from the buffer */
	shrink_free_pagepool(ring, 0 /* All */);
}

static unsigned int xen_blkbk_unmap_prepare(
	struct xen_blkif_ring *ring,
	struct grant_page **pages,
	unsigned int num,
	struct gnttab_unmap_grant_ref *unmap_ops,
	struct page **unmap_pages)
{
	unsigned int i, invcount = 0;

	for (i = 0; i < num; i++) {
		if (pages[i]->persistent_gnt != NULL) {
			put_persistent_gnt(ring, pages[i]->persistent_gnt);
			continue;
		}
		if (pages[i]->handle == BLKBACK_INVALID_HANDLE)
			continue;
		unmap_pages[invcount] = pages[i]->page;
		gnttab_set_unmap_op(&unmap_ops[invcount], vaddr(pages[i]->page),
				    GNTMAP_host_map, pages[i]->handle);
		pages[i]->handle = BLKBACK_INVALID_HANDLE;
		invcount++;
	}

	return invcount;
}

static void xen_blkbk_unmap_and_respond_callback(int result, struct gntab_unmap_queue_data *data)
{
	struct pending_req *pending_req = (struct pending_req *)(data->data);
	struct xen_blkif_ring *ring = pending_req->ring;
	struct xen_blkif *blkif = ring->blkif;

	/*
	 * BUG_ON used to reproduce existing behaviour,
	 * but is this the best way to deal with this?
	 */
	BUG_ON(result);

	put_free_pages(ring, data->pages, data->count);
	make_response(ring, pending_req->id,
		      pending_req->operation, pending_req->status);
	free_req(ring, pending_req);
	/*
	 * Make sure the request is freed before releasing blkif,
	 * or there could be a race between free_req and the
	 * cleanup done in xen_blkif_free during shutdown.
	 *
	 * NB: The fact that we might try to wake up pending_free_wq
	 * before drain_complete (in case there's a drain going on) is
	 * not a problem with our current implementation, because we
	 * can be sure there's no thread waiting on pending_free_wq if
	 * there's a drain going on; but it has to be taken into
	 * account if the current model is changed.
	 */
	if (atomic_dec_and_test(&ring->inflight) && atomic_read(&blkif->drain))
		complete(&blkif->drain_complete);
	xen_blkif_put(blkif);
}

static void xen_blkbk_unmap_and_respond(struct pending_req *req)
{
	struct gntab_unmap_queue_data *work = &req->gnttab_unmap_data;
	struct xen_blkif_ring *ring = req->ring;
	struct grant_page **pages = req->segments;
	unsigned int invcount;

	invcount = xen_blkbk_unmap_prepare(ring, pages, req->nr_segs,
					   req->unmap, req->unmap_pages);

	work->data = req;
	work->done = xen_blkbk_unmap_and_respond_callback;
	work->unmap_ops = req->unmap;
	work->kunmap_ops = NULL;
	work->pages = req->unmap_pages;
	work->count = invcount;

	gnttab_unmap_refs_async(&req->gnttab_unmap_data);
}

/*
 * Unmap the grant references.
 *
 * This could accumulate ops up to the batch size to reduce the number
 * of hypercalls, but since this is only used in error paths there's
 * no real need.
 */
static void xen_blkbk_unmap(struct xen_blkif_ring *ring,
			    struct grant_page *pages[],
			    int num)
{
	struct gnttab_unmap_grant_ref unmap[BLKIF_MAX_SEGMENTS_PER_REQUEST];
	struct page *unmap_pages[BLKIF_MAX_SEGMENTS_PER_REQUEST];
	unsigned int invcount = 0;
	int ret;

	while (num) {
		unsigned int batch = min(num, BLKIF_MAX_SEGMENTS_PER_REQUEST);

		invcount = xen_blkbk_unmap_prepare(ring, pages, batch,
						   unmap, unmap_pages);
		if (invcount) {
			ret = gnttab_unmap_refs(unmap, NULL, unmap_pages, invcount);
			BUG_ON(ret);
			put_free_pages(ring, unmap_pages, invcount);
		}
		pages += batch;
		num -= batch;
	}
}

static int xen_blkbk_map(struct xen_blkif_ring *ring,
			 struct grant_page *pages[],
			 int num, bool ro)
{
	struct gnttab_map_grant_ref map[BLKIF_MAX_SEGMENTS_PER_REQUEST];
	struct page *pages_to_gnt[BLKIF_MAX_SEGMENTS_PER_REQUEST];
	struct persistent_gnt *persistent_gnt = NULL;
	phys_addr_t addr = 0;
	int i, seg_idx, new_map_idx;
	int segs_to_map = 0;
	int ret = 0;
	int last_map = 0, map_until = 0;
	int use_persistent_gnts;
	struct xen_blkif *blkif = ring->blkif;

	use_persistent_gnts = (blkif->vbd.feature_gnt_persistent);

	/*
	 * Fill out preq.nr_sects with the proper number of sectors, and set
	 * up map[..] with the PFN of the page in our domain with the
	 * corresponding grant reference for each page.
	 */
again:
	for (i = map_until; i < num; i++) {
		uint32_t flags;

		if (use_persistent_gnts) {
			persistent_gnt = get_persistent_gnt(
				ring,
				pages[i]->gref);
		}

		if (persistent_gnt) {
			/*
			 * We are using persistent grants and
			 * the grant is already mapped
			 */
			pages[i]->page = persistent_gnt->page;
			pages[i]->persistent_gnt = persistent_gnt;
		} else {
			if (get_free_page(ring, &pages[i]->page))
				goto out_of_memory;
			addr = vaddr(pages[i]->page);
			pages_to_gnt[segs_to_map] = pages[i]->page;
			pages[i]->persistent_gnt = NULL;
			flags = GNTMAP_host_map;
			if (!use_persistent_gnts && ro)
				flags |= GNTMAP_readonly;
			gnttab_set_map_op(&map[segs_to_map++], addr,
					  flags, pages[i]->gref,
					  blkif->domid);
		}
		map_until = i + 1;
		if (segs_to_map == BLKIF_MAX_SEGMENTS_PER_REQUEST)
			break;
	}

	if (segs_to_map) {
		ret = gnttab_map_refs(map, NULL, pages_to_gnt, segs_to_map);
		BUG_ON(ret);
	}

	/*
	 * Now swizzle the MFN in our domain with the MFN from the other domain
	 * so that when we access vaddr(pending_req,i) it has the contents of
	 * the page from the other domain.
	 */
	for (seg_idx = last_map, new_map_idx = 0; seg_idx < map_until; seg_idx++) {
		if (!pages[seg_idx]->persistent_gnt) {
			/* This is a newly mapped grant */
			BUG_ON(new_map_idx >= segs_to_map);
			if (unlikely(map[new_map_idx].status != 0)) {
				pr_debug("invalid buffer -- could not remap it\n");
				put_free_pages(ring, &pages[seg_idx]->page, 1);
				pages[seg_idx]->handle = BLKBACK_INVALID_HANDLE;
				ret |= 1;
				goto next;
			}
			pages[seg_idx]->handle = map[new_map_idx].handle;
		} else {
			continue;
		}
		if (use_persistent_gnts &&
		    ring->persistent_gnt_c < max_pgrants) {
			/*
			 * We are using persistent grants, the grant is
			 * not mapped but we might have room for it.
			 */
			persistent_gnt = kmalloc(sizeof(struct persistent_gnt),
						 GFP_KERNEL);
			if (!persistent_gnt) {
				/*
				 * If we don't have enough memory to
				 * allocate the persistent_gnt struct,
				 * map this grant non-persistently
				 */
				goto next;
			}
			persistent_gnt->gnt = map[new_map_idx].ref;
			persistent_gnt->handle = map[new_map_idx].handle;
			persistent_gnt->page = pages[seg_idx]->page;
			if (add_persistent_gnt(ring,
					       persistent_gnt)) {
				kfree(persistent_gnt);
				persistent_gnt = NULL;
				goto next;
			}
			pages[seg_idx]->persistent_gnt = persistent_gnt;
			pr_debug("grant %u added to the tree of persistent grants, using %u/%u\n",
				 persistent_gnt->gnt, ring->persistent_gnt_c,
				 max_pgrants);
			goto next;
		}
		if (use_persistent_gnts && !blkif->vbd.overflow_max_grants) {
			blkif->vbd.overflow_max_grants = 1;
			pr_debug("domain %u, device %#x is using maximum number of persistent grants\n",
				 blkif->domid, blkif->vbd.handle);
		}
		/*
		 * We could not map this grant persistently, so use it as
		 * a non-persistent grant.
		 */
next:
		new_map_idx++;
	}
	segs_to_map = 0;
	last_map = map_until;
	if (map_until != num)
		goto again;

	return ret;

out_of_memory:
	pr_alert("%s: out of memory\n", __func__);
	put_free_pages(ring, pages_to_gnt, segs_to_map);
	for (i = last_map; i < num; i++)
		pages[i]->handle = BLKBACK_INVALID_HANDLE;
	return -ENOMEM;
}

static int xen_blkbk_map_seg(struct pending_req *pending_req)
{
	int rc;

	rc = xen_blkbk_map(pending_req->ring, pending_req->segments,
			   pending_req->nr_segs,
			   (pending_req->operation != BLKIF_OP_READ));

	return rc;
}

static int xen_blkbk_parse_indirect(struct blkif_request *req,
				    struct pending_req *pending_req,
				    struct seg_buf seg[],
				    struct phys_req *preq)
{
	struct grant_page **pages = pending_req->indirect_pages;
	struct xen_blkif_ring *ring = pending_req->ring;
	int indirect_grefs, rc, n, nseg, i;
	struct blkif_request_segment *segments = NULL;

	nseg = pending_req->nr_segs;
	indirect_grefs = INDIRECT_PAGES(nseg);
	BUG_ON(indirect_grefs > BLKIF_MAX_INDIRECT_PAGES_PER_REQUEST);
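
	/*
	 * Layout sketch: the request carries indirect_grefs grant refs, each
	 * naming one frontend page packed with struct blkif_request_segment
	 * entries, i.e. SEGS_PER_INDIRECT_FRAME =
	 * XEN_PAGE_SIZE / sizeof(struct blkif_request_segment) segments
	 * (512 with 4KB pages) per indirect frame.
	 */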
	for (i = 0; i < indirect_grefs; i++)
		pages[i]->gref = req->u.indirect.indirect_grefs[i];

	rc = xen_blkbk_map(ring, pages, indirect_grefs, true);
	if (rc)
		goto unmap;

	for (n = 0, i = 0; n < nseg; n++) {
		uint8_t first_sect, last_sect;

		if ((n % SEGS_PER_INDIRECT_FRAME) == 0) {
			/* Map indirect segments */
			if (segments)
				kunmap_atomic(segments);
			segments = kmap_atomic(pages[n/SEGS_PER_INDIRECT_FRAME]->page);
		}
		i = n % SEGS_PER_INDIRECT_FRAME;

		pending_req->segments[n]->gref = segments[i].gref;

		first_sect = READ_ONCE(segments[i].first_sect);
		last_sect = READ_ONCE(segments[i].last_sect);
		if (last_sect >= (XEN_PAGE_SIZE >> 9) || last_sect < first_sect) {
			rc = -EINVAL;
			goto unmap;
		}

		seg[n].nsec = last_sect - first_sect + 1;
		seg[n].offset = first_sect << 9;
		preq->nr_sects += seg[n].nsec;
	}

unmap:
	if (segments)
		kunmap_atomic(segments);
	xen_blkbk_unmap(ring, pages, indirect_grefs);
	return rc;
}

static int dispatch_discard_io(struct xen_blkif_ring *ring,
				struct blkif_request *req)
{
	int err = 0;
	int status = BLKIF_RSP_OKAY;
	struct xen_blkif *blkif = ring->blkif;
	struct block_device *bdev = blkif->vbd.bdev;
	unsigned long secure;
	struct phys_req preq;

	xen_blkif_get(blkif);

	preq.sector_number = req->u.discard.sector_number;
	preq.nr_sects      = req->u.discard.nr_sectors;

	err = xen_vbd_translate(&preq, blkif, REQ_OP_WRITE);
	if (err) {
		pr_warn("access denied: DISCARD [%llu->%llu] on dev=%04x\n",
			preq.sector_number,
			preq.sector_number + preq.nr_sects, blkif->vbd.pdevice);
		goto fail_response;
	}
	ring->st_ds_req++;

	secure = (blkif->vbd.discard_secure &&
		 (req->u.discard.flag & BLKIF_DISCARD_SECURE)) ?
		 BLKDEV_DISCARD_SECURE : 0;

	err = blkdev_issue_discard(bdev, req->u.discard.sector_number,
				   req->u.discard.nr_sectors,
				   GFP_KERNEL, secure);
fail_response:
	if (err == -EOPNOTSUPP) {
		pr_debug("discard op failed, not supported\n");
		status = BLKIF_RSP_EOPNOTSUPP;
	} else if (err)
		status = BLKIF_RSP_ERROR;

	make_response(ring, req->u.discard.id, req->operation, status);
	xen_blkif_put(blkif);
	return err;
}

static int dispatch_other_io(struct xen_blkif_ring *ring,
			     struct blkif_request *req,
			     struct pending_req *pending_req)
{
	free_req(ring, pending_req);
	make_response(ring, req->u.other.id, req->operation,
		      BLKIF_RSP_EOPNOTSUPP);
	return -EIO;
}

static void xen_blk_drain_io(struct xen_blkif_ring *ring)
{
	struct xen_blkif *blkif = ring->blkif;

	atomic_set(&blkif->drain, 1);
	do {
		if (atomic_read(&ring->inflight) == 0)
			break;
		wait_for_completion_interruptible_timeout(
				&blkif->drain_complete, HZ);

		if (!atomic_read(&blkif->drain))
			break;
	} while (!kthread_should_stop());
	atomic_set(&blkif->drain, 0);
}

static void __end_block_io_op(struct pending_req *pending_req,
		blk_status_t error)
{
	/* An error fails the entire request. */
	if (pending_req->operation == BLKIF_OP_FLUSH_DISKCACHE &&
	    error == BLK_STS_NOTSUPP) {
		pr_debug("flush diskcache op failed, not supported\n");
		xen_blkbk_flush_diskcache(XBT_NIL, pending_req->ring->blkif->be, 0);
		pending_req->status = BLKIF_RSP_EOPNOTSUPP;
	} else if (pending_req->operation == BLKIF_OP_WRITE_BARRIER &&
		   error == BLK_STS_NOTSUPP) {
		pr_debug("write barrier op failed, not supported\n");
		xen_blkbk_barrier(XBT_NIL, pending_req->ring->blkif->be, 0);
		pending_req->status = BLKIF_RSP_EOPNOTSUPP;
	} else if (error) {
		pr_debug("Buffer not up-to-date at end of operation, error=%d\n",
			 error);
		pending_req->status = BLKIF_RSP_ERROR;
	}

	/*
	 * If all of the bio's have completed it is time to unmap
	 * the grant references associated with 'request' and provide
	 * the proper response on the ring.
	 */
	if (atomic_dec_and_test(&pending_req->pendcnt))
		xen_blkbk_unmap_and_respond(pending_req);
}

/*
 * bio callback.
 */
static void end_block_io_op(struct bio *bio)
{
	__end_block_io_op(bio->bi_private, bio->bi_status);
	bio_put(bio);
}

/*
 * Copy the 'struct blkif_request' from the ring buffer (it carries the
 * sectors we want, how many of them, grant references, etc.) and
 * transmute it to the block API to hand it over to the proper block disk.
 */
static int
__do_block_io_op(struct xen_blkif_ring *ring, unsigned int *eoi_flags)
{
	union blkif_back_rings *blk_rings = &ring->blk_rings;
	struct blkif_request req;
	struct pending_req *pending_req;
	RING_IDX rc, rp;
	int more_to_do = 0;

	rc = blk_rings->common.req_cons;
	rp = blk_rings->common.sring->req_prod;
	rmb(); /* Ensure we see queued requests up to 'rp'. */

	if (RING_REQUEST_PROD_OVERFLOW(&blk_rings->common, rp)) {
		rc = blk_rings->common.rsp_prod_pvt;
		pr_warn("Frontend provided bogus ring requests (%d - %d = %d). Halting ring processing on dev=%04x\n",
			rp, rc, rp - rc, ring->blkif->vbd.pdevice);
		return -EACCES;
	}
	while (rc != rp) {

		if (RING_REQUEST_CONS_OVERFLOW(&blk_rings->common, rc))
			break;

		/* We've seen a request, so clear the spurious eoi flag. */
		*eoi_flags &= ~XEN_EOI_FLAG_SPURIOUS;

		if (kthread_should_stop()) {
			more_to_do = 1;
			break;
		}

		pending_req = alloc_req(ring);
		if (NULL == pending_req) {
			ring->st_oo_req++;
			more_to_do = 1;
			break;
		}

		switch (ring->blkif->blk_protocol) {
		case BLKIF_PROTOCOL_NATIVE:
			memcpy(&req, RING_GET_REQUEST(&blk_rings->native, rc), sizeof(req));
			break;
		case BLKIF_PROTOCOL_X86_32:
			blkif_get_x86_32_req(&req, RING_GET_REQUEST(&blk_rings->x86_32, rc));
			break;
		case BLKIF_PROTOCOL_X86_64:
			blkif_get_x86_64_req(&req, RING_GET_REQUEST(&blk_rings->x86_64, rc));
			break;
		default:
			BUG();
		}
		blk_rings->common.req_cons = ++rc; /* before make_response() */

		/* Apply all sanity checks to /private copy/ of request. */
		barrier();

		switch (req.operation) {
		case BLKIF_OP_READ:
		case BLKIF_OP_WRITE:
		case BLKIF_OP_WRITE_BARRIER:
		case BLKIF_OP_FLUSH_DISKCACHE:
		case BLKIF_OP_INDIRECT:
			if (dispatch_rw_block_io(ring, &req, pending_req))
				goto done;
			break;
		case BLKIF_OP_DISCARD:
			free_req(ring, pending_req);
			if (dispatch_discard_io(ring, &req))
				goto done;
			break;
		default:
			if (dispatch_other_io(ring, &req, pending_req))
				goto done;
			break;
		}

		/* Yield point for this unbounded loop. */
		cond_resched();
	}
done:
	return more_to_do;
}

static int
do_block_io_op(struct xen_blkif_ring *ring, unsigned int *eoi_flags)
{
	union blkif_back_rings *blk_rings = &ring->blk_rings;
	int more_to_do;

	do {
		more_to_do = __do_block_io_op(ring, eoi_flags);
		if (more_to_do)
			break;
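
		/*
		 * Re-check for requests after updating req_event: this
		 * closes the race with a frontend that queued a request
		 * after our last scan but before the event was rearmed,
		 * and tells us whether another pass is needed.
		 */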
		RING_FINAL_CHECK_FOR_REQUESTS(&blk_rings->common, more_to_do);
	} while (more_to_do);

	return more_to_do;
}

/*
 * Transmute the 'struct blkif_request' into a proper 'struct bio' and
 * call 'submit_bio' to pass it to the underlying storage.
 */
static int dispatch_rw_block_io(struct xen_blkif_ring *ring,
				struct blkif_request *req,
				struct pending_req *pending_req)
{
	struct phys_req preq;
	struct seg_buf *seg = pending_req->seg;
	unsigned int nseg;
	struct bio *bio = NULL;
	struct bio **biolist = pending_req->biolist;
	int i, nbio = 0;
	int operation;
	int operation_flags = 0;
	struct blk_plug plug;
	bool drain = false;
	struct grant_page **pages = pending_req->segments;
	unsigned short req_operation;

	req_operation = req->operation == BLKIF_OP_INDIRECT ?
			req->u.indirect.indirect_op : req->operation;

	if ((req->operation == BLKIF_OP_INDIRECT) &&
	    (req_operation != BLKIF_OP_READ) &&
	    (req_operation != BLKIF_OP_WRITE)) {
		pr_debug("Invalid indirect operation (%u)\n", req_operation);
		goto fail_response;
	}

	switch (req_operation) {
	case BLKIF_OP_READ:
		ring->st_rd_req++;
		operation = REQ_OP_READ;
		break;
	case BLKIF_OP_WRITE:
		ring->st_wr_req++;
		operation = REQ_OP_WRITE;
		operation_flags = REQ_SYNC | REQ_IDLE;
		break;
	case BLKIF_OP_WRITE_BARRIER:
		drain = true;
		fallthrough;
	case BLKIF_OP_FLUSH_DISKCACHE:
		ring->st_f_req++;
		operation = REQ_OP_WRITE;
		operation_flags = REQ_PREFLUSH;
		break;
	default:
		operation = 0; /* make gcc happy */
		goto fail_response;
	}

	/* Check that the number of segments is sane. */
	nseg = req->operation == BLKIF_OP_INDIRECT ?
	       req->u.indirect.nr_segments : req->u.rw.nr_segments;

	if (unlikely(nseg == 0 && operation_flags != REQ_PREFLUSH) ||
	    unlikely((req->operation != BLKIF_OP_INDIRECT) &&
		     (nseg > BLKIF_MAX_SEGMENTS_PER_REQUEST)) ||
	    unlikely((req->operation == BLKIF_OP_INDIRECT) &&
		     (nseg > MAX_INDIRECT_SEGMENTS))) {
		pr_debug("Bad number of segments in request (%d)\n", nseg);
		/* Haven't submitted any bio's yet. */
		goto fail_response;
	}

	preq.nr_sects      = 0;

	pending_req->ring      = ring;
	pending_req->id        = req->u.rw.id;
	pending_req->operation = req_operation;
	pending_req->status    = BLKIF_RSP_OKAY;
	pending_req->nr_segs   = nseg;

	if (req->operation != BLKIF_OP_INDIRECT) {
		preq.dev               = req->u.rw.handle;
		preq.sector_number     = req->u.rw.sector_number;
		for (i = 0; i < nseg; i++) {
			pages[i]->gref = req->u.rw.seg[i].gref;
			seg[i].nsec = req->u.rw.seg[i].last_sect -
				req->u.rw.seg[i].first_sect + 1;
			seg[i].offset = (req->u.rw.seg[i].first_sect << 9);
			if ((req->u.rw.seg[i].last_sect >= (XEN_PAGE_SIZE >> 9)) ||
			    (req->u.rw.seg[i].last_sect <
			     req->u.rw.seg[i].first_sect))
				goto fail_response;
			preq.nr_sects += seg[i].nsec;
		}
	} else {
		preq.dev               = req->u.indirect.handle;
		preq.sector_number     = req->u.indirect.sector_number;
		if (xen_blkbk_parse_indirect(req, pending_req, seg, &preq))
			goto fail_response;
	}

	if (xen_vbd_translate(&preq, ring->blkif, operation) != 0) {
		pr_debug("access denied: %s of [%llu,%llu] on dev=%04x\n",
			 operation == REQ_OP_READ ? "read" : "write",
			 preq.sector_number,
			 preq.sector_number + preq.nr_sects,
			 ring->blkif->vbd.pdevice);
		goto fail_response;
	}

	/*
	 * This check _MUST_ be done after xen_vbd_translate as the preq.bdev
	 * is set there.
	 */
	for (i = 0; i < nseg; i++) {
		if (((int)preq.sector_number|(int)seg[i].nsec) &
		    ((bdev_logical_block_size(preq.bdev) >> 9) - 1)) {
			pr_debug("Misaligned I/O request from domain %d\n",
				 ring->blkif->domid);
			goto fail_response;
		}
	}
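
	/*
	 * Worked example of the check above: a 4096-byte logical block size
	 * gives a mask of (4096 >> 9) - 1 = 7, so both the starting sector
	 * and every segment length must be multiples of 8 sectors of 512
	 * bytes, i.e. aligned to the logical block.
	 */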

	/*
	 * Wait on all outstanding I/Os and, once that has been completed,
	 * issue the flush.
	 */
	if (drain)
		xen_blk_drain_io(pending_req->ring);

	/*
	 * If we have failed at this point, we need to undo the M2P override,
	 * set gnttab_set_unmap_op on all of the grant references and perform
	 * the hypercall to unmap the grants - that is all done in
	 * xen_blkbk_unmap.
	 */
	if (xen_blkbk_map_seg(pending_req))
		goto fail_flush;

	/*
	 * This corresponding xen_blkif_put is done in __end_block_io_op, or
	 * below (in "!bio") if we are handling a BLKIF_OP_DISCARD.
	 */
	xen_blkif_get(ring->blkif);
	atomic_inc(&ring->inflight);

	for (i = 0; i < nseg; i++) {
		while ((bio == NULL) ||
		       (bio_add_page(bio,
				     pages[i]->page,
				     seg[i].nsec << 9,
				     seg[i].offset) == 0)) {

			int nr_iovecs = min_t(int, (nseg-i), BIO_MAX_PAGES);
			bio = bio_alloc(GFP_KERNEL, nr_iovecs);
			if (unlikely(bio == NULL))
				goto fail_put_bio;

			biolist[nbio++] = bio;
			bio_set_dev(bio, preq.bdev);
			bio->bi_private = pending_req;
			bio->bi_end_io  = end_block_io_op;
			bio->bi_iter.bi_sector  = preq.sector_number;
			bio_set_op_attrs(bio, operation, operation_flags);
		}

		preq.sector_number += seg[i].nsec;
	}

	/* This will be hit if the operation was a flush or discard. */
	if (!bio) {
		BUG_ON(operation_flags != REQ_PREFLUSH);

		bio = bio_alloc(GFP_KERNEL, 0);
		if (unlikely(bio == NULL))
			goto fail_put_bio;

		biolist[nbio++] = bio;
		bio_set_dev(bio, preq.bdev);
		bio->bi_private = pending_req;
		bio->bi_end_io  = end_block_io_op;
		bio_set_op_attrs(bio, operation, operation_flags);
	}

	atomic_set(&pending_req->pendcnt, nbio);
	blk_start_plug(&plug);

	for (i = 0; i < nbio; i++)
		submit_bio(biolist[i]);

	/* Let the I/Os go... */
	blk_finish_plug(&plug);

	if (operation == REQ_OP_READ)
		ring->st_rd_sect += preq.nr_sects;
	else if (operation == REQ_OP_WRITE)
		ring->st_wr_sect += preq.nr_sects;

	return 0;

 fail_flush:
	xen_blkbk_unmap(ring, pending_req->segments,
			pending_req->nr_segs);
 fail_response:
	/* Haven't submitted any bio's yet. */
	make_response(ring, req->u.rw.id, req_operation, BLKIF_RSP_ERROR);
	free_req(ring, pending_req);
	msleep(1); /* back off a bit */
	return -EIO;

 fail_put_bio:
	for (i = 0; i < nbio; i++)
		bio_put(biolist[i]);
	atomic_set(&pending_req->pendcnt, 1);
	__end_block_io_op(pending_req, BLK_STS_RESOURCE);
	msleep(1); /* back off a bit */
	return -EIO;
}

/*
 * Put a response on the ring on how the operation fared.
 */
static void make_response(struct xen_blkif_ring *ring, u64 id,
			  unsigned short op, int st)
{
	struct blkif_response *resp;
	unsigned long     flags;
	union blkif_back_rings *blk_rings;
	int notify;

	spin_lock_irqsave(&ring->blk_ring_lock, flags);
	blk_rings = &ring->blk_rings;
	/* Place on the response ring for the relevant domain. */
	switch (ring->blkif->blk_protocol) {
	case BLKIF_PROTOCOL_NATIVE:
		resp = RING_GET_RESPONSE(&blk_rings->native,
					 blk_rings->native.rsp_prod_pvt);
		break;
	case BLKIF_PROTOCOL_X86_32:
		resp = RING_GET_RESPONSE(&blk_rings->x86_32,
					 blk_rings->x86_32.rsp_prod_pvt);
		break;
	case BLKIF_PROTOCOL_X86_64:
		resp = RING_GET_RESPONSE(&blk_rings->x86_64,
					 blk_rings->x86_64.rsp_prod_pvt);
		break;
	default:
		BUG();
	}

	resp->id        = id;
	resp->operation = op;
	resp->status    = st;

	blk_rings->common.rsp_prod_pvt++;
	RING_PUSH_RESPONSES_AND_CHECK_NOTIFY(&blk_rings->common, notify);
	spin_unlock_irqrestore(&ring->blk_ring_lock, flags);
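	/* Kick the frontend only if it asked for an event on this update. */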
	if (notify)
		notify_remote_via_irq(ring->irq);
}

static int __init xen_blkif_init(void)
{
	int rc = 0;

	if (!xen_domain())
		return -ENODEV;

	if (xen_blkif_max_ring_order > XENBUS_MAX_RING_GRANT_ORDER) {
		pr_info("Invalid max_ring_order (%d), will use default max: %d.\n",
			xen_blkif_max_ring_order, XENBUS_MAX_RING_GRANT_ORDER);
		xen_blkif_max_ring_order = XENBUS_MAX_RING_GRANT_ORDER;
	}

	if (xenblk_max_queues == 0)
		xenblk_max_queues = num_online_cpus();

	rc = xen_blkif_interface_init();
	if (rc)
		goto failed_init;

	rc = xen_blkif_xenbus_init();
	if (rc)
		goto failed_init;

 failed_init:
	return rc;
}

module_init(xen_blkif_init);

static void __exit xen_blkif_fini(void)
{
	xen_blkif_xenbus_fini();
	xen_blkif_interface_fini();
}

module_exit(xen_blkif_fini);

MODULE_LICENSE("Dual BSD/GPL");
MODULE_ALIAS("xen-backend:vbd");