/******************************************************************************
 * grant_table.c
 *
 * Granting foreign access to our memory reservation.
 *
 * Copyright (c) 2005-2006, Christopher Clark
 * Copyright (c) 2004-2005, K A Fraser
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License version 2
 * as published by the Free Software Foundation; or, when distributed
 * separately from the Linux kernel or incorporated into other
 * software packages, subject to the following license:
 *
 * Permission is hereby granted, free of charge, to any person obtaining a copy
 * of this source file (the "Software"), to deal in the Software without
 * restriction, including without limitation the rights to use, copy, modify,
 * merge, publish, distribute, sublicense, and/or sell copies of the Software,
 * and to permit persons to whom the Software is furnished to do so, subject to
 * the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
 * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
 * IN THE SOFTWARE.
 */

#define pr_fmt(fmt) "xen:" KBUILD_MODNAME ": " fmt

#include <linux/memblock.h>
#include <linux/sched.h>
#include <linux/mm.h>
#include <linux/slab.h>
#include <linux/vmalloc.h>
#include <linux/uaccess.h>
#include <linux/io.h>
#include <linux/delay.h>
#include <linux/hardirq.h>
#include <linux/workqueue.h>
#include <linux/ratelimit.h>
#include <linux/moduleparam.h>
#ifdef CONFIG_XEN_GRANT_DMA_ALLOC
#include <linux/dma-mapping.h>
#endif

#include <xen/xen.h>
#include <xen/interface/xen.h>
#include <xen/page.h>
#include <xen/grant_table.h>
#include <xen/interface/memory.h>
#include <xen/hvc-console.h>
#include <xen/swiotlb-xen.h>
#include <xen/balloon.h>
#ifdef CONFIG_X86
#include <asm/xen/cpuid.h>
#endif
#include <xen/mem-reservation.h>
#include <asm/xen/hypercall.h>
#include <asm/xen/interface.h>

#include <asm/pgtable.h>
#include <asm/sync_bitops.h>
/* External tools reserve the first few grant table entries. */
#define NR_RESERVED_ENTRIES 8
#define GNTTAB_LIST_END 0xffffffff

static grant_ref_t **gnttab_list;
static unsigned int nr_grant_frames;
static int gnttab_free_count;
static grant_ref_t gnttab_free_head;
static DEFINE_SPINLOCK(gnttab_list_lock);
struct grant_frames xen_auto_xlat_grant_frames;
static unsigned int xen_gnttab_version;
module_param_named(version, xen_gnttab_version, uint, 0);

static union {
	struct grant_entry_v1 *v1;
	union grant_entry_v2 *v2;
	void *addr;
} gnttab_shared;
/* Function pointers for a particular grant table interface version. */
struct gnttab_ops {
	/*
	 * Version of the grant interface.
	 */
	unsigned int version;
	/*
	 * Grant refs per grant frame.
	 */
	unsigned int grefs_per_grant_frame;
	/*
	 * Map a list of frames for storing grant entries. The frames
	 * parameter carries the grant table frame addresses obtained while
	 * the grant table is being set up, and nr_gframes is the number of
	 * frames to map. Returns GNTST_okay on success; a negative value
	 * indicates failure.
	 */
	int (*map_frames)(xen_pfn_t *frames, unsigned int nr_gframes);
	/*
	 * Release the list of frames mapped by map_frames(), including any
	 * grant status frames.
	 */
	void (*unmap_frames)(void);
	/*
	 * Introduce a valid entry into the grant table, granting the frame
	 * of this entry to a domain for access or transfer. The ref
	 * parameter is the reference of the entry being introduced, domid
	 * is the id of the grantee domain, frame is the page frame to be
	 * granted, and flags is the status to set on the entry.
	 */
	void (*update_entry)(grant_ref_t ref, domid_t domid,
			     unsigned long frame, unsigned flags);
	/*
	 * Stop granting access through a grant entry. The ref parameter is
	 * the reference of the entry whose access is to be ended; readonly
	 * is unused here. If the entry is currently mapped for reading or
	 * writing, return failure (== 0) without tearing down the grant
	 * access. Otherwise, end the access and return success (== 1).
	 */
	int (*end_foreign_access_ref)(grant_ref_t ref, int readonly);
	/*
	 * Stop granting a transfer through a grant entry. The ref parameter
	 * is the reference of the entry whose transfer is to be ended. If
	 * the transfer has not started, reclaim the entry and return
	 * failure (== 0). Otherwise, wait for the transfer to complete and
	 * then return the frame.
	 */
	unsigned long (*end_foreign_transfer_ref)(grant_ref_t ref);
	/*
	 * Query the status of a grant entry. The ref parameter is the
	 * reference of the queried entry; the return value is its status.
	 * The detailed status (reading/writing) can be extracted from the
	 * return value with bit operations.
	 */
	int (*query_foreign_access)(grant_ref_t ref);
};

struct unmap_refs_callback_data {
	struct completion completion;
	int result;
};

static const struct gnttab_ops *gnttab_interface;

/* This reflects the status of grant entries, so it acts as a global value. */
static grant_status_t *grstatus;

static struct gnttab_free_callback *gnttab_free_callback_list;

static int gnttab_expand(unsigned int req_entries);

#define RPP (PAGE_SIZE / sizeof(grant_ref_t))
#define SPP (PAGE_SIZE / sizeof(grant_status_t))

static inline grant_ref_t *__gnttab_entry(grant_ref_t entry)
{
	return &gnttab_list[(entry) / RPP][(entry) % RPP];
}
/* This can be used as an l-value */
#define gnttab_entry(entry) (*__gnttab_entry(entry))
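
/*
 * Illustration (not part of the driver): grant references live in a
 * two-level free list, one page of grant_ref_t per gnttab_list[] slot.
 * Assuming 4 KiB pages and a 4-byte grant_ref_t, RPP is 1024, so a
 * hypothetical reference 5000 resolves as:
 *
 *	gnttab_entry(5000) == gnttab_list[5000 / 1024][5000 % 1024]
 *			   == gnttab_list[4][904]
 *
 * Each slot stores the next free reference, with GNTTAB_LIST_END
 * terminating the chain.
 */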

static int get_free_entries(unsigned count)
{
	unsigned long flags;
	int ref, rc = 0;
	grant_ref_t head;

	spin_lock_irqsave(&gnttab_list_lock, flags);

	if ((gnttab_free_count < count) &&
	    ((rc = gnttab_expand(count - gnttab_free_count)) < 0)) {
		spin_unlock_irqrestore(&gnttab_list_lock, flags);
		return rc;
	}

	ref = head = gnttab_free_head;
	gnttab_free_count -= count;
	while (count-- > 1)
		head = gnttab_entry(head);
	gnttab_free_head = gnttab_entry(head);
	gnttab_entry(head) = GNTTAB_LIST_END;

	spin_unlock_irqrestore(&gnttab_list_lock, flags);

	return ref;
}

static void do_free_callbacks(void)
{
	struct gnttab_free_callback *callback, *next;

	callback = gnttab_free_callback_list;
	gnttab_free_callback_list = NULL;

	while (callback != NULL) {
		next = callback->next;
		if (gnttab_free_count >= callback->count) {
			callback->next = NULL;
			callback->fn(callback->arg);
		} else {
			callback->next = gnttab_free_callback_list;
			gnttab_free_callback_list = callback;
		}
		callback = next;
	}
}

static inline void check_free_callbacks(void)
{
	if (unlikely(gnttab_free_callback_list))
		do_free_callbacks();
}

static void put_free_entry(grant_ref_t ref)
{
	unsigned long flags;

	spin_lock_irqsave(&gnttab_list_lock, flags);
	gnttab_entry(ref) = gnttab_free_head;
	gnttab_free_head = ref;
	gnttab_free_count++;
	check_free_callbacks();
	spin_unlock_irqrestore(&gnttab_list_lock, flags);
}

/*
 * Following applies to gnttab_update_entry_v1 and gnttab_update_entry_v2.
 * Introducing a valid entry into the grant table:
 *  1. Write ent->domid.
 *  2. Write ent->frame:
 *      GTF_permit_access:   Frame to which access is permitted.
 *      GTF_accept_transfer: Pseudo-phys frame slot being filled by new
 *                           frame, or zero if none.
 *  3. Write memory barrier (WMB).
 *  4. Write ent->flags, inc. valid type.
 */
static void gnttab_update_entry_v1(grant_ref_t ref, domid_t domid,
				   unsigned long frame, unsigned flags)
{
	gnttab_shared.v1[ref].domid = domid;
	gnttab_shared.v1[ref].frame = frame;
	wmb();
	gnttab_shared.v1[ref].flags = flags;
}

static void gnttab_update_entry_v2(grant_ref_t ref, domid_t domid,
				   unsigned long frame, unsigned int flags)
{
	gnttab_shared.v2[ref].hdr.domid = domid;
	gnttab_shared.v2[ref].full_page.frame = frame;
	wmb();	/* Hypervisor concurrent accesses. */
	gnttab_shared.v2[ref].hdr.flags = GTF_permit_access | flags;
}

/*
 * Public grant-issuing interface functions
 */
void gnttab_grant_foreign_access_ref(grant_ref_t ref, domid_t domid,
				     unsigned long frame, int readonly)
{
	gnttab_interface->update_entry(ref, domid, frame,
			   GTF_permit_access | (readonly ? GTF_readonly : 0));
}
EXPORT_SYMBOL_GPL(gnttab_grant_foreign_access_ref);

int gnttab_grant_foreign_access(domid_t domid, unsigned long frame,
				int readonly)
{
	int ref;

	ref = get_free_entries(1);
	if (unlikely(ref < 0))
		return -ENOSPC;

	gnttab_grant_foreign_access_ref(ref, domid, frame, readonly);

	return ref;
}
EXPORT_SYMBOL_GPL(gnttab_grant_foreign_access);
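
/*
 * Usage sketch (illustrative only, not part of this driver): a frontend
 * typically grants one of its own frames to the backend domain and
 * revokes the grant once the backend is done with it. otherend_id and
 * vaddr below are hypothetical caller state.
 *
 *	int ref = gnttab_grant_foreign_access(otherend_id,
 *					      virt_to_gfn(vaddr), 0);
 *	if (ref < 0)
 *		return ref;
 *	...advertise ref to the backend, e.g. via xenstore or a ring...
 *	gnttab_end_foreign_access(ref, 0, 0UL);
 */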

static int gnttab_query_foreign_access_v1(grant_ref_t ref)
{
	return gnttab_shared.v1[ref].flags & (GTF_reading|GTF_writing);
}

static int gnttab_query_foreign_access_v2(grant_ref_t ref)
{
	return grstatus[ref] & (GTF_reading|GTF_writing);
}

int gnttab_query_foreign_access(grant_ref_t ref)
{
	return gnttab_interface->query_foreign_access(ref);
}
EXPORT_SYMBOL_GPL(gnttab_query_foreign_access);

static int gnttab_end_foreign_access_ref_v1(grant_ref_t ref, int readonly)
{
	u16 flags, nflags;
	u16 *pflags;

	pflags = &gnttab_shared.v1[ref].flags;
	nflags = *pflags;
	do {
		flags = nflags;
		if (flags & (GTF_reading|GTF_writing))
			return 0;
	} while ((nflags = sync_cmpxchg(pflags, flags, 0)) != flags);

	return 1;
}

static int gnttab_end_foreign_access_ref_v2(grant_ref_t ref, int readonly)
{
	gnttab_shared.v2[ref].hdr.flags = 0;
	mb();	/* Concurrent access by hypervisor. */
	if (grstatus[ref] & (GTF_reading|GTF_writing)) {
		return 0;
	} else {
		/*
		 * The read of grstatus needs to have acquire semantics.
		 * On x86, reads already have that, and we just need to
		 * protect against compiler reorderings.
		 * On other architectures we may need a full barrier.
		 */
#ifdef CONFIG_X86
		barrier();
#else
		mb();
#endif
	}

	return 1;
}

static inline int _gnttab_end_foreign_access_ref(grant_ref_t ref, int readonly)
{
	return gnttab_interface->end_foreign_access_ref(ref, readonly);
}

int gnttab_end_foreign_access_ref(grant_ref_t ref, int readonly)
{
	if (_gnttab_end_foreign_access_ref(ref, readonly))
		return 1;
	pr_warn("WARNING: g.e. %#x still in use!\n", ref);
	return 0;
}
EXPORT_SYMBOL_GPL(gnttab_end_foreign_access_ref);

struct deferred_entry {
	struct list_head list;
	grant_ref_t ref;
	bool ro;
	uint16_t warn_delay;
	struct page *page;
};

static LIST_HEAD(deferred_list);
static void gnttab_handle_deferred(struct timer_list *);
static DEFINE_TIMER(deferred_timer, gnttab_handle_deferred);

static void gnttab_handle_deferred(struct timer_list *unused)
{
	unsigned int nr = 10;
	struct deferred_entry *first = NULL;
	unsigned long flags;

	spin_lock_irqsave(&gnttab_list_lock, flags);
	while (nr--) {
		struct deferred_entry *entry
			= list_first_entry(&deferred_list,
					   struct deferred_entry, list);

		if (entry == first)
			break;
		list_del(&entry->list);
		spin_unlock_irqrestore(&gnttab_list_lock, flags);
		if (_gnttab_end_foreign_access_ref(entry->ref, entry->ro)) {
			put_free_entry(entry->ref);
			if (entry->page) {
				pr_debug("freeing g.e. %#x (pfn %#lx)\n",
					 entry->ref, page_to_pfn(entry->page));
				put_page(entry->page);
			} else
				pr_info("freeing g.e. %#x\n", entry->ref);
			kfree(entry);
			entry = NULL;
		} else {
			if (!--entry->warn_delay)
				pr_info("g.e. %#x still pending\n", entry->ref);
			if (!first)
				first = entry;
		}
		spin_lock_irqsave(&gnttab_list_lock, flags);
		if (entry)
			list_add_tail(&entry->list, &deferred_list);
		else if (list_empty(&deferred_list))
			break;
	}
	if (!list_empty(&deferred_list) && !timer_pending(&deferred_timer)) {
		deferred_timer.expires = jiffies + HZ;
		add_timer(&deferred_timer);
	}
	spin_unlock_irqrestore(&gnttab_list_lock, flags);
}

static void gnttab_add_deferred(grant_ref_t ref, bool readonly,
				struct page *page)
{
	struct deferred_entry *entry = kmalloc(sizeof(*entry), GFP_ATOMIC);
	const char *what = KERN_WARNING "leaking";

	if (entry) {
		unsigned long flags;

		entry->ref = ref;
		entry->ro = readonly;
		entry->page = page;
		entry->warn_delay = 60;
		spin_lock_irqsave(&gnttab_list_lock, flags);
		list_add_tail(&entry->list, &deferred_list);
		if (!timer_pending(&deferred_timer)) {
			deferred_timer.expires = jiffies + HZ;
			add_timer(&deferred_timer);
		}
		spin_unlock_irqrestore(&gnttab_list_lock, flags);
		what = KERN_DEBUG "deferring";
	}
	printk("%s g.e. %#x (pfn %#lx)\n",
	       what, ref, page ? page_to_pfn(page) : -1);
}

void gnttab_end_foreign_access(grant_ref_t ref, int readonly,
			       unsigned long page)
{
	if (gnttab_end_foreign_access_ref(ref, readonly)) {
		put_free_entry(ref);
		if (page != 0)
			put_page(virt_to_page(page));
	} else
		gnttab_add_deferred(ref, readonly,
				    page ? virt_to_page(page) : NULL);
}
EXPORT_SYMBOL_GPL(gnttab_end_foreign_access);

int gnttab_grant_foreign_transfer(domid_t domid, unsigned long pfn)
{
	int ref;

	ref = get_free_entries(1);
	if (unlikely(ref < 0))
		return -ENOSPC;
	gnttab_grant_foreign_transfer_ref(ref, domid, pfn);

	return ref;
}
EXPORT_SYMBOL_GPL(gnttab_grant_foreign_transfer);

void gnttab_grant_foreign_transfer_ref(grant_ref_t ref, domid_t domid,
				       unsigned long pfn)
{
	gnttab_interface->update_entry(ref, domid, pfn, GTF_accept_transfer);
}
EXPORT_SYMBOL_GPL(gnttab_grant_foreign_transfer_ref);

static unsigned long gnttab_end_foreign_transfer_ref_v1(grant_ref_t ref)
{
	unsigned long frame;
	u16           flags;
	u16          *pflags;

	pflags = &gnttab_shared.v1[ref].flags;

	/*
	 * If the transfer has not even started yet, try to reclaim the grant
	 * reference and return failure (== 0).
	 */
	while (!((flags = *pflags) & GTF_transfer_committed)) {
		if (sync_cmpxchg(pflags, flags, 0) == flags)
			return 0;
		cpu_relax();
	}

	/* If a transfer is in progress then wait until it is completed. */
	while (!(flags & GTF_transfer_completed)) {
		flags = *pflags;
		cpu_relax();
	}

	rmb();	/* Read the frame number /after/ reading completion status. */
	frame = gnttab_shared.v1[ref].frame;
	BUG_ON(frame == 0);

	return frame;
}

static unsigned long gnttab_end_foreign_transfer_ref_v2(grant_ref_t ref)
{
	unsigned long frame;
	u16           flags;
	u16          *pflags;

	pflags = &gnttab_shared.v2[ref].hdr.flags;

	/*
	 * If the transfer has not even started yet, try to reclaim the grant
	 * reference and return failure (== 0).
	 */
	while (!((flags = *pflags) & GTF_transfer_committed)) {
		if (sync_cmpxchg(pflags, flags, 0) == flags)
			return 0;
		cpu_relax();
	}

	/* If a transfer is in progress then wait until it is completed. */
	while (!(flags & GTF_transfer_completed)) {
		flags = *pflags;
		cpu_relax();
	}

	rmb();  /* Read the frame number /after/ reading completion status. */
	frame = gnttab_shared.v2[ref].full_page.frame;
	BUG_ON(frame == 0);

	return frame;
}

unsigned long gnttab_end_foreign_transfer_ref(grant_ref_t ref)
{
	return gnttab_interface->end_foreign_transfer_ref(ref);
}
EXPORT_SYMBOL_GPL(gnttab_end_foreign_transfer_ref);

unsigned long gnttab_end_foreign_transfer(grant_ref_t ref)
{
	unsigned long frame = gnttab_end_foreign_transfer_ref(ref);

	put_free_entry(ref);
	return frame;
}
EXPORT_SYMBOL_GPL(gnttab_end_foreign_transfer);

void gnttab_free_grant_reference(grant_ref_t ref)
{
	put_free_entry(ref);
}
EXPORT_SYMBOL_GPL(gnttab_free_grant_reference);

void gnttab_free_grant_references(grant_ref_t head)
{
	grant_ref_t ref;
	unsigned long flags;
	int count = 1;

	if (head == GNTTAB_LIST_END)
		return;
	spin_lock_irqsave(&gnttab_list_lock, flags);
	ref = head;
	while (gnttab_entry(ref) != GNTTAB_LIST_END) {
		ref = gnttab_entry(ref);
		count++;
	}
	gnttab_entry(ref) = gnttab_free_head;
	gnttab_free_head = head;
	gnttab_free_count += count;
	check_free_callbacks();
	spin_unlock_irqrestore(&gnttab_list_lock, flags);
}
EXPORT_SYMBOL_GPL(gnttab_free_grant_references);

int gnttab_alloc_grant_references(u16 count, grant_ref_t *head)
{
	int h = get_free_entries(count);

	if (h < 0)
		return -ENOSPC;

	*head = h;

	return 0;
}
EXPORT_SYMBOL_GPL(gnttab_alloc_grant_references);

int gnttab_empty_grant_references(const grant_ref_t *private_head)
{
	return (*private_head == GNTTAB_LIST_END);
}
EXPORT_SYMBOL_GPL(gnttab_empty_grant_references);

int gnttab_claim_grant_reference(grant_ref_t *private_head)
{
	grant_ref_t g = *private_head;

	if (unlikely(g == GNTTAB_LIST_END))
		return -ENOSPC;
	*private_head = gnttab_entry(g);
	return g;
}
EXPORT_SYMBOL_GPL(gnttab_claim_grant_reference);

void gnttab_release_grant_reference(grant_ref_t *private_head,
				    grant_ref_t release)
{
	gnttab_entry(release) = *private_head;
	*private_head = release;
}
EXPORT_SYMBOL_GPL(gnttab_release_grant_reference);
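
/*
 * Usage sketch (illustrative only): drivers that need many references
 * at once usually reserve a private batch up front and hand references
 * out from it. NR_REFS, otherend_id and gfn are hypothetical.
 *
 *	grant_ref_t gref_head;
 *	int ref;
 *
 *	if (gnttab_alloc_grant_references(NR_REFS, &gref_head) < 0)
 *		return -ENOSPC;
 *	ref = gnttab_claim_grant_reference(&gref_head);
 *	if (ref < 0)
 *		...the private list is exhausted...
 *	gnttab_grant_foreign_access_ref(ref, otherend_id, gfn, 0);
 *	...
 *	gnttab_release_grant_reference(&gref_head, ref);
 *	gnttab_free_grant_references(gref_head);
 */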

void gnttab_request_free_callback(struct gnttab_free_callback *callback,
				  void (*fn)(void *), void *arg, u16 count)
{
	unsigned long flags;
	struct gnttab_free_callback *cb;

	spin_lock_irqsave(&gnttab_list_lock, flags);

	/* Check if the callback is already on the list */
	cb = gnttab_free_callback_list;
	while (cb) {
		if (cb == callback)
			goto out;
		cb = cb->next;
	}

	callback->fn = fn;
	callback->arg = arg;
	callback->count = count;
	callback->next = gnttab_free_callback_list;
	gnttab_free_callback_list = callback;
	check_free_callbacks();
out:
	spin_unlock_irqrestore(&gnttab_list_lock, flags);
}
EXPORT_SYMBOL_GPL(gnttab_request_free_callback);

void gnttab_cancel_free_callback(struct gnttab_free_callback *callback)
{
	struct gnttab_free_callback **pcb;
	unsigned long flags;

	spin_lock_irqsave(&gnttab_list_lock, flags);
	for (pcb = &gnttab_free_callback_list; *pcb; pcb = &(*pcb)->next) {
		if (*pcb == callback) {
			*pcb = callback->next;
			break;
		}
	}
	spin_unlock_irqrestore(&gnttab_list_lock, flags);
}
EXPORT_SYMBOL_GPL(gnttab_cancel_free_callback);
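
/*
 * Usage sketch (illustrative only): when get_free_entries() fails, a
 * driver can ask to be called back once enough references are free.
 * The callback struct must stay alive until it fires or is cancelled.
 * restart_fn, dev and NR_REFS are hypothetical.
 *
 *	static struct gnttab_free_callback cb;
 *
 *	gnttab_request_free_callback(&cb, restart_fn, dev, NR_REFS);
 *	...
 *	gnttab_cancel_free_callback(&cb);    e.g. on device teardown
 */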

static unsigned int gnttab_frames(unsigned int frames, unsigned int align)
{
	return (frames * gnttab_interface->grefs_per_grant_frame + align - 1) /
	       align;
}

static int grow_gnttab_list(unsigned int more_frames)
{
	unsigned int new_nr_grant_frames, extra_entries, i;
	unsigned int nr_glist_frames, new_nr_glist_frames;
	unsigned int grefs_per_frame;

	BUG_ON(gnttab_interface == NULL);
	grefs_per_frame = gnttab_interface->grefs_per_grant_frame;

	new_nr_grant_frames = nr_grant_frames + more_frames;
	extra_entries = more_frames * grefs_per_frame;

	nr_glist_frames = gnttab_frames(nr_grant_frames, RPP);
	new_nr_glist_frames = gnttab_frames(new_nr_grant_frames, RPP);
	for (i = nr_glist_frames; i < new_nr_glist_frames; i++) {
		gnttab_list[i] = (grant_ref_t *)__get_free_page(GFP_ATOMIC);
		if (!gnttab_list[i])
			goto grow_nomem;
	}

	for (i = grefs_per_frame * nr_grant_frames;
	     i < grefs_per_frame * new_nr_grant_frames - 1; i++)
		gnttab_entry(i) = i + 1;

	gnttab_entry(i) = gnttab_free_head;
	gnttab_free_head = grefs_per_frame * nr_grant_frames;
	gnttab_free_count += extra_entries;

	nr_grant_frames = new_nr_grant_frames;

	check_free_callbacks();

	return 0;

grow_nomem:
	while (i-- > nr_glist_frames)
		free_page((unsigned long) gnttab_list[i]);
	return -ENOMEM;
}

static unsigned int __max_nr_grant_frames(void)
{
	struct gnttab_query_size query;
	int rc;

	query.dom = DOMID_SELF;

	rc = HYPERVISOR_grant_table_op(GNTTABOP_query_size, &query, 1);
	if ((rc < 0) || (query.status != GNTST_okay))
		return 4; /* Legacy max supported number of frames */

	return query.max_nr_frames;
}

unsigned int gnttab_max_grant_frames(void)
{
	unsigned int xen_max = __max_nr_grant_frames();
	static unsigned int boot_max_nr_grant_frames;

	/* First time, initialize it properly. */
	if (!boot_max_nr_grant_frames)
		boot_max_nr_grant_frames = __max_nr_grant_frames();

	if (xen_max > boot_max_nr_grant_frames)
		return boot_max_nr_grant_frames;
	return xen_max;
}
EXPORT_SYMBOL_GPL(gnttab_max_grant_frames);

int gnttab_setup_auto_xlat_frames(phys_addr_t addr)
{
	xen_pfn_t *pfn;
	unsigned int max_nr_gframes = __max_nr_grant_frames();
	unsigned int i;
	void *vaddr;

	if (xen_auto_xlat_grant_frames.count)
		return -EINVAL;

	vaddr = xen_remap(addr, XEN_PAGE_SIZE * max_nr_gframes);
	if (vaddr == NULL) {
		pr_warn("Failed to ioremap gnttab shared frames (addr=%pa)!\n",
			&addr);
		return -ENOMEM;
	}
	pfn = kcalloc(max_nr_gframes, sizeof(pfn[0]), GFP_KERNEL);
	if (!pfn) {
		xen_unmap(vaddr);
		return -ENOMEM;
	}
	for (i = 0; i < max_nr_gframes; i++)
		pfn[i] = XEN_PFN_DOWN(addr) + i;

	xen_auto_xlat_grant_frames.vaddr = vaddr;
	xen_auto_xlat_grant_frames.pfn = pfn;
	xen_auto_xlat_grant_frames.count = max_nr_gframes;

	return 0;
}
EXPORT_SYMBOL_GPL(gnttab_setup_auto_xlat_frames);

void gnttab_free_auto_xlat_frames(void)
{
	if (!xen_auto_xlat_grant_frames.count)
		return;
	kfree(xen_auto_xlat_grant_frames.pfn);
	xen_unmap(xen_auto_xlat_grant_frames.vaddr);

	xen_auto_xlat_grant_frames.pfn = NULL;
	xen_auto_xlat_grant_frames.count = 0;
	xen_auto_xlat_grant_frames.vaddr = NULL;
}
EXPORT_SYMBOL_GPL(gnttab_free_auto_xlat_frames);

int gnttab_pages_set_private(int nr_pages, struct page **pages)
{
	int i;

	for (i = 0; i < nr_pages; i++) {
#if BITS_PER_LONG < 64
		struct xen_page_foreign *foreign;

		foreign = kzalloc(sizeof(*foreign), GFP_KERNEL);
		if (!foreign)
			return -ENOMEM;

		set_page_private(pages[i], (unsigned long)foreign);
#endif
		SetPagePrivate(pages[i]);
	}

	return 0;
}
EXPORT_SYMBOL_GPL(gnttab_pages_set_private);

/**
 * gnttab_alloc_pages - allocate pages suitable for mapping foreign grants into
 * @nr_pages: number of pages to allocate
 * @pages: returns the pages
 */
int gnttab_alloc_pages(int nr_pages, struct page **pages)
{
	int ret;

	ret = alloc_xenballooned_pages(nr_pages, pages);
	if (ret < 0)
		return ret;

	ret = gnttab_pages_set_private(nr_pages, pages);
	if (ret < 0)
		gnttab_free_pages(nr_pages, pages);

	return ret;
}
EXPORT_SYMBOL_GPL(gnttab_alloc_pages);
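
/*
 * Usage sketch (illustrative only): these pages are ballooned out, so
 * they have no backing frame of their own and are the usual target for
 * mapping another domain's grants, e.g. via gnttab_map_refs(). Error
 * handling is elided and N is hypothetical.
 *
 *	struct page *pages[N];
 *
 *	if (gnttab_alloc_pages(N, pages))
 *		return -ENOMEM;
 *	...fill gnttab_map_grant_ref ops and call gnttab_map_refs()...
 *	...later, unmap with gnttab_unmap_refs()...
 *	gnttab_free_pages(N, pages);
 */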

void gnttab_pages_clear_private(int nr_pages, struct page **pages)
{
	int i;

	for (i = 0; i < nr_pages; i++) {
		if (PagePrivate(pages[i])) {
#if BITS_PER_LONG < 64
			kfree((void *)page_private(pages[i]));
#endif
			ClearPagePrivate(pages[i]);
		}
	}
}
EXPORT_SYMBOL_GPL(gnttab_pages_clear_private);

/**
 * gnttab_free_pages - free pages allocated by gnttab_alloc_pages()
 * @nr_pages: number of pages to free
 * @pages: the pages
 */
void gnttab_free_pages(int nr_pages, struct page **pages)
{
	gnttab_pages_clear_private(nr_pages, pages);
	free_xenballooned_pages(nr_pages, pages);
}
EXPORT_SYMBOL_GPL(gnttab_free_pages);

#ifdef CONFIG_XEN_GRANT_DMA_ALLOC
/**
 * gnttab_dma_alloc_pages - allocate DMAable pages suitable for mapping foreign grants into
 * @args: arguments to the function
 */
int gnttab_dma_alloc_pages(struct gnttab_dma_alloc_args *args)
{
	unsigned long pfn, start_pfn;
	size_t size;
	int i, ret;

	size = args->nr_pages << PAGE_SHIFT;
	if (args->coherent)
		args->vaddr = dma_alloc_coherent(args->dev, size,
						 &args->dev_bus_addr,
						 GFP_KERNEL | __GFP_NOWARN);
	else
		args->vaddr = dma_alloc_wc(args->dev, size,
					   &args->dev_bus_addr,
					   GFP_KERNEL | __GFP_NOWARN);
	if (!args->vaddr) {
		pr_debug("Failed to allocate DMA buffer of size %zu\n", size);
		return -ENOMEM;
	}

	start_pfn = __phys_to_pfn(args->dev_bus_addr);
	for (pfn = start_pfn, i = 0; pfn < start_pfn + args->nr_pages;
			pfn++, i++) {
		struct page *page = pfn_to_page(pfn);

		args->pages[i] = page;
		args->frames[i] = xen_page_to_gfn(page);
		xenmem_reservation_scrub_page(page);
	}

	xenmem_reservation_va_mapping_reset(args->nr_pages, args->pages);

	ret = xenmem_reservation_decrease(args->nr_pages, args->frames);
	if (ret != args->nr_pages) {
		pr_debug("Failed to decrease reservation for DMA buffer\n");
		ret = -EFAULT;
		goto fail;
	}

	ret = gnttab_pages_set_private(args->nr_pages, args->pages);
	if (ret < 0)
		goto fail;

	return 0;

fail:
	gnttab_dma_free_pages(args);
	return ret;
}
EXPORT_SYMBOL_GPL(gnttab_dma_alloc_pages);

/**
 * gnttab_dma_free_pages - free DMAable pages
 * @args: arguments to the function
 */
int gnttab_dma_free_pages(struct gnttab_dma_alloc_args *args)
{
	size_t size;
	int i, ret;

	gnttab_pages_clear_private(args->nr_pages, args->pages);

	for (i = 0; i < args->nr_pages; i++)
		args->frames[i] = page_to_xen_pfn(args->pages[i]);

	ret = xenmem_reservation_increase(args->nr_pages, args->frames);
	if (ret != args->nr_pages) {
		pr_debug("Failed to increase reservation for DMA buffer\n");
		ret = -EFAULT;
	} else {
		ret = 0;
	}

	xenmem_reservation_va_mapping_update(args->nr_pages, args->pages,
					     args->frames);

	size = args->nr_pages << PAGE_SHIFT;
	if (args->coherent)
		dma_free_coherent(args->dev, size,
				  args->vaddr, args->dev_bus_addr);
	else
		dma_free_wc(args->dev, size,
			    args->vaddr, args->dev_bus_addr);
	return ret;
}
EXPORT_SYMBOL_GPL(gnttab_dma_free_pages);
#endif

/* Handling of paged out grant targets (GNTST_eagain) */
#define MAX_DELAY 256
static inline void
gnttab_retry_eagain_gop(unsigned int cmd, void *gop, int16_t *status,
						const char *func)
{
	unsigned delay = 1;

	do {
		BUG_ON(HYPERVISOR_grant_table_op(cmd, gop, 1));
		if (*status == GNTST_eagain)
			msleep(delay++);
	} while ((*status == GNTST_eagain) && (delay < MAX_DELAY));

	if (delay >= MAX_DELAY) {
		pr_err("%s: %s eagain grant\n", func, current->comm);
		*status = GNTST_bad_page;
	}
}

void gnttab_batch_map(struct gnttab_map_grant_ref *batch, unsigned count)
{
	struct gnttab_map_grant_ref *op;

	if (HYPERVISOR_grant_table_op(GNTTABOP_map_grant_ref, batch, count))
		BUG();
	for (op = batch; op < batch + count; op++)
		if (op->status == GNTST_eagain)
			gnttab_retry_eagain_gop(GNTTABOP_map_grant_ref, op,
						&op->status, __func__);
}
EXPORT_SYMBOL_GPL(gnttab_batch_map);

void gnttab_batch_copy(struct gnttab_copy *batch, unsigned count)
{
	struct gnttab_copy *op;

	if (HYPERVISOR_grant_table_op(GNTTABOP_copy, batch, count))
		BUG();
	for (op = batch; op < batch + count; op++)
		if (op->status == GNTST_eagain)
			gnttab_retry_eagain_gop(GNTTABOP_copy, op,
						&op->status, __func__);
}
EXPORT_SYMBOL_GPL(gnttab_batch_copy);
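
/*
 * Usage sketch (illustrative only): have the hypervisor copy data out
 * of a grant the other domain gave us into a local buffer. gref,
 * otherend_id, buf and len are hypothetical; see struct gnttab_copy in
 * include/xen/interface/grant_table.h for the exact field layout, and
 * check status per op.
 *
 *	struct gnttab_copy op = {
 *		.source.u.ref	= gref,
 *		.source.domid	= otherend_id,
 *		.dest.u.gmfn	= virt_to_gfn(buf),
 *		.dest.domid	= DOMID_SELF,
 *		.len		= len,
 *		.flags		= GNTCOPY_source_gref,
 *	};
 *
 *	gnttab_batch_copy(&op, 1);
 *	if (op.status != GNTST_okay)
 *		...handle error...
 */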

void gnttab_foreach_grant_in_range(struct page *page,
				   unsigned int offset,
				   unsigned int len,
				   xen_grant_fn_t fn,
				   void *data)
{
	unsigned int goffset;
	unsigned int glen;
	unsigned long xen_pfn;

	len = min_t(unsigned int, PAGE_SIZE - offset, len);
	goffset = xen_offset_in_page(offset);

	xen_pfn = page_to_xen_pfn(page) + XEN_PFN_DOWN(offset);

	while (len) {
		glen = min_t(unsigned int, XEN_PAGE_SIZE - goffset, len);
		fn(pfn_to_gfn(xen_pfn), goffset, glen, data);

		goffset = 0;
		xen_pfn++;
		len -= glen;
	}
}
EXPORT_SYMBOL_GPL(gnttab_foreach_grant_in_range);
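
/*
 * Usage sketch (illustrative only): the callback is invoked once per
 * Xen page (XEN_PAGE_SIZE granule) covered by the range, which is what
 * lets kernels with pages larger than 4 KiB talk to 4 KiB-grant
 * backends. grant_one() and its context struct are hypothetical.
 *
 *	static void grant_one(unsigned long gfn, unsigned int offset,
 *			      unsigned int len, void *data)
 *	{
 *		struct my_setup *s = data;
 *
 *		...claim a ref, call gnttab_grant_foreign_access_ref()
 *		   with this gfn, then record ref/offset/len in a ring
 *		   request...
 *	}
 *
 *	gnttab_foreach_grant_in_range(page, offset, len, grant_one, &setup);
 */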

void gnttab_foreach_grant(struct page **pages,
			  unsigned int nr_grefs,
			  xen_grant_fn_t fn,
			  void *data)
{
	unsigned int goffset = 0;
	unsigned long xen_pfn = 0;
	unsigned int i;

	for (i = 0; i < nr_grefs; i++) {
		if ((i % XEN_PFN_PER_PAGE) == 0) {
			xen_pfn = page_to_xen_pfn(pages[i / XEN_PFN_PER_PAGE]);
			goffset = 0;
		}

		fn(pfn_to_gfn(xen_pfn), goffset, XEN_PAGE_SIZE, data);

		goffset += XEN_PAGE_SIZE;
		xen_pfn++;
	}
}

int gnttab_map_refs(struct gnttab_map_grant_ref *map_ops,
		    struct gnttab_map_grant_ref *kmap_ops,
		    struct page **pages, unsigned int count)
{
	int i, ret;

	ret = HYPERVISOR_grant_table_op(GNTTABOP_map_grant_ref, map_ops, count);
	if (ret)
		return ret;

	for (i = 0; i < count; i++) {
		switch (map_ops[i].status) {
		case GNTST_okay:
		{
			struct xen_page_foreign *foreign;

			SetPageForeign(pages[i]);
			foreign = xen_page_foreign(pages[i]);
			foreign->domid = map_ops[i].dom;
			foreign->gref = map_ops[i].ref;
			break;
		}

		case GNTST_no_device_space:
			pr_warn_ratelimited("maptrack limit reached, can't map all guest pages\n");
			break;

		case GNTST_eagain:
			/* Retry eagain maps */
			gnttab_retry_eagain_gop(GNTTABOP_map_grant_ref,
						map_ops + i,
						&map_ops[i].status, __func__);
			/* Test status in next loop iteration. */
			i--;
			break;

		default:
			break;
		}
	}

	return set_foreign_p2m_mapping(map_ops, kmap_ops, pages, count);
}
EXPORT_SYMBOL_GPL(gnttab_map_refs);

int gnttab_unmap_refs(struct gnttab_unmap_grant_ref *unmap_ops,
		      struct gnttab_unmap_grant_ref *kunmap_ops,
		      struct page **pages, unsigned int count)
{
	unsigned int i;
	int ret;

	ret = HYPERVISOR_grant_table_op(GNTTABOP_unmap_grant_ref, unmap_ops, count);
	if (ret)
		return ret;

	for (i = 0; i < count; i++)
		ClearPageForeign(pages[i]);

	return clear_foreign_p2m_mapping(unmap_ops, kunmap_ops, pages, count);
}
EXPORT_SYMBOL_GPL(gnttab_unmap_refs);

#define GNTTAB_UNMAP_REFS_DELAY 5

static void __gnttab_unmap_refs_async(struct gntab_unmap_queue_data *item);

static void gnttab_unmap_work(struct work_struct *work)
{
	struct gntab_unmap_queue_data
		*unmap_data = container_of(work,
					   struct gntab_unmap_queue_data,
					   gnttab_work.work);
	if (unmap_data->age != UINT_MAX)
		unmap_data->age++;
	__gnttab_unmap_refs_async(unmap_data);
}

static void __gnttab_unmap_refs_async(struct gntab_unmap_queue_data *item)
{
	int ret;
	int pc;

	for (pc = 0; pc < item->count; pc++) {
		if (page_count(item->pages[pc]) > 1) {
			unsigned long delay = GNTTAB_UNMAP_REFS_DELAY * (item->age + 1);

			schedule_delayed_work(&item->gnttab_work,
					      msecs_to_jiffies(delay));
			return;
		}
	}

	ret = gnttab_unmap_refs(item->unmap_ops, item->kunmap_ops,
				item->pages, item->count);
	item->done(ret, item);
}

void gnttab_unmap_refs_async(struct gntab_unmap_queue_data *item)
{
	INIT_DELAYED_WORK(&item->gnttab_work, gnttab_unmap_work);
	item->age = 0;

	__gnttab_unmap_refs_async(item);
}
EXPORT_SYMBOL_GPL(gnttab_unmap_refs_async);

static void unmap_refs_callback(int result,
		struct gntab_unmap_queue_data *data)
{
	struct unmap_refs_callback_data *d = data->data;

	d->result = result;
	complete(&d->completion);
}

int gnttab_unmap_refs_sync(struct gntab_unmap_queue_data *item)
{
	struct unmap_refs_callback_data data;

	init_completion(&data.completion);
	item->data = &data;
	item->done = &unmap_refs_callback;
	gnttab_unmap_refs_async(item);
	wait_for_completion(&data.completion);

	return data.result;
}
EXPORT_SYMBOL_GPL(gnttab_unmap_refs_sync);
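
/*
 * Usage sketch (illustrative only): the async path keeps retrying while
 * somebody still holds extra references on the pages, so callers that
 * must block simply use the _sync wrapper. The ops arrays, pages and
 * count come from an earlier gnttab_map_refs() call; the names below
 * are hypothetical.
 *
 *	struct gntab_unmap_queue_data unmap_data;
 *
 *	unmap_data.unmap_ops = unmap_ops;
 *	unmap_data.kunmap_ops = NULL;
 *	unmap_data.pages = pages;
 *	unmap_data.count = count;
 *	err = gnttab_unmap_refs_sync(&unmap_data);
 */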

static unsigned int nr_status_frames(unsigned int nr_grant_frames)
{
	BUG_ON(gnttab_interface == NULL);
	return gnttab_frames(nr_grant_frames, SPP);
}

static int gnttab_map_frames_v1(xen_pfn_t *frames, unsigned int nr_gframes)
{
	int rc;

	rc = arch_gnttab_map_shared(frames, nr_gframes,
				    gnttab_max_grant_frames(),
				    &gnttab_shared.addr);
	BUG_ON(rc);

	return 0;
}

static void gnttab_unmap_frames_v1(void)
{
	arch_gnttab_unmap(gnttab_shared.addr, nr_grant_frames);
}

static int gnttab_map_frames_v2(xen_pfn_t *frames, unsigned int nr_gframes)
{
	uint64_t *sframes;
	unsigned int nr_sframes;
	struct gnttab_get_status_frames getframes;
	int rc;

	nr_sframes = nr_status_frames(nr_gframes);

	/* No need for kzalloc as the array is fully initialized by the
	 * GNTTABOP_get_status_frames hypercall below.
	 */
	sframes = kmalloc_array(nr_sframes, sizeof(uint64_t), GFP_ATOMIC);
	if (!sframes)
		return -ENOMEM;

	getframes.dom        = DOMID_SELF;
	getframes.nr_frames  = nr_sframes;
	set_xen_guest_handle(getframes.frame_list, sframes);

	rc = HYPERVISOR_grant_table_op(GNTTABOP_get_status_frames,
				       &getframes, 1);
	if (rc == -ENOSYS) {
		kfree(sframes);
		return -ENOSYS;
	}

	BUG_ON(rc || getframes.status);

	rc = arch_gnttab_map_status(sframes, nr_sframes,
				    nr_status_frames(gnttab_max_grant_frames()),
				    &grstatus);
	BUG_ON(rc);
	kfree(sframes);

	rc = arch_gnttab_map_shared(frames, nr_gframes,
				    gnttab_max_grant_frames(),
				    &gnttab_shared.addr);
	BUG_ON(rc);

	return 0;
}

static void gnttab_unmap_frames_v2(void)
{
	arch_gnttab_unmap(gnttab_shared.addr, nr_grant_frames);
	arch_gnttab_unmap(grstatus, nr_status_frames(nr_grant_frames));
}

static int gnttab_map(unsigned int start_idx, unsigned int end_idx)
{
	struct gnttab_setup_table setup;
	xen_pfn_t *frames;
	unsigned int nr_gframes = end_idx + 1;
	int rc;

	if (xen_feature(XENFEAT_auto_translated_physmap)) {
		struct xen_add_to_physmap xatp;
		unsigned int i = end_idx;

		rc = 0;
		BUG_ON(xen_auto_xlat_grant_frames.count < nr_gframes);
		/*
		 * Loop backwards, so that the first hypercall has the largest
		 * index, ensuring that the table will grow only once.
		 */
		do {
			xatp.domid = DOMID_SELF;
			xatp.idx = i;
			xatp.space = XENMAPSPACE_grant_table;
			xatp.gpfn = xen_auto_xlat_grant_frames.pfn[i];
			rc = HYPERVISOR_memory_op(XENMEM_add_to_physmap, &xatp);
			if (rc != 0) {
				pr_warn("grant table add_to_physmap failed, err=%d\n",
					rc);
				break;
			}
		} while (i-- > start_idx);

		return rc;
	}

	/* No need for kzalloc as the array is fully initialized by the
	 * GNTTABOP_setup_table hypercall below.
	 */
	frames = kmalloc_array(nr_gframes, sizeof(unsigned long), GFP_ATOMIC);
	if (!frames)
		return -ENOMEM;

	setup.dom        = DOMID_SELF;
	setup.nr_frames  = nr_gframes;
	set_xen_guest_handle(setup.frame_list, frames);

	rc = HYPERVISOR_grant_table_op(GNTTABOP_setup_table, &setup, 1);
	if (rc == -ENOSYS) {
		kfree(frames);
		return -ENOSYS;
	}

	BUG_ON(rc || setup.status);

	rc = gnttab_interface->map_frames(frames, nr_gframes);

	kfree(frames);

	return rc;
}

static const struct gnttab_ops gnttab_v1_ops = {
	.version			= 1,
	.grefs_per_grant_frame		= XEN_PAGE_SIZE /
					  sizeof(struct grant_entry_v1),
	.map_frames			= gnttab_map_frames_v1,
	.unmap_frames			= gnttab_unmap_frames_v1,
	.update_entry			= gnttab_update_entry_v1,
	.end_foreign_access_ref		= gnttab_end_foreign_access_ref_v1,
	.end_foreign_transfer_ref	= gnttab_end_foreign_transfer_ref_v1,
	.query_foreign_access		= gnttab_query_foreign_access_v1,
};

static const struct gnttab_ops gnttab_v2_ops = {
	.version			= 2,
	.grefs_per_grant_frame		= XEN_PAGE_SIZE /
					  sizeof(union grant_entry_v2),
	.map_frames			= gnttab_map_frames_v2,
	.unmap_frames			= gnttab_unmap_frames_v2,
	.update_entry			= gnttab_update_entry_v2,
	.end_foreign_access_ref		= gnttab_end_foreign_access_ref_v2,
	.end_foreign_transfer_ref	= gnttab_end_foreign_transfer_ref_v2,
	.query_foreign_access		= gnttab_query_foreign_access_v2,
};

static bool gnttab_need_v2(void)
{
#ifdef CONFIG_X86
	uint32_t base, width;

	if (xen_pv_domain()) {
		base = xen_cpuid_base();
		if (cpuid_eax(base) < 5)
			return false;	/* Information not available, use V1. */
		width = cpuid_ebx(base + 5) &
			XEN_CPUID_MACHINE_ADDRESS_WIDTH_MASK;
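		/*
		 * V1 entries hold a 32-bit frame number, so with 4 KiB pages
		 * they can only address 2^(32 + PAGE_SHIFT) = 16 TiB of
		 * machine memory; above that the V2 layout is required.
		 */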
		return width > 32 + PAGE_SHIFT;
	}
#endif
	return !!(max_possible_pfn >> 32);
}

static void gnttab_request_version(void)
{
	long rc;
	struct gnttab_set_version gsv;

	if (gnttab_need_v2())
		gsv.version = 2;
	else
		gsv.version = 1;

	/* Boot parameter overrides automatic selection. */
	if (xen_gnttab_version >= 1 && xen_gnttab_version <= 2)
		gsv.version = xen_gnttab_version;

	rc = HYPERVISOR_grant_table_op(GNTTABOP_set_version, &gsv, 1);
	if (rc == 0 && gsv.version == 2)
		gnttab_interface = &gnttab_v2_ops;
	else
		gnttab_interface = &gnttab_v1_ops;
	pr_info("Grant tables using version %d layout\n",
		gnttab_interface->version);
}

static int gnttab_setup(void)
{
	unsigned int max_nr_gframes;

	max_nr_gframes = gnttab_max_grant_frames();
	if (max_nr_gframes < nr_grant_frames)
		return -ENOSYS;

	if (xen_feature(XENFEAT_auto_translated_physmap) &&
	    gnttab_shared.addr == NULL) {
		gnttab_shared.addr = xen_auto_xlat_grant_frames.vaddr;
		if (gnttab_shared.addr == NULL) {
			pr_warn("gnttab shared frames are not mapped!\n");
			return -ENOMEM;
		}
	}
	return gnttab_map(0, nr_grant_frames - 1);
}

int gnttab_resume(void)
{
	gnttab_request_version();
	return gnttab_setup();
}

int gnttab_suspend(void)
{
	if (!xen_feature(XENFEAT_auto_translated_physmap))
		gnttab_interface->unmap_frames();
	return 0;
}

static int gnttab_expand(unsigned int req_entries)
{
	int rc;
	unsigned int cur, extra;

	BUG_ON(gnttab_interface == NULL);
	cur = nr_grant_frames;
	extra = ((req_entries + gnttab_interface->grefs_per_grant_frame - 1) /
		 gnttab_interface->grefs_per_grant_frame);
	if (cur + extra > gnttab_max_grant_frames()) {
		pr_warn_ratelimited("xen/grant-table: max_grant_frames reached cur=%u extra=%u limit=%u gnttab_free_count=%u req_entries=%u\n",
				    cur, extra, gnttab_max_grant_frames(),
				    gnttab_free_count, req_entries);
		return -ENOSPC;
	}

	rc = gnttab_map(cur, cur + extra - 1);
	if (rc == 0)
		rc = grow_gnttab_list(extra);

	return rc;
}

int gnttab_init(void)
{
	int i;
	unsigned long max_nr_grant_frames;
	unsigned int max_nr_glist_frames, nr_glist_frames;
	unsigned int nr_init_grefs;
	int ret;

	gnttab_request_version();
	max_nr_grant_frames = gnttab_max_grant_frames();
	nr_grant_frames = 1;

	/* Determine the maximum number of frames required for the
	 * grant reference free list on the current hypervisor.
	 */
	BUG_ON(gnttab_interface == NULL);
	max_nr_glist_frames = (max_nr_grant_frames *
			       gnttab_interface->grefs_per_grant_frame / RPP);

	gnttab_list = kmalloc_array(max_nr_glist_frames,
				    sizeof(grant_ref_t *),
				    GFP_KERNEL);
	if (gnttab_list == NULL)
		return -ENOMEM;

	nr_glist_frames = gnttab_frames(nr_grant_frames, RPP);
	for (i = 0; i < nr_glist_frames; i++) {
		gnttab_list[i] = (grant_ref_t *)__get_free_page(GFP_KERNEL);
		if (gnttab_list[i] == NULL) {
			ret = -ENOMEM;
			goto ini_nomem;
		}
	}

	ret = arch_gnttab_init(max_nr_grant_frames,
			       nr_status_frames(max_nr_grant_frames));
	if (ret < 0)
		goto ini_nomem;

	if (gnttab_setup() < 0) {
		ret = -ENODEV;
		goto ini_nomem;
	}

	nr_init_grefs = nr_grant_frames *
			gnttab_interface->grefs_per_grant_frame;

	for (i = NR_RESERVED_ENTRIES; i < nr_init_grefs - 1; i++)
		gnttab_entry(i) = i + 1;

	gnttab_entry(nr_init_grefs - 1) = GNTTAB_LIST_END;
	gnttab_free_count = nr_init_grefs - NR_RESERVED_ENTRIES;
	gnttab_free_head  = NR_RESERVED_ENTRIES;

	pr_info("Grant table initialized\n");
	return 0;

 ini_nomem:
	for (i--; i >= 0; i--)
		free_page((unsigned long)gnttab_list[i]);
	kfree(gnttab_list);
	return ret;
}
EXPORT_SYMBOL_GPL(gnttab_init);

static int __gnttab_init(void)
{
	if (!xen_domain())
		return -ENODEV;

	/* Delay grant-table initialization in the PV on HVM case */
	if (xen_hvm_domain() && !xen_pvh_domain())
		return 0;

	return gnttab_init();
}
/*
 * Starts after core_initcall so that xen_pvh_gnttab_setup can be called
 * beforehand to initialize xen_auto_xlat_grant_frames.
 */
core_initcall_sync(__gnttab_init);