xref: /openbmc/linux/drivers/xen/grant-table.c (revision 79f08d9e)
1 /******************************************************************************
2  * grant_table.c
3  *
4  * Granting foreign access to our memory reservation.
5  *
6  * Copyright (c) 2005-2006, Christopher Clark
7  * Copyright (c) 2004-2005, K A Fraser
8  *
9  * This program is free software; you can redistribute it and/or
10  * modify it under the terms of the GNU General Public License version 2
11  * as published by the Free Software Foundation; or, when distributed
12  * separately from the Linux kernel or incorporated into other
13  * software packages, subject to the following license:
14  *
15  * Permission is hereby granted, free of charge, to any person obtaining a copy
16  * of this source file (the "Software"), to deal in the Software without
17  * restriction, including without limitation the rights to use, copy, modify,
18  * merge, publish, distribute, sublicense, and/or sell copies of the Software,
19  * and to permit persons to whom the Software is furnished to do so, subject to
20  * the following conditions:
21  *
22  * The above copyright notice and this permission notice shall be included in
23  * all copies or substantial portions of the Software.
24  *
25  * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
26  * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
27  * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
28  * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
29  * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
30  * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
31  * IN THE SOFTWARE.
32  */
33 
34 #define pr_fmt(fmt) "xen:" KBUILD_MODNAME ": " fmt
35 
36 #include <linux/module.h>
37 #include <linux/sched.h>
38 #include <linux/mm.h>
39 #include <linux/slab.h>
40 #include <linux/vmalloc.h>
41 #include <linux/uaccess.h>
42 #include <linux/io.h>
43 #include <linux/delay.h>
44 #include <linux/hardirq.h>
45 
46 #include <xen/xen.h>
47 #include <xen/interface/xen.h>
48 #include <xen/page.h>
49 #include <xen/grant_table.h>
50 #include <xen/interface/memory.h>
51 #include <xen/hvc-console.h>
52 #include <xen/swiotlb-xen.h>
53 #include <asm/xen/hypercall.h>
54 #include <asm/xen/interface.h>
55 
56 #include <asm/pgtable.h>
57 #include <asm/sync_bitops.h>
58 
/* External tools reserve first few grant table entries. */
#define NR_RESERVED_ENTRIES 8
/* Sentinel terminating the free list threaded through gnttab_list. */
#define GNTTAB_LIST_END 0xffffffff

/* Two-level table of grant references; each leaf page holds RPP entries. */
static grant_ref_t **gnttab_list;
/* Number of grant-table frames currently in use. */
static unsigned int nr_grant_frames;
/* Frame limit fixed at boot; caps gnttab_max_grant_frames(). */
static unsigned int boot_max_nr_grant_frames;
/* Number of unallocated entries currently on the free list. */
static int gnttab_free_count;
/* Head of the free list threaded through gnttab_list entries. */
static grant_ref_t gnttab_free_head;
/* Protects the free list, free count, and the free-callback list. */
static DEFINE_SPINLOCK(gnttab_list_lock);
/* HVM: base address used by gnttab_map() to derive gpfns for the table. */
unsigned long xen_hvm_resume_frames;
EXPORT_SYMBOL_GPL(xen_hvm_resume_frames);

/*
 * The shared grant table, viewed as v1 entries, v2 entries, or a raw
 * address, depending on the grant table version in use.
 */
static union {
	struct grant_entry_v1 *v1;
	union grant_entry_v2 *v2;
	void *addr;
} gnttab_shared;
77 
/* Function-pointer table abstracting over grant table versions (v1/v2). */
struct gnttab_ops {
	/*
	 * Mapping a list of frames for storing grant entries. Frames parameter
	 * is used to store grant table address when grant table being setup,
	 * nr_gframes is the number of frames to map grant table. Returning
	 * GNTST_okay means success and negative value means failure.
	 */
	int (*map_frames)(xen_pfn_t *frames, unsigned int nr_gframes);
	/*
	 * Release a list of frames which are mapped in map_frames for grant
	 * entry status.
	 */
	void (*unmap_frames)(void);
	/*
	 * Introducing a valid entry into the grant table, granting the frame of
	 * this grant entry to domain for accessing or transferring. Ref
	 * parameter is reference of this introduced grant entry, domid is id of
	 * granted domain, frame is the page frame to be granted, and flags is
	 * status of the grant entry to be updated.
	 */
	void (*update_entry)(grant_ref_t ref, domid_t domid,
			     unsigned long frame, unsigned flags);
	/*
	 * Stop granting a grant entry to domain for accessing. Ref parameter is
	 * reference of a grant entry whose grant access will be stopped,
	 * readonly is not in use in this function. If the grant entry is
	 * currently mapped for reading or writing, just return failure(==0)
	 * directly and don't tear down the grant access. Otherwise, stop grant
	 * access for this entry and return success(==1).
	 */
	int (*end_foreign_access_ref)(grant_ref_t ref, int readonly);
	/*
	 * Stop granting a grant entry to domain for transfer. Ref parameter is
	 * reference of a grant entry whose grant transfer will be stopped. If
	 * transfer has not started, just reclaim the grant entry and return
	 * failure(==0). Otherwise, wait for the transfer to complete and then
	 * return the frame.
	 */
	unsigned long (*end_foreign_transfer_ref)(grant_ref_t ref);
	/*
	 * Query the status of a grant entry. Ref parameter is reference of
	 * queried grant entry, return value is the status of queried entry.
	 * Detailed status(writing/reading) can be gotten from the return value
	 * by bit operations.
	 */
	int (*query_foreign_access)(grant_ref_t ref);
	/*
	 * Grant a domain to access a range of bytes within the page referred by
	 * an available grant entry. Ref parameter is reference of a grant entry
	 * which will be sub-page accessed, domid is id of grantee domain, frame
	 * is frame address of subpage grant, flags is grant type and flag
	 * information, page_off is offset of the range of bytes, and length is
	 * length of bytes to be accessed. NULL when the version in use does
	 * not support sub-page grants.
	 */
	void (*update_subpage_entry)(grant_ref_t ref, domid_t domid,
				     unsigned long frame, int flags,
				     unsigned page_off, unsigned length);
	/*
	 * Redirect an available grant entry on domain A to another grant
	 * reference of domain B, then allow domain C to use grant reference
	 * of domain B transitively. Ref parameter is an available grant entry
	 * reference on domain A, domid is id of domain C which accesses grant
	 * entry transitively, flags is grant type and flag information,
	 * trans_domid is id of domain B whose grant entry is finally accessed
	 * transitively, trans_gref is grant entry transitive reference of
	 * domain B. NULL when the version in use does not support transitive
	 * grants.
	 */
	void (*update_trans_entry)(grant_ref_t ref, domid_t domid, int flags,
				   domid_t trans_domid, grant_ref_t trans_gref);
};
149 
/* Ops table selected for the grant table version in use. */
static struct gnttab_ops *gnttab_interface;

/* V2 only: mapped array of per-entry status words (GTF_reading/_writing). */
static grant_status_t *grstatus;

/* Negotiated grant table version — presumably 1 or 2; set outside this view. */
static int grant_table_version;
/* Grant entries per grant-table frame (version dependent). */
static int grefs_per_grant_frame;

/* Singly-linked list of callbacks waiting for free entries. */
static struct gnttab_free_callback *gnttab_free_callback_list;

static int gnttab_expand(unsigned int req_entries);

/* Grant references per leaf page of the two-level gnttab_list table. */
#define RPP (PAGE_SIZE / sizeof(grant_ref_t))
/* Status words per status frame (v2 only). */
#define SPP (PAGE_SIZE / sizeof(grant_status_t))
164 
165 static inline grant_ref_t *__gnttab_entry(grant_ref_t entry)
166 {
167 	return &gnttab_list[(entry) / RPP][(entry) % RPP];
168 }
169 /* This can be used as an l-value */
170 #define gnttab_entry(entry) (*__gnttab_entry(entry))
171 
172 static int get_free_entries(unsigned count)
173 {
174 	unsigned long flags;
175 	int ref, rc = 0;
176 	grant_ref_t head;
177 
178 	spin_lock_irqsave(&gnttab_list_lock, flags);
179 
180 	if ((gnttab_free_count < count) &&
181 	    ((rc = gnttab_expand(count - gnttab_free_count)) < 0)) {
182 		spin_unlock_irqrestore(&gnttab_list_lock, flags);
183 		return rc;
184 	}
185 
186 	ref = head = gnttab_free_head;
187 	gnttab_free_count -= count;
188 	while (count-- > 1)
189 		head = gnttab_entry(head);
190 	gnttab_free_head = gnttab_entry(head);
191 	gnttab_entry(head) = GNTTAB_LIST_END;
192 
193 	spin_unlock_irqrestore(&gnttab_list_lock, flags);
194 
195 	return ref;
196 }
197 
198 static void do_free_callbacks(void)
199 {
200 	struct gnttab_free_callback *callback, *next;
201 
202 	callback = gnttab_free_callback_list;
203 	gnttab_free_callback_list = NULL;
204 
205 	while (callback != NULL) {
206 		next = callback->next;
207 		if (gnttab_free_count >= callback->count) {
208 			callback->next = NULL;
209 			callback->fn(callback->arg);
210 		} else {
211 			callback->next = gnttab_free_callback_list;
212 			gnttab_free_callback_list = callback;
213 		}
214 		callback = next;
215 	}
216 }
217 
218 static inline void check_free_callbacks(void)
219 {
220 	if (unlikely(gnttab_free_callback_list))
221 		do_free_callbacks();
222 }
223 
/* Return a single grant reference to the head of the free list. */
static void put_free_entry(grant_ref_t ref)
{
	unsigned long flags;
	spin_lock_irqsave(&gnttab_list_lock, flags);
	gnttab_entry(ref) = gnttab_free_head;
	gnttab_free_head = ref;
	gnttab_free_count++;
	/* A newly freed entry may satisfy a waiting callback. */
	check_free_callbacks();
	spin_unlock_irqrestore(&gnttab_list_lock, flags);
}
234 
/*
 * Following applies to gnttab_update_entry_v1 and gnttab_update_entry_v2.
 * Introducing a valid entry into the grant table:
 *  1. Write ent->domid.
 *  2. Write ent->frame:
 *      GTF_permit_access:   Frame to which access is permitted.
 *      GTF_accept_transfer: Pseudo-phys frame slot being filled by new
 *                           frame, or zero if none.
 *  3. Write memory barrier (WMB).
 *  4. Write ent->flags, inc. valid type.
 */
static void gnttab_update_entry_v1(grant_ref_t ref, domid_t domid,
				   unsigned long frame, unsigned flags)
{
	gnttab_shared.v1[ref].domid = domid;
	gnttab_shared.v1[ref].frame = frame;
	/* Step 3: publish domid/frame before flags make the entry valid. */
	wmb();
	gnttab_shared.v1[ref].flags = flags;
}
254 
static void gnttab_update_entry_v2(grant_ref_t ref, domid_t domid,
				   unsigned long frame, unsigned flags)
{
	gnttab_shared.v2[ref].hdr.domid = domid;
	gnttab_shared.v2[ref].full_page.frame = frame;
	/* Publish domid/frame before flags make the entry valid. */
	wmb();
	/*
	 * NOTE(review): unlike the v1 variant, GTF_permit_access is OR'd in
	 * unconditionally here, even when update_entry is used for transfer
	 * grants (GTF_accept_transfer) -- confirm this is intended.
	 */
	gnttab_shared.v2[ref].hdr.flags = GTF_permit_access | flags;
}
263 
264 /*
265  * Public grant-issuing interface functions
266  */
267 void gnttab_grant_foreign_access_ref(grant_ref_t ref, domid_t domid,
268 				     unsigned long frame, int readonly)
269 {
270 	gnttab_interface->update_entry(ref, domid, frame,
271 			   GTF_permit_access | (readonly ? GTF_readonly : 0));
272 }
273 EXPORT_SYMBOL_GPL(gnttab_grant_foreign_access_ref);
274 
275 int gnttab_grant_foreign_access(domid_t domid, unsigned long frame,
276 				int readonly)
277 {
278 	int ref;
279 
280 	ref = get_free_entries(1);
281 	if (unlikely(ref < 0))
282 		return -ENOSPC;
283 
284 	gnttab_grant_foreign_access_ref(ref, domid, frame, readonly);
285 
286 	return ref;
287 }
288 EXPORT_SYMBOL_GPL(gnttab_grant_foreign_access);
289 
/*
 * V2 only: fill in a sub-page grant entry (access to a byte range within
 * @frame).  The flags store after the barrier makes the entry valid.
 */
static void gnttab_update_subpage_entry_v2(grant_ref_t ref, domid_t domid,
					   unsigned long frame, int flags,
					   unsigned page_off, unsigned length)
{
	gnttab_shared.v2[ref].sub_page.frame = frame;
	gnttab_shared.v2[ref].sub_page.page_off = page_off;
	gnttab_shared.v2[ref].sub_page.length = length;
	gnttab_shared.v2[ref].hdr.domid = domid;
	/* Publish the payload before flags make the entry valid. */
	wmb();
	gnttab_shared.v2[ref].hdr.flags =
				GTF_permit_access | GTF_sub_page | flags;
}
302 
303 int gnttab_grant_foreign_access_subpage_ref(grant_ref_t ref, domid_t domid,
304 					    unsigned long frame, int flags,
305 					    unsigned page_off,
306 					    unsigned length)
307 {
308 	if (flags & (GTF_accept_transfer | GTF_reading |
309 		     GTF_writing | GTF_transitive))
310 		return -EPERM;
311 
312 	if (gnttab_interface->update_subpage_entry == NULL)
313 		return -ENOSYS;
314 
315 	gnttab_interface->update_subpage_entry(ref, domid, frame, flags,
316 					       page_off, length);
317 
318 	return 0;
319 }
320 EXPORT_SYMBOL_GPL(gnttab_grant_foreign_access_subpage_ref);
321 
322 int gnttab_grant_foreign_access_subpage(domid_t domid, unsigned long frame,
323 					int flags, unsigned page_off,
324 					unsigned length)
325 {
326 	int ref, rc;
327 
328 	ref = get_free_entries(1);
329 	if (unlikely(ref < 0))
330 		return -ENOSPC;
331 
332 	rc = gnttab_grant_foreign_access_subpage_ref(ref, domid, frame, flags,
333 						     page_off, length);
334 	if (rc < 0) {
335 		put_free_entry(ref);
336 		return rc;
337 	}
338 
339 	return ref;
340 }
341 EXPORT_SYMBOL_GPL(gnttab_grant_foreign_access_subpage);
342 
343 bool gnttab_subpage_grants_available(void)
344 {
345 	return gnttab_interface->update_subpage_entry != NULL;
346 }
347 EXPORT_SYMBOL_GPL(gnttab_subpage_grants_available);
348 
/*
 * V2 only: fill in a transitive grant entry redirecting to grant
 * @trans_gref of domain @trans_domid, usable by @domid.
 */
static void gnttab_update_trans_entry_v2(grant_ref_t ref, domid_t domid,
					 int flags, domid_t trans_domid,
					 grant_ref_t trans_gref)
{
	gnttab_shared.v2[ref].transitive.trans_domid = trans_domid;
	gnttab_shared.v2[ref].transitive.gref = trans_gref;
	gnttab_shared.v2[ref].hdr.domid = domid;
	/* Publish the payload before flags make the entry valid. */
	wmb();
	gnttab_shared.v2[ref].hdr.flags =
				GTF_permit_access | GTF_transitive | flags;
}
360 
361 int gnttab_grant_foreign_access_trans_ref(grant_ref_t ref, domid_t domid,
362 					  int flags, domid_t trans_domid,
363 					  grant_ref_t trans_gref)
364 {
365 	if (flags & (GTF_accept_transfer | GTF_reading |
366 		     GTF_writing | GTF_sub_page))
367 		return -EPERM;
368 
369 	if (gnttab_interface->update_trans_entry == NULL)
370 		return -ENOSYS;
371 
372 	gnttab_interface->update_trans_entry(ref, domid, flags, trans_domid,
373 					     trans_gref);
374 
375 	return 0;
376 }
377 EXPORT_SYMBOL_GPL(gnttab_grant_foreign_access_trans_ref);
378 
379 int gnttab_grant_foreign_access_trans(domid_t domid, int flags,
380 				      domid_t trans_domid,
381 				      grant_ref_t trans_gref)
382 {
383 	int ref, rc;
384 
385 	ref = get_free_entries(1);
386 	if (unlikely(ref < 0))
387 		return -ENOSPC;
388 
389 	rc = gnttab_grant_foreign_access_trans_ref(ref, domid, flags,
390 						   trans_domid, trans_gref);
391 	if (rc < 0) {
392 		put_free_entry(ref);
393 		return rc;
394 	}
395 
396 	return ref;
397 }
398 EXPORT_SYMBOL_GPL(gnttab_grant_foreign_access_trans);
399 
400 bool gnttab_trans_grants_available(void)
401 {
402 	return gnttab_interface->update_trans_entry != NULL;
403 }
404 EXPORT_SYMBOL_GPL(gnttab_trans_grants_available);
405 
406 static int gnttab_query_foreign_access_v1(grant_ref_t ref)
407 {
408 	return gnttab_shared.v1[ref].flags & (GTF_reading|GTF_writing);
409 }
410 
411 static int gnttab_query_foreign_access_v2(grant_ref_t ref)
412 {
413 	return grstatus[ref] & (GTF_reading|GTF_writing);
414 }
415 
/* Report whether @ref is currently mapped by the grantee (read/write bits). */
int gnttab_query_foreign_access(grant_ref_t ref)
{
	return gnttab_interface->query_foreign_access(ref);
}
EXPORT_SYMBOL_GPL(gnttab_query_foreign_access);
421 
/*
 * V1: atomically clear the entry's flags, but only while the grantee
 * does not have it mapped.  Returns 1 on success, 0 when the entry is
 * still in use.  @readonly is unused (see struct gnttab_ops).
 */
static int gnttab_end_foreign_access_ref_v1(grant_ref_t ref, int readonly)
{
	u16 flags, nflags;
	u16 *pflags;

	pflags = &gnttab_shared.v1[ref].flags;
	nflags = *pflags;
	do {
		flags = nflags;
		/* Bail out while the grantee still has the frame mapped. */
		if (flags & (GTF_reading|GTF_writing))
			return 0;
		/* Retry if the flags changed under us before the clear won. */
	} while ((nflags = sync_cmpxchg(pflags, flags, 0)) != flags);

	return 1;
}
437 
/*
 * V2: invalidate the entry, then consult the separate status array to
 * see whether the grantee still had it mapped.  Returns 1 on success,
 * 0 when still in use.  @readonly is unused (see struct gnttab_ops).
 */
static int gnttab_end_foreign_access_ref_v2(grant_ref_t ref, int readonly)
{
	gnttab_shared.v2[ref].hdr.flags = 0;
	/* Order the flags clear before the status read below. */
	mb();
	if (grstatus[ref] & (GTF_reading|GTF_writing)) {
		return 0;
	} else {
		/* The read of grstatus needs to have acquire
		semantics.  On x86, reads already have
		that, and we just need to protect against
		compiler reorderings.  On other
		architectures we may need a full
		barrier. */
#ifdef CONFIG_X86
		barrier();
#else
		mb();
#endif
	}

	return 1;
}
460 
/* Dispatch to the version-specific end-access implementation (no warning). */
static inline int _gnttab_end_foreign_access_ref(grant_ref_t ref, int readonly)
{
	return gnttab_interface->end_foreign_access_ref(ref, readonly);
}
465 
466 int gnttab_end_foreign_access_ref(grant_ref_t ref, int readonly)
467 {
468 	if (_gnttab_end_foreign_access_ref(ref, readonly))
469 		return 1;
470 	pr_warn("WARNING: g.e. %#x still in use!\n", ref);
471 	return 0;
472 }
473 EXPORT_SYMBOL_GPL(gnttab_end_foreign_access_ref);
474 
/*
 * A grant that could not be revoked immediately (still mapped by the
 * remote domain) is parked on deferred_list and retried from a timer.
 */
struct deferred_entry {
	struct list_head list;
	grant_ref_t ref;	/* grant reference to reclaim */
	bool ro;		/* readonly flag passed through to end-access */
	uint16_t warn_delay;	/* retry countdown before "still pending" log */
	struct page *page;	/* page to free once reclaimed, or NULL */
};
static LIST_HEAD(deferred_list);
static void gnttab_handle_deferred(unsigned long);
static DEFINE_TIMER(deferred_timer, gnttab_handle_deferred, 0, 0);
485 
/*
 * Timer callback: retry revocation of up to 10 deferred grant entries.
 * Entries still in use are moved to the back of the queue and the timer
 * is re-armed for another pass in a second.
 */
static void gnttab_handle_deferred(unsigned long unused)
{
	unsigned int nr = 10;
	struct deferred_entry *first = NULL;
	unsigned long flags;

	spin_lock_irqsave(&gnttab_list_lock, flags);
	while (nr--) {
		struct deferred_entry *entry
			= list_first_entry(&deferred_list,
					   struct deferred_entry, list);

		/* Stop once we wrap around to the first re-queued entry. */
		if (entry == first)
			break;
		list_del(&entry->list);
		/* Drop the lock while touching the shared grant table. */
		spin_unlock_irqrestore(&gnttab_list_lock, flags);
		if (_gnttab_end_foreign_access_ref(entry->ref, entry->ro)) {
			put_free_entry(entry->ref);
			if (entry->page) {
				pr_debug("freeing g.e. %#x (pfn %#lx)\n",
					 entry->ref, page_to_pfn(entry->page));
				__free_page(entry->page);
			} else
				pr_info("freeing g.e. %#x\n", entry->ref);
			kfree(entry);
			entry = NULL;
		} else {
			if (!--entry->warn_delay)
				pr_info("g.e. %#x still pending\n", entry->ref);
			if (!first)
				first = entry;
		}
		spin_lock_irqsave(&gnttab_list_lock, flags);
		if (entry)
			/* Still busy: requeue at the back for a later retry. */
			list_add_tail(&entry->list, &deferred_list);
		else if (list_empty(&deferred_list))
			break;
	}
	if (!list_empty(&deferred_list) && !timer_pending(&deferred_timer)) {
		deferred_timer.expires = jiffies + HZ;
		add_timer(&deferred_timer);
	}
	spin_unlock_irqrestore(&gnttab_list_lock, flags);
}
530 
/*
 * Queue @ref for deferred revocation because the remote domain still has
 * it mapped.  If the bookkeeping allocation fails, the grant (and @page,
 * if any) are leaked and a warning is logged.
 */
static void gnttab_add_deferred(grant_ref_t ref, bool readonly,
				struct page *page)
{
	struct deferred_entry *entry = kmalloc(sizeof(*entry), GFP_ATOMIC);
	const char *what = KERN_WARNING "leaking";

	if (entry) {
		unsigned long flags;

		entry->ref = ref;
		entry->ro = readonly;
		entry->page = page;
		entry->warn_delay = 60;
		spin_lock_irqsave(&gnttab_list_lock, flags);
		list_add_tail(&entry->list, &deferred_list);
		if (!timer_pending(&deferred_timer)) {
			deferred_timer.expires = jiffies + HZ;
			add_timer(&deferred_timer);
		}
		spin_unlock_irqrestore(&gnttab_list_lock, flags);
		what = KERN_DEBUG "deferring";
	}
	/*
	 * NOTE(review): the KERN_* level marker is passed through "%s"
	 * rather than at the start of the format string; printk may not
	 * honour a level embedded in an argument -- confirm the intended
	 * log level is actually applied here.
	 */
	printk("%s g.e. %#x (pfn %#lx)\n",
	       what, ref, page ? page_to_pfn(page) : -1);
}
556 
557 void gnttab_end_foreign_access(grant_ref_t ref, int readonly,
558 			       unsigned long page)
559 {
560 	if (gnttab_end_foreign_access_ref(ref, readonly)) {
561 		put_free_entry(ref);
562 		if (page != 0)
563 			free_page(page);
564 	} else
565 		gnttab_add_deferred(ref, readonly,
566 				    page ? virt_to_page(page) : NULL);
567 }
568 EXPORT_SYMBOL_GPL(gnttab_end_foreign_access);
569 
570 int gnttab_grant_foreign_transfer(domid_t domid, unsigned long pfn)
571 {
572 	int ref;
573 
574 	ref = get_free_entries(1);
575 	if (unlikely(ref < 0))
576 		return -ENOSPC;
577 	gnttab_grant_foreign_transfer_ref(ref, domid, pfn);
578 
579 	return ref;
580 }
581 EXPORT_SYMBOL_GPL(gnttab_grant_foreign_transfer);
582 
/* Mark @ref to accept a frame transfer from @domid into pseudo-phys @pfn. */
void gnttab_grant_foreign_transfer_ref(grant_ref_t ref, domid_t domid,
				       unsigned long pfn)
{
	gnttab_interface->update_entry(ref, domid, pfn, GTF_accept_transfer);
}
EXPORT_SYMBOL_GPL(gnttab_grant_foreign_transfer_ref);
589 
/*
 * V1: end a transfer grant.  Reclaims the entry and returns 0 if the
 * transfer never started; otherwise spins until completion and returns
 * the transferred frame number.
 */
static unsigned long gnttab_end_foreign_transfer_ref_v1(grant_ref_t ref)
{
	unsigned long frame;
	u16           flags;
	u16          *pflags;

	pflags = &gnttab_shared.v1[ref].flags;

	/*
	 * If a transfer is not even yet started, try to reclaim the grant
	 * reference and return failure (== 0).
	 */
	while (!((flags = *pflags) & GTF_transfer_committed)) {
		if (sync_cmpxchg(pflags, flags, 0) == flags)
			return 0;
		cpu_relax();
	}

	/* If a transfer is in progress then wait until it is completed. */
	while (!(flags & GTF_transfer_completed)) {
		flags = *pflags;
		cpu_relax();
	}

	rmb();	/* Read the frame number /after/ reading completion status. */
	frame = gnttab_shared.v1[ref].frame;
	/* A completed transfer must have filled in a non-zero frame. */
	BUG_ON(frame == 0);

	return frame;
}
620 
/*
 * V2: same protocol as the v1 variant, but the flags live in the entry
 * header and the frame in the full_page layout.
 */
static unsigned long gnttab_end_foreign_transfer_ref_v2(grant_ref_t ref)
{
	unsigned long frame;
	u16           flags;
	u16          *pflags;

	pflags = &gnttab_shared.v2[ref].hdr.flags;

	/*
	 * If a transfer is not even yet started, try to reclaim the grant
	 * reference and return failure (== 0).
	 */
	while (!((flags = *pflags) & GTF_transfer_committed)) {
		if (sync_cmpxchg(pflags, flags, 0) == flags)
			return 0;
		cpu_relax();
	}

	/* If a transfer is in progress then wait until it is completed. */
	while (!(flags & GTF_transfer_completed)) {
		flags = *pflags;
		cpu_relax();
	}

	rmb();  /* Read the frame number /after/ reading completion status. */
	frame = gnttab_shared.v2[ref].full_page.frame;
	/* A completed transfer must have filled in a non-zero frame. */
	BUG_ON(frame == 0);

	return frame;
}
651 
/* End a transfer grant; see struct gnttab_ops::end_foreign_transfer_ref. */
unsigned long gnttab_end_foreign_transfer_ref(grant_ref_t ref)
{
	return gnttab_interface->end_foreign_transfer_ref(ref);
}
EXPORT_SYMBOL_GPL(gnttab_end_foreign_transfer_ref);
657 
658 unsigned long gnttab_end_foreign_transfer(grant_ref_t ref)
659 {
660 	unsigned long frame = gnttab_end_foreign_transfer_ref(ref);
661 	put_free_entry(ref);
662 	return frame;
663 }
664 EXPORT_SYMBOL_GPL(gnttab_end_foreign_transfer);
665 
/* Return a no-longer-used grant reference to the global free list. */
void gnttab_free_grant_reference(grant_ref_t ref)
{
	put_free_entry(ref);
}
EXPORT_SYMBOL_GPL(gnttab_free_grant_reference);
671 
672 void gnttab_free_grant_references(grant_ref_t head)
673 {
674 	grant_ref_t ref;
675 	unsigned long flags;
676 	int count = 1;
677 	if (head == GNTTAB_LIST_END)
678 		return;
679 	spin_lock_irqsave(&gnttab_list_lock, flags);
680 	ref = head;
681 	while (gnttab_entry(ref) != GNTTAB_LIST_END) {
682 		ref = gnttab_entry(ref);
683 		count++;
684 	}
685 	gnttab_entry(ref) = gnttab_free_head;
686 	gnttab_free_head = head;
687 	gnttab_free_count += count;
688 	check_free_callbacks();
689 	spin_unlock_irqrestore(&gnttab_list_lock, flags);
690 }
691 EXPORT_SYMBOL_GPL(gnttab_free_grant_references);
692 
693 int gnttab_alloc_grant_references(u16 count, grant_ref_t *head)
694 {
695 	int h = get_free_entries(count);
696 
697 	if (h < 0)
698 		return -ENOSPC;
699 
700 	*head = h;
701 
702 	return 0;
703 }
704 EXPORT_SYMBOL_GPL(gnttab_alloc_grant_references);
705 
706 int gnttab_empty_grant_references(const grant_ref_t *private_head)
707 {
708 	return (*private_head == GNTTAB_LIST_END);
709 }
710 EXPORT_SYMBOL_GPL(gnttab_empty_grant_references);
711 
712 int gnttab_claim_grant_reference(grant_ref_t *private_head)
713 {
714 	grant_ref_t g = *private_head;
715 	if (unlikely(g == GNTTAB_LIST_END))
716 		return -ENOSPC;
717 	*private_head = gnttab_entry(g);
718 	return g;
719 }
720 EXPORT_SYMBOL_GPL(gnttab_claim_grant_reference);
721 
/* Push @release back onto a caller-private chain headed at *@private_head. */
void gnttab_release_grant_reference(grant_ref_t *private_head,
				    grant_ref_t release)
{
	gnttab_entry(release) = *private_head;
	*private_head = release;
}
EXPORT_SYMBOL_GPL(gnttab_release_grant_reference);
729 
730 void gnttab_request_free_callback(struct gnttab_free_callback *callback,
731 				  void (*fn)(void *), void *arg, u16 count)
732 {
733 	unsigned long flags;
734 	struct gnttab_free_callback *cb;
735 
736 	spin_lock_irqsave(&gnttab_list_lock, flags);
737 
738 	/* Check if the callback is already on the list */
739 	cb = gnttab_free_callback_list;
740 	while (cb) {
741 		if (cb == callback)
742 			goto out;
743 		cb = cb->next;
744 	}
745 
746 	callback->fn = fn;
747 	callback->arg = arg;
748 	callback->count = count;
749 	callback->next = gnttab_free_callback_list;
750 	gnttab_free_callback_list = callback;
751 	check_free_callbacks();
752 out:
753 	spin_unlock_irqrestore(&gnttab_list_lock, flags);
754 }
755 EXPORT_SYMBOL_GPL(gnttab_request_free_callback);
756 
757 void gnttab_cancel_free_callback(struct gnttab_free_callback *callback)
758 {
759 	struct gnttab_free_callback **pcb;
760 	unsigned long flags;
761 
762 	spin_lock_irqsave(&gnttab_list_lock, flags);
763 	for (pcb = &gnttab_free_callback_list; *pcb; pcb = &(*pcb)->next) {
764 		if (*pcb == callback) {
765 			*pcb = callback->next;
766 			break;
767 		}
768 	}
769 	spin_unlock_irqrestore(&gnttab_list_lock, flags);
770 }
771 EXPORT_SYMBOL_GPL(gnttab_cancel_free_callback);
772 
773 static int grow_gnttab_list(unsigned int more_frames)
774 {
775 	unsigned int new_nr_grant_frames, extra_entries, i;
776 	unsigned int nr_glist_frames, new_nr_glist_frames;
777 
778 	BUG_ON(grefs_per_grant_frame == 0);
779 
780 	new_nr_grant_frames = nr_grant_frames + more_frames;
781 	extra_entries       = more_frames * grefs_per_grant_frame;
782 
783 	nr_glist_frames = (nr_grant_frames * grefs_per_grant_frame + RPP - 1) / RPP;
784 	new_nr_glist_frames =
785 		(new_nr_grant_frames * grefs_per_grant_frame + RPP - 1) / RPP;
786 	for (i = nr_glist_frames; i < new_nr_glist_frames; i++) {
787 		gnttab_list[i] = (grant_ref_t *)__get_free_page(GFP_ATOMIC);
788 		if (!gnttab_list[i])
789 			goto grow_nomem;
790 	}
791 
792 
793 	for (i = grefs_per_grant_frame * nr_grant_frames;
794 	     i < grefs_per_grant_frame * new_nr_grant_frames - 1; i++)
795 		gnttab_entry(i) = i + 1;
796 
797 	gnttab_entry(i) = gnttab_free_head;
798 	gnttab_free_head = grefs_per_grant_frame * nr_grant_frames;
799 	gnttab_free_count += extra_entries;
800 
801 	nr_grant_frames = new_nr_grant_frames;
802 
803 	check_free_callbacks();
804 
805 	return 0;
806 
807 grow_nomem:
808 	for ( ; i >= nr_glist_frames; i--)
809 		free_page((unsigned long) gnttab_list[i]);
810 	return -ENOMEM;
811 }
812 
/*
 * Query the hypervisor for the maximum number of grant-table frames
 * this domain may use.  Falls back to the legacy limit of 4 when the
 * query hypercall is unavailable or fails.
 */
static unsigned int __max_nr_grant_frames(void)
{
	struct gnttab_query_size query;
	int rc;

	query.dom = DOMID_SELF;

	rc = HYPERVISOR_grant_table_op(GNTTABOP_query_size, &query, 1);
	if ((rc < 0) || (query.status != GNTST_okay))
		return 4; /* Legacy max supported number of frames */

	return query.max_nr_frames;
}
826 
827 unsigned int gnttab_max_grant_frames(void)
828 {
829 	unsigned int xen_max = __max_nr_grant_frames();
830 
831 	if (xen_max > boot_max_nr_grant_frames)
832 		return boot_max_nr_grant_frames;
833 	return xen_max;
834 }
835 EXPORT_SYMBOL_GPL(gnttab_max_grant_frames);
836 
/* Handling of paged out grant targets (GNTST_eagain) */
#define MAX_DELAY 256
/*
 * Re-issue a single grant-table op while it returns GNTST_eagain (the
 * target frame is paged out), sleeping with a linearly increasing delay
 * between attempts.  After MAX_DELAY attempts the status is forced to
 * GNTST_bad_page and an error is logged.
 */
static inline void
gnttab_retry_eagain_gop(unsigned int cmd, void *gop, int16_t *status,
						const char *func)
{
	unsigned delay = 1;

	do {
		/* The hypercall itself must not fail; only *status may. */
		BUG_ON(HYPERVISOR_grant_table_op(cmd, gop, 1));
		if (*status == GNTST_eagain)
			msleep(delay++);
	} while ((*status == GNTST_eagain) && (delay < MAX_DELAY));

	if (delay >= MAX_DELAY) {
		pr_err("%s: %s eagain grant\n", func, current->comm);
		*status = GNTST_bad_page;
	}
}
856 
857 void gnttab_batch_map(struct gnttab_map_grant_ref *batch, unsigned count)
858 {
859 	struct gnttab_map_grant_ref *op;
860 
861 	if (HYPERVISOR_grant_table_op(GNTTABOP_map_grant_ref, batch, count))
862 		BUG();
863 	for (op = batch; op < batch + count; op++)
864 		if (op->status == GNTST_eagain)
865 			gnttab_retry_eagain_gop(GNTTABOP_map_grant_ref, op,
866 						&op->status, __func__);
867 }
868 EXPORT_SYMBOL_GPL(gnttab_batch_map);
869 
870 void gnttab_batch_copy(struct gnttab_copy *batch, unsigned count)
871 {
872 	struct gnttab_copy *op;
873 
874 	if (HYPERVISOR_grant_table_op(GNTTABOP_copy, batch, count))
875 		BUG();
876 	for (op = batch; op < batch + count; op++)
877 		if (op->status == GNTST_eagain)
878 			gnttab_retry_eagain_gop(GNTTABOP_copy, op,
879 						&op->status, __func__);
880 }
881 EXPORT_SYMBOL_GPL(gnttab_batch_copy);
882 
/*
 * Map a batch of foreign grant references and record the resulting
 * mfn<->page relationship in the m2p override table (non-auto-translated
 * guests only).  Returns 0 on success or a negative error from the
 * hypercall or m2p_add_override().
 */
int gnttab_map_refs(struct gnttab_map_grant_ref *map_ops,
		    struct gnttab_map_grant_ref *kmap_ops,
		    struct page **pages, unsigned int count)
{
	int i, ret;
	bool lazy = false;
	pte_t *pte;
	unsigned long mfn;

	ret = HYPERVISOR_grant_table_op(GNTTABOP_map_grant_ref, map_ops, count);
	if (ret)
		return ret;

	/* Retry eagain maps */
	for (i = 0; i < count; i++)
		if (map_ops[i].status == GNTST_eagain)
			gnttab_retry_eagain_gop(GNTTABOP_map_grant_ref, map_ops + i,
						&map_ops[i].status, __func__);

	/* this is basically a nop on x86 */
	if (xen_feature(XENFEAT_auto_translated_physmap)) {
		for (i = 0; i < count; i++) {
			if (map_ops[i].status)
				continue;
			set_phys_to_machine(map_ops[i].host_addr >> PAGE_SHIFT,
					map_ops[i].dev_bus_addr >> PAGE_SHIFT);
		}
		return ret;
	}

	/* Batch the PTE updates below unless already inside a lazy section. */
	if (!in_interrupt() && paravirt_get_lazy_mode() == PARAVIRT_LAZY_NONE) {
		arch_enter_lazy_mmu_mode();
		lazy = true;
	}

	for (i = 0; i < count; i++) {
		/* Do not add to override if the map failed. */
		if (map_ops[i].status)
			continue;

		if (map_ops[i].flags & GNTMAP_contains_pte) {
			/* host_addr points at a PTE; read the mfn from it. */
			pte = (pte_t *) (mfn_to_virt(PFN_DOWN(map_ops[i].host_addr)) +
				(map_ops[i].host_addr & ~PAGE_MASK));
			mfn = pte_mfn(*pte);
		} else {
			mfn = PFN_DOWN(map_ops[i].dev_bus_addr);
		}
		/*
		 * NOTE(review): an error here returns without undoing
		 * earlier overrides and without leaving lazy MMU mode --
		 * confirm callers treat partial failure as fatal.
		 */
		ret = m2p_add_override(mfn, pages[i], kmap_ops ?
				       &kmap_ops[i] : NULL);
		if (ret)
			return ret;
	}

	if (lazy)
		arch_leave_lazy_mmu_mode();

	return ret;
}
EXPORT_SYMBOL_GPL(gnttab_map_refs);
942 
/*
 * Unmap a batch of grant mappings and remove the corresponding m2p
 * overrides (non-auto-translated guests only).  Returns 0 on success or
 * a negative error from the hypercall or m2p_remove_override().
 */
int gnttab_unmap_refs(struct gnttab_unmap_grant_ref *unmap_ops,
		      struct gnttab_map_grant_ref *kmap_ops,
		      struct page **pages, unsigned int count)
{
	int i, ret;
	bool lazy = false;

	ret = HYPERVISOR_grant_table_op(GNTTABOP_unmap_grant_ref, unmap_ops, count);
	if (ret)
		return ret;

	/* this is basically a nop on x86 */
	if (xen_feature(XENFEAT_auto_translated_physmap)) {
		for (i = 0; i < count; i++) {
			set_phys_to_machine(unmap_ops[i].host_addr >> PAGE_SHIFT,
					INVALID_P2M_ENTRY);
		}
		return ret;
	}

	/* Batch the PTE updates below unless already inside a lazy section. */
	if (!in_interrupt() && paravirt_get_lazy_mode() == PARAVIRT_LAZY_NONE) {
		arch_enter_lazy_mmu_mode();
		lazy = true;
	}

	for (i = 0; i < count; i++) {
		/*
		 * NOTE(review): an early error return here skips
		 * arch_leave_lazy_mmu_mode() -- confirm this is safe.
		 */
		ret = m2p_remove_override(pages[i], kmap_ops ?
				       &kmap_ops[i] : NULL);
		if (ret)
			return ret;
	}

	if (lazy)
		arch_leave_lazy_mmu_mode();

	return ret;
}
EXPORT_SYMBOL_GPL(gnttab_unmap_refs);
981 
982 static unsigned nr_status_frames(unsigned nr_grant_frames)
983 {
984 	BUG_ON(grefs_per_grant_frame == 0);
985 	return (nr_grant_frames * grefs_per_grant_frame + SPP - 1) / SPP;
986 }
987 
/* V1: map the shared grant-table frames into the kernel address space. */
static int gnttab_map_frames_v1(xen_pfn_t *frames, unsigned int nr_gframes)
{
	int rc;

	rc = arch_gnttab_map_shared(frames, nr_gframes,
				    gnttab_max_grant_frames(),
				    &gnttab_shared.addr);
	/* Mapping our own grant table must never fail. */
	BUG_ON(rc);

	return 0;
}
999 
/* V1: tear down the kernel mapping of the shared grant-table frames. */
static void gnttab_unmap_frames_v1(void)
{
	arch_gnttab_unmap(gnttab_shared.addr, nr_grant_frames);
}
1004 
/*
 * V2: map both the shared grant-table frames and the separate status
 * frames.  Returns 0, -ENOMEM, or -ENOSYS when the hypervisor lacks
 * GNTTABOP_get_status_frames.
 */
static int gnttab_map_frames_v2(xen_pfn_t *frames, unsigned int nr_gframes)
{
	uint64_t *sframes;
	unsigned int nr_sframes;
	struct gnttab_get_status_frames getframes;
	int rc;

	nr_sframes = nr_status_frames(nr_gframes);

	/* No need for kzalloc as it is initialized in following hypercall
	 * GNTTABOP_get_status_frames.
	 */
	sframes = kmalloc(nr_sframes  * sizeof(uint64_t), GFP_ATOMIC);
	if (!sframes)
		return -ENOMEM;

	getframes.dom        = DOMID_SELF;
	getframes.nr_frames  = nr_sframes;
	set_xen_guest_handle(getframes.frame_list, sframes);

	rc = HYPERVISOR_grant_table_op(GNTTABOP_get_status_frames,
				       &getframes, 1);
	/* Old hypervisor without v2 support: let the caller fall back. */
	if (rc == -ENOSYS) {
		kfree(sframes);
		return -ENOSYS;
	}

	BUG_ON(rc || getframes.status);

	rc = arch_gnttab_map_status(sframes, nr_sframes,
				    nr_status_frames(gnttab_max_grant_frames()),
				    &grstatus);
	BUG_ON(rc);
	/* The frame list is only needed for the hypercall above. */
	kfree(sframes);

	rc = arch_gnttab_map_shared(frames, nr_gframes,
				    gnttab_max_grant_frames(),
				    &gnttab_shared.addr);
	BUG_ON(rc);

	return 0;
}
1047 
/* Tear down both the shared and the status mappings used by v2. */
static void gnttab_unmap_frames_v2(void)
{
	arch_gnttab_unmap(gnttab_shared.addr, nr_grant_frames);
	arch_gnttab_unmap(grstatus, nr_status_frames(nr_grant_frames));
}
1053 
1054 static int gnttab_map(unsigned int start_idx, unsigned int end_idx)
1055 {
1056 	struct gnttab_setup_table setup;
1057 	xen_pfn_t *frames;
1058 	unsigned int nr_gframes = end_idx + 1;
1059 	int rc;
1060 
1061 	if (xen_hvm_domain()) {
1062 		struct xen_add_to_physmap xatp;
1063 		unsigned int i = end_idx;
1064 		rc = 0;
1065 		/*
1066 		 * Loop backwards, so that the first hypercall has the largest
1067 		 * index, ensuring that the table will grow only once.
1068 		 */
1069 		do {
1070 			xatp.domid = DOMID_SELF;
1071 			xatp.idx = i;
1072 			xatp.space = XENMAPSPACE_grant_table;
1073 			xatp.gpfn = (xen_hvm_resume_frames >> PAGE_SHIFT) + i;
1074 			rc = HYPERVISOR_memory_op(XENMEM_add_to_physmap, &xatp);
1075 			if (rc != 0) {
1076 				pr_warn("grant table add_to_physmap failed, err=%d\n",
1077 					rc);
1078 				break;
1079 			}
1080 		} while (i-- > start_idx);
1081 
1082 		return rc;
1083 	}
1084 
1085 	/* No need for kzalloc as it is initialized in following hypercall
1086 	 * GNTTABOP_setup_table.
1087 	 */
1088 	frames = kmalloc(nr_gframes * sizeof(unsigned long), GFP_ATOMIC);
1089 	if (!frames)
1090 		return -ENOMEM;
1091 
1092 	setup.dom        = DOMID_SELF;
1093 	setup.nr_frames  = nr_gframes;
1094 	set_xen_guest_handle(setup.frame_list, frames);
1095 
1096 	rc = HYPERVISOR_grant_table_op(GNTTABOP_setup_table, &setup, 1);
1097 	if (rc == -ENOSYS) {
1098 		kfree(frames);
1099 		return -ENOSYS;
1100 	}
1101 
1102 	BUG_ON(rc || setup.status);
1103 
1104 	rc = gnttab_interface->map_frames(frames, nr_gframes);
1105 
1106 	kfree(frames);
1107 
1108 	return rc;
1109 }
1110 
/* Operations for the version 1 grant-table layout. */
static struct gnttab_ops gnttab_v1_ops = {
	.map_frames			= gnttab_map_frames_v1,
	.unmap_frames			= gnttab_unmap_frames_v1,
	.update_entry			= gnttab_update_entry_v1,
	.end_foreign_access_ref		= gnttab_end_foreign_access_ref_v1,
	.end_foreign_transfer_ref	= gnttab_end_foreign_transfer_ref_v1,
	.query_foreign_access		= gnttab_query_foreign_access_v1,
};
1119 
/*
 * Operations for the version 2 grant-table layout; additionally
 * provides sub-page and transitive grant updates, which v1 lacks.
 */
static struct gnttab_ops gnttab_v2_ops = {
	.map_frames			= gnttab_map_frames_v2,
	.unmap_frames			= gnttab_unmap_frames_v2,
	.update_entry			= gnttab_update_entry_v2,
	.end_foreign_access_ref		= gnttab_end_foreign_access_ref_v2,
	.end_foreign_transfer_ref	= gnttab_end_foreign_transfer_ref_v2,
	.query_foreign_access		= gnttab_query_foreign_access_v2,
	.update_subpage_entry		= gnttab_update_subpage_entry_v2,
	.update_trans_entry		= gnttab_update_trans_entry_v2,
};
1130 
1131 static void gnttab_request_version(void)
1132 {
1133 	int rc;
1134 	struct gnttab_set_version gsv;
1135 
1136 	if (xen_hvm_domain())
1137 		gsv.version = 1;
1138 	else
1139 		gsv.version = 2;
1140 	rc = HYPERVISOR_grant_table_op(GNTTABOP_set_version, &gsv, 1);
1141 	if (rc == 0 && gsv.version == 2) {
1142 		grant_table_version = 2;
1143 		grefs_per_grant_frame = PAGE_SIZE / sizeof(union grant_entry_v2);
1144 		gnttab_interface = &gnttab_v2_ops;
1145 	} else if (grant_table_version == 2) {
1146 		/*
1147 		 * If we've already used version 2 features,
1148 		 * but then suddenly discover that they're not
1149 		 * available (e.g. migrating to an older
1150 		 * version of Xen), almost unbounded badness
1151 		 * can happen.
1152 		 */
1153 		panic("we need grant tables version 2, but only version 1 is available");
1154 	} else {
1155 		grant_table_version = 1;
1156 		grefs_per_grant_frame = PAGE_SIZE / sizeof(struct grant_entry_v1);
1157 		gnttab_interface = &gnttab_v1_ops;
1158 	}
1159 	pr_info("Grant tables using version %d layout\n", grant_table_version);
1160 }
1161 
1162 static int gnttab_setup(void)
1163 {
1164 	unsigned int max_nr_gframes;
1165 
1166 	max_nr_gframes = gnttab_max_grant_frames();
1167 	if (max_nr_gframes < nr_grant_frames)
1168 		return -ENOSYS;
1169 
1170 	if (xen_pv_domain())
1171 		return gnttab_map(0, nr_grant_frames - 1);
1172 
1173 	if (gnttab_shared.addr == NULL) {
1174 		gnttab_shared.addr = xen_remap(xen_hvm_resume_frames,
1175 						PAGE_SIZE * max_nr_gframes);
1176 		if (gnttab_shared.addr == NULL) {
1177 			pr_warn("Failed to ioremap gnttab share frames!\n");
1178 			return -ENOMEM;
1179 		}
1180 	}
1181 
1182 	gnttab_map(0, nr_grant_frames - 1);
1183 
1184 	return 0;
1185 }
1186 
/*
 * Re-negotiate the interface version and re-map the grant frames after
 * resume/migration.  Returns 0 on success or a negative error code.
 */
int gnttab_resume(void)
{
	gnttab_request_version();
	return gnttab_setup();
}
1192 
/* Unmap the grant frames before suspend; always succeeds. */
int gnttab_suspend(void)
{
	gnttab_interface->unmap_frames();
	return 0;
}
1198 
1199 static int gnttab_expand(unsigned int req_entries)
1200 {
1201 	int rc;
1202 	unsigned int cur, extra;
1203 
1204 	BUG_ON(grefs_per_grant_frame == 0);
1205 	cur = nr_grant_frames;
1206 	extra = ((req_entries + (grefs_per_grant_frame-1)) /
1207 		 grefs_per_grant_frame);
1208 	if (cur + extra > gnttab_max_grant_frames())
1209 		return -ENOSPC;
1210 
1211 	rc = gnttab_map(cur, cur + extra - 1);
1212 	if (rc == 0)
1213 		rc = grow_gnttab_list(extra);
1214 
1215 	return rc;
1216 }
1217 
1218 int gnttab_init(void)
1219 {
1220 	int i;
1221 	unsigned int max_nr_glist_frames, nr_glist_frames;
1222 	unsigned int nr_init_grefs;
1223 	int ret;
1224 
1225 	gnttab_request_version();
1226 	nr_grant_frames = 1;
1227 	boot_max_nr_grant_frames = __max_nr_grant_frames();
1228 
1229 	/* Determine the maximum number of frames required for the
1230 	 * grant reference free list on the current hypervisor.
1231 	 */
1232 	BUG_ON(grefs_per_grant_frame == 0);
1233 	max_nr_glist_frames = (boot_max_nr_grant_frames *
1234 			       grefs_per_grant_frame / RPP);
1235 
1236 	gnttab_list = kmalloc(max_nr_glist_frames * sizeof(grant_ref_t *),
1237 			      GFP_KERNEL);
1238 	if (gnttab_list == NULL)
1239 		return -ENOMEM;
1240 
1241 	nr_glist_frames = (nr_grant_frames * grefs_per_grant_frame + RPP - 1) / RPP;
1242 	for (i = 0; i < nr_glist_frames; i++) {
1243 		gnttab_list[i] = (grant_ref_t *)__get_free_page(GFP_KERNEL);
1244 		if (gnttab_list[i] == NULL) {
1245 			ret = -ENOMEM;
1246 			goto ini_nomem;
1247 		}
1248 	}
1249 
1250 	if (gnttab_setup() < 0) {
1251 		ret = -ENODEV;
1252 		goto ini_nomem;
1253 	}
1254 
1255 	nr_init_grefs = nr_grant_frames * grefs_per_grant_frame;
1256 
1257 	for (i = NR_RESERVED_ENTRIES; i < nr_init_grefs - 1; i++)
1258 		gnttab_entry(i) = i + 1;
1259 
1260 	gnttab_entry(nr_init_grefs - 1) = GNTTAB_LIST_END;
1261 	gnttab_free_count = nr_init_grefs - NR_RESERVED_ENTRIES;
1262 	gnttab_free_head  = NR_RESERVED_ENTRIES;
1263 
1264 	printk("Grant table initialized\n");
1265 	return 0;
1266 
1267  ini_nomem:
1268 	for (i--; i >= 0; i--)
1269 		free_page((unsigned long)gnttab_list[i]);
1270 	kfree(gnttab_list);
1271 	return ret;
1272 }
1273 EXPORT_SYMBOL_GPL(gnttab_init);
1274 
/*
 * Initcall entry point.  PV domains initialise here; PV-on-HVM domains
 * defer initialisation (it happens later, once the platform PCI device
 * is available); anything else is not a Xen guest at all.
 */
static int __gnttab_init(void)
{
	/* Delay grant-table initialization in the PV on HVM case */
	if (xen_hvm_domain())
		return 0;

	if (!xen_pv_domain())
		return -ENODEV;

	return gnttab_init();
}

/* Runs early: grant tables must be up before most Xen frontends. */
core_initcall(__gnttab_init);
1288