xref: /openbmc/linux/drivers/xen/grant-table.c (revision 532c2919)
1 /******************************************************************************
2  * grant_table.c
3  *
4  * Granting foreign access to our memory reservation.
5  *
6  * Copyright (c) 2005-2006, Christopher Clark
7  * Copyright (c) 2004-2005, K A Fraser
8  *
9  * This program is free software; you can redistribute it and/or
10  * modify it under the terms of the GNU General Public License version 2
11  * as published by the Free Software Foundation; or, when distributed
12  * separately from the Linux kernel or incorporated into other
13  * software packages, subject to the following license:
14  *
15  * Permission is hereby granted, free of charge, to any person obtaining a copy
16  * of this source file (the "Software"), to deal in the Software without
17  * restriction, including without limitation the rights to use, copy, modify,
18  * merge, publish, distribute, sublicense, and/or sell copies of the Software,
19  * and to permit persons to whom the Software is furnished to do so, subject to
20  * the following conditions:
21  *
22  * The above copyright notice and this permission notice shall be included in
23  * all copies or substantial portions of the Software.
24  *
25  * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
26  * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
27  * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
28  * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
29  * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
30  * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
31  * IN THE SOFTWARE.
32  */
33 
34 #define pr_fmt(fmt) "xen:" KBUILD_MODNAME ": " fmt
35 
36 #include <linux/memblock.h>
37 #include <linux/sched.h>
38 #include <linux/mm.h>
39 #include <linux/slab.h>
40 #include <linux/vmalloc.h>
41 #include <linux/uaccess.h>
42 #include <linux/io.h>
43 #include <linux/delay.h>
44 #include <linux/hardirq.h>
45 #include <linux/workqueue.h>
46 #include <linux/ratelimit.h>
47 #include <linux/moduleparam.h>
48 #ifdef CONFIG_XEN_GRANT_DMA_ALLOC
49 #include <linux/dma-mapping.h>
50 #endif
51 
52 #include <xen/xen.h>
53 #include <xen/interface/xen.h>
54 #include <xen/page.h>
55 #include <xen/grant_table.h>
56 #include <xen/interface/memory.h>
57 #include <xen/hvc-console.h>
58 #include <xen/swiotlb-xen.h>
59 #include <xen/balloon.h>
60 #ifdef CONFIG_X86
61 #include <asm/xen/cpuid.h>
62 #endif
63 #include <xen/mem-reservation.h>
64 #include <asm/xen/hypercall.h>
65 #include <asm/xen/interface.h>
66 
67 #include <asm/pgtable.h>
68 #include <asm/sync_bitops.h>
69 
70 /* External tools reserve first few grant table entries. */
71 #define NR_RESERVED_ENTRIES 8
72 #define GNTTAB_LIST_END 0xffffffff
73 
74 static grant_ref_t **gnttab_list;
75 static unsigned int nr_grant_frames;
76 static int gnttab_free_count;
77 static grant_ref_t gnttab_free_head;
78 static DEFINE_SPINLOCK(gnttab_list_lock);
79 struct grant_frames xen_auto_xlat_grant_frames;
80 static unsigned int xen_gnttab_version;
81 module_param_named(version, xen_gnttab_version, uint, 0);
82 
83 static union {
84 	struct grant_entry_v1 *v1;
85 	union grant_entry_v2 *v2;
86 	void *addr;
87 } gnttab_shared;
88 
89 /* This is a structure of function pointers for the grant table. */
90 struct gnttab_ops {
91 	/*
92 	 * Version of the grant interface.
93 	 */
94 	unsigned int version;
95 	/*
96 	 * Grant refs per grant frame.
97 	 */
98 	unsigned int grefs_per_grant_frame;
99 	/*
100 	 * Map a list of frames for storing grant entries. The frames
101 	 * parameter holds the addresses of the grant table frames to map,
102 	 * and nr_gframes is the number of frames to map. Returns GNTST_okay
103 	 * on success and a negative value on failure.
104 	 */
105 	int (*map_frames)(xen_pfn_t *frames, unsigned int nr_gframes);
106 	/*
107 	 * Unmap the list of frames that was mapped by map_frames (for v2
108 	 * this includes the grant status frames).
109 	 */
110 	void (*unmap_frames)(void);
111 	/*
112 	 * Introduce a valid entry into the grant table, granting the frame
113 	 * of this entry to a domain for access or transfer. The ref
114 	 * parameter is the reference of the new entry, domid is the id of
115 	 * the grantee domain, frame is the page frame to be granted, and
116 	 * flags holds the flags to set on the entry.
117 	 */
118 	void (*update_entry)(grant_ref_t ref, domid_t domid,
119 			     unsigned long frame, unsigned flags);
120 	/*
121 	 * Stop granting a domain access to a grant entry. The ref parameter
122 	 * is the reference of the entry whose access will be revoked;
123 	 * readonly is unused by this function. If the grant entry is
124 	 * currently mapped for reading or writing, return failure (== 0)
125 	 * without tearing down the grant access. Otherwise, revoke access
126 	 * to the entry and return success (== 1).
127 	 */
128 	int (*end_foreign_access_ref)(grant_ref_t ref, int readonly);
129 	/*
130 	 * Stop granting a grant entry to a domain for transfer. The ref
131 	 * parameter is the reference of the entry whose transfer will be
132 	 * stopped. If the transfer has not started, reclaim the grant entry
133 	 * and return failure (== 0). Otherwise, wait for the transfer to
134 	 * complete and then return the frame.
135 	 */
136 	unsigned long (*end_foreign_transfer_ref)(grant_ref_t ref);
137 	/*
138 	 * Query the status of a grant entry. The ref parameter is the
139 	 * reference of the queried entry, and the return value is its
140 	 * status. The detailed state (reading/writing) can be extracted
141 	 * from the return value with bit operations.
142 	 */
143 	int (*query_foreign_access)(grant_ref_t ref);
144 };
145 
146 struct unmap_refs_callback_data {
147 	struct completion completion;
148 	int result;
149 };
150 
151 static const struct gnttab_ops *gnttab_interface;
152 
153 /* This reflects the status of grant entries, so it acts as a global value. */
154 static grant_status_t *grstatus;
155 
156 static struct gnttab_free_callback *gnttab_free_callback_list;
157 
158 static int gnttab_expand(unsigned int req_entries);
159 
160 #define RPP (PAGE_SIZE / sizeof(grant_ref_t))
161 #define SPP (PAGE_SIZE / sizeof(grant_status_t))
162 
163 static inline grant_ref_t *__gnttab_entry(grant_ref_t entry)
164 {
165 	return &gnttab_list[(entry) / RPP][(entry) % RPP];
166 }
167 /* This can be used as an l-value */
168 #define gnttab_entry(entry) (*__gnttab_entry(entry))
169 
170 static int get_free_entries(unsigned count)
171 {
172 	unsigned long flags;
173 	int ref, rc = 0;
174 	grant_ref_t head;
175 
176 	spin_lock_irqsave(&gnttab_list_lock, flags);
177 
178 	if ((gnttab_free_count < count) &&
179 	    ((rc = gnttab_expand(count - gnttab_free_count)) < 0)) {
180 		spin_unlock_irqrestore(&gnttab_list_lock, flags);
181 		return rc;
182 	}
183 
184 	ref = head = gnttab_free_head;
185 	gnttab_free_count -= count;
186 	while (count-- > 1)
187 		head = gnttab_entry(head);
188 	gnttab_free_head = gnttab_entry(head);
189 	gnttab_entry(head) = GNTTAB_LIST_END;
190 
191 	spin_unlock_irqrestore(&gnttab_list_lock, flags);
192 
193 	return ref;
194 }
195 
196 static void do_free_callbacks(void)
197 {
198 	struct gnttab_free_callback *callback, *next;
199 
200 	callback = gnttab_free_callback_list;
201 	gnttab_free_callback_list = NULL;
202 
203 	while (callback != NULL) {
204 		next = callback->next;
205 		if (gnttab_free_count >= callback->count) {
206 			callback->next = NULL;
207 			callback->fn(callback->arg);
208 		} else {
209 			callback->next = gnttab_free_callback_list;
210 			gnttab_free_callback_list = callback;
211 		}
212 		callback = next;
213 	}
214 }
215 
216 static inline void check_free_callbacks(void)
217 {
218 	if (unlikely(gnttab_free_callback_list))
219 		do_free_callbacks();
220 }
221 
222 static void put_free_entry(grant_ref_t ref)
223 {
224 	unsigned long flags;
225 	spin_lock_irqsave(&gnttab_list_lock, flags);
226 	gnttab_entry(ref) = gnttab_free_head;
227 	gnttab_free_head = ref;
228 	gnttab_free_count++;
229 	check_free_callbacks();
230 	spin_unlock_irqrestore(&gnttab_list_lock, flags);
231 }
232 
233 /*
234  * The following applies to gnttab_update_entry_v1 and gnttab_update_entry_v2.
235  * Introducing a valid entry into the grant table:
236  *  1. Write ent->domid.
237  *  2. Write ent->frame:
238  *      GTF_permit_access:   Frame to which access is permitted.
239  *      GTF_accept_transfer: Pseudo-phys frame slot being filled by new
240  *                           frame, or zero if none.
241  *  3. Write memory barrier (WMB).
242  *  4. Write ent->flags, inc. valid type.
243  */
244 static void gnttab_update_entry_v1(grant_ref_t ref, domid_t domid,
245 				   unsigned long frame, unsigned flags)
246 {
247 	gnttab_shared.v1[ref].domid = domid;
248 	gnttab_shared.v1[ref].frame = frame;
249 	wmb();
250 	gnttab_shared.v1[ref].flags = flags;
251 }
252 
253 static void gnttab_update_entry_v2(grant_ref_t ref, domid_t domid,
254 				   unsigned long frame, unsigned int flags)
255 {
256 	gnttab_shared.v2[ref].hdr.domid = domid;
257 	gnttab_shared.v2[ref].full_page.frame = frame;
258 	wmb();	/* Hypervisor concurrent accesses. */
259 	gnttab_shared.v2[ref].hdr.flags = GTF_permit_access | flags;
260 }
261 
262 /*
263  * Public grant-issuing interface functions
264  */
265 void gnttab_grant_foreign_access_ref(grant_ref_t ref, domid_t domid,
266 				     unsigned long frame, int readonly)
267 {
268 	gnttab_interface->update_entry(ref, domid, frame,
269 			   GTF_permit_access | (readonly ? GTF_readonly : 0));
270 }
271 EXPORT_SYMBOL_GPL(gnttab_grant_foreign_access_ref);
272 
273 int gnttab_grant_foreign_access(domid_t domid, unsigned long frame,
274 				int readonly)
275 {
276 	int ref;
277 
278 	ref = get_free_entries(1);
279 	if (unlikely(ref < 0))
280 		return -ENOSPC;
281 
282 	gnttab_grant_foreign_access_ref(ref, domid, frame, readonly);
283 
284 	return ref;
285 }
286 EXPORT_SYMBOL_GPL(gnttab_grant_foreign_access);
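287 
/*
 * A minimal usage sketch (hypothetical frontend code, not part of this
 * file): share one page with a backend domain and revoke the grant when
 * done. backend_domid and the xenstore handshake are assumptions of the
 * example.
 *
 *	unsigned long vaddr = get_zeroed_page(GFP_KERNEL);
 *	int ref = gnttab_grant_foreign_access(backend_domid,
 *					      virt_to_gfn((void *)vaddr), 0);
 *	if (ref < 0)
 *		return ref;	/- -ENOSPC: no free entries -/
 *	... advertise ref to the backend, e.g. via xenstore ...
 *	gnttab_end_foreign_access(ref, 0, vaddr);
 *
 * gnttab_end_foreign_access() drops the page reference (possibly
 * deferred, see below), so the page must not be reused afterwards.
 */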
287 
288 static int gnttab_query_foreign_access_v1(grant_ref_t ref)
289 {
290 	return gnttab_shared.v1[ref].flags & (GTF_reading|GTF_writing);
291 }
292 
293 static int gnttab_query_foreign_access_v2(grant_ref_t ref)
294 {
295 	return grstatus[ref] & (GTF_reading|GTF_writing);
296 }
297 
298 int gnttab_query_foreign_access(grant_ref_t ref)
299 {
300 	return gnttab_interface->query_foreign_access(ref);
301 }
302 EXPORT_SYMBOL_GPL(gnttab_query_foreign_access);
303 
304 static int gnttab_end_foreign_access_ref_v1(grant_ref_t ref, int readonly)
305 {
306 	u16 flags, nflags;
307 	u16 *pflags;
308 
309 	pflags = &gnttab_shared.v1[ref].flags;
310 	nflags = *pflags;
311 	do {
312 		flags = nflags;
313 		if (flags & (GTF_reading|GTF_writing))
314 			return 0;
315 	} while ((nflags = sync_cmpxchg(pflags, flags, 0)) != flags);
316 
317 	return 1;
318 }
319 
320 static int gnttab_end_foreign_access_ref_v2(grant_ref_t ref, int readonly)
321 {
322 	gnttab_shared.v2[ref].hdr.flags = 0;
323 	mb();	/* Concurrent access by hypervisor. */
324 	if (grstatus[ref] & (GTF_reading|GTF_writing)) {
325 		return 0;
326 	} else {
327 		/*
328 		 * The read of grstatus needs to have acquire semantics.
329 		 * On x86, reads already have that, and we just need to
330 		 * protect against compiler reorderings.
331 		 * On other architectures we may need a full barrier.
332 		 */
333 #ifdef CONFIG_X86
334 		barrier();
335 #else
336 		mb();
337 #endif
338 	}
339 
340 	return 1;
341 }
342 
343 static inline int _gnttab_end_foreign_access_ref(grant_ref_t ref, int readonly)
344 {
345 	return gnttab_interface->end_foreign_access_ref(ref, readonly);
346 }
347 
348 int gnttab_end_foreign_access_ref(grant_ref_t ref, int readonly)
349 {
350 	if (_gnttab_end_foreign_access_ref(ref, readonly))
351 		return 1;
352 	pr_warn("g.e. %#x still in use!\n", ref);
353 	return 0;
354 }
355 EXPORT_SYMBOL_GPL(gnttab_end_foreign_access_ref);
356 
357 struct deferred_entry {
358 	struct list_head list;
359 	grant_ref_t ref;
360 	bool ro;
361 	uint16_t warn_delay;
362 	struct page *page;
363 };
364 static LIST_HEAD(deferred_list);
365 static void gnttab_handle_deferred(struct timer_list *);
366 static DEFINE_TIMER(deferred_timer, gnttab_handle_deferred);
367 
368 static void gnttab_handle_deferred(struct timer_list *unused)
369 {
370 	unsigned int nr = 10;
371 	struct deferred_entry *first = NULL;
372 	unsigned long flags;
373 
374 	spin_lock_irqsave(&gnttab_list_lock, flags);
375 	while (nr--) {
376 		struct deferred_entry *entry
377 			= list_first_entry(&deferred_list,
378 					   struct deferred_entry, list);
379 
380 		if (entry == first)
381 			break;
382 		list_del(&entry->list);
383 		spin_unlock_irqrestore(&gnttab_list_lock, flags);
384 		if (_gnttab_end_foreign_access_ref(entry->ref, entry->ro)) {
385 			put_free_entry(entry->ref);
386 			if (entry->page) {
387 				pr_debug("freeing g.e. %#x (pfn %#lx)\n",
388 					 entry->ref, page_to_pfn(entry->page));
389 				put_page(entry->page);
390 			} else
391 				pr_info("freeing g.e. %#x\n", entry->ref);
392 			kfree(entry);
393 			entry = NULL;
394 		} else {
395 			if (!--entry->warn_delay)
396 				pr_info("g.e. %#x still pending\n", entry->ref);
397 			if (!first)
398 				first = entry;
399 		}
400 		spin_lock_irqsave(&gnttab_list_lock, flags);
401 		if (entry)
402 			list_add_tail(&entry->list, &deferred_list);
403 		else if (list_empty(&deferred_list))
404 			break;
405 	}
406 	if (!list_empty(&deferred_list) && !timer_pending(&deferred_timer)) {
407 		deferred_timer.expires = jiffies + HZ;
408 		add_timer(&deferred_timer);
409 	}
410 	spin_unlock_irqrestore(&gnttab_list_lock, flags);
411 }
412 
413 static void gnttab_add_deferred(grant_ref_t ref, bool readonly,
414 				struct page *page)
415 {
416 	struct deferred_entry *entry = kmalloc(sizeof(*entry), GFP_ATOMIC);
417 	const char *what = KERN_WARNING "leaking";
418 
419 	if (entry) {
420 		unsigned long flags;
421 
422 		entry->ref = ref;
423 		entry->ro = readonly;
424 		entry->page = page;
425 		entry->warn_delay = 60;
426 		spin_lock_irqsave(&gnttab_list_lock, flags);
427 		list_add_tail(&entry->list, &deferred_list);
428 		if (!timer_pending(&deferred_timer)) {
429 			deferred_timer.expires = jiffies + HZ;
430 			add_timer(&deferred_timer);
431 		}
432 		spin_unlock_irqrestore(&gnttab_list_lock, flags);
433 		what = KERN_DEBUG "deferring";
434 	}
435 	printk("%s g.e. %#x (pfn %#lx)\n",
436 	       what, ref, page ? page_to_pfn(page) : -1);
437 }
438 
439 void gnttab_end_foreign_access(grant_ref_t ref, int readonly,
440 			       unsigned long page)
441 {
442 	if (gnttab_end_foreign_access_ref(ref, readonly)) {
443 		put_free_entry(ref);
444 		if (page != 0)
445 			put_page(virt_to_page(page));
446 	} else
447 		gnttab_add_deferred(ref, readonly,
448 				    page ? virt_to_page(page) : NULL);
449 }
450 EXPORT_SYMBOL_GPL(gnttab_end_foreign_access);
451 
452 int gnttab_grant_foreign_transfer(domid_t domid, unsigned long pfn)
453 {
454 	int ref;
455 
456 	ref = get_free_entries(1);
457 	if (unlikely(ref < 0))
458 		return -ENOSPC;
459 	gnttab_grant_foreign_transfer_ref(ref, domid, pfn);
460 
461 	return ref;
462 }
463 EXPORT_SYMBOL_GPL(gnttab_grant_foreign_transfer);
464 
465 void gnttab_grant_foreign_transfer_ref(grant_ref_t ref, domid_t domid,
466 				       unsigned long pfn)
467 {
468 	gnttab_interface->update_entry(ref, domid, pfn, GTF_accept_transfer);
469 }
470 EXPORT_SYMBOL_GPL(gnttab_grant_foreign_transfer_ref);
471 
472 static unsigned long gnttab_end_foreign_transfer_ref_v1(grant_ref_t ref)
473 {
474 	unsigned long frame;
475 	u16           flags;
476 	u16          *pflags;
477 
478 	pflags = &gnttab_shared.v1[ref].flags;
479 
480 	/*
481 	 * If the transfer has not even started yet, try to reclaim the
482 	 * grant reference and return failure (== 0).
483 	 */
484 	while (!((flags = *pflags) & GTF_transfer_committed)) {
485 		if (sync_cmpxchg(pflags, flags, 0) == flags)
486 			return 0;
487 		cpu_relax();
488 	}
489 
490 	/* If a transfer is in progress then wait until it is completed. */
491 	while (!(flags & GTF_transfer_completed)) {
492 		flags = *pflags;
493 		cpu_relax();
494 	}
495 
496 	rmb();	/* Read the frame number /after/ reading completion status. */
497 	frame = gnttab_shared.v1[ref].frame;
498 	BUG_ON(frame == 0);
499 
500 	return frame;
501 }
502 
503 static unsigned long gnttab_end_foreign_transfer_ref_v2(grant_ref_t ref)
504 {
505 	unsigned long frame;
506 	u16           flags;
507 	u16          *pflags;
508 
509 	pflags = &gnttab_shared.v2[ref].hdr.flags;
510 
511 	/*
512 	 * If the transfer has not even started yet, try to reclaim the
513 	 * grant reference and return failure (== 0).
514 	 */
515 	while (!((flags = *pflags) & GTF_transfer_committed)) {
516 		if (sync_cmpxchg(pflags, flags, 0) == flags)
517 			return 0;
518 		cpu_relax();
519 	}
520 
521 	/* If a transfer is in progress then wait until it is completed. */
522 	while (!(flags & GTF_transfer_completed)) {
523 		flags = *pflags;
524 		cpu_relax();
525 	}
526 
527 	rmb();  /* Read the frame number /after/ reading completion status. */
528 	frame = gnttab_shared.v2[ref].full_page.frame;
529 	BUG_ON(frame == 0);
530 
531 	return frame;
532 }
533 
534 unsigned long gnttab_end_foreign_transfer_ref(grant_ref_t ref)
535 {
536 	return gnttab_interface->end_foreign_transfer_ref(ref);
537 }
538 EXPORT_SYMBOL_GPL(gnttab_end_foreign_transfer_ref);
539 
540 unsigned long gnttab_end_foreign_transfer(grant_ref_t ref)
541 {
542 	unsigned long frame = gnttab_end_foreign_transfer_ref(ref);
543 	put_free_entry(ref);
544 	return frame;
545 }
546 EXPORT_SYMBOL_GPL(gnttab_end_foreign_transfer);
547 
548 void gnttab_free_grant_reference(grant_ref_t ref)
549 {
550 	put_free_entry(ref);
551 }
552 EXPORT_SYMBOL_GPL(gnttab_free_grant_reference);
553 
554 void gnttab_free_grant_references(grant_ref_t head)
555 {
556 	grant_ref_t ref;
557 	unsigned long flags;
558 	int count = 1;
559 	if (head == GNTTAB_LIST_END)
560 		return;
561 	spin_lock_irqsave(&gnttab_list_lock, flags);
562 	ref = head;
563 	while (gnttab_entry(ref) != GNTTAB_LIST_END) {
564 		ref = gnttab_entry(ref);
565 		count++;
566 	}
567 	gnttab_entry(ref) = gnttab_free_head;
568 	gnttab_free_head = head;
569 	gnttab_free_count += count;
570 	check_free_callbacks();
571 	spin_unlock_irqrestore(&gnttab_list_lock, flags);
572 }
573 EXPORT_SYMBOL_GPL(gnttab_free_grant_references);
574 
575 int gnttab_alloc_grant_references(u16 count, grant_ref_t *head)
576 {
577 	int h = get_free_entries(count);
578 
579 	if (h < 0)
580 		return -ENOSPC;
581 
582 	*head = h;
583 
584 	return 0;
585 }
586 EXPORT_SYMBOL_GPL(gnttab_alloc_grant_references);
587 
588 int gnttab_empty_grant_references(const grant_ref_t *private_head)
589 {
590 	return (*private_head == GNTTAB_LIST_END);
591 }
592 EXPORT_SYMBOL_GPL(gnttab_empty_grant_references);
593 
594 int gnttab_claim_grant_reference(grant_ref_t *private_head)
595 {
596 	grant_ref_t g = *private_head;
597 	if (unlikely(g == GNTTAB_LIST_END))
598 		return -ENOSPC;
599 	*private_head = gnttab_entry(g);
600 	return g;
601 }
602 EXPORT_SYMBOL_GPL(gnttab_claim_grant_reference);
603 
604 void gnttab_release_grant_reference(grant_ref_t *private_head,
605 				    grant_ref_t release)
606 {
607 	gnttab_entry(release) = *private_head;
608 	*private_head = release;
609 }
610 EXPORT_SYMBOL_GPL(gnttab_release_grant_reference);
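611 
/*
 * A minimal usage sketch (hypothetical caller): reserve a batch of
 * references up front and hand them out one at a time, as the block and
 * net frontends do. nr, domid, gfn and readonly are assumptions of the
 * example.
 *
 *	grant_ref_t head;
 *	int ref;
 *
 *	if (gnttab_alloc_grant_references(nr, &head) < 0)
 *		return -ENOSPC;
 *	ref = gnttab_claim_grant_reference(&head);
 *	if (ref >= 0)
 *		gnttab_grant_foreign_access_ref(ref, domid, gfn, readonly);
 *	...
 *	gnttab_free_grant_references(head);
 */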
611 
612 void gnttab_request_free_callback(struct gnttab_free_callback *callback,
613 				  void (*fn)(void *), void *arg, u16 count)
614 {
615 	unsigned long flags;
616 	struct gnttab_free_callback *cb;
617 
618 	spin_lock_irqsave(&gnttab_list_lock, flags);
619 
620 	/* Check if the callback is already on the list */
621 	cb = gnttab_free_callback_list;
622 	while (cb) {
623 		if (cb == callback)
624 			goto out;
625 		cb = cb->next;
626 	}
627 
628 	callback->fn = fn;
629 	callback->arg = arg;
630 	callback->count = count;
631 	callback->next = gnttab_free_callback_list;
632 	gnttab_free_callback_list = callback;
633 	check_free_callbacks();
634 out:
635 	spin_unlock_irqrestore(&gnttab_list_lock, flags);
636 }
637 EXPORT_SYMBOL_GPL(gnttab_request_free_callback);
638 
639 void gnttab_cancel_free_callback(struct gnttab_free_callback *callback)
640 {
641 	struct gnttab_free_callback **pcb;
642 	unsigned long flags;
643 
644 	spin_lock_irqsave(&gnttab_list_lock, flags);
645 	for (pcb = &gnttab_free_callback_list; *pcb; pcb = &(*pcb)->next) {
646 		if (*pcb == callback) {
647 			*pcb = callback->next;
648 			break;
649 		}
650 	}
651 	spin_unlock_irqrestore(&gnttab_list_lock, flags);
652 }
653 EXPORT_SYMBOL_GPL(gnttab_cancel_free_callback);
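654 
/*
 * A minimal usage sketch (hypothetical caller): when an allocation
 * fails, ask to be called back once enough references have been freed.
 * Note that the callback is invoked with gnttab_list_lock held, so it
 * must not sleep. my_tx_resume and its argument are assumptions of the
 * example.
 *
 *	static struct gnttab_free_callback cb;
 *
 *	if (gnttab_alloc_grant_references(nr, &head) < 0)
 *		gnttab_request_free_callback(&cb, my_tx_resume, dev, nr);
 */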
654 
655 static unsigned int gnttab_frames(unsigned int frames, unsigned int align)
656 {
657 	return (frames * gnttab_interface->grefs_per_grant_frame + align - 1) /
658 	       align;
659 }
660 
661 static int grow_gnttab_list(unsigned int more_frames)
662 {
663 	unsigned int new_nr_grant_frames, extra_entries, i;
664 	unsigned int nr_glist_frames, new_nr_glist_frames;
665 	unsigned int grefs_per_frame;
666 
667 	grefs_per_frame = gnttab_interface->grefs_per_grant_frame;
668 
669 	new_nr_grant_frames = nr_grant_frames + more_frames;
670 	extra_entries = more_frames * grefs_per_frame;
671 
672 	nr_glist_frames = gnttab_frames(nr_grant_frames, RPP);
673 	new_nr_glist_frames = gnttab_frames(new_nr_grant_frames, RPP);
674 	for (i = nr_glist_frames; i < new_nr_glist_frames; i++) {
675 		gnttab_list[i] = (grant_ref_t *)__get_free_page(GFP_ATOMIC);
676 		if (!gnttab_list[i])
677 			goto grow_nomem;
678 	}
679 
680 
681 	for (i = grefs_per_frame * nr_grant_frames;
682 	     i < grefs_per_frame * new_nr_grant_frames - 1; i++)
683 		gnttab_entry(i) = i + 1;
684 
685 	gnttab_entry(i) = gnttab_free_head;
686 	gnttab_free_head = grefs_per_frame * nr_grant_frames;
687 	gnttab_free_count += extra_entries;
688 
689 	nr_grant_frames = new_nr_grant_frames;
690 
691 	check_free_callbacks();
692 
693 	return 0;
694 
695 grow_nomem:
696 	while (i-- > nr_glist_frames)
697 		free_page((unsigned long) gnttab_list[i]);
698 	return -ENOMEM;
699 }
700 
701 static unsigned int __max_nr_grant_frames(void)
702 {
703 	struct gnttab_query_size query;
704 	int rc;
705 
706 	query.dom = DOMID_SELF;
707 
708 	rc = HYPERVISOR_grant_table_op(GNTTABOP_query_size, &query, 1);
709 	if ((rc < 0) || (query.status != GNTST_okay))
710 		return 4; /* Legacy max supported number of frames */
711 
712 	return query.max_nr_frames;
713 }
714 
715 unsigned int gnttab_max_grant_frames(void)
716 {
717 	unsigned int xen_max = __max_nr_grant_frames();
718 	static unsigned int boot_max_nr_grant_frames;
719 
720 	/* First time, initialize it properly. */
721 	if (!boot_max_nr_grant_frames)
722 		boot_max_nr_grant_frames = __max_nr_grant_frames();
723 
724 	if (xen_max > boot_max_nr_grant_frames)
725 		return boot_max_nr_grant_frames;
726 	return xen_max;
727 }
728 EXPORT_SYMBOL_GPL(gnttab_max_grant_frames);
729 
730 int gnttab_setup_auto_xlat_frames(phys_addr_t addr)
731 {
732 	xen_pfn_t *pfn;
733 	unsigned int max_nr_gframes = __max_nr_grant_frames();
734 	unsigned int i;
735 	void *vaddr;
736 
737 	if (xen_auto_xlat_grant_frames.count)
738 		return -EINVAL;
739 
740 	vaddr = xen_remap(addr, XEN_PAGE_SIZE * max_nr_gframes);
741 	if (vaddr == NULL) {
742 		pr_warn("Failed to ioremap gnttab shared frames (addr=%pa)!\n",
743 			&addr);
744 		return -ENOMEM;
745 	}
746 	pfn = kcalloc(max_nr_gframes, sizeof(pfn[0]), GFP_KERNEL);
747 	if (!pfn) {
748 		xen_unmap(vaddr);
749 		return -ENOMEM;
750 	}
751 	for (i = 0; i < max_nr_gframes; i++)
752 		pfn[i] = XEN_PFN_DOWN(addr) + i;
753 
754 	xen_auto_xlat_grant_frames.vaddr = vaddr;
755 	xen_auto_xlat_grant_frames.pfn = pfn;
756 	xen_auto_xlat_grant_frames.count = max_nr_gframes;
757 
758 	return 0;
759 }
760 EXPORT_SYMBOL_GPL(gnttab_setup_auto_xlat_frames);
761 
762 void gnttab_free_auto_xlat_frames(void)
763 {
764 	if (!xen_auto_xlat_grant_frames.count)
765 		return;
766 	kfree(xen_auto_xlat_grant_frames.pfn);
767 	xen_unmap(xen_auto_xlat_grant_frames.vaddr);
768 
769 	xen_auto_xlat_grant_frames.pfn = NULL;
770 	xen_auto_xlat_grant_frames.count = 0;
771 	xen_auto_xlat_grant_frames.vaddr = NULL;
772 }
773 EXPORT_SYMBOL_GPL(gnttab_free_auto_xlat_frames);
774 
775 int gnttab_pages_set_private(int nr_pages, struct page **pages)
776 {
777 	int i;
778 
779 	for (i = 0; i < nr_pages; i++) {
780 #if BITS_PER_LONG < 64
781 		struct xen_page_foreign *foreign;
782 
783 		foreign = kzalloc(sizeof(*foreign), GFP_KERNEL);
784 		if (!foreign)
785 			return -ENOMEM;
786 
787 		set_page_private(pages[i], (unsigned long)foreign);
788 #endif
789 		SetPagePrivate(pages[i]);
790 	}
791 
792 	return 0;
793 }
794 EXPORT_SYMBOL_GPL(gnttab_pages_set_private);
795 
796 /**
797  * gnttab_alloc_pages - alloc pages suitable for grant mapping into
798  * @nr_pages: number of pages to alloc
799  * @pages: returns the pages
800  */
801 int gnttab_alloc_pages(int nr_pages, struct page **pages)
802 {
803 	int ret;
804 
805 	ret = alloc_xenballooned_pages(nr_pages, pages);
806 	if (ret < 0)
807 		return ret;
808 
809 	ret = gnttab_pages_set_private(nr_pages, pages);
810 	if (ret < 0)
811 		gnttab_free_pages(nr_pages, pages);
812 
813 	return ret;
814 }
815 EXPORT_SYMBOL_GPL(gnttab_alloc_pages);
816 
817 void gnttab_pages_clear_private(int nr_pages, struct page **pages)
818 {
819 	int i;
820 
821 	for (i = 0; i < nr_pages; i++) {
822 		if (PagePrivate(pages[i])) {
823 #if BITS_PER_LONG < 64
824 			kfree((void *)page_private(pages[i]));
825 #endif
826 			ClearPagePrivate(pages[i]);
827 		}
828 	}
829 }
830 EXPORT_SYMBOL_GPL(gnttab_pages_clear_private);
831 
832 /**
833  * gnttab_free_pages - free pages allocated by gnttab_alloc_pages()
834  * @nr_pages: number of pages to free
835  * @pages: the pages
836  */
837 void gnttab_free_pages(int nr_pages, struct page **pages)
838 {
839 	gnttab_pages_clear_private(nr_pages, pages);
840 	free_xenballooned_pages(nr_pages, pages);
841 }
842 EXPORT_SYMBOL_GPL(gnttab_free_pages);
843 
844 #ifdef CONFIG_XEN_GRANT_DMA_ALLOC
845 /**
846  * gnttab_dma_alloc_pages - alloc DMAable pages suitable for grant mapping into
847  * @args: arguments to the function
848  */
849 int gnttab_dma_alloc_pages(struct gnttab_dma_alloc_args *args)
850 {
851 	unsigned long pfn, start_pfn;
852 	size_t size;
853 	int i, ret;
854 
855 	size = args->nr_pages << PAGE_SHIFT;
856 	if (args->coherent)
857 		args->vaddr = dma_alloc_coherent(args->dev, size,
858 						 &args->dev_bus_addr,
859 						 GFP_KERNEL | __GFP_NOWARN);
860 	else
861 		args->vaddr = dma_alloc_wc(args->dev, size,
862 					   &args->dev_bus_addr,
863 					   GFP_KERNEL | __GFP_NOWARN);
864 	if (!args->vaddr) {
865 		pr_debug("Failed to allocate DMA buffer of size %zu\n", size);
866 		return -ENOMEM;
867 	}
868 
869 	start_pfn = __phys_to_pfn(args->dev_bus_addr);
870 	for (pfn = start_pfn, i = 0; pfn < start_pfn + args->nr_pages;
871 			pfn++, i++) {
872 		struct page *page = pfn_to_page(pfn);
873 
874 		args->pages[i] = page;
875 		args->frames[i] = xen_page_to_gfn(page);
876 		xenmem_reservation_scrub_page(page);
877 	}
878 
879 	xenmem_reservation_va_mapping_reset(args->nr_pages, args->pages);
880 
881 	ret = xenmem_reservation_decrease(args->nr_pages, args->frames);
882 	if (ret != args->nr_pages) {
883 		pr_debug("Failed to decrease reservation for DMA buffer\n");
884 		ret = -EFAULT;
885 		goto fail;
886 	}
887 
888 	ret = gnttab_pages_set_private(args->nr_pages, args->pages);
889 	if (ret < 0)
890 		goto fail;
891 
892 	return 0;
893 
894 fail:
895 	gnttab_dma_free_pages(args);
896 	return ret;
897 }
898 EXPORT_SYMBOL_GPL(gnttab_dma_alloc_pages);
899 
900 /**
901  * gnttab_dma_free_pages - free DMAable pages
902  * @args: arguments to the function
903  */
904 int gnttab_dma_free_pages(struct gnttab_dma_alloc_args *args)
905 {
906 	size_t size;
907 	int i, ret;
908 
909 	gnttab_pages_clear_private(args->nr_pages, args->pages);
910 
911 	for (i = 0; i < args->nr_pages; i++)
912 		args->frames[i] = page_to_xen_pfn(args->pages[i]);
913 
914 	ret = xenmem_reservation_increase(args->nr_pages, args->frames);
915 	if (ret != args->nr_pages) {
916 		pr_debug("Failed to increase reservation for DMA buffer\n");
917 		ret = -EFAULT;
918 	} else {
919 		ret = 0;
920 	}
921 
922 	xenmem_reservation_va_mapping_update(args->nr_pages, args->pages,
923 					     args->frames);
924 
925 	size = args->nr_pages << PAGE_SHIFT;
926 	if (args->coherent)
927 		dma_free_coherent(args->dev, size,
928 				  args->vaddr, args->dev_bus_addr);
929 	else
930 		dma_free_wc(args->dev, size,
931 			    args->vaddr, args->dev_bus_addr);
932 	return ret;
933 }
934 EXPORT_SYMBOL_GPL(gnttab_dma_free_pages);
935 #endif
936 
937 /* Handling of paged out grant targets (GNTST_eagain) */
938 #define MAX_DELAY 256
939 static inline void
940 gnttab_retry_eagain_gop(unsigned int cmd, void *gop, int16_t *status,
941 						const char *func)
942 {
943 	unsigned delay = 1;
944 
945 	do {
946 		BUG_ON(HYPERVISOR_grant_table_op(cmd, gop, 1));
947 		if (*status == GNTST_eagain)
948 			msleep(delay++);
949 	} while ((*status == GNTST_eagain) && (delay < MAX_DELAY));
950 
951 	if (delay >= MAX_DELAY) {
952 		pr_err("%s: %s eagain grant\n", func, current->comm);
953 		*status = GNTST_bad_page;
954 	}
955 }
956 
957 void gnttab_batch_map(struct gnttab_map_grant_ref *batch, unsigned count)
958 {
959 	struct gnttab_map_grant_ref *op;
960 
961 	if (HYPERVISOR_grant_table_op(GNTTABOP_map_grant_ref, batch, count))
962 		BUG();
963 	for (op = batch; op < batch + count; op++)
964 		if (op->status == GNTST_eagain)
965 			gnttab_retry_eagain_gop(GNTTABOP_map_grant_ref, op,
966 						&op->status, __func__);
967 }
968 EXPORT_SYMBOL_GPL(gnttab_batch_map);
969 
970 void gnttab_batch_copy(struct gnttab_copy *batch, unsigned count)
971 {
972 	struct gnttab_copy *op;
973 
974 	if (HYPERVISOR_grant_table_op(GNTTABOP_copy, batch, count))
975 		BUG();
976 	for (op = batch; op < batch + count; op++)
977 		if (op->status == GNTST_eagain)
978 			gnttab_retry_eagain_gop(GNTTABOP_copy, op,
979 						&op->status, __func__);
980 }
981 EXPORT_SYMBOL_GPL(gnttab_batch_copy);
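982 
/*
 * A minimal usage sketch (hypothetical caller): copy data out of a
 * foreign grant into a local page; gnttab_batch_copy() retries
 * GNTST_eagain automatically. remote_ref, remote_domid, local_buf and
 * len are assumptions of the example.
 *
 *	struct gnttab_copy op = {
 *		.source.u.ref = remote_ref,
 *		.source.domid = remote_domid,
 *		.dest.u.gmfn  = virt_to_gfn(local_buf),
 *		.dest.domid   = DOMID_SELF,
 *		.len          = len,
 *		.flags        = GNTCOPY_source_gref,
 *	};
 *
 *	gnttab_batch_copy(&op, 1);
 *	if (op.status != GNTST_okay)
 *		return -EIO;
 */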
982 
983 void gnttab_foreach_grant_in_range(struct page *page,
984 				   unsigned int offset,
985 				   unsigned int len,
986 				   xen_grant_fn_t fn,
987 				   void *data)
988 {
989 	unsigned int goffset;
990 	unsigned int glen;
991 	unsigned long xen_pfn;
992 
993 	len = min_t(unsigned int, PAGE_SIZE - offset, len);
994 	goffset = xen_offset_in_page(offset);
995 
996 	xen_pfn = page_to_xen_pfn(page) + XEN_PFN_DOWN(offset);
997 
998 	while (len) {
999 		glen = min_t(unsigned int, XEN_PAGE_SIZE - goffset, len);
1000 		fn(pfn_to_gfn(xen_pfn), goffset, glen, data);
1001 
1002 		goffset = 0;
1003 		xen_pfn++;
1004 		len -= glen;
1005 	}
1006 }
1007 EXPORT_SYMBOL_GPL(gnttab_foreach_grant_in_range);
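1008 
/*
 * A minimal callback sketch (hypothetical): grant each XEN_PAGE_SIZE
 * chunk of a buffer, which is how frontends split requests when
 * PAGE_SIZE is larger than XEN_PAGE_SIZE. struct setup_info and its
 * fields are assumptions of the example.
 *
 *	static void grant_one_chunk(unsigned long gfn, unsigned int offset,
 *				    unsigned int len, void *data)
 *	{
 *		struct setup_info *info = data;
 *
 *		info->refs[info->nr++] =
 *			gnttab_grant_foreign_access(info->domid, gfn, 0);
 *	}
 *
 *	gnttab_foreach_grant_in_range(page, offset, len, grant_one_chunk,
 *				      &info);
 */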
1008 
1009 void gnttab_foreach_grant(struct page **pages,
1010 			  unsigned int nr_grefs,
1011 			  xen_grant_fn_t fn,
1012 			  void *data)
1013 {
1014 	unsigned int goffset = 0;
1015 	unsigned long xen_pfn = 0;
1016 	unsigned int i;
1017 
1018 	for (i = 0; i < nr_grefs; i++) {
1019 		if ((i % XEN_PFN_PER_PAGE) == 0) {
1020 			xen_pfn = page_to_xen_pfn(pages[i / XEN_PFN_PER_PAGE]);
1021 			goffset = 0;
1022 		}
1023 
1024 		fn(pfn_to_gfn(xen_pfn), goffset, XEN_PAGE_SIZE, data);
1025 
1026 		goffset += XEN_PAGE_SIZE;
1027 		xen_pfn++;
1028 	}
1029 }
1030 
1031 int gnttab_map_refs(struct gnttab_map_grant_ref *map_ops,
1032 		    struct gnttab_map_grant_ref *kmap_ops,
1033 		    struct page **pages, unsigned int count)
1034 {
1035 	int i, ret;
1036 
1037 	ret = HYPERVISOR_grant_table_op(GNTTABOP_map_grant_ref, map_ops, count);
1038 	if (ret)
1039 		return ret;
1040 
1041 	for (i = 0; i < count; i++) {
1042 		switch (map_ops[i].status) {
1043 		case GNTST_okay:
1044 		{
1045 			struct xen_page_foreign *foreign;
1046 
1047 			SetPageForeign(pages[i]);
1048 			foreign = xen_page_foreign(pages[i]);
1049 			foreign->domid = map_ops[i].dom;
1050 			foreign->gref = map_ops[i].ref;
1051 			break;
1052 		}
1053 
1054 		case GNTST_no_device_space:
1055 			pr_warn_ratelimited("maptrack limit reached, can't map all guest pages\n");
1056 			break;
1057 
1058 		case GNTST_eagain:
1059 			/* Retry eagain maps */
1060 			gnttab_retry_eagain_gop(GNTTABOP_map_grant_ref,
1061 						map_ops + i,
1062 						&map_ops[i].status, __func__);
1063 			/* Test status in next loop iteration. */
1064 			i--;
1065 			break;
1066 
1067 		default:
1068 			break;
1069 		}
1070 	}
1071 
1072 	return set_foreign_p2m_mapping(map_ops, kmap_ops, pages, count);
1073 }
1074 EXPORT_SYMBOL_GPL(gnttab_map_refs);
1075 
1076 int gnttab_unmap_refs(struct gnttab_unmap_grant_ref *unmap_ops,
1077 		      struct gnttab_unmap_grant_ref *kunmap_ops,
1078 		      struct page **pages, unsigned int count)
1079 {
1080 	unsigned int i;
1081 	int ret;
1082 
1083 	ret = HYPERVISOR_grant_table_op(GNTTABOP_unmap_grant_ref, unmap_ops, count);
1084 	if (ret)
1085 		return ret;
1086 
1087 	for (i = 0; i < count; i++)
1088 		ClearPageForeign(pages[i]);
1089 
1090 	return clear_foreign_p2m_mapping(unmap_ops, kunmap_ops, pages, count);
1091 }
1092 EXPORT_SYMBOL_GPL(gnttab_unmap_refs);
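1093 
/*
 * A minimal usage sketch (hypothetical backend code): map a single
 * foreign grant into a ballooned page and unmap it again. otherend_domid
 * and ref are assumptions of the example.
 *
 *	struct gnttab_map_grant_ref map;
 *	struct gnttab_unmap_grant_ref unmap;
 *	struct page *page;
 *	unsigned long addr;
 *
 *	gnttab_alloc_pages(1, &page);
 *	addr = (unsigned long)pfn_to_kaddr(page_to_pfn(page));
 *	gnttab_set_map_op(&map, addr, GNTMAP_host_map, ref, otherend_domid);
 *	gnttab_map_refs(&map, NULL, &page, 1);
 *	if (map.status == GNTST_okay) {
 *		... use the mapping ...
 *		gnttab_set_unmap_op(&unmap, addr, GNTMAP_host_map,
 *				    map.handle);
 *		gnttab_unmap_refs(&unmap, NULL, &page, 1);
 *	}
 *	gnttab_free_pages(1, &page);
 */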
1093 
1094 #define GNTTAB_UNMAP_REFS_DELAY 5
1095 
1096 static void __gnttab_unmap_refs_async(struct gntab_unmap_queue_data *item);
1097 
1098 static void gnttab_unmap_work(struct work_struct *work)
1099 {
1100 	struct gntab_unmap_queue_data
1101 		*unmap_data = container_of(work,
1102 					   struct gntab_unmap_queue_data,
1103 					   gnttab_work.work);
1104 	if (unmap_data->age != UINT_MAX)
1105 		unmap_data->age++;
1106 	__gnttab_unmap_refs_async(unmap_data);
1107 }
1108 
1109 static void __gnttab_unmap_refs_async(struct gntab_unmap_queue_data *item)
1110 {
1111 	int ret;
1112 	int pc;
1113 
1114 	for (pc = 0; pc < item->count; pc++) {
1115 		if (page_count(item->pages[pc]) > 1) {
1116 			unsigned long delay = GNTTAB_UNMAP_REFS_DELAY * (item->age + 1);
1117 			schedule_delayed_work(&item->gnttab_work,
1118 					      msecs_to_jiffies(delay));
1119 			return;
1120 		}
1121 	}
1122 
1123 	ret = gnttab_unmap_refs(item->unmap_ops, item->kunmap_ops,
1124 				item->pages, item->count);
1125 	item->done(ret, item);
1126 }
1127 
1128 void gnttab_unmap_refs_async(struct gntab_unmap_queue_data *item)
1129 {
1130 	INIT_DELAYED_WORK(&item->gnttab_work, gnttab_unmap_work);
1131 	item->age = 0;
1132 
1133 	__gnttab_unmap_refs_async(item);
1134 }
1135 EXPORT_SYMBOL_GPL(gnttab_unmap_refs_async);
1136 
1137 static void unmap_refs_callback(int result,
1138 		struct gntab_unmap_queue_data *data)
1139 {
1140 	struct unmap_refs_callback_data *d = data->data;
1141 
1142 	d->result = result;
1143 	complete(&d->completion);
1144 }
1145 
1146 int gnttab_unmap_refs_sync(struct gntab_unmap_queue_data *item)
1147 {
1148 	struct unmap_refs_callback_data data;
1149 
1150 	init_completion(&data.completion);
1151 	item->data = &data;
1152 	item->done = &unmap_refs_callback;
1153 	gnttab_unmap_refs_async(item);
1154 	wait_for_completion(&data.completion);
1155 
1156 	return data.result;
1157 }
1158 EXPORT_SYMBOL_GPL(gnttab_unmap_refs_sync);
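1159 
/*
 * A minimal usage sketch (hypothetical caller, modelled on gntdev):
 * queue the unmap and sleep until the pages are no longer referenced.
 * unmap_ops, pages and count are assumptions of the example.
 *
 *	struct gntab_unmap_queue_data unmap_data;
 *
 *	unmap_data.unmap_ops = unmap_ops;
 *	unmap_data.kunmap_ops = NULL;
 *	unmap_data.pages = pages;
 *	unmap_data.count = count;
 *	ret = gnttab_unmap_refs_sync(&unmap_data);
 */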
1159 
1160 static unsigned int nr_status_frames(unsigned int nr_grant_frames)
1161 {
1162 	return gnttab_frames(nr_grant_frames, SPP);
1163 }
1164 
1165 static int gnttab_map_frames_v1(xen_pfn_t *frames, unsigned int nr_gframes)
1166 {
1167 	int rc;
1168 
1169 	rc = arch_gnttab_map_shared(frames, nr_gframes,
1170 				    gnttab_max_grant_frames(),
1171 				    &gnttab_shared.addr);
1172 	BUG_ON(rc);
1173 
1174 	return 0;
1175 }
1176 
1177 static void gnttab_unmap_frames_v1(void)
1178 {
1179 	arch_gnttab_unmap(gnttab_shared.addr, nr_grant_frames);
1180 }
1181 
1182 static int gnttab_map_frames_v2(xen_pfn_t *frames, unsigned int nr_gframes)
1183 {
1184 	uint64_t *sframes;
1185 	unsigned int nr_sframes;
1186 	struct gnttab_get_status_frames getframes;
1187 	int rc;
1188 
1189 	nr_sframes = nr_status_frames(nr_gframes);
1190 
1191 	/* No need for kzalloc as it is initialized by the following
1192 	 * GNTTABOP_get_status_frames hypercall.
1193 	 */
1194 	sframes = kmalloc_array(nr_sframes, sizeof(uint64_t), GFP_ATOMIC);
1195 	if (!sframes)
1196 		return -ENOMEM;
1197 
1198 	getframes.dom        = DOMID_SELF;
1199 	getframes.nr_frames  = nr_sframes;
1200 	set_xen_guest_handle(getframes.frame_list, sframes);
1201 
1202 	rc = HYPERVISOR_grant_table_op(GNTTABOP_get_status_frames,
1203 				       &getframes, 1);
1204 	if (rc == -ENOSYS) {
1205 		kfree(sframes);
1206 		return -ENOSYS;
1207 	}
1208 
1209 	BUG_ON(rc || getframes.status);
1210 
1211 	rc = arch_gnttab_map_status(sframes, nr_sframes,
1212 				    nr_status_frames(gnttab_max_grant_frames()),
1213 				    &grstatus);
1214 	BUG_ON(rc);
1215 	kfree(sframes);
1216 
1217 	rc = arch_gnttab_map_shared(frames, nr_gframes,
1218 				    gnttab_max_grant_frames(),
1219 				    &gnttab_shared.addr);
1220 	BUG_ON(rc);
1221 
1222 	return 0;
1223 }
1224 
1225 static void gnttab_unmap_frames_v2(void)
1226 {
1227 	arch_gnttab_unmap(gnttab_shared.addr, nr_grant_frames);
1228 	arch_gnttab_unmap(grstatus, nr_status_frames(nr_grant_frames));
1229 }
1230 
1231 static int gnttab_map(unsigned int start_idx, unsigned int end_idx)
1232 {
1233 	struct gnttab_setup_table setup;
1234 	xen_pfn_t *frames;
1235 	unsigned int nr_gframes = end_idx + 1;
1236 	int rc;
1237 
1238 	if (xen_feature(XENFEAT_auto_translated_physmap)) {
1239 		struct xen_add_to_physmap xatp;
1240 		unsigned int i = end_idx;
1241 		rc = 0;
1242 		BUG_ON(xen_auto_xlat_grant_frames.count < nr_gframes);
1243 		/*
1244 		 * Loop backwards, so that the first hypercall has the largest
1245 		 * index, ensuring that the table will grow only once.
1246 		 */
1247 		do {
1248 			xatp.domid = DOMID_SELF;
1249 			xatp.idx = i;
1250 			xatp.space = XENMAPSPACE_grant_table;
1251 			xatp.gpfn = xen_auto_xlat_grant_frames.pfn[i];
1252 			rc = HYPERVISOR_memory_op(XENMEM_add_to_physmap, &xatp);
1253 			if (rc != 0) {
1254 				pr_warn("grant table add_to_physmap failed, err=%d\n",
1255 					rc);
1256 				break;
1257 			}
1258 		} while (i-- > start_idx);
1259 
1260 		return rc;
1261 	}
1262 
1263 	/* No need for kzalloc as it is initialized by the following
1264 	 * GNTTABOP_setup_table hypercall.
1265 	 */
1266 	frames = kmalloc_array(nr_gframes, sizeof(unsigned long), GFP_ATOMIC);
1267 	if (!frames)
1268 		return -ENOMEM;
1269 
1270 	setup.dom        = DOMID_SELF;
1271 	setup.nr_frames  = nr_gframes;
1272 	set_xen_guest_handle(setup.frame_list, frames);
1273 
1274 	rc = HYPERVISOR_grant_table_op(GNTTABOP_setup_table, &setup, 1);
1275 	if (rc == -ENOSYS) {
1276 		kfree(frames);
1277 		return -ENOSYS;
1278 	}
1279 
1280 	BUG_ON(rc || setup.status);
1281 
1282 	rc = gnttab_interface->map_frames(frames, nr_gframes);
1283 
1284 	kfree(frames);
1285 
1286 	return rc;
1287 }
1288 
1289 static const struct gnttab_ops gnttab_v1_ops = {
1290 	.version			= 1,
1291 	.grefs_per_grant_frame		= XEN_PAGE_SIZE /
1292 					  sizeof(struct grant_entry_v1),
1293 	.map_frames			= gnttab_map_frames_v1,
1294 	.unmap_frames			= gnttab_unmap_frames_v1,
1295 	.update_entry			= gnttab_update_entry_v1,
1296 	.end_foreign_access_ref		= gnttab_end_foreign_access_ref_v1,
1297 	.end_foreign_transfer_ref	= gnttab_end_foreign_transfer_ref_v1,
1298 	.query_foreign_access		= gnttab_query_foreign_access_v1,
1299 };
1300 
1301 static const struct gnttab_ops gnttab_v2_ops = {
1302 	.version			= 2,
1303 	.grefs_per_grant_frame		= XEN_PAGE_SIZE /
1304 					  sizeof(union grant_entry_v2),
1305 	.map_frames			= gnttab_map_frames_v2,
1306 	.unmap_frames			= gnttab_unmap_frames_v2,
1307 	.update_entry			= gnttab_update_entry_v2,
1308 	.end_foreign_access_ref		= gnttab_end_foreign_access_ref_v2,
1309 	.end_foreign_transfer_ref	= gnttab_end_foreign_transfer_ref_v2,
1310 	.query_foreign_access		= gnttab_query_foreign_access_v2,
1311 };
1312 
1313 static bool gnttab_need_v2(void)
1314 {
1315 #ifdef CONFIG_X86
1316 	uint32_t base, width;
1317 
1318 	if (xen_pv_domain()) {
1319 		base = xen_cpuid_base();
1320 		if (cpuid_eax(base) < 5)
1321 			return false;	/* Information not available, use V1. */
1322 		width = cpuid_ebx(base + 5) &
1323 			XEN_CPUID_MACHINE_ADDRESS_WIDTH_MASK;
1324 		return width > 32 + PAGE_SHIFT;
1325 	}
1326 #endif
1327 	return !!(max_possible_pfn >> 32);
1328 }
1329 
1330 static void gnttab_request_version(void)
1331 {
1332 	long rc;
1333 	struct gnttab_set_version gsv;
1334 
1335 	if (gnttab_need_v2())
1336 		gsv.version = 2;
1337 	else
1338 		gsv.version = 1;
1339 
1340 	/* Boot parameter overrides automatic selection. */
1341 	if (xen_gnttab_version >= 1 && xen_gnttab_version <= 2)
1342 		gsv.version = xen_gnttab_version;
1343 
1344 	rc = HYPERVISOR_grant_table_op(GNTTABOP_set_version, &gsv, 1);
1345 	if (rc == 0 && gsv.version == 2)
1346 		gnttab_interface = &gnttab_v2_ops;
1347 	else
1348 		gnttab_interface = &gnttab_v1_ops;
1349 	pr_info("Grant tables using version %d layout\n",
1350 		gnttab_interface->version);
1351 }
1352 
1353 static int gnttab_setup(void)
1354 {
1355 	unsigned int max_nr_gframes;
1356 
1357 	max_nr_gframes = gnttab_max_grant_frames();
1358 	if (max_nr_gframes < nr_grant_frames)
1359 		return -ENOSYS;
1360 
1361 	if (xen_feature(XENFEAT_auto_translated_physmap) && gnttab_shared.addr == NULL) {
1362 		gnttab_shared.addr = xen_auto_xlat_grant_frames.vaddr;
1363 		if (gnttab_shared.addr == NULL) {
1364 			pr_warn("gnttab shared frames are not mapped!\n");
1365 			return -ENOMEM;
1366 		}
1367 	}
1368 	return gnttab_map(0, nr_grant_frames - 1);
1369 }
1370 
1371 int gnttab_resume(void)
1372 {
1373 	gnttab_request_version();
1374 	return gnttab_setup();
1375 }
1376 
1377 int gnttab_suspend(void)
1378 {
1379 	if (!xen_feature(XENFEAT_auto_translated_physmap))
1380 		gnttab_interface->unmap_frames();
1381 	return 0;
1382 }
1383 
1384 static int gnttab_expand(unsigned int req_entries)
1385 {
1386 	int rc;
1387 	unsigned int cur, extra;
1388 
1389 	cur = nr_grant_frames;
1390 	extra = ((req_entries + gnttab_interface->grefs_per_grant_frame - 1) /
1391 		 gnttab_interface->grefs_per_grant_frame);
1392 	if (cur + extra > gnttab_max_grant_frames()) {
1393 		pr_warn_ratelimited("xen/grant-table: max_grant_frames reached"
1394 				    " cur=%u extra=%u limit=%u"
1395 				    " gnttab_free_count=%u req_entries=%u\n",
1396 				    cur, extra, gnttab_max_grant_frames(),
1397 				    gnttab_free_count, req_entries);
1398 		return -ENOSPC;
1399 	}
1400 
1401 	rc = gnttab_map(cur, cur + extra - 1);
1402 	if (rc == 0)
1403 		rc = grow_gnttab_list(extra);
1404 
1405 	return rc;
1406 }
1407 
1408 int gnttab_init(void)
1409 {
1410 	int i;
1411 	unsigned long max_nr_grant_frames;
1412 	unsigned int max_nr_glist_frames, nr_glist_frames;
1413 	unsigned int nr_init_grefs;
1414 	int ret;
1415 
1416 	gnttab_request_version();
1417 	max_nr_grant_frames = gnttab_max_grant_frames();
1418 	nr_grant_frames = 1;
1419 
1420 	/* Determine the maximum number of frames required for the
1421 	 * grant reference free list on the current hypervisor.
1422 	 */
1423 	max_nr_glist_frames = (max_nr_grant_frames *
1424 			       gnttab_interface->grefs_per_grant_frame / RPP);
1425 
1426 	gnttab_list = kmalloc_array(max_nr_glist_frames,
1427 				    sizeof(grant_ref_t *),
1428 				    GFP_KERNEL);
1429 	if (gnttab_list == NULL)
1430 		return -ENOMEM;
1431 
1432 	nr_glist_frames = gnttab_frames(nr_grant_frames, RPP);
1433 	for (i = 0; i < nr_glist_frames; i++) {
1434 		gnttab_list[i] = (grant_ref_t *)__get_free_page(GFP_KERNEL);
1435 		if (gnttab_list[i] == NULL) {
1436 			ret = -ENOMEM;
1437 			goto ini_nomem;
1438 		}
1439 	}
1440 
1441 	ret = arch_gnttab_init(max_nr_grant_frames,
1442 			       nr_status_frames(max_nr_grant_frames));
1443 	if (ret < 0)
1444 		goto ini_nomem;
1445 
1446 	if (gnttab_setup() < 0) {
1447 		ret = -ENODEV;
1448 		goto ini_nomem;
1449 	}
1450 
1451 	nr_init_grefs = nr_grant_frames *
1452 			gnttab_interface->grefs_per_grant_frame;
1453 
1454 	for (i = NR_RESERVED_ENTRIES; i < nr_init_grefs - 1; i++)
1455 		gnttab_entry(i) = i + 1;
1456 
1457 	gnttab_entry(nr_init_grefs - 1) = GNTTAB_LIST_END;
1458 	gnttab_free_count = nr_init_grefs - NR_RESERVED_ENTRIES;
1459 	gnttab_free_head  = NR_RESERVED_ENTRIES;
1460 
1461 	pr_info("Grant table initialized\n");
1462 	return 0;
1463 
1464  ini_nomem:
1465 	for (i--; i >= 0; i--)
1466 		free_page((unsigned long)gnttab_list[i]);
1467 	kfree(gnttab_list);
1468 	return ret;
1469 }
1470 EXPORT_SYMBOL_GPL(gnttab_init);
1471 
1472 static int __gnttab_init(void)
1473 {
1474 	if (!xen_domain())
1475 		return -ENODEV;
1476 
1477 	/* Delay grant-table initialization in the PV on HVM case */
1478 	if (xen_hvm_domain() && !xen_pvh_domain())
1479 		return 0;
1480 
1481 	return gnttab_init();
1482 }
1483 /* Starts after core_initcall so that xen_pvh_gnttab_setup can be called
1484  * beforehand to initialize xen_auto_xlat_grant_frames. */
1485 core_initcall_sync(__gnttab_init);
1486