xref: /openbmc/linux/drivers/xen/grant-table.c (revision caf83e49)
1 /******************************************************************************
2  * grant_table.c
3  *
4  * Granting foreign access to our memory reservation.
5  *
6  * Copyright (c) 2005-2006, Christopher Clark
7  * Copyright (c) 2004-2005, K A Fraser
8  *
9  * This program is free software; you can redistribute it and/or
10  * modify it under the terms of the GNU General Public License version 2
11  * as published by the Free Software Foundation; or, when distributed
12  * separately from the Linux kernel or incorporated into other
13  * software packages, subject to the following license:
14  *
15  * Permission is hereby granted, free of charge, to any person obtaining a copy
16  * of this source file (the "Software"), to deal in the Software without
17  * restriction, including without limitation the rights to use, copy, modify,
18  * merge, publish, distribute, sublicense, and/or sell copies of the Software,
19  * and to permit persons to whom the Software is furnished to do so, subject to
20  * the following conditions:
21  *
22  * The above copyright notice and this permission notice shall be included in
23  * all copies or substantial portions of the Software.
24  *
25  * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
26  * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
27  * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
28  * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
29  * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
30  * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
31  * IN THE SOFTWARE.
32  */
33 
34 #define pr_fmt(fmt) "xen:" KBUILD_MODNAME ": " fmt
35 
36 #include <linux/memblock.h>
37 #include <linux/sched.h>
38 #include <linux/mm.h>
39 #include <linux/slab.h>
40 #include <linux/vmalloc.h>
41 #include <linux/uaccess.h>
42 #include <linux/io.h>
43 #include <linux/delay.h>
44 #include <linux/hardirq.h>
45 #include <linux/workqueue.h>
46 #include <linux/ratelimit.h>
47 #include <linux/moduleparam.h>
48 #ifdef CONFIG_XEN_GRANT_DMA_ALLOC
49 #include <linux/dma-mapping.h>
50 #endif
51 
52 #include <xen/xen.h>
53 #include <xen/interface/xen.h>
54 #include <xen/page.h>
55 #include <xen/grant_table.h>
56 #include <xen/interface/memory.h>
57 #include <xen/hvc-console.h>
58 #include <xen/swiotlb-xen.h>
59 #include <xen/balloon.h>
60 #ifdef CONFIG_X86
61 #include <asm/xen/cpuid.h>
62 #endif
63 #include <xen/mem-reservation.h>
64 #include <asm/xen/hypercall.h>
65 #include <asm/xen/interface.h>
66 
67 #include <asm/sync_bitops.h>
68 
69 /* External tools reserve first few grant table entries. */
70 #define NR_RESERVED_ENTRIES 8
71 #define GNTTAB_LIST_END 0xffffffff
72 
73 static grant_ref_t **gnttab_list;
74 static unsigned int nr_grant_frames;
75 static int gnttab_free_count;
76 static grant_ref_t gnttab_free_head;
77 static DEFINE_SPINLOCK(gnttab_list_lock);
78 struct grant_frames xen_auto_xlat_grant_frames;
79 static unsigned int xen_gnttab_version;
80 module_param_named(version, xen_gnttab_version, uint, 0);
81 
82 static union {
83 	struct grant_entry_v1 *v1;
84 	union grant_entry_v2 *v2;
85 	void *addr;
86 } gnttab_shared;
87 
88 /* This is a structure of function pointers for the grant table. */
89 struct gnttab_ops {
90 	/*
91 	 * Version of the grant interface.
92 	 */
93 	unsigned int version;
94 	/*
95 	 * Grant refs per grant frame.
96 	 */
97 	unsigned int grefs_per_grant_frame;
98 	/*
99 	 * Map a list of frames for storing grant entries. The frames parameter
100 	 * holds the addresses of the grant table frames obtained during setup,
101 	 * and nr_gframes is the number of frames to map. Returns GNTST_okay on
102 	 * success and a negative value on failure.
103 	 */
104 	int (*map_frames)(xen_pfn_t *frames, unsigned int nr_gframes);
105 	/*
106 	 * Release the list of frames mapped by map_frames for the grant
107 	 * table (and, for the v2 interface, the grant entry status frames).
108 	 */
109 	void (*unmap_frames)(void);
110 	/*
111 	 * Introduce a valid entry into the grant table, granting the frame of
112 	 * this grant entry to a domain for access. The ref parameter is the
113 	 * reference of the entry being introduced, domid is the id of the
114 	 * grantee domain, frame is the page frame to be granted, and flags
115 	 * holds the access/status flags to set in the entry.
116 	 */
117 	void (*update_entry)(grant_ref_t ref, domid_t domid,
118 			     unsigned long frame, unsigned flags);
119 	/*
120 	 * Stop granting access through a grant entry. The ref parameter is the
121 	 * reference of the grant entry whose access is to be revoked.
122 	 * If the grant entry is currently mapped for reading or writing,
123 	 * return failure (== 0) directly and don't tear down the grant access.
124 	 * Otherwise, stop granting access for this entry and return success (== 1).
125 	 */
126 	int (*end_foreign_access_ref)(grant_ref_t ref);
127 	/*
128 	 * Read the frame number related to a given grant reference.
129 	 */
130 	unsigned long (*read_frame)(grant_ref_t ref);
131 };
132 
133 struct unmap_refs_callback_data {
134 	struct completion completion;
135 	int result;
136 };
137 
138 static const struct gnttab_ops *gnttab_interface;
139 
140 /* This reflects the status of grant entries, so it acts as a global value. */
141 static grant_status_t *grstatus;
142 
143 static struct gnttab_free_callback *gnttab_free_callback_list;
144 
145 static int gnttab_expand(unsigned int req_entries);
146 
147 #define RPP (PAGE_SIZE / sizeof(grant_ref_t))
148 #define SPP (PAGE_SIZE / sizeof(grant_status_t))
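
/*
 * For orientation (illustrative figures, assuming 4 KiB pages): grant_ref_t
 * is 4 bytes and grant_status_t is 2 bytes wide, so RPP = 1024 free-list
 * entries per page and SPP = 2048 status entries per page.
 */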
149 
150 static inline grant_ref_t *__gnttab_entry(grant_ref_t entry)
151 {
152 	return &gnttab_list[(entry) / RPP][(entry) % RPP];
153 }
154 /* This can be used as an l-value */
155 #define gnttab_entry(entry) (*__gnttab_entry(entry))
156 
157 static int get_free_entries(unsigned count)
158 {
159 	unsigned long flags;
160 	int ref, rc = 0;
161 	grant_ref_t head;
162 
163 	spin_lock_irqsave(&gnttab_list_lock, flags);
164 
165 	if ((gnttab_free_count < count) &&
166 	    ((rc = gnttab_expand(count - gnttab_free_count)) < 0)) {
167 		spin_unlock_irqrestore(&gnttab_list_lock, flags);
168 		return rc;
169 	}
170 
171 	ref = head = gnttab_free_head;
172 	gnttab_free_count -= count;
173 	while (count-- > 1)
174 		head = gnttab_entry(head);
175 	gnttab_free_head = gnttab_entry(head);
176 	gnttab_entry(head) = GNTTAB_LIST_END;
177 
178 	spin_unlock_irqrestore(&gnttab_list_lock, flags);
179 
180 	return ref;
181 }
182 
183 static void do_free_callbacks(void)
184 {
185 	struct gnttab_free_callback *callback, *next;
186 
187 	callback = gnttab_free_callback_list;
188 	gnttab_free_callback_list = NULL;
189 
190 	while (callback != NULL) {
191 		next = callback->next;
192 		if (gnttab_free_count >= callback->count) {
193 			callback->next = NULL;
194 			callback->fn(callback->arg);
195 		} else {
196 			callback->next = gnttab_free_callback_list;
197 			gnttab_free_callback_list = callback;
198 		}
199 		callback = next;
200 	}
201 }
202 
203 static inline void check_free_callbacks(void)
204 {
205 	if (unlikely(gnttab_free_callback_list))
206 		do_free_callbacks();
207 }
208 
209 static void put_free_entry(grant_ref_t ref)
210 {
211 	unsigned long flags;
212 	spin_lock_irqsave(&gnttab_list_lock, flags);
213 	gnttab_entry(ref) = gnttab_free_head;
214 	gnttab_free_head = ref;
215 	gnttab_free_count++;
216 	check_free_callbacks();
217 	spin_unlock_irqrestore(&gnttab_list_lock, flags);
218 }
219 
220 /*
221  * The following applies to gnttab_update_entry_v1 and gnttab_update_entry_v2.
222  * Introducing a valid entry into the grant table:
223  *  1. Write ent->domid.
224  *  2. Write ent->frame: Frame to which access is permitted.
225  *  3. Write memory barrier (WMB).
226  *  4. Write ent->flags, including the valid type.
227  */
228 static void gnttab_update_entry_v1(grant_ref_t ref, domid_t domid,
229 				   unsigned long frame, unsigned flags)
230 {
231 	gnttab_shared.v1[ref].domid = domid;
232 	gnttab_shared.v1[ref].frame = frame;
233 	wmb();
234 	gnttab_shared.v1[ref].flags = flags;
235 }
236 
237 static void gnttab_update_entry_v2(grant_ref_t ref, domid_t domid,
238 				   unsigned long frame, unsigned int flags)
239 {
240 	gnttab_shared.v2[ref].hdr.domid = domid;
241 	gnttab_shared.v2[ref].full_page.frame = frame;
242 	wmb();	/* Hypervisor concurrent accesses. */
243 	gnttab_shared.v2[ref].hdr.flags = GTF_permit_access | flags;
244 }
245 
246 /*
247  * Public grant-issuing interface functions
248  */
249 void gnttab_grant_foreign_access_ref(grant_ref_t ref, domid_t domid,
250 				     unsigned long frame, int readonly)
251 {
252 	gnttab_interface->update_entry(ref, domid, frame,
253 			   GTF_permit_access | (readonly ? GTF_readonly : 0));
254 }
255 EXPORT_SYMBOL_GPL(gnttab_grant_foreign_access_ref);
256 
257 int gnttab_grant_foreign_access(domid_t domid, unsigned long frame,
258 				int readonly)
259 {
260 	int ref;
261 
262 	ref = get_free_entries(1);
263 	if (unlikely(ref < 0))
264 		return -ENOSPC;
265 
266 	gnttab_grant_foreign_access_ref(ref, domid, frame, readonly);
267 
268 	return ref;
269 }
270 EXPORT_SYMBOL_GPL(gnttab_grant_foreign_access);
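
/*
 * Illustrative sketch (not part of this file): how a hypothetical frontend
 * driver might share one page with its backend.  "otherend_id" is assumed to
 * come from xenbus; error handling is abbreviated.
 *
 *	struct page *page = alloc_page(GFP_KERNEL);
 *	int ref;
 *
 *	ref = gnttab_grant_foreign_access(otherend_id,
 *					  xen_page_to_gfn(page), 0);
 *	if (ref < 0)
 *		goto err;	// -ENOSPC: no free grant entries
 *	// ... communicate "ref" to the backend, e.g. via xenstore or a ring ...
 */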
271 
272 static int gnttab_end_foreign_access_ref_v1(grant_ref_t ref)
273 {
274 	u16 flags, nflags;
275 	u16 *pflags;
276 
277 	pflags = &gnttab_shared.v1[ref].flags;
278 	nflags = *pflags;
279 	do {
280 		flags = nflags;
281 		if (flags & (GTF_reading|GTF_writing))
282 			return 0;
283 	} while ((nflags = sync_cmpxchg(pflags, flags, 0)) != flags);
284 
285 	return 1;
286 }
287 
288 static int gnttab_end_foreign_access_ref_v2(grant_ref_t ref)
289 {
290 	gnttab_shared.v2[ref].hdr.flags = 0;
291 	mb();	/* Concurrent access by hypervisor. */
292 	if (grstatus[ref] & (GTF_reading|GTF_writing)) {
293 		return 0;
294 	} else {
295 		/*
296 		 * The read of grstatus needs to have acquire semantics.
297 		 * On x86, reads already have that, and we just need to
298 		 * protect against compiler reorderings.
299 		 * On other architectures we may need a full barrier.
300 		 */
301 #ifdef CONFIG_X86
302 		barrier();
303 #else
304 		mb();
305 #endif
306 	}
307 
308 	return 1;
309 }
310 
311 static inline int _gnttab_end_foreign_access_ref(grant_ref_t ref)
312 {
313 	return gnttab_interface->end_foreign_access_ref(ref);
314 }
315 
316 int gnttab_end_foreign_access_ref(grant_ref_t ref)
317 {
318 	if (_gnttab_end_foreign_access_ref(ref))
319 		return 1;
320 	pr_warn("WARNING: g.e. %#x still in use!\n", ref);
321 	return 0;
322 }
323 EXPORT_SYMBOL_GPL(gnttab_end_foreign_access_ref);
324 
325 static unsigned long gnttab_read_frame_v1(grant_ref_t ref)
326 {
327 	return gnttab_shared.v1[ref].frame;
328 }
329 
330 static unsigned long gnttab_read_frame_v2(grant_ref_t ref)
331 {
332 	return gnttab_shared.v2[ref].full_page.frame;
333 }
334 
335 struct deferred_entry {
336 	struct list_head list;
337 	grant_ref_t ref;
338 	uint16_t warn_delay;
339 	struct page *page;
340 };
341 static LIST_HEAD(deferred_list);
342 static void gnttab_handle_deferred(struct timer_list *);
343 static DEFINE_TIMER(deferred_timer, gnttab_handle_deferred);
344 
345 static void gnttab_handle_deferred(struct timer_list *unused)
346 {
347 	unsigned int nr = 10;
348 	struct deferred_entry *first = NULL;
349 	unsigned long flags;
350 
351 	spin_lock_irqsave(&gnttab_list_lock, flags);
352 	while (nr--) {
353 		struct deferred_entry *entry
354 			= list_first_entry(&deferred_list,
355 					   struct deferred_entry, list);
356 
357 		if (entry == first)
358 			break;
359 		list_del(&entry->list);
360 		spin_unlock_irqrestore(&gnttab_list_lock, flags);
361 		if (_gnttab_end_foreign_access_ref(entry->ref)) {
362 			put_free_entry(entry->ref);
363 			pr_debug("freeing g.e. %#x (pfn %#lx)\n",
364 				 entry->ref, page_to_pfn(entry->page));
365 			put_page(entry->page);
366 			kfree(entry);
367 			entry = NULL;
368 		} else {
369 			if (!--entry->warn_delay)
370 				pr_info("g.e. %#x still pending\n", entry->ref);
371 			if (!first)
372 				first = entry;
373 		}
374 		spin_lock_irqsave(&gnttab_list_lock, flags);
375 		if (entry)
376 			list_add_tail(&entry->list, &deferred_list);
377 		else if (list_empty(&deferred_list))
378 			break;
379 	}
380 	if (!list_empty(&deferred_list) && !timer_pending(&deferred_timer)) {
381 		deferred_timer.expires = jiffies + HZ;
382 		add_timer(&deferred_timer);
383 	}
384 	spin_unlock_irqrestore(&gnttab_list_lock, flags);
385 }
386 
387 static void gnttab_add_deferred(grant_ref_t ref, struct page *page)
388 {
389 	struct deferred_entry *entry;
390 	gfp_t gfp = (in_atomic() || irqs_disabled()) ? GFP_ATOMIC : GFP_KERNEL;
391 	const char *what = KERN_WARNING "leaking";
392 
393 	entry = kmalloc(sizeof(*entry), gfp);
394 	if (!page) {
395 		unsigned long gfn = gnttab_interface->read_frame(ref);
396 
397 		page = pfn_to_page(gfn_to_pfn(gfn));
398 		get_page(page);
399 	}
400 
401 	if (entry) {
402 		unsigned long flags;
403 
404 		entry->ref = ref;
405 		entry->page = page;
406 		entry->warn_delay = 60;
407 		spin_lock_irqsave(&gnttab_list_lock, flags);
408 		list_add_tail(&entry->list, &deferred_list);
409 		if (!timer_pending(&deferred_timer)) {
410 			deferred_timer.expires = jiffies + HZ;
411 			add_timer(&deferred_timer);
412 		}
413 		spin_unlock_irqrestore(&gnttab_list_lock, flags);
414 		what = KERN_DEBUG "deferring";
415 	}
416 	printk("%s g.e. %#x (pfn %#lx)\n",
417 	       what, ref, page ? page_to_pfn(page) : -1);
418 }
419 
420 int gnttab_try_end_foreign_access(grant_ref_t ref)
421 {
422 	int ret = _gnttab_end_foreign_access_ref(ref);
423 
424 	if (ret)
425 		put_free_entry(ref);
426 
427 	return ret;
428 }
429 EXPORT_SYMBOL_GPL(gnttab_try_end_foreign_access);
430 
431 void gnttab_end_foreign_access(grant_ref_t ref, unsigned long page)
432 {
433 	if (gnttab_try_end_foreign_access(ref)) {
434 		if (page != 0)
435 			put_page(virt_to_page(page));
436 	} else
437 		gnttab_add_deferred(ref, page ? virt_to_page(page) : NULL);
438 }
439 EXPORT_SYMBOL_GPL(gnttab_end_foreign_access);
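
/*
 * Illustrative sketch (not part of this file), continuing the example above:
 * once the backend is done with the page, the frontend revokes the grant.
 * Passing the page's kernel address lets the core drop the page reference
 * (possibly deferred if the other end still has the grant mapped); pass 0
 * if the caller keeps ownership of the page.
 *
 *	gnttab_end_foreign_access(ref, (unsigned long)page_address(page));
 */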
440 
441 void gnttab_free_grant_reference(grant_ref_t ref)
442 {
443 	put_free_entry(ref);
444 }
445 EXPORT_SYMBOL_GPL(gnttab_free_grant_reference);
446 
447 void gnttab_free_grant_references(grant_ref_t head)
448 {
449 	grant_ref_t ref;
450 	unsigned long flags;
451 	int count = 1;
452 	if (head == GNTTAB_LIST_END)
453 		return;
454 	spin_lock_irqsave(&gnttab_list_lock, flags);
455 	ref = head;
456 	while (gnttab_entry(ref) != GNTTAB_LIST_END) {
457 		ref = gnttab_entry(ref);
458 		count++;
459 	}
460 	gnttab_entry(ref) = gnttab_free_head;
461 	gnttab_free_head = head;
462 	gnttab_free_count += count;
463 	check_free_callbacks();
464 	spin_unlock_irqrestore(&gnttab_list_lock, flags);
465 }
466 EXPORT_SYMBOL_GPL(gnttab_free_grant_references);
467 
468 int gnttab_alloc_grant_references(u16 count, grant_ref_t *head)
469 {
470 	int h = get_free_entries(count);
471 
472 	if (h < 0)
473 		return -ENOSPC;
474 
475 	*head = h;
476 
477 	return 0;
478 }
479 EXPORT_SYMBOL_GPL(gnttab_alloc_grant_references);
480 
481 int gnttab_empty_grant_references(const grant_ref_t *private_head)
482 {
483 	return (*private_head == GNTTAB_LIST_END);
484 }
485 EXPORT_SYMBOL_GPL(gnttab_empty_grant_references);
486 
487 int gnttab_claim_grant_reference(grant_ref_t *private_head)
488 {
489 	grant_ref_t g = *private_head;
490 	if (unlikely(g == GNTTAB_LIST_END))
491 		return -ENOSPC;
492 	*private_head = gnttab_entry(g);
493 	return g;
494 }
495 EXPORT_SYMBOL_GPL(gnttab_claim_grant_reference);
496 
497 void gnttab_release_grant_reference(grant_ref_t *private_head,
498 				    grant_ref_t release)
499 {
500 	gnttab_entry(release) = *private_head;
501 	*private_head = release;
502 }
503 EXPORT_SYMBOL_GPL(gnttab_release_grant_reference);
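
/*
 * Illustrative sketch (not part of this file): pre-allocating a private pool
 * of grant references and handing them out without taking gnttab_list_lock
 * for every request, as ring-based drivers typically do.  RING_SIZE,
 * otherend_id and page are hypothetical.
 *
 *	grant_ref_t gref_head;
 *	int ref;
 *
 *	if (gnttab_alloc_grant_references(RING_SIZE, &gref_head) < 0)
 *		return -ENOSPC;
 *
 *	ref = gnttab_claim_grant_reference(&gref_head);	// -ENOSPC if empty
 *	gnttab_grant_foreign_access_ref(ref, otherend_id,
 *					xen_page_to_gfn(page), 0);
 *	// ...
 *	// once gnttab_end_foreign_access_ref(ref) succeeded, recycle the ref:
 *	gnttab_release_grant_reference(&gref_head, ref);
 *	// on teardown, give the remaining pool back to the global free list:
 *	gnttab_free_grant_references(gref_head);
 */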
504 
505 void gnttab_request_free_callback(struct gnttab_free_callback *callback,
506 				  void (*fn)(void *), void *arg, u16 count)
507 {
508 	unsigned long flags;
509 	struct gnttab_free_callback *cb;
510 
511 	spin_lock_irqsave(&gnttab_list_lock, flags);
512 
513 	/* Check if the callback is already on the list */
514 	cb = gnttab_free_callback_list;
515 	while (cb) {
516 		if (cb == callback)
517 			goto out;
518 		cb = cb->next;
519 	}
520 
521 	callback->fn = fn;
522 	callback->arg = arg;
523 	callback->count = count;
524 	callback->next = gnttab_free_callback_list;
525 	gnttab_free_callback_list = callback;
526 	check_free_callbacks();
527 out:
528 	spin_unlock_irqrestore(&gnttab_list_lock, flags);
529 }
530 EXPORT_SYMBOL_GPL(gnttab_request_free_callback);
531 
532 void gnttab_cancel_free_callback(struct gnttab_free_callback *callback)
533 {
534 	struct gnttab_free_callback **pcb;
535 	unsigned long flags;
536 
537 	spin_lock_irqsave(&gnttab_list_lock, flags);
538 	for (pcb = &gnttab_free_callback_list; *pcb; pcb = &(*pcb)->next) {
539 		if (*pcb == callback) {
540 			*pcb = callback->next;
541 			break;
542 		}
543 	}
544 	spin_unlock_irqrestore(&gnttab_list_lock, flags);
545 }
546 EXPORT_SYMBOL_GPL(gnttab_cancel_free_callback);
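
/*
 * Illustrative sketch (not part of this file): instead of polling after an
 * -ENOSPC, a driver can ask to be notified once enough grant references have
 * been freed.  The callback runs with gnttab_list_lock held, so it should
 * only kick some deferred work.  Names are hypothetical.
 *
 *	static void my_refs_available(void *arg)
 *	{
 *		// reschedule the work that previously failed with -ENOSPC
 *	}
 *
 *	static struct gnttab_free_callback my_cb;
 *
 *	if (gnttab_alloc_grant_references(needed, &head) < 0)
 *		gnttab_request_free_callback(&my_cb, my_refs_available,
 *					     my_dev, needed);
 *	// ... on teardown:
 *	gnttab_cancel_free_callback(&my_cb);
 */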
547 
548 static unsigned int gnttab_frames(unsigned int frames, unsigned int align)
549 {
550 	return (frames * gnttab_interface->grefs_per_grant_frame + align - 1) /
551 	       align;
552 }
553 
554 static int grow_gnttab_list(unsigned int more_frames)
555 {
556 	unsigned int new_nr_grant_frames, extra_entries, i;
557 	unsigned int nr_glist_frames, new_nr_glist_frames;
558 	unsigned int grefs_per_frame;
559 
560 	grefs_per_frame = gnttab_interface->grefs_per_grant_frame;
561 
562 	new_nr_grant_frames = nr_grant_frames + more_frames;
563 	extra_entries = more_frames * grefs_per_frame;
564 
565 	nr_glist_frames = gnttab_frames(nr_grant_frames, RPP);
566 	new_nr_glist_frames = gnttab_frames(new_nr_grant_frames, RPP);
567 	for (i = nr_glist_frames; i < new_nr_glist_frames; i++) {
568 		gnttab_list[i] = (grant_ref_t *)__get_free_page(GFP_ATOMIC);
569 		if (!gnttab_list[i])
570 			goto grow_nomem;
571 	}
572 
573 
574 	for (i = grefs_per_frame * nr_grant_frames;
575 	     i < grefs_per_frame * new_nr_grant_frames - 1; i++)
576 		gnttab_entry(i) = i + 1;
577 
578 	gnttab_entry(i) = gnttab_free_head;
579 	gnttab_free_head = grefs_per_frame * nr_grant_frames;
580 	gnttab_free_count += extra_entries;
581 
582 	nr_grant_frames = new_nr_grant_frames;
583 
584 	check_free_callbacks();
585 
586 	return 0;
587 
588 grow_nomem:
589 	while (i-- > nr_glist_frames)
590 		free_page((unsigned long) gnttab_list[i]);
591 	return -ENOMEM;
592 }
593 
594 static unsigned int __max_nr_grant_frames(void)
595 {
596 	struct gnttab_query_size query;
597 	int rc;
598 
599 	query.dom = DOMID_SELF;
600 
601 	rc = HYPERVISOR_grant_table_op(GNTTABOP_query_size, &query, 1);
602 	if ((rc < 0) || (query.status != GNTST_okay))
603 		return 4; /* Legacy max supported number of frames */
604 
605 	return query.max_nr_frames;
606 }
607 
608 unsigned int gnttab_max_grant_frames(void)
609 {
610 	unsigned int xen_max = __max_nr_grant_frames();
611 	static unsigned int boot_max_nr_grant_frames;
612 
613 	/* First time, initialize it properly. */
614 	if (!boot_max_nr_grant_frames)
615 		boot_max_nr_grant_frames = __max_nr_grant_frames();
616 
617 	if (xen_max > boot_max_nr_grant_frames)
618 		return boot_max_nr_grant_frames;
619 	return xen_max;
620 }
621 EXPORT_SYMBOL_GPL(gnttab_max_grant_frames);
622 
623 int gnttab_setup_auto_xlat_frames(phys_addr_t addr)
624 {
625 	xen_pfn_t *pfn;
626 	unsigned int max_nr_gframes = __max_nr_grant_frames();
627 	unsigned int i;
628 	void *vaddr;
629 
630 	if (xen_auto_xlat_grant_frames.count)
631 		return -EINVAL;
632 
633 	vaddr = xen_remap(addr, XEN_PAGE_SIZE * max_nr_gframes);
634 	if (vaddr == NULL) {
635 		pr_warn("Failed to ioremap gnttab share frames (addr=%pa)!\n",
636 			&addr);
637 		return -ENOMEM;
638 	}
639 	pfn = kcalloc(max_nr_gframes, sizeof(pfn[0]), GFP_KERNEL);
640 	if (!pfn) {
641 		xen_unmap(vaddr);
642 		return -ENOMEM;
643 	}
644 	for (i = 0; i < max_nr_gframes; i++)
645 		pfn[i] = XEN_PFN_DOWN(addr) + i;
646 
647 	xen_auto_xlat_grant_frames.vaddr = vaddr;
648 	xen_auto_xlat_grant_frames.pfn = pfn;
649 	xen_auto_xlat_grant_frames.count = max_nr_gframes;
650 
651 	return 0;
652 }
653 EXPORT_SYMBOL_GPL(gnttab_setup_auto_xlat_frames);
654 
655 void gnttab_free_auto_xlat_frames(void)
656 {
657 	if (!xen_auto_xlat_grant_frames.count)
658 		return;
659 	kfree(xen_auto_xlat_grant_frames.pfn);
660 	xen_unmap(xen_auto_xlat_grant_frames.vaddr);
661 
662 	xen_auto_xlat_grant_frames.pfn = NULL;
663 	xen_auto_xlat_grant_frames.count = 0;
664 	xen_auto_xlat_grant_frames.vaddr = NULL;
665 }
666 EXPORT_SYMBOL_GPL(gnttab_free_auto_xlat_frames);
667 
668 int gnttab_pages_set_private(int nr_pages, struct page **pages)
669 {
670 	int i;
671 
672 	for (i = 0; i < nr_pages; i++) {
673 #if BITS_PER_LONG < 64
674 		struct xen_page_foreign *foreign;
675 
676 		foreign = kzalloc(sizeof(*foreign), GFP_KERNEL);
677 		if (!foreign)
678 			return -ENOMEM;
679 
680 		set_page_private(pages[i], (unsigned long)foreign);
681 #endif
682 		SetPagePrivate(pages[i]);
683 	}
684 
685 	return 0;
686 }
687 EXPORT_SYMBOL_GPL(gnttab_pages_set_private);
688 
689 /**
690  * gnttab_alloc_pages - alloc pages suitable for mapping foreign grants into
691  * @nr_pages: number of pages to alloc
692  * @pages: returns the pages
693  */
694 int gnttab_alloc_pages(int nr_pages, struct page **pages)
695 {
696 	int ret;
697 
698 	ret = xen_alloc_unpopulated_pages(nr_pages, pages);
699 	if (ret < 0)
700 		return ret;
701 
702 	ret = gnttab_pages_set_private(nr_pages, pages);
703 	if (ret < 0)
704 		gnttab_free_pages(nr_pages, pages);
705 
706 	return ret;
707 }
708 EXPORT_SYMBOL_GPL(gnttab_alloc_pages);
709 
710 #ifdef CONFIG_XEN_UNPOPULATED_ALLOC
711 static inline void cache_init(struct gnttab_page_cache *cache)
712 {
713 	cache->pages = NULL;
714 }
715 
716 static inline bool cache_empty(struct gnttab_page_cache *cache)
717 {
718 	return !cache->pages;
719 }
720 
721 static inline struct page *cache_deq(struct gnttab_page_cache *cache)
722 {
723 	struct page *page;
724 
725 	page = cache->pages;
726 	cache->pages = page->zone_device_data;
727 
728 	return page;
729 }
730 
731 static inline void cache_enq(struct gnttab_page_cache *cache, struct page *page)
732 {
733 	page->zone_device_data = cache->pages;
734 	cache->pages = page;
735 }
736 #else
737 static inline void cache_init(struct gnttab_page_cache *cache)
738 {
739 	INIT_LIST_HEAD(&cache->pages);
740 }
741 
742 static inline bool cache_empty(struct gnttab_page_cache *cache)
743 {
744 	return list_empty(&cache->pages);
745 }
746 
747 static inline struct page *cache_deq(struct gnttab_page_cache *cache)
748 {
749 	struct page *page;
750 
751 	page = list_first_entry(&cache->pages, struct page, lru);
752 	list_del(&page->lru);
753 
754 	return page;
755 }
756 
757 static inline void cache_enq(struct gnttab_page_cache *cache, struct page *page)
758 {
759 	list_add(&page->lru, &cache->pages);
760 }
761 #endif
762 
763 void gnttab_page_cache_init(struct gnttab_page_cache *cache)
764 {
765 	spin_lock_init(&cache->lock);
766 	cache_init(cache);
767 	cache->num_pages = 0;
768 }
769 EXPORT_SYMBOL_GPL(gnttab_page_cache_init);
770 
771 int gnttab_page_cache_get(struct gnttab_page_cache *cache, struct page **page)
772 {
773 	unsigned long flags;
774 
775 	spin_lock_irqsave(&cache->lock, flags);
776 
777 	if (cache_empty(cache)) {
778 		spin_unlock_irqrestore(&cache->lock, flags);
779 		return gnttab_alloc_pages(1, page);
780 	}
781 
782 	page[0] = cache_deq(cache);
783 	cache->num_pages--;
784 
785 	spin_unlock_irqrestore(&cache->lock, flags);
786 
787 	return 0;
788 }
789 EXPORT_SYMBOL_GPL(gnttab_page_cache_get);
790 
791 void gnttab_page_cache_put(struct gnttab_page_cache *cache, struct page **page,
792 			   unsigned int num)
793 {
794 	unsigned long flags;
795 	unsigned int i;
796 
797 	spin_lock_irqsave(&cache->lock, flags);
798 
799 	for (i = 0; i < num; i++)
800 		cache_enq(cache, page[i]);
801 	cache->num_pages += num;
802 
803 	spin_unlock_irqrestore(&cache->lock, flags);
804 }
805 EXPORT_SYMBOL_GPL(gnttab_page_cache_put);
806 
807 void gnttab_page_cache_shrink(struct gnttab_page_cache *cache, unsigned int num)
808 {
809 	struct page *page[10];
810 	unsigned int i = 0;
811 	unsigned long flags;
812 
813 	spin_lock_irqsave(&cache->lock, flags);
814 
815 	while (cache->num_pages > num) {
816 		page[i] = cache_deq(cache);
817 		cache->num_pages--;
818 		if (++i == ARRAY_SIZE(page)) {
819 			spin_unlock_irqrestore(&cache->lock, flags);
820 			gnttab_free_pages(i, page);
821 			i = 0;
822 			spin_lock_irqsave(&cache->lock, flags);
823 		}
824 	}
825 
826 	spin_unlock_irqrestore(&cache->lock, flags);
827 
828 	if (i != 0)
829 		gnttab_free_pages(i, page);
830 }
831 EXPORT_SYMBOL_GPL(gnttab_page_cache_shrink);
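
/*
 * Illustrative sketch (not part of this file): how a backend might use the
 * page cache above to recycle grant-mappable pages instead of going to the
 * unpopulated-page allocator for every request.  NUM_SPARE_PAGES is
 * hypothetical.
 *
 *	static struct gnttab_page_cache free_pages;
 *
 *	gnttab_page_cache_init(&free_pages);
 *
 *	struct page *page;
 *	if (gnttab_page_cache_get(&free_pages, &page))
 *		return -ENOMEM;		// falls back to gnttab_alloc_pages()
 *	// ... map a foreign grant into "page", use it, unmap it ...
 *	gnttab_page_cache_put(&free_pages, &page, 1);
 *
 *	// periodically trim the cache down to a target size:
 *	gnttab_page_cache_shrink(&free_pages, NUM_SPARE_PAGES);
 */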
832 
833 void gnttab_pages_clear_private(int nr_pages, struct page **pages)
834 {
835 	int i;
836 
837 	for (i = 0; i < nr_pages; i++) {
838 		if (PagePrivate(pages[i])) {
839 #if BITS_PER_LONG < 64
840 			kfree((void *)page_private(pages[i]));
841 #endif
842 			ClearPagePrivate(pages[i]);
843 		}
844 	}
845 }
846 EXPORT_SYMBOL_GPL(gnttab_pages_clear_private);
847 
848 /**
849  * gnttab_free_pages - free pages allocated by gnttab_alloc_pages()
850  * @nr_pages: number of pages to free
851  * @pages: the pages
852  */
853 void gnttab_free_pages(int nr_pages, struct page **pages)
854 {
855 	gnttab_pages_clear_private(nr_pages, pages);
856 	xen_free_unpopulated_pages(nr_pages, pages);
857 }
858 EXPORT_SYMBOL_GPL(gnttab_free_pages);
859 
860 #ifdef CONFIG_XEN_GRANT_DMA_ALLOC
861 /**
862  * gnttab_dma_alloc_pages - alloc DMAable pages suitable for mapping foreign grants into
863  * @args: arguments to the function
864  */
865 int gnttab_dma_alloc_pages(struct gnttab_dma_alloc_args *args)
866 {
867 	unsigned long pfn, start_pfn;
868 	size_t size;
869 	int i, ret;
870 
871 	size = args->nr_pages << PAGE_SHIFT;
872 	if (args->coherent)
873 		args->vaddr = dma_alloc_coherent(args->dev, size,
874 						 &args->dev_bus_addr,
875 						 GFP_KERNEL | __GFP_NOWARN);
876 	else
877 		args->vaddr = dma_alloc_wc(args->dev, size,
878 					   &args->dev_bus_addr,
879 					   GFP_KERNEL | __GFP_NOWARN);
880 	if (!args->vaddr) {
881 		pr_debug("Failed to allocate DMA buffer of size %zu\n", size);
882 		return -ENOMEM;
883 	}
884 
885 	start_pfn = __phys_to_pfn(args->dev_bus_addr);
886 	for (pfn = start_pfn, i = 0; pfn < start_pfn + args->nr_pages;
887 			pfn++, i++) {
888 		struct page *page = pfn_to_page(pfn);
889 
890 		args->pages[i] = page;
891 		args->frames[i] = xen_page_to_gfn(page);
892 		xenmem_reservation_scrub_page(page);
893 	}
894 
895 	xenmem_reservation_va_mapping_reset(args->nr_pages, args->pages);
896 
897 	ret = xenmem_reservation_decrease(args->nr_pages, args->frames);
898 	if (ret != args->nr_pages) {
899 		pr_debug("Failed to decrease reservation for DMA buffer\n");
900 		ret = -EFAULT;
901 		goto fail;
902 	}
903 
904 	ret = gnttab_pages_set_private(args->nr_pages, args->pages);
905 	if (ret < 0)
906 		goto fail;
907 
908 	return 0;
909 
910 fail:
911 	gnttab_dma_free_pages(args);
912 	return ret;
913 }
914 EXPORT_SYMBOL_GPL(gnttab_dma_alloc_pages);
915 
916 /**
917  * gnttab_dma_free_pages - free DMAable pages
918  * @args: arguments to the function
919  */
920 int gnttab_dma_free_pages(struct gnttab_dma_alloc_args *args)
921 {
922 	size_t size;
923 	int i, ret;
924 
925 	gnttab_pages_clear_private(args->nr_pages, args->pages);
926 
927 	for (i = 0; i < args->nr_pages; i++)
928 		args->frames[i] = page_to_xen_pfn(args->pages[i]);
929 
930 	ret = xenmem_reservation_increase(args->nr_pages, args->frames);
931 	if (ret != args->nr_pages) {
932 		pr_debug("Failed to increase reservation for DMA buffer\n");
933 		ret = -EFAULT;
934 	} else {
935 		ret = 0;
936 	}
937 
938 	xenmem_reservation_va_mapping_update(args->nr_pages, args->pages,
939 					     args->frames);
940 
941 	size = args->nr_pages << PAGE_SHIFT;
942 	if (args->coherent)
943 		dma_free_coherent(args->dev, size,
944 				  args->vaddr, args->dev_bus_addr);
945 	else
946 		dma_free_wc(args->dev, size,
947 			    args->vaddr, args->dev_bus_addr);
948 	return ret;
949 }
950 EXPORT_SYMBOL_GPL(gnttab_dma_free_pages);
951 #endif
952 
953 /* Handling of paged out grant targets (GNTST_eagain) */
954 #define MAX_DELAY 256
955 static inline void
956 gnttab_retry_eagain_gop(unsigned int cmd, void *gop, int16_t *status,
957 						const char *func)
958 {
959 	unsigned delay = 1;
960 
961 	do {
962 		BUG_ON(HYPERVISOR_grant_table_op(cmd, gop, 1));
963 		if (*status == GNTST_eagain)
964 			msleep(delay++);
965 	} while ((*status == GNTST_eagain) && (delay < MAX_DELAY));
966 
967 	if (delay >= MAX_DELAY) {
968 		pr_err("%s: %s eagain grant\n", func, current->comm);
969 		*status = GNTST_bad_page;
970 	}
971 }
972 
973 void gnttab_batch_map(struct gnttab_map_grant_ref *batch, unsigned count)
974 {
975 	struct gnttab_map_grant_ref *op;
976 
977 	if (HYPERVISOR_grant_table_op(GNTTABOP_map_grant_ref, batch, count))
978 		BUG();
979 	for (op = batch; op < batch + count; op++)
980 		if (op->status == GNTST_eagain)
981 			gnttab_retry_eagain_gop(GNTTABOP_map_grant_ref, op,
982 						&op->status, __func__);
983 }
984 EXPORT_SYMBOL_GPL(gnttab_batch_map);
985 
986 void gnttab_batch_copy(struct gnttab_copy *batch, unsigned count)
987 {
988 	struct gnttab_copy *op;
989 
990 	if (HYPERVISOR_grant_table_op(GNTTABOP_copy, batch, count))
991 		BUG();
992 	for (op = batch; op < batch + count; op++)
993 		if (op->status == GNTST_eagain)
994 			gnttab_retry_eagain_gop(GNTTABOP_copy, op,
995 						&op->status, __func__);
996 }
997 EXPORT_SYMBOL_GPL(gnttab_batch_copy);
998 
999 void gnttab_foreach_grant_in_range(struct page *page,
1000 				   unsigned int offset,
1001 				   unsigned int len,
1002 				   xen_grant_fn_t fn,
1003 				   void *data)
1004 {
1005 	unsigned int goffset;
1006 	unsigned int glen;
1007 	unsigned long xen_pfn;
1008 
1009 	len = min_t(unsigned int, PAGE_SIZE - offset, len);
1010 	goffset = xen_offset_in_page(offset);
1011 
1012 	xen_pfn = page_to_xen_pfn(page) + XEN_PFN_DOWN(offset);
1013 
1014 	while (len) {
1015 		glen = min_t(unsigned int, XEN_PAGE_SIZE - goffset, len);
1016 		fn(pfn_to_gfn(xen_pfn), goffset, glen, data);
1017 
1018 		goffset = 0;
1019 		xen_pfn++;
1020 		len -= glen;
1021 	}
1022 }
1023 EXPORT_SYMBOL_GPL(gnttab_foreach_grant_in_range);
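
/*
 * Illustrative sketch (not part of this file): using the iterator above to
 * break a buffer that may span a 64 KiB kernel page into XEN_PAGE_SIZE sized
 * pieces and grant each one.  struct my_request and grant_one_chunk() are
 * hypothetical.
 *
 *	static void grant_one_chunk(unsigned long gfn, unsigned int offset,
 *				    unsigned int len, void *data)
 *	{
 *		struct my_request *req = data;
 *		int ref = gnttab_grant_foreign_access(req->otherend_id, gfn, 0);
 *
 *		// record ref/offset/len in the request for the backend
 *	}
 *
 *	gnttab_foreach_grant_in_range(page, offset, len, grant_one_chunk, &req);
 */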
1024 
1025 void gnttab_foreach_grant(struct page **pages,
1026 			  unsigned int nr_grefs,
1027 			  xen_grant_fn_t fn,
1028 			  void *data)
1029 {
1030 	unsigned int goffset = 0;
1031 	unsigned long xen_pfn = 0;
1032 	unsigned int i;
1033 
1034 	for (i = 0; i < nr_grefs; i++) {
1035 		if ((i % XEN_PFN_PER_PAGE) == 0) {
1036 			xen_pfn = page_to_xen_pfn(pages[i / XEN_PFN_PER_PAGE]);
1037 			goffset = 0;
1038 		}
1039 
1040 		fn(pfn_to_gfn(xen_pfn), goffset, XEN_PAGE_SIZE, data);
1041 
1042 		goffset += XEN_PAGE_SIZE;
1043 		xen_pfn++;
1044 	}
1045 }
1046 
1047 int gnttab_map_refs(struct gnttab_map_grant_ref *map_ops,
1048 		    struct gnttab_map_grant_ref *kmap_ops,
1049 		    struct page **pages, unsigned int count)
1050 {
1051 	int i, ret;
1052 
1053 	ret = HYPERVISOR_grant_table_op(GNTTABOP_map_grant_ref, map_ops, count);
1054 	if (ret)
1055 		return ret;
1056 
1057 	for (i = 0; i < count; i++) {
1058 		switch (map_ops[i].status) {
1059 		case GNTST_okay:
1060 		{
1061 			struct xen_page_foreign *foreign;
1062 
1063 			SetPageForeign(pages[i]);
1064 			foreign = xen_page_foreign(pages[i]);
1065 			foreign->domid = map_ops[i].dom;
1066 			foreign->gref = map_ops[i].ref;
1067 			break;
1068 		}
1069 
1070 		case GNTST_no_device_space:
1071 			pr_warn_ratelimited("maptrack limit reached, can't map all guest pages\n");
1072 			break;
1073 
1074 		case GNTST_eagain:
1075 			/* Retry eagain maps */
1076 			gnttab_retry_eagain_gop(GNTTABOP_map_grant_ref,
1077 						map_ops + i,
1078 						&map_ops[i].status, __func__);
1079 			/* Test status in next loop iteration. */
1080 			i--;
1081 			break;
1082 
1083 		default:
1084 			break;
1085 		}
1086 	}
1087 
1088 	return set_foreign_p2m_mapping(map_ops, kmap_ops, pages, count);
1089 }
1090 EXPORT_SYMBOL_GPL(gnttab_map_refs);
1091 
1092 int gnttab_unmap_refs(struct gnttab_unmap_grant_ref *unmap_ops,
1093 		      struct gnttab_unmap_grant_ref *kunmap_ops,
1094 		      struct page **pages, unsigned int count)
1095 {
1096 	unsigned int i;
1097 	int ret;
1098 
1099 	ret = HYPERVISOR_grant_table_op(GNTTABOP_unmap_grant_ref, unmap_ops, count);
1100 	if (ret)
1101 		return ret;
1102 
1103 	for (i = 0; i < count; i++)
1104 		ClearPageForeign(pages[i]);
1105 
1106 	return clear_foreign_p2m_mapping(unmap_ops, kunmap_ops, pages, count);
1107 }
1108 EXPORT_SYMBOL_GPL(gnttab_unmap_refs);
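
/*
 * Illustrative sketch (not part of this file): a backend mapping one foreign
 * grant into a locally allocated page and tearing the mapping down again.
 * The gnttab_set_map_op()/gnttab_set_unmap_op() helpers come from
 * <xen/grant_table.h>; "ref" and "otherend_id" describe the foreign grant,
 * and error handling is abbreviated.
 *
 *	struct gnttab_map_grant_ref map;
 *	struct gnttab_unmap_grant_ref unmap;
 *	struct page *page;
 *	unsigned long addr;
 *
 *	if (gnttab_alloc_pages(1, &page))
 *		return -ENOMEM;
 *	addr = (unsigned long)pfn_to_kaddr(page_to_pfn(page));
 *
 *	gnttab_set_map_op(&map, addr, GNTMAP_host_map, ref, otherend_id);
 *	if (gnttab_map_refs(&map, NULL, &page, 1) || map.status != GNTST_okay)
 *		goto fail;
 *
 *	// ... use the mapped page, then undo the mapping: ...
 *	gnttab_set_unmap_op(&unmap, addr, GNTMAP_host_map, map.handle);
 *	gnttab_unmap_refs(&unmap, NULL, &page, 1);
 *	gnttab_free_pages(1, &page);
 */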
1109 
1110 #define GNTTAB_UNMAP_REFS_DELAY 5
1111 
1112 static void __gnttab_unmap_refs_async(struct gntab_unmap_queue_data* item);
1113 
1114 static void gnttab_unmap_work(struct work_struct *work)
1115 {
1116 	struct gntab_unmap_queue_data
1117 		*unmap_data = container_of(work,
1118 					   struct gntab_unmap_queue_data,
1119 					   gnttab_work.work);
1120 	if (unmap_data->age != UINT_MAX)
1121 		unmap_data->age++;
1122 	__gnttab_unmap_refs_async(unmap_data);
1123 }
1124 
1125 static void __gnttab_unmap_refs_async(struct gntab_unmap_queue_data* item)
1126 {
1127 	int ret;
1128 	int pc;
1129 
1130 	for (pc = 0; pc < item->count; pc++) {
1131 		if (page_count(item->pages[pc]) > 1) {
1132 			unsigned long delay = GNTTAB_UNMAP_REFS_DELAY * (item->age + 1);
1133 			schedule_delayed_work(&item->gnttab_work,
1134 					      msecs_to_jiffies(delay));
1135 			return;
1136 		}
1137 	}
1138 
1139 	ret = gnttab_unmap_refs(item->unmap_ops, item->kunmap_ops,
1140 				item->pages, item->count);
1141 	item->done(ret, item);
1142 }
1143 
1144 void gnttab_unmap_refs_async(struct gntab_unmap_queue_data* item)
1145 {
1146 	INIT_DELAYED_WORK(&item->gnttab_work, gnttab_unmap_work);
1147 	item->age = 0;
1148 
1149 	__gnttab_unmap_refs_async(item);
1150 }
1151 EXPORT_SYMBOL_GPL(gnttab_unmap_refs_async);
1152 
1153 static void unmap_refs_callback(int result,
1154 		struct gntab_unmap_queue_data *data)
1155 {
1156 	struct unmap_refs_callback_data *d = data->data;
1157 
1158 	d->result = result;
1159 	complete(&d->completion);
1160 }
1161 
1162 int gnttab_unmap_refs_sync(struct gntab_unmap_queue_data *item)
1163 {
1164 	struct unmap_refs_callback_data data;
1165 
1166 	init_completion(&data.completion);
1167 	item->data = &data;
1168 	item->done = &unmap_refs_callback;
1169 	gnttab_unmap_refs_async(item);
1170 	wait_for_completion(&data.completion);
1171 
1172 	return data.result;
1173 }
1174 EXPORT_SYMBOL_GPL(gnttab_unmap_refs_sync);
1175 
1176 static unsigned int nr_status_frames(unsigned int nr_grant_frames)
1177 {
1178 	return gnttab_frames(nr_grant_frames, SPP);
1179 }
1180 
1181 static int gnttab_map_frames_v1(xen_pfn_t *frames, unsigned int nr_gframes)
1182 {
1183 	int rc;
1184 
1185 	rc = arch_gnttab_map_shared(frames, nr_gframes,
1186 				    gnttab_max_grant_frames(),
1187 				    &gnttab_shared.addr);
1188 	BUG_ON(rc);
1189 
1190 	return 0;
1191 }
1192 
1193 static void gnttab_unmap_frames_v1(void)
1194 {
1195 	arch_gnttab_unmap(gnttab_shared.addr, nr_grant_frames);
1196 }
1197 
1198 static int gnttab_map_frames_v2(xen_pfn_t *frames, unsigned int nr_gframes)
1199 {
1200 	uint64_t *sframes;
1201 	unsigned int nr_sframes;
1202 	struct gnttab_get_status_frames getframes;
1203 	int rc;
1204 
1205 	nr_sframes = nr_status_frames(nr_gframes);
1206 
1207 	/* No need for kzalloc as it is initialized in the following hypercall
1208 	 * GNTTABOP_get_status_frames.
1209 	 */
1210 	sframes = kmalloc_array(nr_sframes, sizeof(uint64_t), GFP_ATOMIC);
1211 	if (!sframes)
1212 		return -ENOMEM;
1213 
1214 	getframes.dom        = DOMID_SELF;
1215 	getframes.nr_frames  = nr_sframes;
1216 	set_xen_guest_handle(getframes.frame_list, sframes);
1217 
1218 	rc = HYPERVISOR_grant_table_op(GNTTABOP_get_status_frames,
1219 				       &getframes, 1);
1220 	if (rc == -ENOSYS) {
1221 		kfree(sframes);
1222 		return -ENOSYS;
1223 	}
1224 
1225 	BUG_ON(rc || getframes.status);
1226 
1227 	rc = arch_gnttab_map_status(sframes, nr_sframes,
1228 				    nr_status_frames(gnttab_max_grant_frames()),
1229 				    &grstatus);
1230 	BUG_ON(rc);
1231 	kfree(sframes);
1232 
1233 	rc = arch_gnttab_map_shared(frames, nr_gframes,
1234 				    gnttab_max_grant_frames(),
1235 				    &gnttab_shared.addr);
1236 	BUG_ON(rc);
1237 
1238 	return 0;
1239 }
1240 
1241 static void gnttab_unmap_frames_v2(void)
1242 {
1243 	arch_gnttab_unmap(gnttab_shared.addr, nr_grant_frames);
1244 	arch_gnttab_unmap(grstatus, nr_status_frames(nr_grant_frames));
1245 }
1246 
1247 static int gnttab_map(unsigned int start_idx, unsigned int end_idx)
1248 {
1249 	struct gnttab_setup_table setup;
1250 	xen_pfn_t *frames;
1251 	unsigned int nr_gframes = end_idx + 1;
1252 	int rc;
1253 
1254 	if (xen_feature(XENFEAT_auto_translated_physmap)) {
1255 		struct xen_add_to_physmap xatp;
1256 		unsigned int i = end_idx;
1257 		rc = 0;
1258 		BUG_ON(xen_auto_xlat_grant_frames.count < nr_gframes);
1259 		/*
1260 		 * Loop backwards, so that the first hypercall has the largest
1261 		 * index, ensuring that the table will grow only once.
1262 		 */
1263 		do {
1264 			xatp.domid = DOMID_SELF;
1265 			xatp.idx = i;
1266 			xatp.space = XENMAPSPACE_grant_table;
1267 			xatp.gpfn = xen_auto_xlat_grant_frames.pfn[i];
1268 			rc = HYPERVISOR_memory_op(XENMEM_add_to_physmap, &xatp);
1269 			if (rc != 0) {
1270 				pr_warn("grant table add_to_physmap failed, err=%d\n",
1271 					rc);
1272 				break;
1273 			}
1274 		} while (i-- > start_idx);
1275 
1276 		return rc;
1277 	}
1278 
1279 	/* No need for kzalloc as it is initialized in the following hypercall
1280 	 * GNTTABOP_setup_table.
1281 	 */
1282 	frames = kmalloc_array(nr_gframes, sizeof(unsigned long), GFP_ATOMIC);
1283 	if (!frames)
1284 		return -ENOMEM;
1285 
1286 	setup.dom        = DOMID_SELF;
1287 	setup.nr_frames  = nr_gframes;
1288 	set_xen_guest_handle(setup.frame_list, frames);
1289 
1290 	rc = HYPERVISOR_grant_table_op(GNTTABOP_setup_table, &setup, 1);
1291 	if (rc == -ENOSYS) {
1292 		kfree(frames);
1293 		return -ENOSYS;
1294 	}
1295 
1296 	BUG_ON(rc || setup.status);
1297 
1298 	rc = gnttab_interface->map_frames(frames, nr_gframes);
1299 
1300 	kfree(frames);
1301 
1302 	return rc;
1303 }
1304 
1305 static const struct gnttab_ops gnttab_v1_ops = {
1306 	.version			= 1,
1307 	.grefs_per_grant_frame		= XEN_PAGE_SIZE /
1308 					  sizeof(struct grant_entry_v1),
1309 	.map_frames			= gnttab_map_frames_v1,
1310 	.unmap_frames			= gnttab_unmap_frames_v1,
1311 	.update_entry			= gnttab_update_entry_v1,
1312 	.end_foreign_access_ref		= gnttab_end_foreign_access_ref_v1,
1313 	.read_frame			= gnttab_read_frame_v1,
1314 };
1315 
1316 static const struct gnttab_ops gnttab_v2_ops = {
1317 	.version			= 2,
1318 	.grefs_per_grant_frame		= XEN_PAGE_SIZE /
1319 					  sizeof(union grant_entry_v2),
1320 	.map_frames			= gnttab_map_frames_v2,
1321 	.unmap_frames			= gnttab_unmap_frames_v2,
1322 	.update_entry			= gnttab_update_entry_v2,
1323 	.end_foreign_access_ref		= gnttab_end_foreign_access_ref_v2,
1324 	.read_frame			= gnttab_read_frame_v2,
1325 };
1326 
1327 static bool gnttab_need_v2(void)
1328 {
1329 #ifdef CONFIG_X86
1330 	uint32_t base, width;
1331 
1332 	if (xen_pv_domain()) {
1333 		base = xen_cpuid_base();
1334 		if (cpuid_eax(base) < 5)
1335 			return false;	/* Information not available, use V1. */
1336 		width = cpuid_ebx(base + 5) &
1337 			XEN_CPUID_MACHINE_ADDRESS_WIDTH_MASK;
1338 		return width > 32 + PAGE_SHIFT;
1339 	}
1340 #endif
1341 	return !!(max_possible_pfn >> 32);
1342 }
1343 
1344 static void gnttab_request_version(void)
1345 {
1346 	long rc;
1347 	struct gnttab_set_version gsv;
1348 
1349 	if (gnttab_need_v2())
1350 		gsv.version = 2;
1351 	else
1352 		gsv.version = 1;
1353 
1354 	/* Boot parameter overrides automatic selection. */
1355 	if (xen_gnttab_version >= 1 && xen_gnttab_version <= 2)
1356 		gsv.version = xen_gnttab_version;
1357 
1358 	rc = HYPERVISOR_grant_table_op(GNTTABOP_set_version, &gsv, 1);
1359 	if (rc == 0 && gsv.version == 2)
1360 		gnttab_interface = &gnttab_v2_ops;
1361 	else
1362 		gnttab_interface = &gnttab_v1_ops;
1363 	pr_info("Grant tables using version %d layout\n",
1364 		gnttab_interface->version);
1365 }
1366 
1367 static int gnttab_setup(void)
1368 {
1369 	unsigned int max_nr_gframes;
1370 
1371 	max_nr_gframes = gnttab_max_grant_frames();
1372 	if (max_nr_gframes < nr_grant_frames)
1373 		return -ENOSYS;
1374 
1375 	if (xen_feature(XENFEAT_auto_translated_physmap) && gnttab_shared.addr == NULL) {
1376 		gnttab_shared.addr = xen_auto_xlat_grant_frames.vaddr;
1377 		if (gnttab_shared.addr == NULL) {
1378 			pr_warn("gnttab share frames is not mapped!\n");
1379 			return -ENOMEM;
1380 		}
1381 	}
1382 	return gnttab_map(0, nr_grant_frames - 1);
1383 }
1384 
1385 int gnttab_resume(void)
1386 {
1387 	gnttab_request_version();
1388 	return gnttab_setup();
1389 }
1390 
1391 int gnttab_suspend(void)
1392 {
1393 	if (!xen_feature(XENFEAT_auto_translated_physmap))
1394 		gnttab_interface->unmap_frames();
1395 	return 0;
1396 }
1397 
1398 static int gnttab_expand(unsigned int req_entries)
1399 {
1400 	int rc;
1401 	unsigned int cur, extra;
1402 
1403 	cur = nr_grant_frames;
1404 	extra = ((req_entries + gnttab_interface->grefs_per_grant_frame - 1) /
1405 		 gnttab_interface->grefs_per_grant_frame);
1406 	if (cur + extra > gnttab_max_grant_frames()) {
1407 		pr_warn_ratelimited("xen/grant-table: max_grant_frames reached"
1408 				    " cur=%u extra=%u limit=%u"
1409 				    " gnttab_free_count=%u req_entries=%u\n",
1410 				    cur, extra, gnttab_max_grant_frames(),
1411 				    gnttab_free_count, req_entries);
1412 		return -ENOSPC;
1413 	}
1414 
1415 	rc = gnttab_map(cur, cur + extra - 1);
1416 	if (rc == 0)
1417 		rc = grow_gnttab_list(extra);
1418 
1419 	return rc;
1420 }
1421 
1422 int gnttab_init(void)
1423 {
1424 	int i;
1425 	unsigned long max_nr_grant_frames;
1426 	unsigned int max_nr_glist_frames, nr_glist_frames;
1427 	unsigned int nr_init_grefs;
1428 	int ret;
1429 
1430 	gnttab_request_version();
1431 	max_nr_grant_frames = gnttab_max_grant_frames();
1432 	nr_grant_frames = 1;
1433 
1434 	/* Determine the maximum number of frames required for the
1435 	 * grant reference free list on the current hypervisor.
1436 	 */
1437 	max_nr_glist_frames = (max_nr_grant_frames *
1438 			       gnttab_interface->grefs_per_grant_frame / RPP);
1439 
1440 	gnttab_list = kmalloc_array(max_nr_glist_frames,
1441 				    sizeof(grant_ref_t *),
1442 				    GFP_KERNEL);
1443 	if (gnttab_list == NULL)
1444 		return -ENOMEM;
1445 
1446 	nr_glist_frames = gnttab_frames(nr_grant_frames, RPP);
1447 	for (i = 0; i < nr_glist_frames; i++) {
1448 		gnttab_list[i] = (grant_ref_t *)__get_free_page(GFP_KERNEL);
1449 		if (gnttab_list[i] == NULL) {
1450 			ret = -ENOMEM;
1451 			goto ini_nomem;
1452 		}
1453 	}
1454 
1455 	ret = arch_gnttab_init(max_nr_grant_frames,
1456 			       nr_status_frames(max_nr_grant_frames));
1457 	if (ret < 0)
1458 		goto ini_nomem;
1459 
1460 	if (gnttab_setup() < 0) {
1461 		ret = -ENODEV;
1462 		goto ini_nomem;
1463 	}
1464 
1465 	nr_init_grefs = nr_grant_frames *
1466 			gnttab_interface->grefs_per_grant_frame;
1467 
1468 	for (i = NR_RESERVED_ENTRIES; i < nr_init_grefs - 1; i++)
1469 		gnttab_entry(i) = i + 1;
1470 
1471 	gnttab_entry(nr_init_grefs - 1) = GNTTAB_LIST_END;
1472 	gnttab_free_count = nr_init_grefs - NR_RESERVED_ENTRIES;
1473 	gnttab_free_head  = NR_RESERVED_ENTRIES;
1474 
1475 	printk("Grant table initialized\n");
1476 	return 0;
1477 
1478  ini_nomem:
1479 	for (i--; i >= 0; i--)
1480 		free_page((unsigned long)gnttab_list[i]);
1481 	kfree(gnttab_list);
1482 	return ret;
1483 }
1484 EXPORT_SYMBOL_GPL(gnttab_init);
1485 
1486 static int __gnttab_init(void)
1487 {
1488 	if (!xen_domain())
1489 		return -ENODEV;
1490 
1491 	/* Delay grant-table initialization in the PV on HVM case */
1492 	if (xen_hvm_domain() && !xen_pvh_domain())
1493 		return 0;
1494 
1495 	return gnttab_init();
1496 }
1497 /* Starts after core_initcall so that xen_pvh_gnttab_setup can be called
1498  * beforehand to initialize xen_auto_xlat_grant_frames. */
1499 core_initcall_sync(__gnttab_init);
1500