xref: /openbmc/linux/drivers/xen/grant-table.c (revision 603c09f2)
1 /******************************************************************************
2  * grant_table.c
3  *
4  * Granting foreign access to our memory reservation.
5  *
6  * Copyright (c) 2005-2006, Christopher Clark
7  * Copyright (c) 2004-2005, K A Fraser
8  *
9  * This program is free software; you can redistribute it and/or
10  * modify it under the terms of the GNU General Public License version 2
11  * as published by the Free Software Foundation; or, when distributed
12  * separately from the Linux kernel or incorporated into other
13  * software packages, subject to the following license:
14  *
15  * Permission is hereby granted, free of charge, to any person obtaining a copy
16  * of this source file (the "Software"), to deal in the Software without
17  * restriction, including without limitation the rights to use, copy, modify,
18  * merge, publish, distribute, sublicense, and/or sell copies of the Software,
19  * and to permit persons to whom the Software is furnished to do so, subject to
20  * the following conditions:
21  *
22  * The above copyright notice and this permission notice shall be included in
23  * all copies or substantial portions of the Software.
24  *
25  * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
26  * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
27  * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
28  * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
29  * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
30  * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
31  * IN THE SOFTWARE.
32  */
33 
34 #define pr_fmt(fmt) "xen:" KBUILD_MODNAME ": " fmt
35 
36 #include <linux/bitmap.h>
37 #include <linux/memblock.h>
38 #include <linux/sched.h>
39 #include <linux/mm.h>
40 #include <linux/slab.h>
41 #include <linux/vmalloc.h>
42 #include <linux/uaccess.h>
43 #include <linux/io.h>
44 #include <linux/delay.h>
45 #include <linux/hardirq.h>
46 #include <linux/workqueue.h>
47 #include <linux/ratelimit.h>
48 #include <linux/moduleparam.h>
49 #ifdef CONFIG_XEN_GRANT_DMA_ALLOC
50 #include <linux/dma-mapping.h>
51 #endif
52 
53 #include <xen/xen.h>
54 #include <xen/interface/xen.h>
55 #include <xen/page.h>
56 #include <xen/grant_table.h>
57 #include <xen/interface/memory.h>
58 #include <xen/hvc-console.h>
59 #include <xen/swiotlb-xen.h>
60 #include <xen/balloon.h>
61 #ifdef CONFIG_X86
62 #include <asm/xen/cpuid.h>
63 #endif
64 #include <xen/mem-reservation.h>
65 #include <asm/xen/hypercall.h>
66 #include <asm/xen/interface.h>
67 
68 #include <asm/sync_bitops.h>
69 
70 #define GNTTAB_LIST_END 0xffffffff
71 
72 static grant_ref_t **gnttab_list;
73 static unsigned int nr_grant_frames;
74 
75 /*
76  * Handling of free grants:
77  *
78  * Free grants are in a simple list anchored in gnttab_free_head. They are
79  * linked by grant ref, the last element contains GNTTAB_LIST_END. The number
80  * of free entries is stored in gnttab_free_count.
81  * Additionally there is a bitmap of free entries anchored in
82  * gnttab_free_bitmap. It is used to simplify allocation of multiple
83  * consecutive grants, which is needed e.g. to support virtio.
84  * gnttab_last_free is used to add free entries of new frames at the end of
85  * the free list.
86  * gnttab_free_tail_ptr specifies the variable which references the start
87  * of consecutive free grants ending with gnttab_last_free. This pointer is
88  * updated in a rather defensive way, in order to avoid performance hits in
89  * hot paths.
90  * All those variables are protected by gnttab_list_lock.
91  */
92 static int gnttab_free_count;
93 static unsigned int gnttab_size;
94 static grant_ref_t gnttab_free_head = GNTTAB_LIST_END;
95 static grant_ref_t gnttab_last_free = GNTTAB_LIST_END;
96 static grant_ref_t *gnttab_free_tail_ptr;
97 static unsigned long *gnttab_free_bitmap;
98 static DEFINE_SPINLOCK(gnttab_list_lock);
99 
100 struct grant_frames xen_auto_xlat_grant_frames;
101 static unsigned int xen_gnttab_version;
102 module_param_named(version, xen_gnttab_version, uint, 0);
103 
104 static union {
105 	struct grant_entry_v1 *v1;
106 	union grant_entry_v2 *v2;
107 	void *addr;
108 } gnttab_shared;
109 
110 /* This is a structure of function pointers for the grant table. */
111 struct gnttab_ops {
112 	/*
113 	 * Version of the grant interface.
114 	 */
115 	unsigned int version;
116 	/*
117 	 * Grant refs per grant frame.
118 	 */
119 	unsigned int grefs_per_grant_frame;
120 	/*
121 	 * Map a list of frames for storing grant entries. The frames parameter
122 	 * holds the frame addresses of the grant table when it is set up, and
123 	 * nr_gframes is the number of frames to map. Returns GNTST_okay on
124 	 * success and a negative value on failure.
125 	 */
126 	int (*map_frames)(xen_pfn_t *frames, unsigned int nr_gframes);
127 	/*
128 	 * Release the list of frames which were mapped by map_frames() (for
129 	 * v2 this includes the grant status frames as well).
130 	 */
131 	void (*unmap_frames)(void);
132 	/*
133 	 * Introduce a valid entry into the grant table, granting the frame of
134 	 * this grant entry to a domain for access. The ref parameter is the
135 	 * reference of the introduced grant entry, domid is the id of the
136 	 * granted domain, frame is the page frame to be granted, and flags is
137 	 * the status to be written into the grant entry.
138 	 */
139 	void (*update_entry)(grant_ref_t ref, domid_t domid,
140 			     unsigned long frame, unsigned flags);
141 	/*
142 	 * Stop granting a domain access through a grant entry. The ref parameter
143 	 * is the reference of the grant entry whose access is to be revoked.
144 	 * If the grant entry is currently mapped for reading or writing, return
145 	 * failure (== 0) directly and don't tear down the grant access.
146 	 * Otherwise, revoke access for this entry and return success (== 1).
147 	 */
148 	int (*end_foreign_access_ref)(grant_ref_t ref);
149 	/*
150 	 * Read the frame number related to a given grant reference.
151 	 */
152 	unsigned long (*read_frame)(grant_ref_t ref);
153 };
154 
155 struct unmap_refs_callback_data {
156 	struct completion completion;
157 	int result;
158 };
159 
160 static const struct gnttab_ops *gnttab_interface;
161 
162 /* This reflects the status of grant entries, so it acts as a global value. */
163 static grant_status_t *grstatus;
164 
165 static struct gnttab_free_callback *gnttab_free_callback_list;
166 
167 static int gnttab_expand(unsigned int req_entries);
168 
169 #define RPP (PAGE_SIZE / sizeof(grant_ref_t))
170 #define SPP (PAGE_SIZE / sizeof(grant_status_t))
171 
172 static inline grant_ref_t *__gnttab_entry(grant_ref_t entry)
173 {
174 	return &gnttab_list[(entry) / RPP][(entry) % RPP];
175 }
176 /* This can be used as an l-value */
177 #define gnttab_entry(entry) (*__gnttab_entry(entry))
178 
179 static int get_free_entries(unsigned count)
180 {
181 	unsigned long flags;
182 	int ref, rc = 0;
183 	grant_ref_t head;
184 
185 	spin_lock_irqsave(&gnttab_list_lock, flags);
186 
187 	if ((gnttab_free_count < count) &&
188 	    ((rc = gnttab_expand(count - gnttab_free_count)) < 0)) {
189 		spin_unlock_irqrestore(&gnttab_list_lock, flags);
190 		return rc;
191 	}
192 
193 	ref = head = gnttab_free_head;
194 	gnttab_free_count -= count;
195 	while (count--) {
196 		bitmap_clear(gnttab_free_bitmap, head, 1);
197 		if (gnttab_free_tail_ptr == __gnttab_entry(head))
198 			gnttab_free_tail_ptr = &gnttab_free_head;
199 		if (count)
200 			head = gnttab_entry(head);
201 	}
202 	gnttab_free_head = gnttab_entry(head);
203 	gnttab_entry(head) = GNTTAB_LIST_END;
204 
205 	if (!gnttab_free_count) {
206 		gnttab_last_free = GNTTAB_LIST_END;
207 		gnttab_free_tail_ptr = NULL;
208 	}
209 
210 	spin_unlock_irqrestore(&gnttab_list_lock, flags);
211 
212 	return ref;
213 }
214 
215 static int get_seq_entry_count(void)
216 {
217 	if (gnttab_last_free == GNTTAB_LIST_END || !gnttab_free_tail_ptr ||
218 	    *gnttab_free_tail_ptr == GNTTAB_LIST_END)
219 		return 0;
220 
221 	return gnttab_last_free - *gnttab_free_tail_ptr + 1;
222 }
223 
224 /* Rebuilds the free grant list and tries to find count consecutive entries. */
225 static int get_free_seq(unsigned int count)
226 {
227 	int ret = -ENOSPC;
228 	unsigned int from, to;
229 	grant_ref_t *last;
230 
231 	gnttab_free_tail_ptr = &gnttab_free_head;
232 	last = &gnttab_free_head;
233 
234 	for (from = find_first_bit(gnttab_free_bitmap, gnttab_size);
235 	     from < gnttab_size;
236 	     from = find_next_bit(gnttab_free_bitmap, gnttab_size, to + 1)) {
237 		to = find_next_zero_bit(gnttab_free_bitmap, gnttab_size,
238 					from + 1);
239 		if (ret < 0 && to - from >= count) {
240 			ret = from;
241 			bitmap_clear(gnttab_free_bitmap, ret, count);
242 			from += count;
243 			gnttab_free_count -= count;
244 			if (from == to)
245 				continue;
246 		}
247 
248 		/*
249 		 * Recreate the free list in order to have it properly sorted.
250 		 * This is needed to make sure that the free tail has the maximum
251 		 * possible size.
252 		 */
253 		while (from < to) {
254 			*last = from;
255 			last = __gnttab_entry(from);
256 			gnttab_last_free = from;
257 			from++;
258 		}
259 		if (to < gnttab_size)
260 			gnttab_free_tail_ptr = __gnttab_entry(to - 1);
261 	}
262 
263 	*last = GNTTAB_LIST_END;
264 	if (gnttab_last_free != gnttab_size - 1)
265 		gnttab_free_tail_ptr = NULL;
266 
267 	return ret;
268 }
269 
270 static int get_free_entries_seq(unsigned int count)
271 {
272 	unsigned long flags;
273 	int ret = 0;
274 
275 	spin_lock_irqsave(&gnttab_list_lock, flags);
276 
277 	if (gnttab_free_count < count) {
278 		ret = gnttab_expand(count - gnttab_free_count);
279 		if (ret < 0)
280 			goto out;
281 	}
282 
283 	if (get_seq_entry_count() < count) {
284 		ret = get_free_seq(count);
285 		if (ret >= 0)
286 			goto out;
287 		ret = gnttab_expand(count - get_seq_entry_count());
288 		if (ret < 0)
289 			goto out;
290 	}
291 
292 	ret = *gnttab_free_tail_ptr;
293 	*gnttab_free_tail_ptr = gnttab_entry(ret + count - 1);
294 	gnttab_free_count -= count;
295 	if (!gnttab_free_count)
296 		gnttab_free_tail_ptr = NULL;
297 	bitmap_clear(gnttab_free_bitmap, ret, count);
298 
299  out:
300 	spin_unlock_irqrestore(&gnttab_list_lock, flags);
301 
302 	return ret;
303 }
304 
305 static void do_free_callbacks(void)
306 {
307 	struct gnttab_free_callback *callback, *next;
308 
309 	callback = gnttab_free_callback_list;
310 	gnttab_free_callback_list = NULL;
311 
312 	while (callback != NULL) {
313 		next = callback->next;
314 		if (gnttab_free_count >= callback->count) {
315 			callback->next = NULL;
316 			callback->fn(callback->arg);
317 		} else {
318 			callback->next = gnttab_free_callback_list;
319 			gnttab_free_callback_list = callback;
320 		}
321 		callback = next;
322 	}
323 }
324 
325 static inline void check_free_callbacks(void)
326 {
327 	if (unlikely(gnttab_free_callback_list))
328 		do_free_callbacks();
329 }
330 
331 static void put_free_entry_locked(grant_ref_t ref)
332 {
333 	if (unlikely(ref < GNTTAB_NR_RESERVED_ENTRIES))
334 		return;
335 
336 	gnttab_entry(ref) = gnttab_free_head;
337 	gnttab_free_head = ref;
338 	if (!gnttab_free_count)
339 		gnttab_last_free = ref;
340 	if (gnttab_free_tail_ptr == &gnttab_free_head)
341 		gnttab_free_tail_ptr = __gnttab_entry(ref);
342 	gnttab_free_count++;
343 	bitmap_set(gnttab_free_bitmap, ref, 1);
344 }
345 
346 static void put_free_entry(grant_ref_t ref)
347 {
348 	unsigned long flags;
349 
350 	spin_lock_irqsave(&gnttab_list_lock, flags);
351 	put_free_entry_locked(ref);
352 	check_free_callbacks();
353 	spin_unlock_irqrestore(&gnttab_list_lock, flags);
354 }
355 
356 static void gnttab_set_free(unsigned int start, unsigned int n)
357 {
358 	unsigned int i;
359 
360 	for (i = start; i < start + n - 1; i++)
361 		gnttab_entry(i) = i + 1;
362 
363 	gnttab_entry(i) = GNTTAB_LIST_END;
364 	if (!gnttab_free_count) {
365 		gnttab_free_head = start;
366 		gnttab_free_tail_ptr = &gnttab_free_head;
367 	} else {
368 		gnttab_entry(gnttab_last_free) = start;
369 	}
370 	gnttab_free_count += n;
371 	gnttab_last_free = i;
372 
373 	bitmap_set(gnttab_free_bitmap, start, n);
374 }
375 
376 /*
377  * Following applies to gnttab_update_entry_v1 and gnttab_update_entry_v2.
378  * Introducing a valid entry into the grant table:
379  *  1. Write ent->domid.
380  *  2. Write ent->frame: Frame to which access is permitted.
381  *  3. Write memory barrier (WMB).
382  *  4. Write ent->flags, inc. valid type.
383  */
384 static void gnttab_update_entry_v1(grant_ref_t ref, domid_t domid,
385 				   unsigned long frame, unsigned flags)
386 {
387 	gnttab_shared.v1[ref].domid = domid;
388 	gnttab_shared.v1[ref].frame = frame;
389 	wmb();
390 	gnttab_shared.v1[ref].flags = flags;
391 }
392 
393 static void gnttab_update_entry_v2(grant_ref_t ref, domid_t domid,
394 				   unsigned long frame, unsigned int flags)
395 {
396 	gnttab_shared.v2[ref].hdr.domid = domid;
397 	gnttab_shared.v2[ref].full_page.frame = frame;
398 	wmb();	/* Hypervisor concurrent accesses. */
399 	gnttab_shared.v2[ref].hdr.flags = GTF_permit_access | flags;
400 }
401 
402 /*
403  * Public grant-issuing interface functions
404  */
405 void gnttab_grant_foreign_access_ref(grant_ref_t ref, domid_t domid,
406 				     unsigned long frame, int readonly)
407 {
408 	gnttab_interface->update_entry(ref, domid, frame,
409 			   GTF_permit_access | (readonly ? GTF_readonly : 0));
410 }
411 EXPORT_SYMBOL_GPL(gnttab_grant_foreign_access_ref);
412 
413 int gnttab_grant_foreign_access(domid_t domid, unsigned long frame,
414 				int readonly)
415 {
416 	int ref;
417 
418 	ref = get_free_entries(1);
419 	if (unlikely(ref < 0))
420 		return -ENOSPC;
421 
422 	gnttab_grant_foreign_access_ref(ref, domid, frame, readonly);
423 
424 	return ref;
425 }
426 EXPORT_SYMBOL_GPL(gnttab_grant_foreign_access);
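
/*
 * Editor's note: the block below is an illustrative sketch, not part of the
 * original driver. It shows the typical grant/revoke cycle of a frontend
 * sharing one page with a backend domain; the function name and otherend_id
 * are made up for the example.
 */
#if 0	/* example only, never compiled */
static int example_share_page(domid_t otherend_id, struct page *page)
{
	int ref;

	/* Grant the backend read/write access to the frame backing @page. */
	ref = gnttab_grant_foreign_access(otherend_id, xen_page_to_gfn(page), 0);
	if (ref < 0)
		return ref;

	/* ... publish @ref to the backend, e.g. via xenstore or a ring ... */

	/*
	 * Revoke the grant again. This consumes the caller's reference on
	 * @page: the page is released once the backend has unmapped it,
	 * possibly via the deferred-free path (see gnttab_add_deferred()
	 * below).
	 */
	gnttab_end_foreign_access(ref, page);
	return 0;
}
#endif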
427 
428 static int gnttab_end_foreign_access_ref_v1(grant_ref_t ref)
429 {
430 	u16 flags, nflags;
431 	u16 *pflags;
432 
433 	pflags = &gnttab_shared.v1[ref].flags;
434 	nflags = *pflags;
435 	do {
436 		flags = nflags;
437 		if (flags & (GTF_reading|GTF_writing))
438 			return 0;
439 	} while ((nflags = sync_cmpxchg(pflags, flags, 0)) != flags);
440 
441 	return 1;
442 }
443 
444 static int gnttab_end_foreign_access_ref_v2(grant_ref_t ref)
445 {
446 	gnttab_shared.v2[ref].hdr.flags = 0;
447 	mb();	/* Concurrent access by hypervisor. */
448 	if (grstatus[ref] & (GTF_reading|GTF_writing)) {
449 		return 0;
450 	} else {
451 		/*
452 		 * The read of grstatus needs to have acquire semantics.
453 		 *  On x86, reads already have that, and we just need to
454 		 * protect against compiler reorderings.
455 		 * On other architectures we may need a full barrier.
456 		 */
457 #ifdef CONFIG_X86
458 		barrier();
459 #else
460 		mb();
461 #endif
462 	}
463 
464 	return 1;
465 }
466 
467 static inline int _gnttab_end_foreign_access_ref(grant_ref_t ref)
468 {
469 	return gnttab_interface->end_foreign_access_ref(ref);
470 }
471 
472 int gnttab_end_foreign_access_ref(grant_ref_t ref)
473 {
474 	if (_gnttab_end_foreign_access_ref(ref))
475 		return 1;
476 	pr_warn("WARNING: g.e. %#x still in use!\n", ref);
477 	return 0;
478 }
479 EXPORT_SYMBOL_GPL(gnttab_end_foreign_access_ref);
480 
481 static unsigned long gnttab_read_frame_v1(grant_ref_t ref)
482 {
483 	return gnttab_shared.v1[ref].frame;
484 }
485 
486 static unsigned long gnttab_read_frame_v2(grant_ref_t ref)
487 {
488 	return gnttab_shared.v2[ref].full_page.frame;
489 }
490 
491 struct deferred_entry {
492 	struct list_head list;
493 	grant_ref_t ref;
494 	uint16_t warn_delay;
495 	struct page *page;
496 };
497 static LIST_HEAD(deferred_list);
498 static void gnttab_handle_deferred(struct timer_list *);
499 static DEFINE_TIMER(deferred_timer, gnttab_handle_deferred);
500 
501 static void gnttab_handle_deferred(struct timer_list *unused)
502 {
503 	unsigned int nr = 10;
504 	struct deferred_entry *first = NULL;
505 	unsigned long flags;
506 
507 	spin_lock_irqsave(&gnttab_list_lock, flags);
508 	while (nr--) {
509 		struct deferred_entry *entry
510 			= list_first_entry(&deferred_list,
511 					   struct deferred_entry, list);
512 
513 		if (entry == first)
514 			break;
515 		list_del(&entry->list);
516 		spin_unlock_irqrestore(&gnttab_list_lock, flags);
517 		if (_gnttab_end_foreign_access_ref(entry->ref)) {
518 			put_free_entry(entry->ref);
519 			pr_debug("freeing g.e. %#x (pfn %#lx)\n",
520 				 entry->ref, page_to_pfn(entry->page));
521 			put_page(entry->page);
522 			kfree(entry);
523 			entry = NULL;
524 		} else {
525 			if (!--entry->warn_delay)
526 				pr_info("g.e. %#x still pending\n", entry->ref);
527 			if (!first)
528 				first = entry;
529 		}
530 		spin_lock_irqsave(&gnttab_list_lock, flags);
531 		if (entry)
532 			list_add_tail(&entry->list, &deferred_list);
533 		else if (list_empty(&deferred_list))
534 			break;
535 	}
536 	if (!list_empty(&deferred_list) && !timer_pending(&deferred_timer)) {
537 		deferred_timer.expires = jiffies + HZ;
538 		add_timer(&deferred_timer);
539 	}
540 	spin_unlock_irqrestore(&gnttab_list_lock, flags);
541 }
542 
543 static void gnttab_add_deferred(grant_ref_t ref, struct page *page)
544 {
545 	struct deferred_entry *entry;
546 	gfp_t gfp = (in_atomic() || irqs_disabled()) ? GFP_ATOMIC : GFP_KERNEL;
547 	const char *what = KERN_WARNING "leaking";
548 
549 	entry = kmalloc(sizeof(*entry), gfp);
550 	if (!page) {
551 		unsigned long gfn = gnttab_interface->read_frame(ref);
552 
553 		page = pfn_to_page(gfn_to_pfn(gfn));
554 		get_page(page);
555 	}
556 
557 	if (entry) {
558 		unsigned long flags;
559 
560 		entry->ref = ref;
561 		entry->page = page;
562 		entry->warn_delay = 60;
563 		spin_lock_irqsave(&gnttab_list_lock, flags);
564 		list_add_tail(&entry->list, &deferred_list);
565 		if (!timer_pending(&deferred_timer)) {
566 			deferred_timer.expires = jiffies + HZ;
567 			add_timer(&deferred_timer);
568 		}
569 		spin_unlock_irqrestore(&gnttab_list_lock, flags);
570 		what = KERN_DEBUG "deferring";
571 	}
572 	printk("%s g.e. %#x (pfn %#lx)\n",
573 	       what, ref, page ? page_to_pfn(page) : -1);
574 }
575 
576 int gnttab_try_end_foreign_access(grant_ref_t ref)
577 {
578 	int ret = _gnttab_end_foreign_access_ref(ref);
579 
580 	if (ret)
581 		put_free_entry(ref);
582 
583 	return ret;
584 }
585 EXPORT_SYMBOL_GPL(gnttab_try_end_foreign_access);
586 
587 void gnttab_end_foreign_access(grant_ref_t ref, struct page *page)
588 {
589 	if (gnttab_try_end_foreign_access(ref)) {
590 		if (page)
591 			put_page(page);
592 	} else
593 		gnttab_add_deferred(ref, page);
594 }
595 EXPORT_SYMBOL_GPL(gnttab_end_foreign_access);
596 
597 void gnttab_free_grant_reference(grant_ref_t ref)
598 {
599 	put_free_entry(ref);
600 }
601 EXPORT_SYMBOL_GPL(gnttab_free_grant_reference);
602 
603 void gnttab_free_grant_references(grant_ref_t head)
604 {
605 	grant_ref_t ref;
606 	unsigned long flags;
607 
608 	spin_lock_irqsave(&gnttab_list_lock, flags);
609 	while (head != GNTTAB_LIST_END) {
610 		ref = gnttab_entry(head);
611 		put_free_entry_locked(head);
612 		head = ref;
613 	}
614 	check_free_callbacks();
615 	spin_unlock_irqrestore(&gnttab_list_lock, flags);
616 }
617 EXPORT_SYMBOL_GPL(gnttab_free_grant_references);
618 
619 void gnttab_free_grant_reference_seq(grant_ref_t head, unsigned int count)
620 {
621 	unsigned long flags;
622 	unsigned int i;
623 
624 	spin_lock_irqsave(&gnttab_list_lock, flags);
625 	for (i = count; i > 0; i--)
626 		put_free_entry_locked(head + i - 1);
627 	check_free_callbacks();
628 	spin_unlock_irqrestore(&gnttab_list_lock, flags);
629 }
630 EXPORT_SYMBOL_GPL(gnttab_free_grant_reference_seq);
631 
632 int gnttab_alloc_grant_references(u16 count, grant_ref_t *head)
633 {
634 	int h = get_free_entries(count);
635 
636 	if (h < 0)
637 		return -ENOSPC;
638 
639 	*head = h;
640 
641 	return 0;
642 }
643 EXPORT_SYMBOL_GPL(gnttab_alloc_grant_references);
644 
645 int gnttab_alloc_grant_reference_seq(unsigned int count, grant_ref_t *first)
646 {
647 	int h;
648 
649 	if (count == 1)
650 		h = get_free_entries(1);
651 	else
652 		h = get_free_entries_seq(count);
653 
654 	if (h < 0)
655 		return -ENOSPC;
656 
657 	*first = h;
658 
659 	return 0;
660 }
661 EXPORT_SYMBOL_GPL(gnttab_alloc_grant_reference_seq);
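
/*
 * Editor's note: illustrative sketch only, not part of the original driver.
 * It shows how a driver needing consecutive grant references (e.g. virtio)
 * might combine gnttab_alloc_grant_reference_seq() with the per-ref grant and
 * free calls; the function name is made up and error handling is minimal.
 */
#if 0	/* example only, never compiled */
static int example_grant_consecutive(domid_t domid, struct page **pages,
				     unsigned int count)
{
	grant_ref_t first;
	unsigned int i;
	int ret;

	/* Get @count consecutive grant references: first, first + 1, ... */
	ret = gnttab_alloc_grant_reference_seq(count, &first);
	if (ret < 0)
		return ret;

	for (i = 0; i < count; i++)
		gnttab_grant_foreign_access_ref(first + i, domid,
						xen_page_to_gfn(pages[i]), 0);

	/* ... use the grants; afterwards revoke and free them again ... */
	for (i = 0; i < count; i++)
		gnttab_end_foreign_access_ref(first + i);
	gnttab_free_grant_reference_seq(first, count);

	return 0;
}
#endif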
662 
663 int gnttab_empty_grant_references(const grant_ref_t *private_head)
664 {
665 	return (*private_head == GNTTAB_LIST_END);
666 }
667 EXPORT_SYMBOL_GPL(gnttab_empty_grant_references);
668 
669 int gnttab_claim_grant_reference(grant_ref_t *private_head)
670 {
671 	grant_ref_t g = *private_head;
672 	if (unlikely(g == GNTTAB_LIST_END))
673 		return -ENOSPC;
674 	*private_head = gnttab_entry(g);
675 	return g;
676 }
677 EXPORT_SYMBOL_GPL(gnttab_claim_grant_reference);
678 
679 void gnttab_release_grant_reference(grant_ref_t *private_head,
680 				    grant_ref_t release)
681 {
682 	gnttab_entry(release) = *private_head;
683 	*private_head = release;
684 }
685 EXPORT_SYMBOL_GPL(gnttab_release_grant_reference);
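
/*
 * Editor's note: illustrative sketch only, not part of the original driver.
 * It shows the private reference-pool pattern used by frontends: reserve a
 * batch of references, claim/release them individually, and return the whole
 * pool at the end. The function name is made up.
 */
#if 0	/* example only, never compiled */
static void example_ref_pool(domid_t domid, struct page *page)
{
	grant_ref_t head;
	int ref;

	/* Reserve a private pool of 16 grant references up front. */
	if (gnttab_alloc_grant_references(16, &head) < 0)
		return;

	/* Claim one reference from the pool and activate it. */
	ref = gnttab_claim_grant_reference(&head);
	if (ref >= 0) {
		gnttab_grant_foreign_access_ref(ref, domid,
						xen_page_to_gfn(page), 0);
		/* ... when done, revoke the grant and put the ref back ... */
		gnttab_end_foreign_access_ref(ref);
		gnttab_release_grant_reference(&head, ref);
	}

	/* Return the whole pool to the global free list. */
	gnttab_free_grant_references(head);
}
#endif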
686 
687 void gnttab_request_free_callback(struct gnttab_free_callback *callback,
688 				  void (*fn)(void *), void *arg, u16 count)
689 {
690 	unsigned long flags;
691 	struct gnttab_free_callback *cb;
692 
693 	spin_lock_irqsave(&gnttab_list_lock, flags);
694 
695 	/* Check if the callback is already on the list */
696 	cb = gnttab_free_callback_list;
697 	while (cb) {
698 		if (cb == callback)
699 			goto out;
700 		cb = cb->next;
701 	}
702 
703 	callback->fn = fn;
704 	callback->arg = arg;
705 	callback->count = count;
706 	callback->next = gnttab_free_callback_list;
707 	gnttab_free_callback_list = callback;
708 	check_free_callbacks();
709 out:
710 	spin_unlock_irqrestore(&gnttab_list_lock, flags);
711 }
712 EXPORT_SYMBOL_GPL(gnttab_request_free_callback);
713 
714 void gnttab_cancel_free_callback(struct gnttab_free_callback *callback)
715 {
716 	struct gnttab_free_callback **pcb;
717 	unsigned long flags;
718 
719 	spin_lock_irqsave(&gnttab_list_lock, flags);
720 	for (pcb = &gnttab_free_callback_list; *pcb; pcb = &(*pcb)->next) {
721 		if (*pcb == callback) {
722 			*pcb = callback->next;
723 			break;
724 		}
725 	}
726 	spin_unlock_irqrestore(&gnttab_list_lock, flags);
727 }
728 EXPORT_SYMBOL_GPL(gnttab_cancel_free_callback);
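
/*
 * Editor's note: illustrative sketch only, not part of the original driver.
 * It shows how a driver can ask to be notified once enough grant references
 * are free again; note that the callback runs with gnttab_list_lock held and
 * interrupts disabled. All names are made up.
 */
#if 0	/* example only, never compiled */
static struct gnttab_free_callback example_callback;

static void example_refs_available(void *arg)
{
	/* At least 32 references are free again; kick the queue in @arg. */
}

static void example_wait_for_refs(void *queue)
{
	grant_ref_t head;

	if (gnttab_alloc_grant_references(32, &head) < 0) {
		/* Not enough free entries right now: request a callback. */
		gnttab_request_free_callback(&example_callback,
					     example_refs_available, queue, 32);
		return;
	}

	/* ... got the references, proceed (and free them eventually) ... */
}
#endif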
729 
730 static unsigned int gnttab_frames(unsigned int frames, unsigned int align)
731 {
732 	return (frames * gnttab_interface->grefs_per_grant_frame + align - 1) /
733 	       align;
734 }
735 
736 static int grow_gnttab_list(unsigned int more_frames)
737 {
738 	unsigned int new_nr_grant_frames, extra_entries, i;
739 	unsigned int nr_glist_frames, new_nr_glist_frames;
740 	unsigned int grefs_per_frame;
741 
742 	grefs_per_frame = gnttab_interface->grefs_per_grant_frame;
743 
744 	new_nr_grant_frames = nr_grant_frames + more_frames;
745 	extra_entries = more_frames * grefs_per_frame;
746 
747 	nr_glist_frames = gnttab_frames(nr_grant_frames, RPP);
748 	new_nr_glist_frames = gnttab_frames(new_nr_grant_frames, RPP);
749 	for (i = nr_glist_frames; i < new_nr_glist_frames; i++) {
750 		gnttab_list[i] = (grant_ref_t *)__get_free_page(GFP_ATOMIC);
751 		if (!gnttab_list[i])
752 			goto grow_nomem;
753 	}
754 
755 	gnttab_set_free(gnttab_size, extra_entries);
756 
757 	if (!gnttab_free_tail_ptr)
758 		gnttab_free_tail_ptr = __gnttab_entry(gnttab_size);
759 
760 	nr_grant_frames = new_nr_grant_frames;
761 	gnttab_size += extra_entries;
762 
763 	check_free_callbacks();
764 
765 	return 0;
766 
767 grow_nomem:
768 	while (i-- > nr_glist_frames)
769 		free_page((unsigned long) gnttab_list[i]);
770 	return -ENOMEM;
771 }
772 
773 static unsigned int __max_nr_grant_frames(void)
774 {
775 	struct gnttab_query_size query;
776 	int rc;
777 
778 	query.dom = DOMID_SELF;
779 
780 	rc = HYPERVISOR_grant_table_op(GNTTABOP_query_size, &query, 1);
781 	if ((rc < 0) || (query.status != GNTST_okay))
782 		return 4; /* Legacy max supported number of frames */
783 
784 	return query.max_nr_frames;
785 }
786 
787 unsigned int gnttab_max_grant_frames(void)
788 {
789 	unsigned int xen_max = __max_nr_grant_frames();
790 	static unsigned int boot_max_nr_grant_frames;
791 
792 	/* First time, initialize it properly. */
793 	if (!boot_max_nr_grant_frames)
794 		boot_max_nr_grant_frames = __max_nr_grant_frames();
795 
796 	if (xen_max > boot_max_nr_grant_frames)
797 		return boot_max_nr_grant_frames;
798 	return xen_max;
799 }
800 EXPORT_SYMBOL_GPL(gnttab_max_grant_frames);
801 
802 int gnttab_setup_auto_xlat_frames(phys_addr_t addr)
803 {
804 	xen_pfn_t *pfn;
805 	unsigned int max_nr_gframes = __max_nr_grant_frames();
806 	unsigned int i;
807 	void *vaddr;
808 
809 	if (xen_auto_xlat_grant_frames.count)
810 		return -EINVAL;
811 
812 	vaddr = memremap(addr, XEN_PAGE_SIZE * max_nr_gframes, MEMREMAP_WB);
813 	if (vaddr == NULL) {
814 		pr_warn("Failed to memremap gnttab share frames (addr=%pa)!\n",
815 			&addr);
816 		return -ENOMEM;
817 	}
818 	pfn = kcalloc(max_nr_gframes, sizeof(pfn[0]), GFP_KERNEL);
819 	if (!pfn) {
820 		memunmap(vaddr);
821 		return -ENOMEM;
822 	}
823 	for (i = 0; i < max_nr_gframes; i++)
824 		pfn[i] = XEN_PFN_DOWN(addr) + i;
825 
826 	xen_auto_xlat_grant_frames.vaddr = vaddr;
827 	xen_auto_xlat_grant_frames.pfn = pfn;
828 	xen_auto_xlat_grant_frames.count = max_nr_gframes;
829 
830 	return 0;
831 }
832 EXPORT_SYMBOL_GPL(gnttab_setup_auto_xlat_frames);
833 
834 void gnttab_free_auto_xlat_frames(void)
835 {
836 	if (!xen_auto_xlat_grant_frames.count)
837 		return;
838 	kfree(xen_auto_xlat_grant_frames.pfn);
839 	memunmap(xen_auto_xlat_grant_frames.vaddr);
840 
841 	xen_auto_xlat_grant_frames.pfn = NULL;
842 	xen_auto_xlat_grant_frames.count = 0;
843 	xen_auto_xlat_grant_frames.vaddr = NULL;
844 }
845 EXPORT_SYMBOL_GPL(gnttab_free_auto_xlat_frames);
846 
847 int gnttab_pages_set_private(int nr_pages, struct page **pages)
848 {
849 	int i;
850 
851 	for (i = 0; i < nr_pages; i++) {
852 #if BITS_PER_LONG < 64
853 		struct xen_page_foreign *foreign;
854 
855 		foreign = kzalloc(sizeof(*foreign), GFP_KERNEL);
856 		if (!foreign)
857 			return -ENOMEM;
858 
859 		set_page_private(pages[i], (unsigned long)foreign);
860 #endif
861 		SetPagePrivate(pages[i]);
862 	}
863 
864 	return 0;
865 }
866 EXPORT_SYMBOL_GPL(gnttab_pages_set_private);
867 
868 /**
869  * gnttab_alloc_pages - alloc pages suitable for grant mapping into
870  * @nr_pages: number of pages to alloc
871  * @pages: returns the pages
872  */
873 int gnttab_alloc_pages(int nr_pages, struct page **pages)
874 {
875 	int ret;
876 
877 	ret = xen_alloc_unpopulated_pages(nr_pages, pages);
878 	if (ret < 0)
879 		return ret;
880 
881 	ret = gnttab_pages_set_private(nr_pages, pages);
882 	if (ret < 0)
883 		gnttab_free_pages(nr_pages, pages);
884 
885 	return ret;
886 }
887 EXPORT_SYMBOL_GPL(gnttab_alloc_pages);
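
/*
 * Editor's note: illustrative sketch only, not part of the original driver.
 * It shows the matching gnttab_alloc_pages()/gnttab_free_pages() pair a
 * backend uses to obtain pages that foreign grants can be mapped into. The
 * function name is made up.
 */
#if 0	/* example only, never compiled */
static int example_backend_pages(void)
{
	struct page *pages[4];
	int ret;

	/* Get four unpopulated pages suitable as grant-mapping targets. */
	ret = gnttab_alloc_pages(ARRAY_SIZE(pages), pages);
	if (ret < 0)
		return ret;

	/* ... map foreign grants into these pages, use them, unmap ... */

	gnttab_free_pages(ARRAY_SIZE(pages), pages);
	return 0;
}
#endif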
888 
889 #ifdef CONFIG_XEN_UNPOPULATED_ALLOC
890 static inline void cache_init(struct gnttab_page_cache *cache)
891 {
892 	cache->pages = NULL;
893 }
894 
895 static inline bool cache_empty(struct gnttab_page_cache *cache)
896 {
897 	return !cache->pages;
898 }
899 
900 static inline struct page *cache_deq(struct gnttab_page_cache *cache)
901 {
902 	struct page *page;
903 
904 	page = cache->pages;
905 	cache->pages = page->zone_device_data;
906 
907 	return page;
908 }
909 
910 static inline void cache_enq(struct gnttab_page_cache *cache, struct page *page)
911 {
912 	page->zone_device_data = cache->pages;
913 	cache->pages = page;
914 }
915 #else
916 static inline void cache_init(struct gnttab_page_cache *cache)
917 {
918 	INIT_LIST_HEAD(&cache->pages);
919 }
920 
921 static inline bool cache_empty(struct gnttab_page_cache *cache)
922 {
923 	return list_empty(&cache->pages);
924 }
925 
926 static inline struct page *cache_deq(struct gnttab_page_cache *cache)
927 {
928 	struct page *page;
929 
930 	page = list_first_entry(&cache->pages, struct page, lru);
931 	list_del(&page->lru);
932 
933 	return page;
934 }
935 
936 static inline void cache_enq(struct gnttab_page_cache *cache, struct page *page)
937 {
938 	list_add(&page->lru, &cache->pages);
939 }
940 #endif
941 
942 void gnttab_page_cache_init(struct gnttab_page_cache *cache)
943 {
944 	spin_lock_init(&cache->lock);
945 	cache_init(cache);
946 	cache->num_pages = 0;
947 }
948 EXPORT_SYMBOL_GPL(gnttab_page_cache_init);
949 
950 int gnttab_page_cache_get(struct gnttab_page_cache *cache, struct page **page)
951 {
952 	unsigned long flags;
953 
954 	spin_lock_irqsave(&cache->lock, flags);
955 
956 	if (cache_empty(cache)) {
957 		spin_unlock_irqrestore(&cache->lock, flags);
958 		return gnttab_alloc_pages(1, page);
959 	}
960 
961 	page[0] = cache_deq(cache);
962 	cache->num_pages--;
963 
964 	spin_unlock_irqrestore(&cache->lock, flags);
965 
966 	return 0;
967 }
968 EXPORT_SYMBOL_GPL(gnttab_page_cache_get);
969 
970 void gnttab_page_cache_put(struct gnttab_page_cache *cache, struct page **page,
971 			   unsigned int num)
972 {
973 	unsigned long flags;
974 	unsigned int i;
975 
976 	spin_lock_irqsave(&cache->lock, flags);
977 
978 	for (i = 0; i < num; i++)
979 		cache_enq(cache, page[i]);
980 	cache->num_pages += num;
981 
982 	spin_unlock_irqrestore(&cache->lock, flags);
983 }
984 EXPORT_SYMBOL_GPL(gnttab_page_cache_put);
985 
986 void gnttab_page_cache_shrink(struct gnttab_page_cache *cache, unsigned int num)
987 {
988 	struct page *page[10];
989 	unsigned int i = 0;
990 	unsigned long flags;
991 
992 	spin_lock_irqsave(&cache->lock, flags);
993 
994 	while (cache->num_pages > num) {
995 		page[i] = cache_deq(cache);
996 		cache->num_pages--;
997 		if (++i == ARRAY_SIZE(page)) {
998 			spin_unlock_irqrestore(&cache->lock, flags);
999 			gnttab_free_pages(i, page);
1000 			i = 0;
1001 			spin_lock_irqsave(&cache->lock, flags);
1002 		}
1003 	}
1004 
1005 	spin_unlock_irqrestore(&cache->lock, flags);
1006 
1007 	if (i != 0)
1008 		gnttab_free_pages(i, page);
1009 }
1010 EXPORT_SYMBOL_GPL(gnttab_page_cache_shrink);
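
/*
 * Editor's note: illustrative sketch only, not part of the original driver.
 * It shows the intended life cycle of the page cache: get a page (falling
 * back to gnttab_alloc_pages() when the cache is empty), put it back instead
 * of freeing it, and shrink the cache when spare pages should be released.
 * All names are made up.
 */
#if 0	/* example only, never compiled */
static struct gnttab_page_cache example_cache;

static int example_use_cache(void)
{
	struct page *page;
	int ret;

	gnttab_page_cache_init(&example_cache);

	ret = gnttab_page_cache_get(&example_cache, &page);
	if (ret < 0)
		return ret;

	/* ... use the page as a grant-mapping target ... */

	/* Return the page to the cache instead of freeing it. */
	gnttab_page_cache_put(&example_cache, &page, 1);

	/* Keep at most 8 spare pages cached. */
	gnttab_page_cache_shrink(&example_cache, 8);
	return 0;
}
#endif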
1011 
1012 void gnttab_pages_clear_private(int nr_pages, struct page **pages)
1013 {
1014 	int i;
1015 
1016 	for (i = 0; i < nr_pages; i++) {
1017 		if (PagePrivate(pages[i])) {
1018 #if BITS_PER_LONG < 64
1019 			kfree((void *)page_private(pages[i]));
1020 #endif
1021 			ClearPagePrivate(pages[i]);
1022 		}
1023 	}
1024 }
1025 EXPORT_SYMBOL_GPL(gnttab_pages_clear_private);
1026 
1027 /**
1028  * gnttab_free_pages - free pages allocated by gnttab_alloc_pages()
1029  * @nr_pages: number of pages to free
1030  * @pages: the pages
1031  */
1032 void gnttab_free_pages(int nr_pages, struct page **pages)
1033 {
1034 	gnttab_pages_clear_private(nr_pages, pages);
1035 	xen_free_unpopulated_pages(nr_pages, pages);
1036 }
1037 EXPORT_SYMBOL_GPL(gnttab_free_pages);
1038 
1039 #ifdef CONFIG_XEN_GRANT_DMA_ALLOC
1040 /**
1041  * gnttab_dma_alloc_pages - alloc DMAable pages suitable for mapping foreign grants into
1042  * @args: arguments to the function
1043  */
1044 int gnttab_dma_alloc_pages(struct gnttab_dma_alloc_args *args)
1045 {
1046 	unsigned long pfn, start_pfn;
1047 	size_t size;
1048 	int i, ret;
1049 
1050 	if (args->nr_pages < 0 || args->nr_pages > (INT_MAX >> PAGE_SHIFT))
1051 		return -ENOMEM;
1052 
1053 	size = args->nr_pages << PAGE_SHIFT;
1054 	if (args->coherent)
1055 		args->vaddr = dma_alloc_coherent(args->dev, size,
1056 						 &args->dev_bus_addr,
1057 						 GFP_KERNEL | __GFP_NOWARN);
1058 	else
1059 		args->vaddr = dma_alloc_wc(args->dev, size,
1060 					   &args->dev_bus_addr,
1061 					   GFP_KERNEL | __GFP_NOWARN);
1062 	if (!args->vaddr) {
1063 		pr_debug("Failed to allocate DMA buffer of size %zu\n", size);
1064 		return -ENOMEM;
1065 	}
1066 
1067 	start_pfn = __phys_to_pfn(args->dev_bus_addr);
1068 	for (pfn = start_pfn, i = 0; pfn < start_pfn + args->nr_pages;
1069 			pfn++, i++) {
1070 		struct page *page = pfn_to_page(pfn);
1071 
1072 		args->pages[i] = page;
1073 		args->frames[i] = xen_page_to_gfn(page);
1074 		xenmem_reservation_scrub_page(page);
1075 	}
1076 
1077 	xenmem_reservation_va_mapping_reset(args->nr_pages, args->pages);
1078 
1079 	ret = xenmem_reservation_decrease(args->nr_pages, args->frames);
1080 	if (ret != args->nr_pages) {
1081 		pr_debug("Failed to decrease reservation for DMA buffer\n");
1082 		ret = -EFAULT;
1083 		goto fail;
1084 	}
1085 
1086 	ret = gnttab_pages_set_private(args->nr_pages, args->pages);
1087 	if (ret < 0)
1088 		goto fail;
1089 
1090 	return 0;
1091 
1092 fail:
1093 	gnttab_dma_free_pages(args);
1094 	return ret;
1095 }
1096 EXPORT_SYMBOL_GPL(gnttab_dma_alloc_pages);
1097 
1098 /**
1099  * gnttab_dma_free_pages - free DMAable pages
1100  * @args: arguments to the function
1101  */
1102 int gnttab_dma_free_pages(struct gnttab_dma_alloc_args *args)
1103 {
1104 	size_t size;
1105 	int i, ret;
1106 
1107 	gnttab_pages_clear_private(args->nr_pages, args->pages);
1108 
1109 	for (i = 0; i < args->nr_pages; i++)
1110 		args->frames[i] = page_to_xen_pfn(args->pages[i]);
1111 
1112 	ret = xenmem_reservation_increase(args->nr_pages, args->frames);
1113 	if (ret != args->nr_pages) {
1114 		pr_debug("Failed to increase reservation for DMA buffer\n");
1115 		ret = -EFAULT;
1116 	} else {
1117 		ret = 0;
1118 	}
1119 
1120 	xenmem_reservation_va_mapping_update(args->nr_pages, args->pages,
1121 					     args->frames);
1122 
1123 	size = args->nr_pages << PAGE_SHIFT;
1124 	if (args->coherent)
1125 		dma_free_coherent(args->dev, size,
1126 				  args->vaddr, args->dev_bus_addr);
1127 	else
1128 		dma_free_wc(args->dev, size,
1129 			    args->vaddr, args->dev_bus_addr);
1130 	return ret;
1131 }
1132 EXPORT_SYMBOL_GPL(gnttab_dma_free_pages);
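
/*
 * Editor's note: illustrative sketch only, not part of the original driver.
 * It shows how a caller fills struct gnttab_dma_alloc_args (field names as
 * used above) before calling gnttab_dma_alloc_pages(); the function name and
 * the caller-provided arrays are made up.
 */
#if 0	/* example only, never compiled */
static int example_dma_buf(struct device *dev, int nr_pages,
			   struct page **pages, xen_pfn_t *frames)
{
	struct gnttab_dma_alloc_args args = {
		.dev		= dev,
		.coherent	= true,		/* use dma_alloc_coherent() */
		.nr_pages	= nr_pages,
		.pages		= pages,	/* caller-provided array */
		.frames		= frames,	/* caller-provided array */
	};
	int ret;

	ret = gnttab_dma_alloc_pages(&args);
	if (ret < 0)
		return ret;

	/* ... grant args.frames[] to the other domain, DMA via args.vaddr ... */

	return gnttab_dma_free_pages(&args);
}
#endif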
1133 #endif
1134 
1135 /* Handling of paged out grant targets (GNTST_eagain) */
1136 #define MAX_DELAY 256
1137 static inline void
1138 gnttab_retry_eagain_gop(unsigned int cmd, void *gop, int16_t *status,
1139 						const char *func)
1140 {
1141 	unsigned delay = 1;
1142 
1143 	do {
1144 		BUG_ON(HYPERVISOR_grant_table_op(cmd, gop, 1));
1145 		if (*status == GNTST_eagain)
1146 			msleep(delay++);
1147 	} while ((*status == GNTST_eagain) && (delay < MAX_DELAY));
1148 
1149 	if (delay >= MAX_DELAY) {
1150 		pr_err("%s: %s eagain grant\n", func, current->comm);
1151 		*status = GNTST_bad_page;
1152 	}
1153 }
1154 
1155 void gnttab_batch_map(struct gnttab_map_grant_ref *batch, unsigned count)
1156 {
1157 	struct gnttab_map_grant_ref *op;
1158 
1159 	if (HYPERVISOR_grant_table_op(GNTTABOP_map_grant_ref, batch, count))
1160 		BUG();
1161 	for (op = batch; op < batch + count; op++)
1162 		if (op->status == GNTST_eagain)
1163 			gnttab_retry_eagain_gop(GNTTABOP_map_grant_ref, op,
1164 						&op->status, __func__);
1165 }
1166 EXPORT_SYMBOL_GPL(gnttab_batch_map);
1167 
1168 void gnttab_batch_copy(struct gnttab_copy *batch, unsigned count)
1169 {
1170 	struct gnttab_copy *op;
1171 
1172 	if (HYPERVISOR_grant_table_op(GNTTABOP_copy, batch, count))
1173 		BUG();
1174 	for (op = batch; op < batch + count; op++)
1175 		if (op->status == GNTST_eagain)
1176 			gnttab_retry_eagain_gop(GNTTABOP_copy, op,
1177 						&op->status, __func__);
1178 }
1179 EXPORT_SYMBOL_GPL(gnttab_batch_copy);
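
/*
 * Editor's note: illustrative sketch only, not part of the original driver.
 * It shows a single GNTTABOP_copy operation pulling data from a foreign grant
 * into a local page via gnttab_batch_copy(). The struct gnttab_copy layout and
 * GNTCOPY_* flags are assumed from the Xen grant-table interface headers; the
 * function name is made up.
 */
#if 0	/* example only, never compiled */
static int example_copy_from_grant(domid_t otherend_id, grant_ref_t ref,
				   struct page *page, unsigned int len)
{
	struct gnttab_copy op = {
		.flags		= GNTCOPY_source_gref,
		.len		= len,		/* at most XEN_PAGE_SIZE */
		.source.u.ref	= ref,
		.source.domid	= otherend_id,
		.dest.u.gmfn	= xen_page_to_gfn(page),
		.dest.domid	= DOMID_SELF,
	};

	/* Issues the hypercall and transparently retries GNTST_eagain. */
	gnttab_batch_copy(&op, 1);

	return op.status == GNTST_okay ? 0 : -EIO;
}
#endif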
1180 
1181 void gnttab_foreach_grant_in_range(struct page *page,
1182 				   unsigned int offset,
1183 				   unsigned int len,
1184 				   xen_grant_fn_t fn,
1185 				   void *data)
1186 {
1187 	unsigned int goffset;
1188 	unsigned int glen;
1189 	unsigned long xen_pfn;
1190 
1191 	len = min_t(unsigned int, PAGE_SIZE - offset, len);
1192 	goffset = xen_offset_in_page(offset);
1193 
1194 	xen_pfn = page_to_xen_pfn(page) + XEN_PFN_DOWN(offset);
1195 
1196 	while (len) {
1197 		glen = min_t(unsigned int, XEN_PAGE_SIZE - goffset, len);
1198 		fn(pfn_to_gfn(xen_pfn), goffset, glen, data);
1199 
1200 		goffset = 0;
1201 		xen_pfn++;
1202 		len -= glen;
1203 	}
1204 }
1205 EXPORT_SYMBOL_GPL(gnttab_foreach_grant_in_range);
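
/*
 * Editor's note: illustrative sketch only, not part of the original driver.
 * It shows a xen_grant_fn_t callback granting a byte range of a (possibly
 * larger than 4KiB) page in XEN_PAGE_SIZE chunks, the way network/block
 * frontends use this helper. All names are made up.
 */
#if 0	/* example only, never compiled */
struct example_grant_state {
	grant_ref_t *refs;		/* pre-allocated grant references */
	unsigned int next;
	domid_t otherend_id;
};

/* Invoked once per XEN_PAGE_SIZE chunk covered by the byte range. */
static void example_one_grant(unsigned long gfn, unsigned int offset,
			      unsigned int len, void *data)
{
	struct example_grant_state *state = data;

	gnttab_grant_foreign_access_ref(state->refs[state->next++],
					state->otherend_id, gfn, 0);
}

static void example_grant_range(struct page *page, unsigned int offset,
				unsigned int len,
				struct example_grant_state *state)
{
	gnttab_foreach_grant_in_range(page, offset, len, example_one_grant,
				      state);
}
#endif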
1206 
1207 void gnttab_foreach_grant(struct page **pages,
1208 			  unsigned int nr_grefs,
1209 			  xen_grant_fn_t fn,
1210 			  void *data)
1211 {
1212 	unsigned int goffset = 0;
1213 	unsigned long xen_pfn = 0;
1214 	unsigned int i;
1215 
1216 	for (i = 0; i < nr_grefs; i++) {
1217 		if ((i % XEN_PFN_PER_PAGE) == 0) {
1218 			xen_pfn = page_to_xen_pfn(pages[i / XEN_PFN_PER_PAGE]);
1219 			goffset = 0;
1220 		}
1221 
1222 		fn(pfn_to_gfn(xen_pfn), goffset, XEN_PAGE_SIZE, data);
1223 
1224 		goffset += XEN_PAGE_SIZE;
1225 		xen_pfn++;
1226 	}
1227 }
1228 
1229 int gnttab_map_refs(struct gnttab_map_grant_ref *map_ops,
1230 		    struct gnttab_map_grant_ref *kmap_ops,
1231 		    struct page **pages, unsigned int count)
1232 {
1233 	int i, ret;
1234 
1235 	ret = HYPERVISOR_grant_table_op(GNTTABOP_map_grant_ref, map_ops, count);
1236 	if (ret)
1237 		return ret;
1238 
1239 	for (i = 0; i < count; i++) {
1240 		switch (map_ops[i].status) {
1241 		case GNTST_okay:
1242 		{
1243 			struct xen_page_foreign *foreign;
1244 
1245 			SetPageForeign(pages[i]);
1246 			foreign = xen_page_foreign(pages[i]);
1247 			foreign->domid = map_ops[i].dom;
1248 			foreign->gref = map_ops[i].ref;
1249 			break;
1250 		}
1251 
1252 		case GNTST_no_device_space:
1253 			pr_warn_ratelimited("maptrack limit reached, can't map all guest pages\n");
1254 			break;
1255 
1256 		case GNTST_eagain:
1257 			/* Retry eagain maps */
1258 			gnttab_retry_eagain_gop(GNTTABOP_map_grant_ref,
1259 						map_ops + i,
1260 						&map_ops[i].status, __func__);
1261 			/* Test status in next loop iteration. */
1262 			i--;
1263 			break;
1264 
1265 		default:
1266 			break;
1267 		}
1268 	}
1269 
1270 	return set_foreign_p2m_mapping(map_ops, kmap_ops, pages, count);
1271 }
1272 EXPORT_SYMBOL_GPL(gnttab_map_refs);
1273 
1274 int gnttab_unmap_refs(struct gnttab_unmap_grant_ref *unmap_ops,
1275 		      struct gnttab_unmap_grant_ref *kunmap_ops,
1276 		      struct page **pages, unsigned int count)
1277 {
1278 	unsigned int i;
1279 	int ret;
1280 
1281 	ret = HYPERVISOR_grant_table_op(GNTTABOP_unmap_grant_ref, unmap_ops, count);
1282 	if (ret)
1283 		return ret;
1284 
1285 	for (i = 0; i < count; i++)
1286 		ClearPageForeign(pages[i]);
1287 
1288 	return clear_foreign_p2m_mapping(unmap_ops, kunmap_ops, pages, count);
1289 }
1290 EXPORT_SYMBOL_GPL(gnttab_unmap_refs);
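
/*
 * Editor's note: illustrative sketch only, not part of the original driver.
 * It shows a backend mapping one foreign grant into a local page and
 * unmapping it again, assuming the gnttab_set_map_op()/gnttab_set_unmap_op()
 * helpers from <xen/grant_table.h>; the function name is made up and error
 * handling is minimal.
 */
#if 0	/* example only, never compiled */
static int example_map_foreign(domid_t otherend_id, grant_ref_t ref,
			       struct page *page)
{
	struct gnttab_map_grant_ref map_op;
	struct gnttab_unmap_grant_ref unmap_op;
	unsigned long vaddr = (unsigned long)pfn_to_kaddr(page_to_pfn(page));
	int ret;

	/* Map grant @ref of @otherend_id into @page (from gnttab_alloc_pages()). */
	gnttab_set_map_op(&map_op, vaddr, GNTMAP_host_map, ref, otherend_id);
	ret = gnttab_map_refs(&map_op, NULL, &page, 1);
	if (ret || map_op.status != GNTST_okay)
		return -EINVAL;

	/* ... access the foreign data through @page ... */

	gnttab_set_unmap_op(&unmap_op, vaddr, GNTMAP_host_map, map_op.handle);
	return gnttab_unmap_refs(&unmap_op, NULL, &page, 1);
}
#endif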
1291 
1292 #define GNTTAB_UNMAP_REFS_DELAY 5
1293 
1294 static void __gnttab_unmap_refs_async(struct gntab_unmap_queue_data* item);
1295 
1296 static void gnttab_unmap_work(struct work_struct *work)
1297 {
1298 	struct gntab_unmap_queue_data
1299 		*unmap_data = container_of(work,
1300 					   struct gntab_unmap_queue_data,
1301 					   gnttab_work.work);
1302 	if (unmap_data->age != UINT_MAX)
1303 		unmap_data->age++;
1304 	__gnttab_unmap_refs_async(unmap_data);
1305 }
1306 
1307 static void __gnttab_unmap_refs_async(struct gntab_unmap_queue_data* item)
1308 {
1309 	int ret;
1310 	int pc;
1311 
1312 	for (pc = 0; pc < item->count; pc++) {
1313 		if (page_count(item->pages[pc]) > 1) {
1314 			unsigned long delay = GNTTAB_UNMAP_REFS_DELAY * (item->age + 1);
1315 			schedule_delayed_work(&item->gnttab_work,
1316 					      msecs_to_jiffies(delay));
1317 			return;
1318 		}
1319 	}
1320 
1321 	ret = gnttab_unmap_refs(item->unmap_ops, item->kunmap_ops,
1322 				item->pages, item->count);
1323 	item->done(ret, item);
1324 }
1325 
1326 void gnttab_unmap_refs_async(struct gntab_unmap_queue_data* item)
1327 {
1328 	INIT_DELAYED_WORK(&item->gnttab_work, gnttab_unmap_work);
1329 	item->age = 0;
1330 
1331 	__gnttab_unmap_refs_async(item);
1332 }
1333 EXPORT_SYMBOL_GPL(gnttab_unmap_refs_async);
1334 
1335 static void unmap_refs_callback(int result,
1336 		struct gntab_unmap_queue_data *data)
1337 {
1338 	struct unmap_refs_callback_data *d = data->data;
1339 
1340 	d->result = result;
1341 	complete(&d->completion);
1342 }
1343 
1344 int gnttab_unmap_refs_sync(struct gntab_unmap_queue_data *item)
1345 {
1346 	struct unmap_refs_callback_data data;
1347 
1348 	init_completion(&data.completion);
1349 	item->data = &data;
1350 	item->done = &unmap_refs_callback;
1351 	gnttab_unmap_refs_async(item);
1352 	wait_for_completion(&data.completion);
1353 
1354 	return data.result;
1355 }
1356 EXPORT_SYMBOL_GPL(gnttab_unmap_refs_sync);
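
/*
 * Editor's note: illustrative sketch only, not part of the original driver.
 * It shows the minimal setup for a synchronous unmap: the caller provides the
 * unmap ops, pages and count, while gnttab_unmap_refs_sync() supplies the
 * completion callback itself. The function name is made up.
 */
#if 0	/* example only, never compiled */
static int example_unmap_sync(struct gnttab_unmap_grant_ref *unmap_ops,
			      struct page **pages, unsigned int count)
{
	struct gntab_unmap_queue_data unmap_data = {
		.unmap_ops	= unmap_ops,
		.kunmap_ops	= NULL,
		.pages		= pages,
		.count		= count,
	};

	/* Waits until no extra page references remain, then unmaps. */
	return gnttab_unmap_refs_sync(&unmap_data);
}
#endif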
1357 
1358 static unsigned int nr_status_frames(unsigned int nr_grant_frames)
1359 {
1360 	return gnttab_frames(nr_grant_frames, SPP);
1361 }
1362 
1363 static int gnttab_map_frames_v1(xen_pfn_t *frames, unsigned int nr_gframes)
1364 {
1365 	int rc;
1366 
1367 	rc = arch_gnttab_map_shared(frames, nr_gframes,
1368 				    gnttab_max_grant_frames(),
1369 				    &gnttab_shared.addr);
1370 	BUG_ON(rc);
1371 
1372 	return 0;
1373 }
1374 
1375 static void gnttab_unmap_frames_v1(void)
1376 {
1377 	arch_gnttab_unmap(gnttab_shared.addr, nr_grant_frames);
1378 }
1379 
1380 static int gnttab_map_frames_v2(xen_pfn_t *frames, unsigned int nr_gframes)
1381 {
1382 	uint64_t *sframes;
1383 	unsigned int nr_sframes;
1384 	struct gnttab_get_status_frames getframes;
1385 	int rc;
1386 
1387 	nr_sframes = nr_status_frames(nr_gframes);
1388 
1389 	/* No need for kzalloc as the array is fully initialized by the following
1390 	 * GNTTABOP_get_status_frames hypercall.
1391 	 */
1392 	sframes = kmalloc_array(nr_sframes, sizeof(uint64_t), GFP_ATOMIC);
1393 	if (!sframes)
1394 		return -ENOMEM;
1395 
1396 	getframes.dom        = DOMID_SELF;
1397 	getframes.nr_frames  = nr_sframes;
1398 	set_xen_guest_handle(getframes.frame_list, sframes);
1399 
1400 	rc = HYPERVISOR_grant_table_op(GNTTABOP_get_status_frames,
1401 				       &getframes, 1);
1402 	if (rc == -ENOSYS) {
1403 		kfree(sframes);
1404 		return -ENOSYS;
1405 	}
1406 
1407 	BUG_ON(rc || getframes.status);
1408 
1409 	rc = arch_gnttab_map_status(sframes, nr_sframes,
1410 				    nr_status_frames(gnttab_max_grant_frames()),
1411 				    &grstatus);
1412 	BUG_ON(rc);
1413 	kfree(sframes);
1414 
1415 	rc = arch_gnttab_map_shared(frames, nr_gframes,
1416 				    gnttab_max_grant_frames(),
1417 				    &gnttab_shared.addr);
1418 	BUG_ON(rc);
1419 
1420 	return 0;
1421 }
1422 
1423 static void gnttab_unmap_frames_v2(void)
1424 {
1425 	arch_gnttab_unmap(gnttab_shared.addr, nr_grant_frames);
1426 	arch_gnttab_unmap(grstatus, nr_status_frames(nr_grant_frames));
1427 }
1428 
1429 static int gnttab_map(unsigned int start_idx, unsigned int end_idx)
1430 {
1431 	struct gnttab_setup_table setup;
1432 	xen_pfn_t *frames;
1433 	unsigned int nr_gframes = end_idx + 1;
1434 	int rc;
1435 
1436 	if (xen_feature(XENFEAT_auto_translated_physmap)) {
1437 		struct xen_add_to_physmap xatp;
1438 		unsigned int i = end_idx;
1439 		rc = 0;
1440 		BUG_ON(xen_auto_xlat_grant_frames.count < nr_gframes);
1441 		/*
1442 		 * Loop backwards, so that the first hypercall has the largest
1443 		 * index, ensuring that the table will grow only once.
1444 		 */
1445 		do {
1446 			xatp.domid = DOMID_SELF;
1447 			xatp.idx = i;
1448 			xatp.space = XENMAPSPACE_grant_table;
1449 			xatp.gpfn = xen_auto_xlat_grant_frames.pfn[i];
1450 			rc = HYPERVISOR_memory_op(XENMEM_add_to_physmap, &xatp);
1451 			if (rc != 0) {
1452 				pr_warn("grant table add_to_physmap failed, err=%d\n",
1453 					rc);
1454 				break;
1455 			}
1456 		} while (i-- > start_idx);
1457 
1458 		return rc;
1459 	}
1460 
1461 	/* No need for kzalloc as the array is fully initialized by the following
1462 	 * GNTTABOP_setup_table hypercall.
1463 	 */
1464 	frames = kmalloc_array(nr_gframes, sizeof(unsigned long), GFP_ATOMIC);
1465 	if (!frames)
1466 		return -ENOMEM;
1467 
1468 	setup.dom        = DOMID_SELF;
1469 	setup.nr_frames  = nr_gframes;
1470 	set_xen_guest_handle(setup.frame_list, frames);
1471 
1472 	rc = HYPERVISOR_grant_table_op(GNTTABOP_setup_table, &setup, 1);
1473 	if (rc == -ENOSYS) {
1474 		kfree(frames);
1475 		return -ENOSYS;
1476 	}
1477 
1478 	BUG_ON(rc || setup.status);
1479 
1480 	rc = gnttab_interface->map_frames(frames, nr_gframes);
1481 
1482 	kfree(frames);
1483 
1484 	return rc;
1485 }
1486 
1487 static const struct gnttab_ops gnttab_v1_ops = {
1488 	.version			= 1,
1489 	.grefs_per_grant_frame		= XEN_PAGE_SIZE /
1490 					  sizeof(struct grant_entry_v1),
1491 	.map_frames			= gnttab_map_frames_v1,
1492 	.unmap_frames			= gnttab_unmap_frames_v1,
1493 	.update_entry			= gnttab_update_entry_v1,
1494 	.end_foreign_access_ref		= gnttab_end_foreign_access_ref_v1,
1495 	.read_frame			= gnttab_read_frame_v1,
1496 };
1497 
1498 static const struct gnttab_ops gnttab_v2_ops = {
1499 	.version			= 2,
1500 	.grefs_per_grant_frame		= XEN_PAGE_SIZE /
1501 					  sizeof(union grant_entry_v2),
1502 	.map_frames			= gnttab_map_frames_v2,
1503 	.unmap_frames			= gnttab_unmap_frames_v2,
1504 	.update_entry			= gnttab_update_entry_v2,
1505 	.end_foreign_access_ref		= gnttab_end_foreign_access_ref_v2,
1506 	.read_frame			= gnttab_read_frame_v2,
1507 };
1508 
1509 static bool gnttab_need_v2(void)
1510 {
1511 #ifdef CONFIG_X86
1512 	uint32_t base, width;
1513 
1514 	if (xen_pv_domain()) {
1515 		base = xen_cpuid_base();
1516 		if (cpuid_eax(base) < 5)
1517 			return false;	/* Information not available, use V1. */
1518 		width = cpuid_ebx(base + 5) &
1519 			XEN_CPUID_MACHINE_ADDRESS_WIDTH_MASK;
1520 		return width > 32 + PAGE_SHIFT;
1521 	}
1522 #endif
1523 	return !!(max_possible_pfn >> 32);
1524 }
1525 
1526 static void gnttab_request_version(void)
1527 {
1528 	long rc;
1529 	struct gnttab_set_version gsv;
1530 
1531 	if (gnttab_need_v2())
1532 		gsv.version = 2;
1533 	else
1534 		gsv.version = 1;
1535 
1536 	/* Boot parameter overrides automatic selection. */
1537 	if (xen_gnttab_version >= 1 && xen_gnttab_version <= 2)
1538 		gsv.version = xen_gnttab_version;
1539 
1540 	rc = HYPERVISOR_grant_table_op(GNTTABOP_set_version, &gsv, 1);
1541 	if (rc == 0 && gsv.version == 2)
1542 		gnttab_interface = &gnttab_v2_ops;
1543 	else
1544 		gnttab_interface = &gnttab_v1_ops;
1545 	pr_info("Grant tables using version %d layout\n",
1546 		gnttab_interface->version);
1547 }
1548 
1549 static int gnttab_setup(void)
1550 {
1551 	unsigned int max_nr_gframes;
1552 
1553 	max_nr_gframes = gnttab_max_grant_frames();
1554 	if (max_nr_gframes < nr_grant_frames)
1555 		return -ENOSYS;
1556 
1557 	if (xen_feature(XENFEAT_auto_translated_physmap) && gnttab_shared.addr == NULL) {
1558 		gnttab_shared.addr = xen_auto_xlat_grant_frames.vaddr;
1559 		if (gnttab_shared.addr == NULL) {
1560 			pr_warn("gnttab shared frames are not mapped!\n");
1561 			return -ENOMEM;
1562 		}
1563 	}
1564 	return gnttab_map(0, nr_grant_frames - 1);
1565 }
1566 
1567 int gnttab_resume(void)
1568 {
1569 	gnttab_request_version();
1570 	return gnttab_setup();
1571 }
1572 
1573 int gnttab_suspend(void)
1574 {
1575 	if (!xen_feature(XENFEAT_auto_translated_physmap))
1576 		gnttab_interface->unmap_frames();
1577 	return 0;
1578 }
1579 
1580 static int gnttab_expand(unsigned int req_entries)
1581 {
1582 	int rc;
1583 	unsigned int cur, extra;
1584 
1585 	cur = nr_grant_frames;
1586 	extra = ((req_entries + gnttab_interface->grefs_per_grant_frame - 1) /
1587 		 gnttab_interface->grefs_per_grant_frame);
1588 	if (cur + extra > gnttab_max_grant_frames()) {
1589 		pr_warn_ratelimited("xen/grant-table: max_grant_frames reached"
1590 				    " cur=%u extra=%u limit=%u"
1591 				    " gnttab_free_count=%u req_entries=%u\n",
1592 				    cur, extra, gnttab_max_grant_frames(),
1593 				    gnttab_free_count, req_entries);
1594 		return -ENOSPC;
1595 	}
1596 
1597 	rc = gnttab_map(cur, cur + extra - 1);
1598 	if (rc == 0)
1599 		rc = grow_gnttab_list(extra);
1600 
1601 	return rc;
1602 }
1603 
1604 int gnttab_init(void)
1605 {
1606 	int i;
1607 	unsigned long max_nr_grant_frames, max_nr_grefs;
1608 	unsigned int max_nr_glist_frames, nr_glist_frames;
1609 	int ret;
1610 
1611 	gnttab_request_version();
1612 	max_nr_grant_frames = gnttab_max_grant_frames();
1613 	max_nr_grefs = max_nr_grant_frames *
1614 			gnttab_interface->grefs_per_grant_frame;
1615 	nr_grant_frames = 1;
1616 
1617 	/* Determine the maximum number of frames required for the
1618 	 * grant reference free list on the current hypervisor.
1619 	 */
1620 	max_nr_glist_frames = max_nr_grefs / RPP;
1621 
1622 	gnttab_list = kmalloc_array(max_nr_glist_frames,
1623 				    sizeof(grant_ref_t *),
1624 				    GFP_KERNEL);
1625 	if (gnttab_list == NULL)
1626 		return -ENOMEM;
1627 
1628 	nr_glist_frames = gnttab_frames(nr_grant_frames, RPP);
1629 	for (i = 0; i < nr_glist_frames; i++) {
1630 		gnttab_list[i] = (grant_ref_t *)__get_free_page(GFP_KERNEL);
1631 		if (gnttab_list[i] == NULL) {
1632 			ret = -ENOMEM;
1633 			goto ini_nomem;
1634 		}
1635 	}
1636 
1637 	gnttab_free_bitmap = bitmap_zalloc(max_nr_grefs, GFP_KERNEL);
1638 	if (!gnttab_free_bitmap) {
1639 		ret = -ENOMEM;
1640 		goto ini_nomem;
1641 	}
1642 
1643 	ret = arch_gnttab_init(max_nr_grant_frames,
1644 			       nr_status_frames(max_nr_grant_frames));
1645 	if (ret < 0)
1646 		goto ini_nomem;
1647 
1648 	if (gnttab_setup() < 0) {
1649 		ret = -ENODEV;
1650 		goto ini_nomem;
1651 	}
1652 
1653 	gnttab_size = nr_grant_frames * gnttab_interface->grefs_per_grant_frame;
1654 
1655 	gnttab_set_free(GNTTAB_NR_RESERVED_ENTRIES,
1656 			gnttab_size - GNTTAB_NR_RESERVED_ENTRIES);
1657 
1658 	printk("Grant table initialized\n");
1659 	return 0;
1660 
1661  ini_nomem:
1662 	for (i--; i >= 0; i--)
1663 		free_page((unsigned long)gnttab_list[i]);
1664 	kfree(gnttab_list);
1665 	bitmap_free(gnttab_free_bitmap);
1666 	return ret;
1667 }
1668 EXPORT_SYMBOL_GPL(gnttab_init);
1669 
1670 static int __gnttab_init(void)
1671 {
1672 	if (!xen_domain())
1673 		return -ENODEV;
1674 
1675 	/* Delay grant-table initialization in the PV on HVM case */
1676 	if (xen_hvm_domain() && !xen_pvh_domain())
1677 		return 0;
1678 
1679 	return gnttab_init();
1680 }
1681 /* Starts after core_initcall so that xen_pvh_gnttab_setup can be called
1682  * beforehand to initialize xen_auto_xlat_grant_frames. */
1683 core_initcall_sync(__gnttab_init);
1684