/******************************************************************************
 * Xen balloon driver - enables returning/claiming memory to/from Xen.
 *
 * Copyright (c) 2003, B Dragovic
 * Copyright (c) 2003-2004, M Williamson, K Fraser
 * Copyright (c) 2005 Dan M. Smith, IBM Corporation
 * Copyright (c) 2010 Daniel Kiper
 *
 * Memory hotplug support was written by Daniel Kiper. Work on
 * it was sponsored by Google under Google Summer of Code 2010
 * program. Jeremy Fitzhardinge from Citrix was the mentor for
 * this project.
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License version 2
 * as published by the Free Software Foundation; or, when distributed
 * separately from the Linux kernel or incorporated into other
 * software packages, subject to the following license:
 *
 * Permission is hereby granted, free of charge, to any person obtaining a copy
 * of this source file (the "Software"), to deal in the Software without
 * restriction, including without limitation the rights to use, copy, modify,
 * merge, publish, distribute, sublicense, and/or sell copies of the Software,
 * and to permit persons to whom the Software is furnished to do so, subject to
 * the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
 * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
 * IN THE SOFTWARE.
 */

#define pr_fmt(fmt) "xen:" KBUILD_MODNAME ": " fmt

#include <linux/cpu.h>
#include <linux/kernel.h>
#include <linux/sched.h>
#include <linux/errno.h>
#include <linux/module.h>
#include <linux/mm.h>
#include <linux/bootmem.h>
#include <linux/pagemap.h>
#include <linux/highmem.h>
#include <linux/mutex.h>
#include <linux/list.h>
#include <linux/gfp.h>
#include <linux/notifier.h>
#include <linux/memory.h>
#include <linux/memory_hotplug.h>
#include <linux/percpu-defs.h>

#include <asm/page.h>
#include <asm/pgalloc.h>
#include <asm/pgtable.h>
#include <asm/tlb.h>

#include <asm/xen/hypervisor.h>
#include <asm/xen/hypercall.h>

#include <xen/xen.h>
#include <xen/interface/xen.h>
#include <xen/interface/memory.h>
#include <xen/balloon.h>
#include <xen/features.h>
#include <xen/page.h>

/*
 * balloon_process() state:
 *
 * BP_DONE: done or nothing to do,
 * BP_EAGAIN: error, go to sleep,
 * BP_ECANCELED: error, balloon operation canceled.
 */

enum bp_state {
	BP_DONE,
	BP_EAGAIN,
	BP_ECANCELED
};

static DEFINE_MUTEX(balloon_mutex);

struct balloon_stats balloon_stats;
EXPORT_SYMBOL_GPL(balloon_stats);

/* We increase/decrease in batches which fit in a page */
static xen_pfn_t frame_list[PAGE_SIZE / sizeof(unsigned long)];
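/*
 * Each CPU keeps a scratch page that ballooned-out PV frames are
 * temporarily remapped to (see decrease_reservation()), so stale
 * mappings reference a harmless local page rather than a machine
 * frame that has been returned to Xen.
 */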
static DEFINE_PER_CPU(struct page *, balloon_scratch_page);

/* List of ballooned pages, threaded through the mem_map array. */
static LIST_HEAD(ballooned_pages);

/* Main work function, always executed in process context. */
static void balloon_process(struct work_struct *work);
static DECLARE_DELAYED_WORK(balloon_worker, balloon_process);

/*
 * When ballooning out (allocating memory to return to Xen) we don't really
 * want the kernel to try too hard since that can trigger the oom killer.
 */
#define GFP_BALLOON \
	(GFP_HIGHUSER | __GFP_NOWARN | __GFP_NORETRY | __GFP_NOMEMALLOC)

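/* Clear a page's contents before returning the frame to Xen (no-op
 * unless CONFIG_XEN_SCRUB_PAGES is set). */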
static void scrub_page(struct page *page)
{
#ifdef CONFIG_XEN_SCRUB_PAGES
	clear_highpage(page);
#endif
}

/* balloon_append: add the given page to the balloon. */
static void __balloon_append(struct page *page)
{
	/* Lowmem is re-populated first, so highmem pages go at list tail. */
	if (PageHighMem(page)) {
		list_add_tail(&page->lru, &ballooned_pages);
		balloon_stats.balloon_high++;
	} else {
		list_add(&page->lru, &ballooned_pages);
		balloon_stats.balloon_low++;
	}
}

static void balloon_append(struct page *page)
{
	__balloon_append(page);
	adjust_managed_page_count(page, -1);
}

/* balloon_retrieve: rescue a page from the balloon, if it is not empty. */
static struct page *balloon_retrieve(bool prefer_highmem)
{
	struct page *page;

	if (list_empty(&ballooned_pages))
		return NULL;

	if (prefer_highmem)
		page = list_entry(ballooned_pages.prev, struct page, lru);
	else
		page = list_entry(ballooned_pages.next, struct page, lru);
	list_del(&page->lru);

	if (PageHighMem(page))
		balloon_stats.balloon_high--;
	else
		balloon_stats.balloon_low--;

	adjust_managed_page_count(page, 1);

	return page;
}

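/* balloon_next_page: next page in the ballooned list, or NULL at the end. */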
static struct page *balloon_next_page(struct page *page)
{
	struct list_head *next = page->lru.next;
	if (next == &ballooned_pages)
		return NULL;
	return list_entry(next, struct page, lru);
}

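/*
 * update_schedule: reset the retry state on success; otherwise double
 * the retry delay, capped at max_schedule_delay, and cancel the
 * operation once max_retry_count consecutive attempts have failed.
 */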
static enum bp_state update_schedule(enum bp_state state)
{
	if (state == BP_DONE) {
		balloon_stats.schedule_delay = 1;
		balloon_stats.retry_count = 1;
		return BP_DONE;
	}

	++balloon_stats.retry_count;

	if (balloon_stats.max_retry_count != RETRY_UNLIMITED &&
			balloon_stats.retry_count > balloon_stats.max_retry_count) {
		balloon_stats.schedule_delay = 1;
		balloon_stats.retry_count = 1;
		return BP_ECANCELED;
	}

	balloon_stats.schedule_delay <<= 1;

	if (balloon_stats.schedule_delay > balloon_stats.max_schedule_delay)
		balloon_stats.schedule_delay = balloon_stats.max_schedule_delay;

	return BP_EAGAIN;
}

#ifdef CONFIG_XEN_BALLOON_MEMORY_HOTPLUG
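/*
 * Pages still needed to reach the target, not counting pages already
 * committed to a pending hotplug operation.
 */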
static long current_credit(void)
{
	return balloon_stats.target_pages - balloon_stats.current_pages -
		balloon_stats.hotplug_pages;
}

static bool balloon_is_inflated(void)
{
	return balloon_stats.balloon_low || balloon_stats.balloon_high ||
	       balloon_stats.balloon_hotplug;
}

/*
 * reserve_additional_memory() adds a memory region of size >= credit above
 * max_pfn. The new region is section aligned and its size is rounded up to
 * a multiple of the section size. These properties allow optimal use of the
 * address space and establish proper alignment the first time this function
 * is called after boot (a last section that was not fully populated at boot
 * contains unused memory pages with the PG_reserved bit unset;
 * online_pages_range() refuses to online a whole range if the first page to
 * be onlined does not have the PG_reserved bit set). The real size of the
 * added memory is established at the page-onlining stage.
 */

static enum bp_state reserve_additional_memory(long credit)
{
	int nid, rc;
	u64 hotplug_start_paddr;
	unsigned long balloon_hotplug = credit;

	hotplug_start_paddr = PFN_PHYS(SECTION_ALIGN_UP(max_pfn));
	balloon_hotplug = round_up(balloon_hotplug, PAGES_PER_SECTION);
	nid = memory_add_physaddr_to_nid(hotplug_start_paddr);

	rc = add_memory(nid, hotplug_start_paddr, balloon_hotplug << PAGE_SHIFT);

	if (rc) {
		pr_info("%s: add_memory() failed: %i\n", __func__, rc);
		return BP_EAGAIN;
	}

	balloon_hotplug -= credit;

	balloon_stats.hotplug_pages += credit;
	balloon_stats.balloon_hotplug = balloon_hotplug;

	return BP_DONE;
}

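/*
 * Online-page callback: hotplugged pages are handed to the balloon
 * rather than to the page allocator; they only reach the allocator
 * once Xen actually backs them (see increase_reservation()).
 */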
static void xen_online_page(struct page *page)
{
	__online_page_set_limits(page);

	mutex_lock(&balloon_mutex);

	__balloon_append(page);

	if (balloon_stats.hotplug_pages)
		--balloon_stats.hotplug_pages;
	else
		--balloon_stats.balloon_hotplug;

	mutex_unlock(&balloon_mutex);
}

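/* Kick the worker once newly hotplugged memory has come online. */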
static int xen_memory_notifier(struct notifier_block *nb, unsigned long val, void *v)
{
	if (val == MEM_ONLINE)
		schedule_delayed_work(&balloon_worker, 0);

	return NOTIFY_OK;
}

static struct notifier_block xen_memory_nb = {
	.notifier_call = xen_memory_notifier,
	.priority = 0
};
#else
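/*
 * Without memory hotplug the balloon can never grow beyond the initial
 * reservation, so never aim above what the current pages plus the
 * ballooned-out pages could satisfy.
 */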
static long current_credit(void)
{
	unsigned long target = balloon_stats.target_pages;

	target = min(target,
		     balloon_stats.current_pages +
		     balloon_stats.balloon_low +
		     balloon_stats.balloon_high);

	return target - balloon_stats.current_pages;
}

static bool balloon_is_inflated(void)
{
	return balloon_stats.balloon_low || balloon_stats.balloon_high;
}

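/*
 * With no way to add memory, the best we can do is abandon the request
 * by lowering the target to what is already present.
 */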
static enum bp_state reserve_additional_memory(long credit)
{
	balloon_stats.target_pages = balloon_stats.current_pages;
	return BP_DONE;
}
#endif /* CONFIG_XEN_BALLOON_MEMORY_HOTPLUG */

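/*
 * increase_reservation: ask Xen to back up to nr_pages ballooned-out
 * frames with machine memory and release them to the page allocator.
 */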
static enum bp_state increase_reservation(unsigned long nr_pages)
{
	int rc;
	unsigned long  pfn, i;
	struct page   *page;
	struct xen_memory_reservation reservation = {
		.address_bits = 0,
		.extent_order = 0,
		.domid        = DOMID_SELF
	};

#ifdef CONFIG_XEN_BALLOON_MEMORY_HOTPLUG
	if (!balloon_stats.balloon_low && !balloon_stats.balloon_high) {
		nr_pages = min(nr_pages, balloon_stats.balloon_hotplug);
		balloon_stats.hotplug_pages += nr_pages;
		balloon_stats.balloon_hotplug -= nr_pages;
		return BP_DONE;
	}
#endif

	if (nr_pages > ARRAY_SIZE(frame_list))
		nr_pages = ARRAY_SIZE(frame_list);

	page = list_first_entry_or_null(&ballooned_pages, struct page, lru);
	for (i = 0; i < nr_pages; i++) {
		if (!page) {
			nr_pages = i;
			break;
		}
		frame_list[i] = page_to_pfn(page);
		page = balloon_next_page(page);
	}

	set_xen_guest_handle(reservation.extent_start, frame_list);
	reservation.nr_extents = nr_pages;
	rc = HYPERVISOR_memory_op(XENMEM_populate_physmap, &reservation);
	if (rc <= 0)
		return BP_EAGAIN;

	for (i = 0; i < rc; i++) {
		page = balloon_retrieve(false);
		BUG_ON(page == NULL);

		pfn = page_to_pfn(page);

#ifdef CONFIG_XEN_HAVE_PVMMU
		if (!xen_feature(XENFEAT_auto_translated_physmap)) {
			set_phys_to_machine(pfn, frame_list[i]);

			/* Link back into the page tables if not highmem. */
			if (!PageHighMem(page)) {
				int ret;
				ret = HYPERVISOR_update_va_mapping(
						(unsigned long)__va(pfn << PAGE_SHIFT),
						mfn_pte(frame_list[i], PAGE_KERNEL),
						0);
				BUG_ON(ret);
			}
		}
#endif

		/* Relinquish the page back to the allocator. */
		__free_reserved_page(page);
	}

	balloon_stats.current_pages += rc;

	return BP_DONE;
}

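/*
 * decrease_reservation: allocate up to nr_pages pages, scrub them,
 * detach them from the kernel's mappings and the p2m, and hand the
 * underlying machine frames back to Xen.
 */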
static enum bp_state decrease_reservation(unsigned long nr_pages, gfp_t gfp)
{
	enum bp_state state = BP_DONE;
	unsigned long  pfn, i;
	struct page   *page;
	int ret;
	struct xen_memory_reservation reservation = {
		.address_bits = 0,
		.extent_order = 0,
		.domid        = DOMID_SELF
	};

#ifdef CONFIG_XEN_BALLOON_MEMORY_HOTPLUG
	if (balloon_stats.hotplug_pages) {
		nr_pages = min(nr_pages, balloon_stats.hotplug_pages);
		balloon_stats.hotplug_pages -= nr_pages;
		balloon_stats.balloon_hotplug += nr_pages;
		return BP_DONE;
	}
#endif

	if (nr_pages > ARRAY_SIZE(frame_list))
		nr_pages = ARRAY_SIZE(frame_list);

	for (i = 0; i < nr_pages; i++) {
		page = alloc_page(gfp);
		if (page == NULL) {
			nr_pages = i;
			state = BP_EAGAIN;
			break;
		}
		scrub_page(page);

		frame_list[i] = page_to_pfn(page);
	}

	/*
	 * Ensure that ballooned highmem pages don't have kmaps.
	 *
	 * Do this before changing the p2m as kmap_flush_unused()
	 * reads PTEs to obtain pages (and hence needs the original
	 * p2m entry).
	 */
	kmap_flush_unused();

	/* Update direct mapping, invalidate P2M, and add to balloon. */
	for (i = 0; i < nr_pages; i++) {
		pfn = frame_list[i];
		frame_list[i] = pfn_to_mfn(pfn);
		page = pfn_to_page(pfn);

#ifdef CONFIG_XEN_HAVE_PVMMU
		/*
		 * Ballooned out frames are effectively replaced with
		 * a scratch frame.  Ensure direct mappings and the
		 * p2m are consistent.
		 */
		if (!xen_feature(XENFEAT_auto_translated_physmap)) {
			unsigned long p;
			struct page   *scratch_page = get_balloon_scratch_page();

			if (!PageHighMem(page)) {
				ret = HYPERVISOR_update_va_mapping(
						(unsigned long)__va(pfn << PAGE_SHIFT),
						pfn_pte(page_to_pfn(scratch_page),
							PAGE_KERNEL_RO), 0);
				BUG_ON(ret);
			}
			p = page_to_pfn(scratch_page);
			__set_phys_to_machine(pfn, pfn_to_mfn(p));

			put_balloon_scratch_page();
		}
#endif

		balloon_append(page);
	}

	flush_tlb_all();

	set_xen_guest_handle(reservation.extent_start, frame_list);
	reservation.nr_extents   = nr_pages;
	ret = HYPERVISOR_memory_op(XENMEM_decrease_reservation, &reservation);
	BUG_ON(ret != nr_pages);

	balloon_stats.current_pages -= nr_pages;

	return state;
}

/*
 * The balloon_mutex prevents multiple workers from conflicting with
 * each other. We may of course race with updates of the target counts
 * or with changes to the Xen hard limit, but we will recover from
 * these in time.
 */
static void balloon_process(struct work_struct *work)
{
	enum bp_state state = BP_DONE;
	long credit;

	mutex_lock(&balloon_mutex);

	do {
		credit = current_credit();

		if (credit > 0) {
			if (balloon_is_inflated())
				state = increase_reservation(credit);
			else
				state = reserve_additional_memory(credit);
		}

		if (credit < 0)
			state = decrease_reservation(-credit, GFP_BALLOON);

		state = update_schedule(state);

#ifndef CONFIG_PREEMPT
		if (need_resched())
			schedule();
#endif
	} while (credit && state == BP_DONE);

	/* Schedule more work if there is some still to be done. */
	if (state == BP_EAGAIN)
		schedule_delayed_work(&balloon_worker, balloon_stats.schedule_delay * HZ);

	mutex_unlock(&balloon_mutex);
}

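/*
 * Pin the current CPU (get_cpu_var() disables preemption) and return
 * its scratch page; callers must pair this with
 * put_balloon_scratch_page().
 */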
struct page *get_balloon_scratch_page(void)
{
	struct page *ret = get_cpu_var(balloon_scratch_page);
	BUG_ON(ret == NULL);
	return ret;
}

void put_balloon_scratch_page(void)
{
	put_cpu_var(balloon_scratch_page);
}

/* Set a new balloon target and kick off the worker to act on it. */
void balloon_set_new_target(unsigned long target)
{
	/* No need for a lock: this is not a read-modify-write update. */
	balloon_stats.target_pages = target;
	schedule_delayed_work(&balloon_worker, 0);
}
EXPORT_SYMBOL_GPL(balloon_set_new_target);

/**
 * alloc_xenballooned_pages - get pages that have been ballooned out
 * @nr_pages: Number of pages to get
 * @pages: pages returned
 * @highmem: allow highmem pages
 * @return 0 on success, error otherwise
 */
int alloc_xenballooned_pages(int nr_pages, struct page **pages, bool highmem)
{
	int pgno = 0;
	struct page *page;
	mutex_lock(&balloon_mutex);
	while (pgno < nr_pages) {
		page = balloon_retrieve(highmem);
		if (page && (highmem || !PageHighMem(page))) {
			pages[pgno++] = page;
		} else {
			enum bp_state st;
			if (page)
				balloon_append(page);
			st = decrease_reservation(nr_pages - pgno,
					highmem ? GFP_HIGHUSER : GFP_USER);
			if (st != BP_DONE)
				goto out_undo;
		}
	}
	mutex_unlock(&balloon_mutex);
	return 0;
 out_undo:
	while (pgno)
		balloon_append(pages[--pgno]);
	/* Free the memory back to the kernel soon */
	schedule_delayed_work(&balloon_worker, 0);
	mutex_unlock(&balloon_mutex);
	return -ENOMEM;
}
EXPORT_SYMBOL(alloc_xenballooned_pages);

/**
 * free_xenballooned_pages - return pages retrieved with alloc_xenballooned_pages
 * @nr_pages: Number of pages
 * @pages: pages to return
 */
void free_xenballooned_pages(int nr_pages, struct page **pages)
{
	int i;

	mutex_lock(&balloon_mutex);

	for (i = 0; i < nr_pages; i++) {
		if (pages[i])
			balloon_append(pages[i]);
	}

	/* The balloon may be too large now. Shrink it if needed. */
	if (current_credit())
		schedule_delayed_work(&balloon_worker, 0);

	mutex_unlock(&balloon_mutex);
}
EXPORT_SYMBOL(free_xenballooned_pages);

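/*
 * Add the [start_pfn, start_pfn + pages) range to the balloon at boot;
 * these pages come from the extra memory regions and are typically not
 * backed by machine memory yet.
 */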
static void __init balloon_add_region(unsigned long start_pfn,
				      unsigned long pages)
{
	unsigned long pfn, extra_pfn_end;
	struct page *page;

	/*
	 * If the amount of usable memory has been limited (e.g., with
	 * the 'mem' command line parameter), don't add pages beyond
	 * this limit.
	 */
	extra_pfn_end = min(max_pfn, start_pfn + pages);

	for (pfn = start_pfn; pfn < extra_pfn_end; pfn++) {
		page = pfn_to_page(pfn);
		/*
		 * totalram_pages and totalhigh_pages do not
		 * include the boot-time balloon extension, so
		 * don't subtract from them.
		 */
		__balloon_append(page);
	}
}

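/* Allocate a scratch page for @cpu unless one is already in place. */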
static int alloc_balloon_scratch_page(int cpu)
{
	if (per_cpu(balloon_scratch_page, cpu) != NULL)
		return 0;

	per_cpu(balloon_scratch_page, cpu) = alloc_page(GFP_KERNEL);
	if (per_cpu(balloon_scratch_page, cpu) == NULL) {
		pr_warn("Failed to allocate balloon_scratch_page for cpu %d\n", cpu);
		return -ENOMEM;
	}

	return 0;
}

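/* Make sure a CPU has its scratch page before it starts running. */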
static int balloon_cpu_notify(struct notifier_block *self,
				    unsigned long action, void *hcpu)
{
	int cpu = (long)hcpu;
	switch (action) {
	case CPU_UP_PREPARE:
		if (alloc_balloon_scratch_page(cpu))
			return NOTIFY_BAD;
		break;
	default:
		break;
	}
	return NOTIFY_OK;
}

static struct notifier_block balloon_cpu_notifier = {
	.notifier_call	= balloon_cpu_notify,
};

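/*
 * Set up the scratch pages (PV only), establish the initial page
 * counts and retry parameters, hook into memory hotplug if configured,
 * and seed the balloon with the boot-time extra memory regions.
 */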
static int __init balloon_init(void)
{
	int i, cpu;

	if (!xen_domain())
		return -ENODEV;

	if (!xen_feature(XENFEAT_auto_translated_physmap)) {
		register_cpu_notifier(&balloon_cpu_notifier);

		get_online_cpus();
		for_each_online_cpu(cpu) {
			if (alloc_balloon_scratch_page(cpu)) {
				put_online_cpus();
				unregister_cpu_notifier(&balloon_cpu_notifier);
				return -ENOMEM;
			}
		}
		put_online_cpus();
	}

	pr_info("Initialising balloon driver\n");

	balloon_stats.current_pages = xen_pv_domain()
		? min(xen_start_info->nr_pages - xen_released_pages, max_pfn)
		: get_num_physpages();
	balloon_stats.target_pages  = balloon_stats.current_pages;
	balloon_stats.balloon_low   = 0;
	balloon_stats.balloon_high  = 0;

	balloon_stats.schedule_delay = 1;
	balloon_stats.max_schedule_delay = 32;
	balloon_stats.retry_count = 1;
	balloon_stats.max_retry_count = RETRY_UNLIMITED;

#ifdef CONFIG_XEN_BALLOON_MEMORY_HOTPLUG
	balloon_stats.hotplug_pages = 0;
	balloon_stats.balloon_hotplug = 0;

	set_online_page_callback(&xen_online_page);
	register_memory_notifier(&xen_memory_nb);
#endif

	/*
	 * Initialize the balloon with pages from the extra memory
	 * regions (see arch/x86/xen/setup.c).
	 */
	for (i = 0; i < XEN_EXTRA_MEM_MAX_REGIONS; i++)
		if (xen_extra_mem[i].size)
			balloon_add_region(PFN_UP(xen_extra_mem[i].start),
					   PFN_DOWN(xen_extra_mem[i].size));

	return 0;
}

subsys_initcall(balloon_init);

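/*
 * Runs as an early initcall, before balloon_init() and the CPU
 * notifier, so the per-cpu scratch page pointers start out NULL.
 */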
static int __init balloon_clear(void)
{
	int cpu;

	for_each_possible_cpu(cpu)
		per_cpu(balloon_scratch_page, cpu) = NULL;

	return 0;
}
early_initcall(balloon_clear);

MODULE_LICENSE("GPL");