/******************************************************************************
 * Xen balloon driver - enables returning/claiming memory to/from Xen.
 *
 * Copyright (c) 2003, B Dragovic
 * Copyright (c) 2003-2004, M Williamson, K Fraser
 * Copyright (c) 2005 Dan M. Smith, IBM Corporation
 * Copyright (c) 2010 Daniel Kiper
 *
 * Memory hotplug support was written by Daniel Kiper. Work on
 * it was sponsored by Google under Google Summer of Code 2010
 * program. Jeremy Fitzhardinge from Citrix was the mentor for
 * this project.
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License version 2
 * as published by the Free Software Foundation; or, when distributed
 * separately from the Linux kernel or incorporated into other
 * software packages, subject to the following license:
 *
 * Permission is hereby granted, free of charge, to any person obtaining a copy
 * of this source file (the "Software"), to deal in the Software without
 * restriction, including without limitation the rights to use, copy, modify,
 * merge, publish, distribute, sublicense, and/or sell copies of the Software,
 * and to permit persons to whom the Software is furnished to do so, subject to
 * the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
 * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
 * IN THE SOFTWARE.
 */

#define pr_fmt(fmt) "xen:" KBUILD_MODNAME ": " fmt

#include <linux/cpu.h>
#include <linux/kernel.h>
#include <linux/sched.h>
#include <linux/cred.h>
#include <linux/errno.h>
#include <linux/freezer.h>
#include <linux/kthread.h>
#include <linux/mm.h>
#include <linux/memblock.h>
#include <linux/pagemap.h>
#include <linux/highmem.h>
#include <linux/mutex.h>
#include <linux/list.h>
#include <linux/gfp.h>
#include <linux/notifier.h>
#include <linux/memory.h>
#include <linux/memory_hotplug.h>
#include <linux/percpu-defs.h>
#include <linux/slab.h>
#include <linux/sysctl.h>

#include <asm/page.h>
#include <asm/tlb.h>

#include <asm/xen/hypervisor.h>
#include <asm/xen/hypercall.h>

#include <xen/xen.h>
#include <xen/interface/xen.h>
#include <xen/interface/memory.h>
#include <xen/balloon.h>
#include <xen/features.h>
#include <xen/page.h>
#include <xen/mem-reservation.h>

static int xen_hotplug_unpopulated;

#ifdef CONFIG_XEN_BALLOON_MEMORY_HOTPLUG

static struct ctl_table balloon_table[] = {
	{
		.procname	= "hotplug_unpopulated",
		.data		= &xen_hotplug_unpopulated,
		.maxlen		= sizeof(int),
		.mode		= 0644,
		.proc_handler	= proc_dointvec_minmax,
		.extra1         = SYSCTL_ZERO,
		.extra2         = SYSCTL_ONE,
	},
	{ }
};

static struct ctl_table balloon_root[] = {
	{
		.procname	= "balloon",
		.mode		= 0555,
		.child		= balloon_table,
	},
	{ }
};

static struct ctl_table xen_root[] = {
	{
		.procname	= "xen",
		.mode		= 0555,
		.child		= balloon_root,
	},
	{ }
};

#endif
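
/*
 * The three tables above register a single knob at
 * /proc/sys/xen/balloon/hotplug_unpopulated.  For illustration, enabling
 * it from the shell so that requests for unpopulated pages may hotplug
 * new memory instead of ballooning out existing RAM:
 *
 *	echo 1 > /proc/sys/xen/balloon/hotplug_unpopulated
 *
 * add_ballooned_pages() below consults this flag.
 */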

/*
 * Use one extent per PAGE_SIZE to avoid breaking the page down into
 * multiple frames.
 */
#define EXTENT_ORDER (fls(XEN_PFN_PER_PAGE) - 1)
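
/*
 * Worked example: on x86 the kernel and Xen both use 4 KiB pages, so
 * XEN_PFN_PER_PAGE == 1 and EXTENT_ORDER == fls(1) - 1 == 0, i.e.
 * order-0 extents.  On an arm64 kernel built with 64 KiB pages on top
 * of Xen's 4 KiB granularity, XEN_PFN_PER_PAGE == 16 and
 * EXTENT_ORDER == fls(16) - 1 == 4, so one order-4 extent covers
 * exactly one kernel page.
 */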

/*
 * balloon_thread() state:
 *
 * BP_DONE: done or nothing to do,
 * BP_WAIT: wait to be rescheduled,
 * BP_EAGAIN: error, go to sleep,
 * BP_ECANCELED: error, balloon operation canceled.
 */

enum bp_state {
	BP_DONE,
	BP_WAIT,
	BP_EAGAIN,
	BP_ECANCELED
};

/* Main waiting point for xen-balloon thread. */
static DECLARE_WAIT_QUEUE_HEAD(balloon_thread_wq);

static DEFINE_MUTEX(balloon_mutex);

struct balloon_stats balloon_stats;
EXPORT_SYMBOL_GPL(balloon_stats);

/* We increase/decrease in batches which fit in a page */
static xen_pfn_t frame_list[PAGE_SIZE / sizeof(xen_pfn_t)];


/* List of ballooned pages, threaded through the mem_map array. */
static LIST_HEAD(ballooned_pages);
static DECLARE_WAIT_QUEUE_HEAD(balloon_wq);

/* When ballooning out (allocating memory to return to Xen) we don't really
   want the kernel to try too hard since that can trigger the oom killer. */
#define GFP_BALLOON \
	(GFP_HIGHUSER | __GFP_NOWARN | __GFP_NORETRY | __GFP_NOMEMALLOC)
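
/*
 * Flag by flag: __GFP_NOWARN suppresses allocation-failure warnings,
 * __GFP_NORETRY makes the allocator fail fast instead of retrying (and
 * thus never invokes the OOM killer on our behalf), and
 * __GFP_NOMEMALLOC keeps us away from the emergency reserves.
 * GFP_HIGHUSER allows highmem pages, which the balloon can absorb just
 * as well as lowmem.
 */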

/* balloon_append: add the given page to the balloon. */
static void balloon_append(struct page *page)
{
	__SetPageOffline(page);

	/* Lowmem is re-populated first, so highmem pages go at list tail. */
	if (PageHighMem(page)) {
		list_add_tail(&page->lru, &ballooned_pages);
		balloon_stats.balloon_high++;
	} else {
		list_add(&page->lru, &ballooned_pages);
		balloon_stats.balloon_low++;
	}
	wake_up(&balloon_wq);
}

/* balloon_retrieve: rescue a page from the balloon, if it is not empty. */
static struct page *balloon_retrieve(bool require_lowmem)
{
	struct page *page;

	if (list_empty(&ballooned_pages))
		return NULL;

	page = list_entry(ballooned_pages.next, struct page, lru);
	if (require_lowmem && PageHighMem(page))
		return NULL;
	list_del(&page->lru);

	if (PageHighMem(page))
		balloon_stats.balloon_high--;
	else
		balloon_stats.balloon_low--;

	__ClearPageOffline(page);
	return page;
}

static struct page *balloon_next_page(struct page *page)
{
	struct list_head *next = page->lru.next;
	if (next == &ballooned_pages)
		return NULL;
	return list_entry(next, struct page, lru);
}

static enum bp_state update_schedule(enum bp_state state)
{
	if (state == BP_WAIT)
		return BP_WAIT;

	if (state == BP_ECANCELED)
		return BP_ECANCELED;

	if (state == BP_DONE) {
		balloon_stats.schedule_delay = 1;
		balloon_stats.retry_count = 1;
		return BP_DONE;
	}

	++balloon_stats.retry_count;

	if (balloon_stats.max_retry_count != RETRY_UNLIMITED &&
			balloon_stats.retry_count > balloon_stats.max_retry_count) {
		balloon_stats.schedule_delay = 1;
		balloon_stats.retry_count = 1;
		return BP_ECANCELED;
	}

	balloon_stats.schedule_delay <<= 1;

	if (balloon_stats.schedule_delay > balloon_stats.max_schedule_delay)
		balloon_stats.schedule_delay = balloon_stats.max_schedule_delay;

	return BP_EAGAIN;
}
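
/*
 * Worked example of the backoff above, using the defaults set in
 * balloon_init() (schedule_delay = 1, max_schedule_delay = 32,
 * max_retry_count = 4): consecutive BP_EAGAIN results make
 * balloon_thread() sleep for 2, 4 and then 8 seconds, and the fourth
 * consecutive failure trips the retry limit and returns BP_ECANCELED
 * (a one-hour sleep).  A successful pass (BP_DONE) resets both the
 * delay and the retry count.
 */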

#ifdef CONFIG_XEN_BALLOON_MEMORY_HOTPLUG
static void release_memory_resource(struct resource *resource)
{
	if (!resource)
		return;

	/*
	 * No need to reset region to identity mapped since we now
	 * know that no I/O can be in this region
	 */
	release_resource(resource);
	kfree(resource);
}

static struct resource *additional_memory_resource(phys_addr_t size)
{
	struct resource *res;
	int ret;

	res = kzalloc(sizeof(*res), GFP_KERNEL);
	if (!res)
		return NULL;

	res->name = "System RAM";
	res->flags = IORESOURCE_SYSTEM_RAM | IORESOURCE_BUSY;

	ret = allocate_resource(&iomem_resource, res,
				size, 0, -1,
				PAGES_PER_SECTION * PAGE_SIZE, NULL, NULL);
	if (ret < 0) {
		pr_err("Cannot allocate new System RAM resource\n");
		kfree(res);
		return NULL;
	}

	return res;
}

static enum bp_state reserve_additional_memory(void)
{
	long credit;
	struct resource *resource;
	int nid, rc;
	unsigned long balloon_hotplug;

	credit = balloon_stats.target_pages + balloon_stats.target_unpopulated
		- balloon_stats.total_pages;

	/*
	 * Already hotplugged enough pages?  Wait for them to be
	 * onlined.
	 */
	if (credit <= 0)
		return BP_WAIT;

	balloon_hotplug = round_up(credit, PAGES_PER_SECTION);

	resource = additional_memory_resource(balloon_hotplug * PAGE_SIZE);
	if (!resource)
		goto err;

	nid = memory_add_physaddr_to_nid(resource->start);

#ifdef CONFIG_XEN_HAVE_PVMMU
	/*
	 * We don't support PV MMU when Linux and Xen are using
	 * different page granularity.
	 */
	BUILD_BUG_ON(XEN_PAGE_SIZE != PAGE_SIZE);

	/*
	 * add_memory() will build page tables for the new memory so
	 * the p2m must contain invalid entries so the correct
	 * non-present PTEs will be written.
	 *
	 * If a failure occurs, the original (identity) p2m entries
	 * are not restored since this region is now known not to
	 * conflict with any devices.
	 */
	if (!xen_feature(XENFEAT_auto_translated_physmap)) {
		unsigned long pfn, i;

		pfn = PFN_DOWN(resource->start);
		for (i = 0; i < balloon_hotplug; i++) {
			if (!set_phys_to_machine(pfn + i, INVALID_P2M_ENTRY)) {
				pr_warn("set_phys_to_machine() failed, no memory added\n");
				goto err;
			}
		}
	}
#endif

	/*
	 * add_memory_resource() will call online_pages() which in its turn
	 * will call xen_online_page() callback causing deadlock if we don't
	 * release balloon_mutex here. Unlocking here is safe because the
	 * callers drop the mutex before trying again.
	 */
	mutex_unlock(&balloon_mutex);
	/* add_memory_resource() requires the device_hotplug lock */
	lock_device_hotplug();
	rc = add_memory_resource(nid, resource, MHP_MERGE_RESOURCE);
	unlock_device_hotplug();
	mutex_lock(&balloon_mutex);

	if (rc) {
		pr_warn("Cannot add additional memory (%i)\n", rc);
		goto err;
	}

	balloon_stats.total_pages += balloon_hotplug;

	return BP_WAIT;
  err:
	release_memory_resource(resource);
	return BP_ECANCELED;
}
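
/*
 * Illustrative sizing: with 4 KiB pages on x86-64 a memory section is
 * 128 MiB (PAGES_PER_SECTION == 32768), so even a one-page credit is
 * rounded up to a whole 32768-page section before the resource is
 * reserved and handed to add_memory_resource() above.
 */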

static void xen_online_page(struct page *page, unsigned int order)
{
	unsigned long i, size = (1 << order);
	unsigned long start_pfn = page_to_pfn(page);
	struct page *p;

	pr_debug("Online %lu pages starting at pfn 0x%lx\n", size, start_pfn);
	mutex_lock(&balloon_mutex);
	for (i = 0; i < size; i++) {
		p = pfn_to_page(start_pfn + i);
		balloon_append(p);
	}
	mutex_unlock(&balloon_mutex);
}

static int xen_memory_notifier(struct notifier_block *nb, unsigned long val, void *v)
{
	if (val == MEM_ONLINE)
		wake_up(&balloon_thread_wq);

	return NOTIFY_OK;
}

static struct notifier_block xen_memory_nb = {
	.notifier_call = xen_memory_notifier,
	.priority = 0
};
#else
static enum bp_state reserve_additional_memory(void)
{
	balloon_stats.target_pages = balloon_stats.current_pages +
				     balloon_stats.target_unpopulated;
	return BP_ECANCELED;
}
#endif /* CONFIG_XEN_BALLOON_MEMORY_HOTPLUG */

static long current_credit(void)
{
	return balloon_stats.target_pages - balloon_stats.current_pages;
}

static bool balloon_is_inflated(void)
{
	return balloon_stats.balloon_low || balloon_stats.balloon_high;
}

static enum bp_state increase_reservation(unsigned long nr_pages)
{
	int rc;
	unsigned long i;
	struct page   *page;

	if (nr_pages > ARRAY_SIZE(frame_list))
		nr_pages = ARRAY_SIZE(frame_list);

	page = list_first_entry_or_null(&ballooned_pages, struct page, lru);
	for (i = 0; i < nr_pages; i++) {
		if (!page) {
			nr_pages = i;
			break;
		}

		frame_list[i] = page_to_xen_pfn(page);
		page = balloon_next_page(page);
	}

	rc = xenmem_reservation_increase(nr_pages, frame_list);
	if (rc <= 0)
		return BP_EAGAIN;

	for (i = 0; i < rc; i++) {
		page = balloon_retrieve(false);
		BUG_ON(page == NULL);

		xenmem_reservation_va_mapping_update(1, &page, &frame_list[i]);

		/* Relinquish the page back to the allocator. */
		free_reserved_page(page);
	}

	balloon_stats.current_pages += rc;

	return BP_DONE;
}
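
/*
 * Batch-size arithmetic: frame_list holds PAGE_SIZE / sizeof(xen_pfn_t)
 * entries, e.g. 4096 / 8 == 512 on a 64-bit kernel with 4 KiB pages, so
 * a single call to increase_reservation() or decrease_reservation()
 * moves at most 512 pages (2 MiB); larger credits are worked off over
 * repeated passes of balloon_thread().
 */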

static enum bp_state decrease_reservation(unsigned long nr_pages, gfp_t gfp)
{
	enum bp_state state = BP_DONE;
	unsigned long i;
	struct page *page, *tmp;
	int ret;
	LIST_HEAD(pages);

	if (nr_pages > ARRAY_SIZE(frame_list))
		nr_pages = ARRAY_SIZE(frame_list);

	for (i = 0; i < nr_pages; i++) {
		page = alloc_page(gfp);
		if (page == NULL) {
			nr_pages = i;
			state = BP_EAGAIN;
			break;
		}
		adjust_managed_page_count(page, -1);
		xenmem_reservation_scrub_page(page);
		list_add(&page->lru, &pages);
	}

	/*
	 * Ensure that ballooned highmem pages don't have kmaps.
	 *
	 * Do this before changing the p2m as kmap_flush_unused()
	 * reads PTEs to obtain pages (and hence needs the original
	 * p2m entry).
	 */
	kmap_flush_unused();

	/*
	 * Set up the frame, update the direct mapping, invalidate the P2M,
	 * and add to balloon.
	 */
	i = 0;
	list_for_each_entry_safe(page, tmp, &pages, lru) {
		frame_list[i++] = xen_page_to_gfn(page);

		xenmem_reservation_va_mapping_reset(1, &page);

		list_del(&page->lru);

		balloon_append(page);
	}

	flush_tlb_all();

	ret = xenmem_reservation_decrease(nr_pages, frame_list);
	BUG_ON(ret != nr_pages);

	balloon_stats.current_pages -= nr_pages;

	return state;
}

/*
 * Stop waiting if either state is BP_DONE and ballooning action is
 * needed, or if the credit has changed while state is not BP_DONE.
 */
static bool balloon_thread_cond(enum bp_state state, long credit)
{
	if (state == BP_DONE)
		credit = 0;

	return current_credit() != credit || kthread_should_stop();
}

/*
 * As this is a kthread it is guaranteed to run as a single instance only.
 * We may of course race updates of the target counts (which are protected
 * by the balloon lock), or with changes to the Xen hard limit, but we will
 * recover from these in time.
 */
static int balloon_thread(void *unused)
{
	enum bp_state state = BP_DONE;
	long credit;
	unsigned long timeout;

	set_freezable();
	for (;;) {
		switch (state) {
		case BP_DONE:
		case BP_ECANCELED:
			timeout = 3600 * HZ;
			break;
		case BP_EAGAIN:
			timeout = balloon_stats.schedule_delay * HZ;
			break;
		case BP_WAIT:
			timeout = HZ;
			break;
		}

		credit = current_credit();

		wait_event_freezable_timeout(balloon_thread_wq,
			balloon_thread_cond(state, credit), timeout);

		if (kthread_should_stop())
			return 0;

		mutex_lock(&balloon_mutex);

		credit = current_credit();

		if (credit > 0) {
			if (balloon_is_inflated())
				state = increase_reservation(credit);
			else
				state = reserve_additional_memory();
		}

		if (credit < 0) {
			long n_pages;

			n_pages = min(-credit, si_mem_available());
			state = decrease_reservation(n_pages, GFP_BALLOON);
			if (state == BP_DONE && n_pages != -credit &&
			    n_pages < totalreserve_pages)
				state = BP_EAGAIN;
		}

		state = update_schedule(state);

		mutex_unlock(&balloon_mutex);

		cond_resched();
	}
}

/* Resets the Xen limit, sets new target, and kicks off processing. */
void balloon_set_new_target(unsigned long target)
{
	/* No need for lock. Not read-modify-write updates. */
	balloon_stats.target_pages = target;
	wake_up(&balloon_thread_wq);
}
EXPORT_SYMBOL_GPL(balloon_set_new_target);
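
/*
 * A minimal sketch of the expected caller: drivers/xen/xen-balloon.c
 * watches the xenstore node "memory/target" (a value in KiB) and
 * forwards it here, roughly as follows:
 *
 *	unsigned long long new_target;
 *
 *	if (xenbus_scanf(XBT_NIL, "memory", "target", "%llu",
 *			 &new_target) == 1)
 *		balloon_set_new_target(new_target >> (PAGE_SHIFT - 10));
 */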

static int add_ballooned_pages(int nr_pages)
{
	enum bp_state st;

	if (xen_hotplug_unpopulated) {
		st = reserve_additional_memory();
		if (st != BP_ECANCELED) {
			int rc;

			mutex_unlock(&balloon_mutex);
			rc = wait_event_interruptible(balloon_wq,
				   !list_empty(&ballooned_pages));
			mutex_lock(&balloon_mutex);
			return rc ? -ENOMEM : 0;
		}
	}

	if (si_mem_available() < nr_pages)
		return -ENOMEM;

	st = decrease_reservation(nr_pages, GFP_USER);
	if (st != BP_DONE)
		return -ENOMEM;

	return 0;
}

/**
 * alloc_xenballooned_pages - get pages that have been ballooned out
 * @nr_pages: Number of pages to get
 * @pages: pages returned
 * @return 0 on success, error otherwise
 */
int alloc_xenballooned_pages(int nr_pages, struct page **pages)
{
	int pgno = 0;
	struct page *page;
	int ret;

	mutex_lock(&balloon_mutex);

	balloon_stats.target_unpopulated += nr_pages;

	while (pgno < nr_pages) {
		page = balloon_retrieve(true);
		if (page) {
			pages[pgno++] = page;
#ifdef CONFIG_XEN_HAVE_PVMMU
			/*
			 * We don't support PV MMU when Linux and Xen are using
			 * different page granularity.
			 */
			BUILD_BUG_ON(XEN_PAGE_SIZE != PAGE_SIZE);

			if (!xen_feature(XENFEAT_auto_translated_physmap)) {
				ret = xen_alloc_p2m_entry(page_to_pfn(page));
				if (ret < 0)
					goto out_undo;
			}
#endif
		} else {
			ret = add_ballooned_pages(nr_pages - pgno);
			if (ret < 0)
				goto out_undo;
		}
	}
	mutex_unlock(&balloon_mutex);
	return 0;
 out_undo:
	mutex_unlock(&balloon_mutex);
	free_xenballooned_pages(pgno, pages);
	/*
	 * NB: free_xenballooned_pages will only subtract pgno pages, but since
	 * target_unpopulated is incremented with nr_pages at the start we need
	 * to remove the remaining ones also, or accounting will be screwed.
	 */
	balloon_stats.target_unpopulated -= nr_pages - pgno;
	return ret;
}
EXPORT_SYMBOL(alloc_xenballooned_pages);
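
/*
 * A minimal usage sketch for the pair of calls above, with a
 * hypothetical caller that needs backing for foreign mappings (the
 * grant-table and gntdev code use the API this way):
 *
 *	struct page *pages[16];
 *
 *	if (alloc_xenballooned_pages(16, pages))
 *		return -ENOMEM;
 *	(map grant references into the pages, use them, unmap)
 *	free_xenballooned_pages(16, pages);
 */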

/**
 * free_xenballooned_pages - return pages retrieved with alloc_xenballooned_pages
 * @nr_pages: Number of pages
 * @pages: pages to return
 */
void free_xenballooned_pages(int nr_pages, struct page **pages)
{
	int i;

	mutex_lock(&balloon_mutex);

	for (i = 0; i < nr_pages; i++) {
		if (pages[i])
			balloon_append(pages[i]);
	}

	balloon_stats.target_unpopulated -= nr_pages;

	/* The balloon may be too large now. Shrink it if needed. */
	if (current_credit())
		wake_up(&balloon_thread_wq);

	mutex_unlock(&balloon_mutex);
}
EXPORT_SYMBOL(free_xenballooned_pages);

#if defined(CONFIG_XEN_PV) && !defined(CONFIG_XEN_UNPOPULATED_ALLOC)
static void __init balloon_add_region(unsigned long start_pfn,
				      unsigned long pages)
{
	unsigned long pfn, extra_pfn_end;

	/*
	 * If the amount of usable memory has been limited (e.g., with
	 * the 'mem' command line parameter), don't add pages beyond
	 * this limit.
	 */
	extra_pfn_end = min(max_pfn, start_pfn + pages);

	for (pfn = start_pfn; pfn < extra_pfn_end; pfn++) {
		/* totalram_pages and totalhigh_pages do not
		   include the boot-time balloon extension, so
		   don't subtract from them. */
		balloon_append(pfn_to_page(pfn));
	}

	balloon_stats.total_pages += extra_pfn_end - start_pfn;
}
#endif

static int __init balloon_init(void)
{
	struct task_struct *task;

	if (!xen_domain())
		return -ENODEV;

	pr_info("Initialising balloon driver\n");

#ifdef CONFIG_XEN_PV
	balloon_stats.current_pages = xen_pv_domain()
		? min(xen_start_info->nr_pages - xen_released_pages, max_pfn)
		: get_num_physpages();
#else
	balloon_stats.current_pages = get_num_physpages();
#endif
	balloon_stats.target_pages  = balloon_stats.current_pages;
	balloon_stats.balloon_low   = 0;
	balloon_stats.balloon_high  = 0;
	balloon_stats.total_pages   = balloon_stats.current_pages;

	balloon_stats.schedule_delay = 1;
	balloon_stats.max_schedule_delay = 32;
	balloon_stats.retry_count = 1;
	balloon_stats.max_retry_count = 4;

#ifdef CONFIG_XEN_BALLOON_MEMORY_HOTPLUG
	set_online_page_callback(&xen_online_page);
	register_memory_notifier(&xen_memory_nb);
	register_sysctl_table(xen_root);
#endif

#if defined(CONFIG_XEN_PV) && !defined(CONFIG_XEN_UNPOPULATED_ALLOC)
	{
		int i;

		/*
		 * Initialize the balloon with pages from the extra memory
		 * regions (see arch/x86/xen/setup.c).
		 */
		for (i = 0; i < XEN_EXTRA_MEM_MAX_REGIONS; i++)
			if (xen_extra_mem[i].n_pfns)
				balloon_add_region(xen_extra_mem[i].start_pfn,
						   xen_extra_mem[i].n_pfns);
	}
#endif

	task = kthread_run(balloon_thread, NULL, "xen-balloon");
	if (IS_ERR(task)) {
		pr_err("xen-balloon thread could not be started, ballooning will not work!\n");
		return PTR_ERR(task);
	}

	/* Init the xen-balloon driver. */
	xen_balloon_init();

	return 0;
}
subsys_initcall(balloon_init);