/******************************************************************************
 * Xen balloon driver - enables returning/claiming memory to/from Xen.
 *
 * Copyright (c) 2003, B Dragovic
 * Copyright (c) 2003-2004, M Williamson, K Fraser
 * Copyright (c) 2005 Dan M. Smith, IBM Corporation
 * Copyright (c) 2010 Daniel Kiper
 *
 * Memory hotplug support was written by Daniel Kiper. Work on
 * it was sponsored by Google under Google Summer of Code 2010
 * program. Jeremy Fitzhardinge from Citrix was the mentor for
 * this project.
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License version 2
 * as published by the Free Software Foundation; or, when distributed
 * separately from the Linux kernel or incorporated into other
 * software packages, subject to the following license:
 *
 * Permission is hereby granted, free of charge, to any person obtaining a copy
 * of this source file (the "Software"), to deal in the Software without
 * restriction, including without limitation the rights to use, copy, modify,
 * merge, publish, distribute, sublicense, and/or sell copies of the Software,
 * and to permit persons to whom the Software is furnished to do so, subject to
 * the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
 * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
 * IN THE SOFTWARE.
 */

#define pr_fmt(fmt) "xen:" KBUILD_MODNAME ": " fmt

#include <linux/cpu.h>
#include <linux/kernel.h>
#include <linux/sched.h>
#include <linux/errno.h>
#include <linux/module.h>
#include <linux/mm.h>
#include <linux/bootmem.h>
#include <linux/pagemap.h>
#include <linux/highmem.h>
#include <linux/mutex.h>
#include <linux/list.h>
#include <linux/gfp.h>
#include <linux/notifier.h>
#include <linux/memory.h>
#include <linux/memory_hotplug.h>
#include <linux/percpu-defs.h>

#include <asm/page.h>
#include <asm/pgalloc.h>
#include <asm/pgtable.h>
#include <asm/tlb.h>

#include <asm/xen/hypervisor.h>
#include <asm/xen/hypercall.h>

#include <xen/xen.h>
#include <xen/interface/xen.h>
#include <xen/interface/memory.h>
#include <xen/balloon.h>
#include <xen/features.h>
#include <xen/page.h>

/*
 * balloon_process() state:
 *
 * BP_DONE: done or nothing to do,
 * BP_EAGAIN: error, go to sleep,
 * BP_ECANCELED: error, balloon operation canceled.
 */

enum bp_state {
	BP_DONE,
	BP_EAGAIN,
	BP_ECANCELED
};


static DEFINE_MUTEX(balloon_mutex);

struct balloon_stats balloon_stats;
EXPORT_SYMBOL_GPL(balloon_stats);

/* We increase/decrease in batches which fit in a page */
static xen_pfn_t frame_list[PAGE_SIZE / sizeof(unsigned long)];
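/*
 * Worked example (illustrative, assuming 4 KiB pages and 8-byte longs):
 * frame_list above holds 4096 / 8 = 512 entries, so each pass of the
 * balloon worker moves at most 512 pages (2 MiB) in either direction.
 */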
static DEFINE_PER_CPU(struct page *, balloon_scratch_page);


/* List of ballooned pages, threaded through the mem_map array. */
static LIST_HEAD(ballooned_pages);

/* Main work function, always executed in process context. */
static void balloon_process(struct work_struct *work);
static DECLARE_DELAYED_WORK(balloon_worker, balloon_process);

/*
 * When ballooning out (allocating memory to return to Xen) we don't really
 * want the kernel to try too hard since that can trigger the oom killer.
 */
#define GFP_BALLOON \
	(GFP_HIGHUSER | __GFP_NOWARN | __GFP_NORETRY | __GFP_NOMEMALLOC)

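/*
 * Scrub (zero) a page before it is handed back to the hypervisor, so no
 * guest data can leak to whichever domain receives the frame next.
 * Compiled out unless CONFIG_XEN_SCRUB_PAGES is enabled.
 */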
static void scrub_page(struct page *page)
{
#ifdef CONFIG_XEN_SCRUB_PAGES
	clear_highpage(page);
#endif
}

/* balloon_append: add the given page to the balloon. */
static void __balloon_append(struct page *page)
{
	/* Lowmem is re-populated first, so highmem pages go at list tail. */
	if (PageHighMem(page)) {
		list_add_tail(&page->lru, &ballooned_pages);
		balloon_stats.balloon_high++;
	} else {
		list_add(&page->lru, &ballooned_pages);
		balloon_stats.balloon_low++;
	}
}

static void balloon_append(struct page *page)
{
	__balloon_append(page);
	adjust_managed_page_count(page, -1);
}

/* balloon_retrieve: rescue a page from the balloon, if it is not empty. */
static struct page *balloon_retrieve(bool prefer_highmem)
{
	struct page *page;

	if (list_empty(&ballooned_pages))
		return NULL;

	if (prefer_highmem)
		page = list_entry(ballooned_pages.prev, struct page, lru);
	else
		page = list_entry(ballooned_pages.next, struct page, lru);
	list_del(&page->lru);

	if (PageHighMem(page))
		balloon_stats.balloon_high--;
	else
		balloon_stats.balloon_low--;

	adjust_managed_page_count(page, 1);

	return page;
}

static struct page *balloon_first_page(void)
{
	if (list_empty(&ballooned_pages))
		return NULL;
	return list_entry(ballooned_pages.next, struct page, lru);
}

static struct page *balloon_next_page(struct page *page)
{
	struct list_head *next = page->lru.next;
	if (next == &ballooned_pages)
		return NULL;
	return list_entry(next, struct page, lru);
}

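/*
 * Exponential backoff on failure. Illustrative sequence, assuming the
 * defaults set in balloon_init() (schedule_delay = 1, max_schedule_delay
 * = 32): consecutive BP_EAGAIN results reschedule the worker after 2, 4,
 * 8, 16, 32, 32, ... seconds, and any BP_DONE result resets the delay.
 * Once retry_count exceeds max_retry_count (RETRY_UNLIMITED by default)
 * the operation is canceled with BP_ECANCELED.
 */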
static enum bp_state update_schedule(enum bp_state state)
{
	if (state == BP_DONE) {
		balloon_stats.schedule_delay = 1;
		balloon_stats.retry_count = 1;
		return BP_DONE;
	}

	++balloon_stats.retry_count;

	if (balloon_stats.max_retry_count != RETRY_UNLIMITED &&
			balloon_stats.retry_count > balloon_stats.max_retry_count) {
		balloon_stats.schedule_delay = 1;
		balloon_stats.retry_count = 1;
		return BP_ECANCELED;
	}

	balloon_stats.schedule_delay <<= 1;

	if (balloon_stats.schedule_delay > balloon_stats.max_schedule_delay)
		balloon_stats.schedule_delay = balloon_stats.max_schedule_delay;

	return BP_EAGAIN;
}

#ifdef CONFIG_XEN_BALLOON_MEMORY_HOTPLUG
static long current_credit(void)
{
	return balloon_stats.target_pages - balloon_stats.current_pages -
		balloon_stats.hotplug_pages;
}

static bool balloon_is_inflated(void)
{
	if (balloon_stats.balloon_low || balloon_stats.balloon_high ||
			balloon_stats.balloon_hotplug)
		return true;
	else
		return false;
}

/*
 * reserve_additional_memory() adds a memory region of at least "credit"
 * pages above max_pfn. The new region is section-aligned and its size is
 * rounded up to a multiple of the section size. This makes optimal use
 * of the address space and establishes proper alignment the first time
 * the function is called after boot (the last section populated at boot
 * may be only partially full, and its unused pages do not have the
 * PG_reserved bit set; online_pages_range() refuses to online a range
 * whose first page lacks PG_reserved). The real size of the added memory
 * is only established at the page-onlining stage.
 */
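/*
 * Worked example (illustrative, assuming x86-64 with 128 MiB sections,
 * i.e. PAGES_PER_SECTION == 32768): a credit of 512 pages is rounded up
 * to one full section, add_memory() is asked for 32768 pages, 512 of
 * them are accounted in hotplug_pages and the remaining 32256 sit in
 * balloon_hotplug until onlining and later target changes consume them.
 */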

static enum bp_state reserve_additional_memory(long credit)
{
	int nid, rc;
	u64 hotplug_start_paddr;
	unsigned long balloon_hotplug = credit;

	hotplug_start_paddr = PFN_PHYS(SECTION_ALIGN_UP(max_pfn));
	balloon_hotplug = round_up(balloon_hotplug, PAGES_PER_SECTION);
	nid = memory_add_physaddr_to_nid(hotplug_start_paddr);

	rc = add_memory(nid, hotplug_start_paddr, balloon_hotplug << PAGE_SHIFT);

	if (rc) {
		pr_info("%s: add_memory() failed: %i\n", __func__, rc);
		return BP_EAGAIN;
	}

	balloon_hotplug -= credit;

	balloon_stats.hotplug_pages += credit;
	balloon_stats.balloon_hotplug = balloon_hotplug;

	return BP_DONE;
}

static void xen_online_page(struct page *page)
{
	__online_page_set_limits(page);

	mutex_lock(&balloon_mutex);

	__balloon_append(page);

	if (balloon_stats.hotplug_pages)
		--balloon_stats.hotplug_pages;
	else
		--balloon_stats.balloon_hotplug;

	mutex_unlock(&balloon_mutex);
}

static int xen_memory_notifier(struct notifier_block *nb, unsigned long val, void *v)
{
	if (val == MEM_ONLINE)
		schedule_delayed_work(&balloon_worker, 0);

	return NOTIFY_OK;
}

static struct notifier_block xen_memory_nb = {
	.notifier_call = xen_memory_notifier,
	.priority = 0
};
#else
static long current_credit(void)
{
	unsigned long target = balloon_stats.target_pages;

	target = min(target,
		     balloon_stats.current_pages +
		     balloon_stats.balloon_low +
		     balloon_stats.balloon_high);

	return target - balloon_stats.current_pages;
}

static bool balloon_is_inflated(void)
{
	if (balloon_stats.balloon_low || balloon_stats.balloon_high)
		return true;
	else
		return false;
}

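/*
 * Without CONFIG_XEN_BALLOON_MEMORY_HOTPLUG the balloon cannot grow past
 * the initial reservation, so clamp the target to what we already have;
 * the credit then drops to zero and the worker stops retrying.
 */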
static enum bp_state reserve_additional_memory(long credit)
{
	balloon_stats.target_pages = balloon_stats.current_pages;
	return BP_DONE;
}
#endif /* CONFIG_XEN_BALLOON_MEMORY_HOTPLUG */

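/*
 * Deflate the balloon: fill frame_list with the PFNs of up to nr_pages
 * ballooned pages, ask Xen to back those slots with machine frames via
 * XENMEM_populate_physmap, then fix up the p2m (and, for PV lowmem, the
 * kernel linear mapping) and release the pages to the page allocator.
 * The hypercall may populate fewer extents than requested.
 */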
static enum bp_state increase_reservation(unsigned long nr_pages)
{
	int rc;
	unsigned long  pfn, i;
	struct page   *page;
	struct xen_memory_reservation reservation = {
		.address_bits = 0,
		.extent_order = 0,
		.domid        = DOMID_SELF
	};

#ifdef CONFIG_XEN_BALLOON_MEMORY_HOTPLUG
	if (!balloon_stats.balloon_low && !balloon_stats.balloon_high) {
		nr_pages = min(nr_pages, balloon_stats.balloon_hotplug);
		balloon_stats.hotplug_pages += nr_pages;
		balloon_stats.balloon_hotplug -= nr_pages;
		return BP_DONE;
	}
#endif

	if (nr_pages > ARRAY_SIZE(frame_list))
		nr_pages = ARRAY_SIZE(frame_list);

	page = balloon_first_page();
	for (i = 0; i < nr_pages; i++) {
		if (!page) {
			nr_pages = i;
			break;
		}
		frame_list[i] = page_to_pfn(page);
		page = balloon_next_page(page);
	}

	set_xen_guest_handle(reservation.extent_start, frame_list);
	reservation.nr_extents = nr_pages;
	rc = HYPERVISOR_memory_op(XENMEM_populate_physmap, &reservation);
	if (rc <= 0)
		return BP_EAGAIN;

	for (i = 0; i < rc; i++) {
		page = balloon_retrieve(false);
		BUG_ON(page == NULL);

		pfn = page_to_pfn(page);

#ifdef CONFIG_XEN_HAVE_PVMMU
		if (!xen_feature(XENFEAT_auto_translated_physmap)) {
			set_phys_to_machine(pfn, frame_list[i]);

			/* Link back into the page tables if not highmem. */
			if (!PageHighMem(page)) {
				int ret;
				ret = HYPERVISOR_update_va_mapping(
						(unsigned long)__va(pfn << PAGE_SHIFT),
						mfn_pte(frame_list[i], PAGE_KERNEL),
						0);
				BUG_ON(ret);
			}
		}
#endif

		/* Relinquish the page back to the allocator. */
		__free_reserved_page(page);
	}

	balloon_stats.current_pages += rc;

	return BP_DONE;
}

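/*
 * Inflate the balloon: allocate pages from the kernel, scrub them, point
 * any direct mapping at the per-cpu scratch frame (PV only), and return
 * the underlying machine frames to Xen with a single
 * XENMEM_decrease_reservation hypercall.
 */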
static enum bp_state decrease_reservation(unsigned long nr_pages, gfp_t gfp)
{
	enum bp_state state = BP_DONE;
	unsigned long  pfn, i;
	struct page   *page;
	int ret;
	struct xen_memory_reservation reservation = {
		.address_bits = 0,
		.extent_order = 0,
		.domid        = DOMID_SELF
	};

#ifdef CONFIG_XEN_BALLOON_MEMORY_HOTPLUG
	if (balloon_stats.hotplug_pages) {
		nr_pages = min(nr_pages, balloon_stats.hotplug_pages);
		balloon_stats.hotplug_pages -= nr_pages;
		balloon_stats.balloon_hotplug += nr_pages;
		return BP_DONE;
	}
#endif

	if (nr_pages > ARRAY_SIZE(frame_list))
		nr_pages = ARRAY_SIZE(frame_list);

	for (i = 0; i < nr_pages; i++) {
		page = alloc_page(gfp);
		if (page == NULL) {
			nr_pages = i;
			state = BP_EAGAIN;
			break;
		}

		pfn = page_to_pfn(page);
		frame_list[i] = pfn_to_mfn(pfn);

		scrub_page(page);

#ifdef CONFIG_XEN_HAVE_PVMMU
		/*
		 * Ballooned out frames are effectively replaced with
		 * a scratch frame.  Ensure direct mappings and the
		 * p2m are consistent.
		 */
		if (!xen_feature(XENFEAT_auto_translated_physmap)) {
			unsigned long p;
			struct page   *scratch_page = get_balloon_scratch_page();

			if (!PageHighMem(page)) {
				ret = HYPERVISOR_update_va_mapping(
						(unsigned long)__va(pfn << PAGE_SHIFT),
						pfn_pte(page_to_pfn(scratch_page),
							PAGE_KERNEL_RO), 0);
				BUG_ON(ret);
			}
			p = page_to_pfn(scratch_page);
			__set_phys_to_machine(pfn, pfn_to_mfn(p));

			put_balloon_scratch_page();
		}
#endif

		balloon_append(pfn_to_page(pfn));
	}

	/* Ensure that ballooned highmem pages don't have kmaps. */
	kmap_flush_unused();
	flush_tlb_all();

	set_xen_guest_handle(reservation.extent_start, frame_list);
	reservation.nr_extents   = nr_pages;
	ret = HYPERVISOR_memory_op(XENMEM_decrease_reservation, &reservation);
	BUG_ON(ret != nr_pages);

	balloon_stats.current_pages -= nr_pages;

	return state;
}

/*
 * The balloon mutex keeps concurrent balloon operations (the worker,
 * alloc/free of ballooned pages) from conflicting. Updates of the target
 * count are plain stores done without the mutex (see
 * balloon_set_new_target()), so a pass may work against a stale target,
 * but we recover on a later pass. A positive credit deflates the balloon
 * (memory is reclaimed from Xen or hotplugged); a negative credit
 * inflates it (memory is returned to Xen).
 */
static void balloon_process(struct work_struct *work)
{
	enum bp_state state = BP_DONE;
	long credit;

	mutex_lock(&balloon_mutex);

	do {
		credit = current_credit();

		if (credit > 0) {
			if (balloon_is_inflated())
				state = increase_reservation(credit);
			else
				state = reserve_additional_memory(credit);
		}

		if (credit < 0)
			state = decrease_reservation(-credit, GFP_BALLOON);

		state = update_schedule(state);

#ifndef CONFIG_PREEMPT
		if (need_resched())
			schedule();
#endif
	} while (credit && state == BP_DONE);

	/* Schedule more work if there is some still to be done. */
	if (state == BP_EAGAIN)
		schedule_delayed_work(&balloon_worker, balloon_stats.schedule_delay * HZ);

	mutex_unlock(&balloon_mutex);
}

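/*
 * get_cpu_var() disables preemption, so every get_balloon_scratch_page()
 * must be paired with put_balloon_scratch_page() and the section between
 * them kept short.
 */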
struct page *get_balloon_scratch_page(void)
{
	struct page *ret = get_cpu_var(balloon_scratch_page);
	BUG_ON(ret == NULL);
	return ret;
}

void put_balloon_scratch_page(void)
{
	put_cpu_var(balloon_scratch_page);
}

508 
509 /* Resets the Xen limit, sets new target, and kicks off processing. */
510 void balloon_set_new_target(unsigned long target)
511 {
512 	/* No need for lock. Not read-modify-write updates. */
513 	balloon_stats.target_pages = target;
514 	schedule_delayed_work(&balloon_worker, 0);
515 }
516 EXPORT_SYMBOL_GPL(balloon_set_new_target);
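
/*
 * Example caller, sketching what the xenstore "memory/target" watch in
 * drivers/xen/xen-balloon.c does (the toolstack writes the target in
 * KiB, so it is converted to pages before calling in here):
 *
 *	unsigned long long new_target;
 *	int err;
 *
 *	err = xenbus_scanf(XBT_NIL, "memory", "target", "%llu", &new_target);
 *	if (err != 1)
 *		return;
 *	balloon_set_new_target(new_target >> (PAGE_SHIFT - 10));
 */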

/**
 * alloc_xenballooned_pages - get pages that have been ballooned out
 * @nr_pages: Number of pages to get
 * @pages: pages returned
 * @highmem: allow highmem pages
 * @return 0 on success, negative errno otherwise
 */
int alloc_xenballooned_pages(int nr_pages, struct page **pages, bool highmem)
{
	int pgno = 0;
	struct page *page;
	mutex_lock(&balloon_mutex);
	while (pgno < nr_pages) {
		page = balloon_retrieve(highmem);
		if (page && (highmem || !PageHighMem(page))) {
			pages[pgno++] = page;
		} else {
			enum bp_state st;
			if (page)
				balloon_append(page);
			st = decrease_reservation(nr_pages - pgno,
					highmem ? GFP_HIGHUSER : GFP_USER);
			if (st != BP_DONE)
				goto out_undo;
		}
	}
	mutex_unlock(&balloon_mutex);
	return 0;
 out_undo:
	while (pgno)
		balloon_append(pages[--pgno]);
	/* Free the memory back to the kernel soon */
	schedule_delayed_work(&balloon_worker, 0);
	mutex_unlock(&balloon_mutex);
	return -ENOMEM;
}
EXPORT_SYMBOL(alloc_xenballooned_pages);
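
/*
 * Minimal usage sketch (hypothetical caller, e.g. a backend that needs
 * empty frames to map another domain's memory into):
 *
 *	struct page *pages[16];
 *	int err;
 *
 *	err = alloc_xenballooned_pages(16, pages, false);
 *	if (err)
 *		return err;
 *	(map foreign frames over the ballooned pages here)
 *	free_xenballooned_pages(16, pages);
 */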

/**
 * free_xenballooned_pages - return pages retrieved with alloc_xenballooned_pages
 * @nr_pages: Number of pages
 * @pages: pages to return
 */
void free_xenballooned_pages(int nr_pages, struct page **pages)
{
	int i;

	mutex_lock(&balloon_mutex);

	for (i = 0; i < nr_pages; i++) {
		if (pages[i])
			balloon_append(pages[i]);
	}

	/* The balloon may be too large now. Shrink it if needed. */
	if (current_credit())
		schedule_delayed_work(&balloon_worker, 0);

	mutex_unlock(&balloon_mutex);
}
EXPORT_SYMBOL(free_xenballooned_pages);

static void __init balloon_add_region(unsigned long start_pfn,
				      unsigned long pages)
{
	unsigned long pfn, extra_pfn_end;
	struct page *page;

	/*
	 * If the amount of usable memory has been limited (e.g., with
	 * the 'mem' command line parameter), don't add pages beyond
	 * this limit.
	 */
	extra_pfn_end = min(max_pfn, start_pfn + pages);

	for (pfn = start_pfn; pfn < extra_pfn_end; pfn++) {
		page = pfn_to_page(pfn);
		/*
		 * totalram_pages and totalhigh_pages do not include the
		 * boot-time balloon extension, so don't subtract from it.
		 */
		__balloon_append(page);
	}
}

static int balloon_cpu_notify(struct notifier_block *self,
				    unsigned long action, void *hcpu)
{
	int cpu = (long)hcpu;
	switch (action) {
	case CPU_UP_PREPARE:
		if (per_cpu(balloon_scratch_page, cpu) != NULL)
			break;
		per_cpu(balloon_scratch_page, cpu) = alloc_page(GFP_KERNEL);
		if (per_cpu(balloon_scratch_page, cpu) == NULL) {
			pr_warn("Failed to allocate balloon_scratch_page for cpu %d\n", cpu);
			return NOTIFY_BAD;
		}
		break;
	default:
		break;
	}
	return NOTIFY_OK;
}

static struct notifier_block balloon_cpu_notifier = {
	.notifier_call	= balloon_cpu_notify,
};

static int __init balloon_init(void)
{
	int i, cpu;

	if (!xen_domain())
		return -ENODEV;

	if (!xen_feature(XENFEAT_auto_translated_physmap)) {
		for_each_online_cpu(cpu) {
			per_cpu(balloon_scratch_page, cpu) = alloc_page(GFP_KERNEL);
			if (per_cpu(balloon_scratch_page, cpu) == NULL) {
				pr_warn("Failed to allocate balloon_scratch_page for cpu %d\n", cpu);
				return -ENOMEM;
			}
		}
		register_cpu_notifier(&balloon_cpu_notifier);
	}

	pr_info("Initialising balloon driver\n");

	balloon_stats.current_pages = xen_pv_domain()
		? min(xen_start_info->nr_pages - xen_released_pages, max_pfn)
		: get_num_physpages();
	balloon_stats.target_pages  = balloon_stats.current_pages;
	balloon_stats.balloon_low   = 0;
	balloon_stats.balloon_high  = 0;

	balloon_stats.schedule_delay = 1;
	balloon_stats.max_schedule_delay = 32;
	balloon_stats.retry_count = 1;
	balloon_stats.max_retry_count = RETRY_UNLIMITED;

#ifdef CONFIG_XEN_BALLOON_MEMORY_HOTPLUG
	balloon_stats.hotplug_pages = 0;
	balloon_stats.balloon_hotplug = 0;

	set_online_page_callback(&xen_online_page);
	register_memory_notifier(&xen_memory_nb);
#endif

	/*
	 * Initialize the balloon with pages from the extra memory
	 * regions (see arch/x86/xen/setup.c).
	 */
	for (i = 0; i < XEN_EXTRA_MEM_MAX_REGIONS; i++)
		if (xen_extra_mem[i].size)
			balloon_add_region(PFN_UP(xen_extra_mem[i].start),
					   PFN_DOWN(xen_extra_mem[i].size));

	return 0;
}

subsys_initcall(balloon_init);

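/*
 * Runs at early_initcall time, before balloon_init() allocates any
 * scratch pages or registers the CPU notifier, so every possible CPU
 * starts out with a NULL scratch page pointer.
 */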
static int __init balloon_clear(void)
{
	int cpu;

	for_each_possible_cpu(cpu)
		per_cpu(balloon_scratch_page, cpu) = NULL;

	return 0;
}
early_initcall(balloon_clear);

MODULE_LICENSE("GPL");