11775826cSJeremy Fitzhardinge /****************************************************************************** 21775826cSJeremy Fitzhardinge * Xen balloon driver - enables returning/claiming memory to/from Xen. 31775826cSJeremy Fitzhardinge * 41775826cSJeremy Fitzhardinge * Copyright (c) 2003, B Dragovic 51775826cSJeremy Fitzhardinge * Copyright (c) 2003-2004, M Williamson, K Fraser 61775826cSJeremy Fitzhardinge * Copyright (c) 2005 Dan M. Smith, IBM Corporation 7080e2be7SDaniel Kiper * Copyright (c) 2010 Daniel Kiper 8080e2be7SDaniel Kiper * 9080e2be7SDaniel Kiper * Memory hotplug support was written by Daniel Kiper. Work on 10080e2be7SDaniel Kiper * it was sponsored by Google under Google Summer of Code 2010 11080e2be7SDaniel Kiper * program. Jeremy Fitzhardinge from Citrix was the mentor for 12080e2be7SDaniel Kiper * this project. 131775826cSJeremy Fitzhardinge * 141775826cSJeremy Fitzhardinge * This program is free software; you can redistribute it and/or 151775826cSJeremy Fitzhardinge * modify it under the terms of the GNU General Public License version 2 161775826cSJeremy Fitzhardinge * as published by the Free Software Foundation; or, when distributed 171775826cSJeremy Fitzhardinge * separately from the Linux kernel or incorporated into other 181775826cSJeremy Fitzhardinge * software packages, subject to the following license: 191775826cSJeremy Fitzhardinge * 201775826cSJeremy Fitzhardinge * Permission is hereby granted, free of charge, to any person obtaining a copy 211775826cSJeremy Fitzhardinge * of this source file (the "Software"), to deal in the Software without 221775826cSJeremy Fitzhardinge * restriction, including without limitation the rights to use, copy, modify, 231775826cSJeremy Fitzhardinge * merge, publish, distribute, sublicense, and/or sell copies of the Software, 241775826cSJeremy Fitzhardinge * and to permit persons to whom the Software is furnished to do so, subject to 251775826cSJeremy Fitzhardinge * the following conditions: 
261775826cSJeremy Fitzhardinge * 271775826cSJeremy Fitzhardinge * The above copyright notice and this permission notice shall be included in 281775826cSJeremy Fitzhardinge * all copies or substantial portions of the Software. 291775826cSJeremy Fitzhardinge * 301775826cSJeremy Fitzhardinge * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR 311775826cSJeremy Fitzhardinge * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, 321775826cSJeremy Fitzhardinge * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE 331775826cSJeremy Fitzhardinge * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER 341775826cSJeremy Fitzhardinge * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING 351775826cSJeremy Fitzhardinge * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS 361775826cSJeremy Fitzhardinge * IN THE SOFTWARE. 371775826cSJeremy Fitzhardinge */ 381775826cSJeremy Fitzhardinge 39283c0972SJoe Perches #define pr_fmt(fmt) "xen:" KBUILD_MODNAME ": " fmt 40283c0972SJoe Perches 41cd9151e2SStefano Stabellini #include <linux/cpu.h> 421775826cSJeremy Fitzhardinge #include <linux/kernel.h> 431775826cSJeremy Fitzhardinge #include <linux/sched.h> 445b825c3aSIngo Molnar #include <linux/cred.h> 451775826cSJeremy Fitzhardinge #include <linux/errno.h> 461775826cSJeremy Fitzhardinge #include <linux/mm.h> 4757c8a661SMike Rapoport #include <linux/memblock.h> 481775826cSJeremy Fitzhardinge #include <linux/pagemap.h> 491775826cSJeremy Fitzhardinge #include <linux/highmem.h> 501775826cSJeremy Fitzhardinge #include <linux/mutex.h> 511775826cSJeremy Fitzhardinge #include <linux/list.h> 525a0e3ad6STejun Heo #include <linux/gfp.h> 53080e2be7SDaniel Kiper #include <linux/notifier.h> 54080e2be7SDaniel Kiper #include <linux/memory.h> 55080e2be7SDaniel Kiper #include <linux/memory_hotplug.h> 56cd9151e2SStefano Stabellini #include <linux/percpu-defs.h> 5755b3da98SDavid 
#include <linux/slab.h>
#include <linux/sysctl.h>

#include <asm/page.h>
#include <asm/pgalloc.h>
#include <asm/pgtable.h>
#include <asm/tlb.h>

#include <asm/xen/hypervisor.h>
#include <asm/xen/hypercall.h>

#include <xen/xen.h>
#include <xen/interface/xen.h>
#include <xen/interface/memory.h>
#include <xen/balloon.h>
#include <xen/features.h>
#include <xen/page.h>
#include <xen/mem-reservation.h>

/*
 * When non-zero, alloc_xenballooned_pages() may satisfy requests by
 * hotplugging unpopulated memory instead of ballooning out existing RAM.
 * Exposed as the xen/balloon/hotplug_unpopulated sysctl below.
 */
static int xen_hotplug_unpopulated;

#ifdef CONFIG_XEN_BALLOON_MEMORY_HOTPLUG

/* Bounds for the hotplug_unpopulated sysctl: it is a boolean 0/1. */
static int zero;
static int one = 1;

/* /proc/sys/xen/balloon/hotplug_unpopulated */
static struct ctl_table balloon_table[] = {
	{
		.procname	= "hotplug_unpopulated",
		.data		= &xen_hotplug_unpopulated,
		.maxlen		= sizeof(int),
		.mode		= 0644,
		.proc_handler	= proc_dointvec_minmax,
		.extra1         = &zero,
		.extra2         = &one,
	},
	{ }
};

static struct ctl_table balloon_root[] = {
	{
		.procname	= "balloon",
		.mode		= 0555,
		.child		= balloon_table,
	},
	{ }
};

static struct ctl_table xen_root[] = {
	{
		.procname	= "xen",
		.mode		= 0555,
		.child		= balloon_root,
	},
	{ }
};

#endif

/*
 * Use one extent per PAGE_SIZE to avoid to break down the page into
 * multiple frame.
 */
#define EXTENT_ORDER (fls(XEN_PFN_PER_PAGE) - 1)

/*
 * balloon_process() state:
 *
 * BP_DONE: done or nothing to do,
 * BP_WAIT: wait to be rescheduled,
 * BP_EAGAIN: error, go to sleep,
 * BP_ECANCELED: error, balloon operation canceled.
 */

enum bp_state {
	BP_DONE,
	BP_WAIT,
	BP_EAGAIN,
	BP_ECANCELED
};


/* Serializes all balloon inflate/deflate operations and the page lists. */
static DEFINE_MUTEX(balloon_mutex);

struct balloon_stats balloon_stats;
EXPORT_SYMBOL_GPL(balloon_stats);

/* We increase/decrease in batches which fit in a page */
static xen_pfn_t frame_list[PAGE_SIZE / sizeof(xen_pfn_t)];


/* List of ballooned pages, threaded through the mem_map array. */
static LIST_HEAD(ballooned_pages);
/* Woken whenever pages are added to ballooned_pages (see __balloon_append). */
static DECLARE_WAIT_QUEUE_HEAD(balloon_wq);

/* Main work function, always executed in process context. */
static void balloon_process(struct work_struct *work);
static DECLARE_DELAYED_WORK(balloon_worker, balloon_process);

/* When ballooning out (allocating memory to return to Xen) we don't really
   want the kernel to try too hard since that can trigger the oom killer. */
#define GFP_BALLOON \
	(GFP_HIGHUSER | __GFP_NOWARN | __GFP_NORETRY | __GFP_NOMEMALLOC)

/* balloon_append: add the given page to the balloon.
 */
static void __balloon_append(struct page *page)
{
	/* Lowmem is re-populated first, so highmem pages go at list tail. */
	if (PageHighMem(page)) {
		list_add_tail(&page->lru, &ballooned_pages);
		balloon_stats.balloon_high++;
	} else {
		list_add(&page->lru, &ballooned_pages);
		balloon_stats.balloon_low++;
	}
	wake_up(&balloon_wq);
}

/* Locked wrapper kept for symmetry with __balloon_append(); caller holds
 * balloon_mutex. */
static void balloon_append(struct page *page)
{
	__balloon_append(page);
}

/* balloon_retrieve: rescue a page from the balloon, if it is not empty.
 * Returns NULL if the balloon is empty, or if @require_lowmem is set and
 * only highmem pages remain (lowmem pages sit at the list head).
 */
static struct page *balloon_retrieve(bool require_lowmem)
{
	struct page *page;

	if (list_empty(&ballooned_pages))
		return NULL;

	page = list_entry(ballooned_pages.next, struct page, lru);
	if (require_lowmem && PageHighMem(page))
		return NULL;
	list_del(&page->lru);

	if (PageHighMem(page))
		balloon_stats.balloon_high--;
	else
		balloon_stats.balloon_low--;

	return page;
}

/* Next page in the balloon list, or NULL when @page is the last entry. */
static struct page *balloon_next_page(struct page *page)
{
	struct list_head *next = page->lru.next;
	if (next == &ballooned_pages)
		return NULL;
	return list_entry(next, struct page, lru);
}

/*
 * Map the outcome of a balloon operation onto the retry/backoff state.
 * On success the retry bookkeeping is reset; on repeated errors the
 * schedule_delay backs off exponentially (see continuation below).
 */
static enum bp_state update_schedule(enum bp_state state)
{
	if (state == BP_WAIT)
		return BP_WAIT;

	if (state == BP_ECANCELED)
		return BP_ECANCELED;

	if (state == BP_DONE) {
		balloon_stats.schedule_delay = 1;
		balloon_stats.retry_count = 1;
		return BP_DONE;
	}

	/* Operation failed (BP_EAGAIN): count the retry and back off. */
	++balloon_stats.retry_count;

	if (balloon_stats.max_retry_count != RETRY_UNLIMITED &&
	    balloon_stats.retry_count > balloon_stats.max_retry_count) {
		balloon_stats.schedule_delay = 1;
		balloon_stats.retry_count = 1;
		return BP_ECANCELED;
	}

	/* Exponential backoff, capped at max_schedule_delay seconds. */
	balloon_stats.schedule_delay <<= 1;

	if (balloon_stats.schedule_delay > balloon_stats.max_schedule_delay)
		balloon_stats.schedule_delay = balloon_stats.max_schedule_delay;

	return BP_EAGAIN;
}

#ifdef CONFIG_XEN_BALLOON_MEMORY_HOTPLUG
/* Release and free a resource obtained from additional_memory_resource(). */
static void release_memory_resource(struct resource *resource)
{
	if (!resource)
		return;

	/*
	 * No need to reset region to identity mapped since we now
	 * know that no I/O can be in this region
	 */
	release_resource(resource);
	kfree(resource);
}

/*
 * Allocate an iomem "System RAM" resource of @size bytes, section-aligned,
 * for hotplugging new memory into. Returns NULL on failure; the caller owns
 * the resource and must free it with release_memory_resource().
 */
static struct resource *additional_memory_resource(phys_addr_t size)
{
	struct resource *res;
	int ret;

	res = kzalloc(sizeof(*res), GFP_KERNEL);
	if (!res)
		return NULL;

	res->name = "System RAM";
	res->flags = IORESOURCE_SYSTEM_RAM | IORESOURCE_BUSY;

	ret = allocate_resource(&iomem_resource, res,
				size, 0, -1,
				PAGES_PER_SECTION * PAGE_SIZE, NULL, NULL);
	if (ret < 0) {
		pr_err("Cannot allocate new System RAM resource\n");
		kfree(res);
		return NULL;
	}

#ifdef CONFIG_SPARSEMEM
	{
		/* Reject regions beyond what sparsemem can address. */
		unsigned long limit = 1UL << (MAX_PHYSMEM_BITS - PAGE_SHIFT);
		unsigned long pfn = res->start >> PAGE_SHIFT;

		if (pfn > limit) {
			pr_err("New System RAM resource outside addressable RAM (%lu > %lu)\n",
			       pfn, limit);
			release_memory_resource(res);
			return NULL;
		}
	}
#endif

	return res;
}

/*
 * Hotplug enough new memory to cover the current credit (target plus
 * outstanding unpopulated-page requests minus what is already present).
 * Returns BP_WAIT while waiting for the new sections to be onlined.
 */
static enum bp_state reserve_additional_memory(void)
{
	long credit;
	struct resource *resource;
	int nid, rc;
	unsigned long balloon_hotplug;

	credit = balloon_stats.target_pages + balloon_stats.target_unpopulated
		- balloon_stats.total_pages;

	/*
	 * Already hotplugged enough pages? Wait for them to be
	 * onlined.
	 */
	if (credit <= 0)
		return BP_WAIT;

	/* Hotplug happens in whole sparsemem sections. */
	balloon_hotplug = round_up(credit, PAGES_PER_SECTION);

	resource = additional_memory_resource(balloon_hotplug * PAGE_SIZE);
	if (!resource)
		goto err;

	nid = memory_add_physaddr_to_nid(resource->start);

#ifdef CONFIG_XEN_HAVE_PVMMU
	/*
	 * We don't support PV MMU when Linux and Xen is using
	 * different page granularity.
	 */
	BUILD_BUG_ON(XEN_PAGE_SIZE != PAGE_SIZE);

	/*
	 * add_memory() will build page tables for the new memory so
	 * the p2m must contain invalid entries so the correct
	 * non-present PTEs will be written.
	 *
	 * If a failure occurs, the original (identity) p2m entries
	 * are not restored since this region is now known not to
	 * conflict with any devices.
	 */
	if (!xen_feature(XENFEAT_auto_translated_physmap)) {
		unsigned long pfn, i;

		pfn = PFN_DOWN(resource->start);
		for (i = 0; i < balloon_hotplug; i++) {
			if (!set_phys_to_machine(pfn + i, INVALID_P2M_ENTRY)) {
				pr_warn("set_phys_to_machine() failed, no memory added\n");
				goto err;
			}
		}
	}
#endif

	/*
	 * add_memory_resource() will call online_pages() which in its turn
	 * will call xen_online_page() callback causing deadlock if we don't
	 * release balloon_mutex here. Unlocking here is safe because the
	 * callers drop the mutex before trying again.
	 */
	mutex_unlock(&balloon_mutex);
	/* add_memory_resource() requires the device_hotplug lock */
	lock_device_hotplug();
	rc = add_memory_resource(nid, resource);
	unlock_device_hotplug();
	mutex_lock(&balloon_mutex);

	if (rc) {
		pr_warn("Cannot add additional memory (%i)\n", rc);
		goto err;
	}

	balloon_stats.total_pages += balloon_hotplug;

	return BP_WAIT;
 err:
	release_memory_resource(resource);
	return BP_ECANCELED;
}

/*
 * online_pages() callback: newly-onlined hotplugged pages are placed into
 * the balloon rather than handed to the page allocator; balloon_process()
 * will later reclaim them against the current credit.
 */
static void xen_online_page(struct page *page, unsigned int order)
{
	unsigned long i, size = (1 << order);
	unsigned long start_pfn = page_to_pfn(page);
	struct page *p;

	pr_debug("Online %lu pages starting at pfn 0x%lx\n", size, start_pfn);
	mutex_lock(&balloon_mutex);
	for (i = 0; i < size; i++) {
		p = pfn_to_page(start_pfn + i);
		__online_page_set_limits(p);
		__SetPageOffline(p);
		__balloon_append(p);
	}
	mutex_unlock(&balloon_mutex);
}

/* Re-kick the balloon worker once hotplugged memory has come online. */
static int xen_memory_notifier(struct notifier_block *nb, unsigned long val, void *v)
{
	if (val == MEM_ONLINE)
		schedule_delayed_work(&balloon_worker, 0);

	return NOTIFY_OK;
}

static struct notifier_block xen_memory_nb = {
	.notifier_call = xen_memory_notifier,
	.priority = 0
};
#else
/* Without memory hotplug the target can never exceed current memory. */
static enum bp_state reserve_additional_memory(void)
{
	balloon_stats.target_pages = balloon_stats.current_pages;
	return BP_ECANCELED;
}
#endif /* CONFIG_XEN_BALLOON_MEMORY_HOTPLUG */

/* Pages still to be added (positive) or removed (negative) to hit target. */
static long current_credit(void)
{
	return balloon_stats.target_pages - balloon_stats.current_pages;
}

/* True if any pages are currently held in the balloon. */
static bool balloon_is_inflated(void)
{
	return balloon_stats.balloon_low || balloon_stats.balloon_high;
}

/*
 * Ask Xen to populate up to @nr_pages frames from the balloon list and
 * return them to the page allocator. Processes at most one frame_list
 * batch per call; balloon_process() loops until the credit is consumed.
 */
static enum bp_state increase_reservation(unsigned long nr_pages)
{
	int rc;
	unsigned long i;
	struct page *page;

	if (nr_pages > ARRAY_SIZE(frame_list))
		nr_pages = ARRAY_SIZE(frame_list);

	page = list_first_entry_or_null(&ballooned_pages, struct page, lru);
	for (i = 0; i < nr_pages; i++) {
		if (!page) {
			/* Fewer ballooned pages available than requested. */
			nr_pages = i;
			break;
		}
		frame_list[i] = page_to_xen_pfn(page);
		page = balloon_next_page(page);
	}

	/* rc is the number of extents Xen actually populated (may be < nr). */
	rc = xenmem_reservation_increase(nr_pages, frame_list);
	if (rc <= 0)
		return BP_EAGAIN;

	for (i = 0; i < rc; i++) {
		page = balloon_retrieve(false);
		BUG_ON(page == NULL);

		xenmem_reservation_va_mapping_update(1, &page, &frame_list[i]);

		/* Relinquish the page back to the allocator. */
		__ClearPageOffline(page);
		free_reserved_page(page);
	}

	balloon_stats.current_pages += rc;

	return BP_DONE;
}

/*
 * Allocate @nr_pages pages with @gfp, hand their frames back to Xen and
 * park the struct pages in the balloon list. Processes at most one
 * frame_list batch per call.
 */
static enum bp_state decrease_reservation(unsigned long nr_pages, gfp_t gfp)
{
	enum bp_state state = BP_DONE;
	unsigned long i;
	struct page *page, *tmp;
	int ret;
	LIST_HEAD(pages);

	if (nr_pages > ARRAY_SIZE(frame_list))
		nr_pages = ARRAY_SIZE(frame_list);

	for (i = 0; i < nr_pages; i++) {
		page = alloc_page(gfp);
		if (page == NULL) {
			/* Give back what we managed to allocate so far. */
			nr_pages = i;
			state =
				BP_EAGAIN;
			break;
		}
		__SetPageOffline(page);
		adjust_managed_page_count(page, -1);
		xenmem_reservation_scrub_page(page);
		list_add(&page->lru, &pages);
	}

	/*
	 * Ensure that ballooned highmem pages don't have kmaps.
	 *
	 * Do this before changing the p2m as kmap_flush_unused()
	 * reads PTEs to obtain pages (and hence needs the original
	 * p2m entry).
	 */
	kmap_flush_unused();

	/*
	 * Setup the frame, update direct mapping, invalidate P2M,
	 * and add to balloon.
	 */
	i = 0;
	list_for_each_entry_safe(page, tmp, &pages, lru) {
		frame_list[i++] = xen_page_to_gfn(page);

		xenmem_reservation_va_mapping_reset(1, &page);

		list_del(&page->lru);

		balloon_append(page);
	}

	flush_tlb_all();

	/* Xen must accept every frame we offered; anything else is fatal. */
	ret = xenmem_reservation_decrease(nr_pages, frame_list);
	BUG_ON(ret != nr_pages);

	balloon_stats.current_pages -= nr_pages;

	return state;
}

/*
 * As this is a work item it is guaranteed to
 run as a single instance only.
 * We may of course race updates of the target counts (which are protected
 * by the balloon lock), or with changes to the Xen hard limit, but we will
 * recover from these in time.
 */
static void balloon_process(struct work_struct *work)
{
	enum bp_state state = BP_DONE;
	long credit;


	do {
		mutex_lock(&balloon_mutex);

		credit = current_credit();

		if (credit > 0) {
			/* Inflated balloon: reclaim from it first; otherwise
			 * try hotplugging new memory. */
			if (balloon_is_inflated())
				state = increase_reservation(credit);
			else
				state = reserve_additional_memory();
		}

		if (credit < 0) {
			long n_pages;

			/* Never balloon out more than is safely available. */
			n_pages = min(-credit, si_mem_available());
			state = decrease_reservation(n_pages, GFP_BALLOON);
			if (state == BP_DONE && n_pages != -credit &&
			    n_pages < totalreserve_pages)
				state = BP_EAGAIN;
		}

		state = update_schedule(state);

		mutex_unlock(&balloon_mutex);

		cond_resched();

	} while (credit && state == BP_DONE);

	/* Schedule more work if
 there is some still to be done. */
	if (state == BP_EAGAIN)
		schedule_delayed_work(&balloon_worker, balloon_stats.schedule_delay * HZ);
}

/* Resets the Xen limit, sets new target, and kicks off processing. */
void balloon_set_new_target(unsigned long target)
{
	/* No need for lock. Not read-modify-write updates. */
	balloon_stats.target_pages = target;
	schedule_delayed_work(&balloon_worker, 0);
}
EXPORT_SYMBOL_GPL(balloon_set_new_target);

/*
 * Grow the pool of ballooned pages by @nr_pages, either by hotplugging
 * unpopulated memory (if enabled via sysctl) or by ballooning out existing
 * RAM. Called with balloon_mutex held; may temporarily drop it while
 * waiting for hotplugged pages to appear.
 */
static int add_ballooned_pages(int nr_pages)
{
	enum bp_state st;

	if (xen_hotplug_unpopulated) {
		st = reserve_additional_memory();
		if (st != BP_ECANCELED) {
			mutex_unlock(&balloon_mutex);
			wait_event(balloon_wq,
				   !list_empty(&ballooned_pages));
			mutex_lock(&balloon_mutex);
			return 0;
		}
	}

	if (si_mem_available() < nr_pages)
		return -ENOMEM;

	st = decrease_reservation(nr_pages, GFP_USER);
	if (st != BP_DONE)
		return -ENOMEM;

	return 0;
}

/**
 * alloc_xenballooned_pages - get pages that
 have been ballooned out
 * @nr_pages: Number of pages to get
 * @pages: pages returned
 * @return 0 on success, error otherwise
 */
int alloc_xenballooned_pages(int nr_pages, struct page **pages)
{
	int pgno = 0;
	struct page *page;
	int ret;

	mutex_lock(&balloon_mutex);

	/* Account these pages separately so the worker keeps them reserved. */
	balloon_stats.target_unpopulated += nr_pages;

	while (pgno < nr_pages) {
		page = balloon_retrieve(true);
		if (page) {
			__ClearPageOffline(page);
			pages[pgno++] = page;
#ifdef CONFIG_XEN_HAVE_PVMMU
			/*
			 * We don't support PV MMU when Linux and Xen is using
			 * different page granularity.
			 */
			BUILD_BUG_ON(XEN_PAGE_SIZE != PAGE_SIZE);

			if (!xen_feature(XENFEAT_auto_translated_physmap)) {
				/* Ensure a p2m slot exists for later mapping. */
				ret = xen_alloc_p2m_entry(page_to_pfn(page));
				if (ret < 0)
					goto out_undo;
			}
#endif
		} else {
			/* Balloon empty: grow it and retry the loop. */
			ret = add_ballooned_pages(nr_pages - pgno);
			if (ret < 0)
				goto out_undo;
		}
	}
	mutex_unlock(&balloon_mutex);
	return 0;
 out_undo:
	mutex_unlock(&balloon_mutex);
	/* Return the pages acquired so far; pgno entries are valid. */
	free_xenballooned_pages(pgno, pages);
	return ret;
}
EXPORT_SYMBOL(alloc_xenballooned_pages);

/**
 * free_xenballooned_pages - return pages retrieved with get_ballooned_pages
 * @nr_pages: Number of pages
 * @pages: pages to return
 */
void free_xenballooned_pages(int nr_pages, struct page **pages)
{
	int i;

	mutex_lock(&balloon_mutex);

	for (i = 0; i < nr_pages; i++) {
		if (pages[i]) {
			__SetPageOffline(pages[i]);
			balloon_append(pages[i]);
6621775826cSJeremy Fitzhardinge } 6630266def9SDavid Hildenbrand } 6641775826cSJeremy Fitzhardinge 6651cf6a6c8SDavid Vrabel balloon_stats.target_unpopulated -= nr_pages; 6661cf6a6c8SDavid Vrabel 667b6f30679SKonrad Rzeszutek Wilk /* The balloon may be too large now. Shrink it if needed. */ 66883be7e52SDaniel Kiper if (current_credit()) 669b6f30679SKonrad Rzeszutek Wilk schedule_delayed_work(&balloon_worker, 0); 670b6f30679SKonrad Rzeszutek Wilk 671b6f30679SKonrad Rzeszutek Wilk mutex_unlock(&balloon_mutex); 672b6f30679SKonrad Rzeszutek Wilk } 673b6f30679SKonrad Rzeszutek Wilk EXPORT_SYMBOL(free_xenballooned_pages); 6741775826cSJeremy Fitzhardinge 6754fee9ad8SVitaly Kuznetsov #ifdef CONFIG_XEN_PV 6768b5d44a5SDavid Vrabel static void __init balloon_add_region(unsigned long start_pfn, 6778b5d44a5SDavid Vrabel unsigned long pages) 6781775826cSJeremy Fitzhardinge { 6794dfe22f5SDaniel Kiper unsigned long pfn, extra_pfn_end; 6801775826cSJeremy Fitzhardinge struct page *page; 6811775826cSJeremy Fitzhardinge 6828b5d44a5SDavid Vrabel /* 6838b5d44a5SDavid Vrabel * If the amount of usable memory has been limited (e.g., with 6848b5d44a5SDavid Vrabel * the 'mem' command line parameter), don't add pages beyond 6858b5d44a5SDavid Vrabel * this limit. 6868b5d44a5SDavid Vrabel */ 6878b5d44a5SDavid Vrabel extra_pfn_end = min(max_pfn, start_pfn + pages); 6888b5d44a5SDavid Vrabel 6898b5d44a5SDavid Vrabel for (pfn = start_pfn; pfn < extra_pfn_end; pfn++) { 6908b5d44a5SDavid Vrabel page = pfn_to_page(pfn); 6918b5d44a5SDavid Vrabel /* totalram_pages and totalhigh_pages do not 6928b5d44a5SDavid Vrabel include the boot-time balloon extension, so 6938b5d44a5SDavid Vrabel don't subtract from it. 
*/ 6948b5d44a5SDavid Vrabel __balloon_append(page); 6958b5d44a5SDavid Vrabel } 696de5a77d8SDavid Vrabel 697de5a77d8SDavid Vrabel balloon_stats.total_pages += extra_pfn_end - start_pfn; 6988b5d44a5SDavid Vrabel } 6994fee9ad8SVitaly Kuznetsov #endif 7008b5d44a5SDavid Vrabel 7018b5d44a5SDavid Vrabel static int __init balloon_init(void) 7028b5d44a5SDavid Vrabel { 70353d5522cSStefano Stabellini if (!xen_domain()) 7041775826cSJeremy Fitzhardinge return -ENODEV; 7051775826cSJeremy Fitzhardinge 706283c0972SJoe Perches pr_info("Initialising balloon driver\n"); 7071775826cSJeremy Fitzhardinge 7084fee9ad8SVitaly Kuznetsov #ifdef CONFIG_XEN_PV 709aa24411bSDavid Vrabel balloon_stats.current_pages = xen_pv_domain() 710aa24411bSDavid Vrabel ? min(xen_start_info->nr_pages - xen_released_pages, max_pfn) 711c275a57fSBoris Ostrovsky : get_num_physpages(); 7124fee9ad8SVitaly Kuznetsov #else 7134fee9ad8SVitaly Kuznetsov balloon_stats.current_pages = get_num_physpages(); 7144fee9ad8SVitaly Kuznetsov #endif 7151775826cSJeremy Fitzhardinge balloon_stats.target_pages = balloon_stats.current_pages; 7161775826cSJeremy Fitzhardinge balloon_stats.balloon_low = 0; 7171775826cSJeremy Fitzhardinge balloon_stats.balloon_high = 0; 718de5a77d8SDavid Vrabel balloon_stats.total_pages = balloon_stats.current_pages; 7191775826cSJeremy Fitzhardinge 72095d2ac4aSDaniel Kiper balloon_stats.schedule_delay = 1; 72195d2ac4aSDaniel Kiper balloon_stats.max_schedule_delay = 32; 72295d2ac4aSDaniel Kiper balloon_stats.retry_count = 1; 723a1078e82SJuergen Gross balloon_stats.max_retry_count = 4; 7241775826cSJeremy Fitzhardinge 725080e2be7SDaniel Kiper #ifdef CONFIG_XEN_BALLOON_MEMORY_HOTPLUG 726080e2be7SDaniel Kiper set_online_page_callback(&xen_online_page); 727080e2be7SDaniel Kiper register_memory_notifier(&xen_memory_nb); 7281cf6a6c8SDavid Vrabel register_sysctl_table(xen_root); 729080e2be7SDaniel Kiper #endif 730080e2be7SDaniel Kiper 7314fee9ad8SVitaly Kuznetsov #ifdef CONFIG_XEN_PV 7324fee9ad8SVitaly Kuznetsov 
{ 7334fee9ad8SVitaly Kuznetsov int i; 7344fee9ad8SVitaly Kuznetsov 7352a4c92faSJeremy Fitzhardinge /* 736b1cbf9b1SDavid Vrabel * Initialize the balloon with pages from the extra memory 7378b5d44a5SDavid Vrabel * regions (see arch/x86/xen/setup.c). 7382a4c92faSJeremy Fitzhardinge */ 7398b5d44a5SDavid Vrabel for (i = 0; i < XEN_EXTRA_MEM_MAX_REGIONS; i++) 740626d7508SJuergen Gross if (xen_extra_mem[i].n_pfns) 741626d7508SJuergen Gross balloon_add_region(xen_extra_mem[i].start_pfn, 742626d7508SJuergen Gross xen_extra_mem[i].n_pfns); 7434fee9ad8SVitaly Kuznetsov } 7444fee9ad8SVitaly Kuznetsov #endif 7451775826cSJeremy Fitzhardinge 74696edd61dSJuergen Gross /* Init the xen-balloon driver. */ 74796edd61dSJuergen Gross xen_balloon_init(); 74896edd61dSJuergen Gross 7491775826cSJeremy Fitzhardinge return 0; 7501775826cSJeremy Fitzhardinge } 7511775826cSJeremy Fitzhardinge subsys_initcall(balloon_init); 752