/******************************************************************************
 * Xen balloon driver - enables returning/claiming memory to/from Xen.
 *
 * Copyright (c) 2003, B Dragovic
 * Copyright (c) 2003-2004, M Williamson, K Fraser
 * Copyright (c) 2005 Dan M. Smith, IBM Corporation
 * Copyright (c) 2010 Daniel Kiper
 *
 * Memory hotplug support was written by Daniel Kiper. Work on
 * it was sponsored by Google under Google Summer of Code 2010
 * program. Jeremy Fitzhardinge from Citrix was the mentor for
 * this project.
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License version 2
 * as published by the Free Software Foundation; or, when distributed
 * separately from the Linux kernel or incorporated into other
 * software packages, subject to the following license:
 *
 * Permission is hereby granted, free of charge, to any person obtaining a copy
 * of this source file (the "Software"), to deal in the Software without
 * restriction, including without limitation the rights to use, copy, modify,
 * merge, publish, distribute, sublicense, and/or sell copies of the Software,
 * and to permit persons to whom the Software is furnished to do so, subject to
 * the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
 * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
 * IN THE SOFTWARE.
 */

#define pr_fmt(fmt) "xen:" KBUILD_MODNAME ": " fmt

#include <linux/cpu.h>
#include <linux/kernel.h>
#include <linux/sched.h>
#include <linux/errno.h>
#include <linux/module.h>
#include <linux/mm.h>
#include <linux/bootmem.h>
#include <linux/pagemap.h>
#include <linux/highmem.h>
#include <linux/mutex.h>
#include <linux/list.h>
#include <linux/gfp.h>
#include <linux/notifier.h>
#include <linux/memory.h>
#include <linux/memory_hotplug.h>
#include <linux/percpu-defs.h>

#include <asm/page.h>
#include <asm/pgalloc.h>
#include <asm/pgtable.h>
#include <asm/tlb.h>

#include <asm/xen/hypervisor.h>
#include <asm/xen/hypercall.h>

#include <xen/xen.h>
#include <xen/interface/xen.h>
#include <xen/interface/memory.h>
#include <xen/balloon.h>
#include <xen/features.h>
#include <xen/page.h>

/*
 * balloon_process() state:
 *
 * BP_DONE: done or nothing to do,
 * BP_EAGAIN: error, go to sleep,
 * BP_ECANCELED: error, balloon operation canceled.
 */

enum bp_state {
        BP_DONE,
        BP_EAGAIN,
        BP_ECANCELED
};


static DEFINE_MUTEX(balloon_mutex);

struct balloon_stats balloon_stats;
EXPORT_SYMBOL_GPL(balloon_stats);

/* We increase/decrease in batches which fit in a page */
static xen_pfn_t frame_list[PAGE_SIZE / sizeof(unsigned long)];
static DEFINE_PER_CPU(struct page *, balloon_scratch_page);
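
/*
 * With 4 KiB pages on a 64-bit build, for example, frame_list holds
 * PAGE_SIZE / sizeof(unsigned long) = 512 entries, so a single
 * XENMEM_populate_physmap or XENMEM_decrease_reservation batch covers
 * at most 512 * 4 KiB = 2 MiB of guest memory.
 */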

/* List of ballooned pages, threaded through the mem_map array. */
static LIST_HEAD(ballooned_pages);

/* Main work function, always executed in process context. */
static void balloon_process(struct work_struct *work);
static DECLARE_DELAYED_WORK(balloon_worker, balloon_process);

/*
 * When ballooning out (allocating memory to return to Xen) we don't really
 * want the kernel to try too hard since that can trigger the oom killer.
 */
#define GFP_BALLOON \
        (GFP_HIGHUSER | __GFP_NOWARN | __GFP_NORETRY | __GFP_NOMEMALLOC)

static void scrub_page(struct page *page)
{
#ifdef CONFIG_XEN_SCRUB_PAGES
        clear_highpage(page);
#endif
}

/* balloon_append: add the given page to the balloon. */
static void __balloon_append(struct page *page)
{
        /* Lowmem is re-populated first, so highmem pages go at list tail. */
        if (PageHighMem(page)) {
                list_add_tail(&page->lru, &ballooned_pages);
                balloon_stats.balloon_high++;
        } else {
                list_add(&page->lru, &ballooned_pages);
                balloon_stats.balloon_low++;
        }
}

static void balloon_append(struct page *page)
{
        __balloon_append(page);
        adjust_managed_page_count(page, -1);
}

/* balloon_retrieve: rescue a page from the balloon, if it is not empty. */
static struct page *balloon_retrieve(bool prefer_highmem)
{
        struct page *page;

        if (list_empty(&ballooned_pages))
                return NULL;

        if (prefer_highmem)
                page = list_entry(ballooned_pages.prev, struct page, lru);
        else
                page = list_entry(ballooned_pages.next, struct page, lru);
        list_del(&page->lru);

        if (PageHighMem(page))
                balloon_stats.balloon_high--;
        else
                balloon_stats.balloon_low--;

        adjust_managed_page_count(page, 1);

        return page;
}

static struct page *balloon_next_page(struct page *page)
{
        struct list_head *next = page->lru.next;
        if (next == &ballooned_pages)
                return NULL;
        return list_entry(next, struct page, lru);
}

static enum bp_state update_schedule(enum bp_state state)
{
        if (state == BP_DONE) {
                balloon_stats.schedule_delay = 1;
                balloon_stats.retry_count = 1;
                return BP_DONE;
        }

        ++balloon_stats.retry_count;

        if (balloon_stats.max_retry_count != RETRY_UNLIMITED &&
            balloon_stats.retry_count > balloon_stats.max_retry_count) {
                balloon_stats.schedule_delay = 1;
                balloon_stats.retry_count = 1;
                return BP_ECANCELED;
        }

        balloon_stats.schedule_delay <<= 1;

        if (balloon_stats.schedule_delay > balloon_stats.max_schedule_delay)
                balloon_stats.schedule_delay = balloon_stats.max_schedule_delay;

        return BP_EAGAIN;
}
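
/*
 * With the defaults set in balloon_init() (schedule_delay = 1,
 * max_schedule_delay = 32), consecutive failures double the retry delay,
 * so balloon_process() is re-scheduled after 2, 4, 8, 16, 32, 32, ...
 * seconds until the operation succeeds (which resets the delay to 1
 * second) or, when max_retry_count is not RETRY_UNLIMITED, until
 * retry_count exceeds it and the operation is canceled.
 */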

#ifdef CONFIG_XEN_BALLOON_MEMORY_HOTPLUG
static long current_credit(void)
{
        return balloon_stats.target_pages - balloon_stats.current_pages -
                balloon_stats.hotplug_pages;
}

static bool balloon_is_inflated(void)
{
        if (balloon_stats.balloon_low || balloon_stats.balloon_high ||
            balloon_stats.balloon_hotplug)
                return true;
        else
                return false;
}

/*
 * reserve_additional_memory() adds a memory region of size >= credit above
 * max_pfn. The new region is section-aligned and its size is rounded up to
 * a multiple of the section size. This allows optimal use of the address
 * space and establishes proper alignment the first time the function is
 * called after boot (the last section, which is not fully populated at
 * boot time, contains unused memory pages whose PG_reserved bit is not
 * set; online_pages_range() refuses to online the whole range if the
 * first page to be onlined does not have the PG_reserved bit set). The
 * real size of the added memory is established at the page-onlining stage.
 */

static enum bp_state reserve_additional_memory(long credit)
{
        int nid, rc;
        u64 hotplug_start_paddr;
        unsigned long balloon_hotplug = credit;

        hotplug_start_paddr = PFN_PHYS(SECTION_ALIGN_UP(max_pfn));
        balloon_hotplug = round_up(balloon_hotplug, PAGES_PER_SECTION);
        nid = memory_add_physaddr_to_nid(hotplug_start_paddr);

        rc = add_memory(nid, hotplug_start_paddr, balloon_hotplug << PAGE_SHIFT);

        if (rc) {
                pr_info("%s: add_memory() failed: %i\n", __func__, rc);
                return BP_EAGAIN;
        }

        balloon_hotplug -= credit;

        balloon_stats.hotplug_pages += credit;
        balloon_stats.balloon_hotplug = balloon_hotplug;

        return BP_DONE;
}

static void xen_online_page(struct page *page)
{
        __online_page_set_limits(page);

        mutex_lock(&balloon_mutex);

        __balloon_append(page);

        if (balloon_stats.hotplug_pages)
                --balloon_stats.hotplug_pages;
        else
                --balloon_stats.balloon_hotplug;

        mutex_unlock(&balloon_mutex);
}

static int xen_memory_notifier(struct notifier_block *nb, unsigned long val, void *v)
{
        if (val == MEM_ONLINE)
                schedule_delayed_work(&balloon_worker, 0);

        return NOTIFY_OK;
}

static struct notifier_block xen_memory_nb = {
        .notifier_call = xen_memory_notifier,
        .priority = 0
};
#else
static long current_credit(void)
{
        unsigned long target = balloon_stats.target_pages;

        target = min(target,
                     balloon_stats.current_pages +
                     balloon_stats.balloon_low +
                     balloon_stats.balloon_high);

        return target - balloon_stats.current_pages;
}

static bool balloon_is_inflated(void)
{
        if (balloon_stats.balloon_low || balloon_stats.balloon_high)
                return true;
        else
                return false;
}

static enum bp_state reserve_additional_memory(long credit)
{
        balloon_stats.target_pages = balloon_stats.current_pages;
        return BP_DONE;
}
#endif /* CONFIG_XEN_BALLOON_MEMORY_HOTPLUG */
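
/*
 * In the non-hotplug case, for example, if current_pages = 100000 and the
 * toolstack lowers target_pages to 98000, current_credit() returns -2000
 * and balloon_process() calls decrease_reservation(2000, GFP_BALLOON);
 * a positive credit is first clamped so that the target can never exceed
 * current_pages plus the pages already held in the balloon.
 */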

static enum bp_state increase_reservation(unsigned long nr_pages)
{
        int rc;
        unsigned long pfn, i;
        struct page *page;
        struct xen_memory_reservation reservation = {
                .address_bits = 0,
                .extent_order = 0,
                .domid        = DOMID_SELF
        };

#ifdef CONFIG_XEN_BALLOON_MEMORY_HOTPLUG
        if (!balloon_stats.balloon_low && !balloon_stats.balloon_high) {
                nr_pages = min(nr_pages, balloon_stats.balloon_hotplug);
                balloon_stats.hotplug_pages += nr_pages;
                balloon_stats.balloon_hotplug -= nr_pages;
                return BP_DONE;
        }
#endif

        if (nr_pages > ARRAY_SIZE(frame_list))
                nr_pages = ARRAY_SIZE(frame_list);

        page = list_first_entry_or_null(&ballooned_pages, struct page, lru);
        for (i = 0; i < nr_pages; i++) {
                if (!page) {
                        nr_pages = i;
                        break;
                }
                frame_list[i] = page_to_pfn(page);
                page = balloon_next_page(page);
        }

        set_xen_guest_handle(reservation.extent_start, frame_list);
        reservation.nr_extents = nr_pages;
        rc = HYPERVISOR_memory_op(XENMEM_populate_physmap, &reservation);
        if (rc <= 0)
                return BP_EAGAIN;

        for (i = 0; i < rc; i++) {
                page = balloon_retrieve(false);
                BUG_ON(page == NULL);

                pfn = page_to_pfn(page);

#ifdef CONFIG_XEN_HAVE_PVMMU
                if (!xen_feature(XENFEAT_auto_translated_physmap)) {
                        set_phys_to_machine(pfn, frame_list[i]);

                        /* Link back into the page tables if not highmem. */
                        if (!PageHighMem(page)) {
                                int ret;
                                ret = HYPERVISOR_update_va_mapping(
                                        (unsigned long)__va(pfn << PAGE_SHIFT),
                                        mfn_pte(frame_list[i], PAGE_KERNEL),
                                        0);
                                BUG_ON(ret);
                        }
                }
#endif

                /* Relinquish the page back to the allocator. */
                __free_reserved_page(page);
        }

        balloon_stats.current_pages += rc;

        return BP_DONE;
}

static enum bp_state decrease_reservation(unsigned long nr_pages, gfp_t gfp)
{
        enum bp_state state = BP_DONE;
        unsigned long pfn, i;
        struct page *page;
        int ret;
        struct xen_memory_reservation reservation = {
                .address_bits = 0,
                .extent_order = 0,
                .domid        = DOMID_SELF
        };

#ifdef CONFIG_XEN_BALLOON_MEMORY_HOTPLUG
        if (balloon_stats.hotplug_pages) {
                nr_pages = min(nr_pages, balloon_stats.hotplug_pages);
                balloon_stats.hotplug_pages -= nr_pages;
                balloon_stats.balloon_hotplug += nr_pages;
                return BP_DONE;
        }
#endif

        if (nr_pages > ARRAY_SIZE(frame_list))
                nr_pages = ARRAY_SIZE(frame_list);

        for (i = 0; i < nr_pages; i++) {
                page = alloc_page(gfp);
                if (page == NULL) {
                        nr_pages = i;
                        state = BP_EAGAIN;
                        break;
                }

                pfn = page_to_pfn(page);
                frame_list[i] = pfn_to_mfn(pfn);

                scrub_page(page);

#ifdef CONFIG_XEN_HAVE_PVMMU
                /*
                 * Ballooned out frames are effectively replaced with
                 * a scratch frame.  Ensure direct mappings and the
                 * p2m are consistent.
                 */
                if (!xen_feature(XENFEAT_auto_translated_physmap)) {
                        unsigned long p;
                        struct page *scratch_page = get_balloon_scratch_page();

                        if (!PageHighMem(page)) {
                                ret = HYPERVISOR_update_va_mapping(
                                        (unsigned long)__va(pfn << PAGE_SHIFT),
                                        pfn_pte(page_to_pfn(scratch_page),
                                                PAGE_KERNEL_RO), 0);
                                BUG_ON(ret);
                        }
                        p = page_to_pfn(scratch_page);
                        __set_phys_to_machine(pfn, pfn_to_mfn(p));

                        put_balloon_scratch_page();
                }
#endif

                balloon_append(pfn_to_page(pfn));
        }

        /* Ensure that ballooned highmem pages don't have kmaps. */
        kmap_flush_unused();
        flush_tlb_all();

        set_xen_guest_handle(reservation.extent_start, frame_list);
        reservation.nr_extents = nr_pages;
        ret = HYPERVISOR_memory_op(XENMEM_decrease_reservation, &reservation);
        BUG_ON(ret != nr_pages);

        balloon_stats.current_pages -= nr_pages;

        return state;
}

/*
 * We avoid multiple worker processes conflicting via the balloon mutex.
 * We may of course race updates of the target counts (which are protected
 * by the balloon lock), or with changes to the Xen hard limit, but we will
 * recover from these in time.
 */
static void balloon_process(struct work_struct *work)
{
        enum bp_state state = BP_DONE;
        long credit;

        mutex_lock(&balloon_mutex);

        do {
                credit = current_credit();

                if (credit > 0) {
                        if (balloon_is_inflated())
                                state = increase_reservation(credit);
                        else
                                state = reserve_additional_memory(credit);
                }

                if (credit < 0)
                        state = decrease_reservation(-credit, GFP_BALLOON);

                state = update_schedule(state);

#ifndef CONFIG_PREEMPT
                if (need_resched())
                        schedule();
#endif
        } while (credit && state == BP_DONE);

        /* Schedule more work if there is some still to be done. */
        if (state == BP_EAGAIN)
                schedule_delayed_work(&balloon_worker, balloon_stats.schedule_delay * HZ);

        mutex_unlock(&balloon_mutex);
}

struct page *get_balloon_scratch_page(void)
{
        struct page *ret = get_cpu_var(balloon_scratch_page);
        BUG_ON(ret == NULL);
        return ret;
}

void put_balloon_scratch_page(void)
{
        put_cpu_var(balloon_scratch_page);
}

/* Resets the Xen limit, sets new target, and kicks off processing. */
void balloon_set_new_target(unsigned long target)
{
        /* No need for lock. Not read-modify-write updates. */
        balloon_stats.target_pages = target;
        schedule_delayed_work(&balloon_worker, 0);
}
EXPORT_SYMBOL_GPL(balloon_set_new_target);
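
/*
 * The xenstore "memory/target" watch in xen-balloon.c is the usual caller.
 * As a sketch (the target value in xenstore is in KiB, so it is converted
 * to pages before being handed over):
 *
 *      err = xenbus_scanf(XBT_NIL, "memory", "target", "%llu", &new_target);
 *      if (err == 1)
 *              balloon_set_new_target(new_target >> (PAGE_SHIFT - 10));
 */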

/**
 * alloc_xenballooned_pages - get pages that have been ballooned out
 * @nr_pages: Number of pages to get
 * @pages: pages returned
 * @highmem: allow highmem pages
 * @return 0 on success, error otherwise
 */
int alloc_xenballooned_pages(int nr_pages, struct page **pages, bool highmem)
{
        int pgno = 0;
        struct page *page;
        mutex_lock(&balloon_mutex);
        while (pgno < nr_pages) {
                page = balloon_retrieve(highmem);
                if (page && (highmem || !PageHighMem(page))) {
                        pages[pgno++] = page;
                } else {
                        enum bp_state st;
                        if (page)
                                balloon_append(page);
                        st = decrease_reservation(nr_pages - pgno,
                                        highmem ? GFP_HIGHUSER : GFP_USER);
                        if (st != BP_DONE)
                                goto out_undo;
                }
        }
        mutex_unlock(&balloon_mutex);
        return 0;
 out_undo:
        while (pgno)
                balloon_append(pages[--pgno]);
        /* Free the memory back to the kernel soon */
        schedule_delayed_work(&balloon_worker, 0);
        mutex_unlock(&balloon_mutex);
        return -ENOMEM;
}
EXPORT_SYMBOL(alloc_xenballooned_pages);

/**
 * free_xenballooned_pages - return pages retrieved with alloc_xenballooned_pages
 * @nr_pages: Number of pages
 * @pages: pages to return
 */
void free_xenballooned_pages(int nr_pages, struct page **pages)
{
        int i;

        mutex_lock(&balloon_mutex);

        for (i = 0; i < nr_pages; i++) {
                if (pages[i])
                        balloon_append(pages[i]);
        }

        /* The balloon may be too large now. Shrink it if needed. */
        if (current_credit())
                schedule_delayed_work(&balloon_worker, 0);

        mutex_unlock(&balloon_mutex);
}
EXPORT_SYMBOL(free_xenballooned_pages);

static void __init balloon_add_region(unsigned long start_pfn,
                                      unsigned long pages)
{
        unsigned long pfn, extra_pfn_end;
        struct page *page;

        /*
         * If the amount of usable memory has been limited (e.g., with
         * the 'mem' command line parameter), don't add pages beyond
         * this limit.
         */
        extra_pfn_end = min(max_pfn, start_pfn + pages);

        for (pfn = start_pfn; pfn < extra_pfn_end; pfn++) {
                page = pfn_to_page(pfn);
                /*
                 * totalram_pages and totalhigh_pages do not include the
                 * boot-time balloon extension, so don't subtract from them.
                 */
                __balloon_append(page);
        }
}

static int balloon_cpu_notify(struct notifier_block *self,
                              unsigned long action, void *hcpu)
{
        int cpu = (long)hcpu;
        switch (action) {
        case CPU_UP_PREPARE:
                if (per_cpu(balloon_scratch_page, cpu) != NULL)
                        break;
                per_cpu(balloon_scratch_page, cpu) = alloc_page(GFP_KERNEL);
                if (per_cpu(balloon_scratch_page, cpu) == NULL) {
                        pr_warn("Failed to allocate balloon_scratch_page for cpu %d\n", cpu);
                        return NOTIFY_BAD;
                }
                break;
        default:
                break;
        }
        return NOTIFY_OK;
}

static struct notifier_block balloon_cpu_notifier = {
        .notifier_call = balloon_cpu_notify,
};

static int __init balloon_init(void)
{
        int i, cpu;

        if (!xen_domain())
                return -ENODEV;

        if (!xen_feature(XENFEAT_auto_translated_physmap)) {
                for_each_online_cpu(cpu) {
                        per_cpu(balloon_scratch_page, cpu) = alloc_page(GFP_KERNEL);
                        if (per_cpu(balloon_scratch_page, cpu) == NULL) {
                                pr_warn("Failed to allocate balloon_scratch_page for cpu %d\n", cpu);
                                return -ENOMEM;
                        }
                }
                register_cpu_notifier(&balloon_cpu_notifier);
        }

        pr_info("Initialising balloon driver\n");

        balloon_stats.current_pages = xen_pv_domain()
                ? min(xen_start_info->nr_pages - xen_released_pages, max_pfn)
                : get_num_physpages();
        balloon_stats.target_pages = balloon_stats.current_pages;
        balloon_stats.balloon_low = 0;
        balloon_stats.balloon_high = 0;

        balloon_stats.schedule_delay = 1;
        balloon_stats.max_schedule_delay = 32;
        balloon_stats.retry_count = 1;
        balloon_stats.max_retry_count = RETRY_UNLIMITED;

#ifdef CONFIG_XEN_BALLOON_MEMORY_HOTPLUG
        balloon_stats.hotplug_pages = 0;
        balloon_stats.balloon_hotplug = 0;

        set_online_page_callback(&xen_online_page);
        register_memory_notifier(&xen_memory_nb);
#endif

        /*
         * Initialize the balloon with pages from the extra memory
         * regions (see arch/x86/xen/setup.c).
         */
        for (i = 0; i < XEN_EXTRA_MEM_MAX_REGIONS; i++)
                if (xen_extra_mem[i].size)
                        balloon_add_region(PFN_UP(xen_extra_mem[i].start),
                                           PFN_DOWN(xen_extra_mem[i].size));

        return 0;
}

subsys_initcall(balloon_init);

static int __init balloon_clear(void)
{
        int cpu;

        for_each_possible_cpu(cpu)
                per_cpu(balloon_scratch_page, cpu) = NULL;

        return 0;
}
early_initcall(balloon_clear);

MODULE_LICENSE("GPL");
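
/*
 * Usage sketch for alloc_xenballooned_pages()/free_xenballooned_pages()
 * (illustrative only; backends such as the grant mapping code consume
 * the API in roughly this pattern to obtain pages whose frames Xen can
 * repopulate, e.g. with foreign grant mappings):
 *
 *      struct page *pages[16];
 *      int rc;
 *
 *      rc = alloc_xenballooned_pages(ARRAY_SIZE(pages), pages, false);
 *      if (rc)
 *              return rc;
 *
 *      ... map foreign frames into the pages ...
 *
 *      free_xenballooned_pages(ARRAY_SIZE(pages), pages);
 */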