/******************************************************************************
 * Xen balloon driver - enables returning/claiming memory to/from Xen.
 *
 * Copyright (c) 2003, B Dragovic
 * Copyright (c) 2003-2004, M Williamson, K Fraser
 * Copyright (c) 2005 Dan M. Smith, IBM Corporation
 * Copyright (c) 2010 Daniel Kiper
 *
 * Memory hotplug support was written by Daniel Kiper. Work on
 * it was sponsored by Google under Google Summer of Code 2010
 * program. Jeremy Fitzhardinge from Citrix was the mentor for
 * this project.
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License version 2
 * as published by the Free Software Foundation; or, when distributed
 * separately from the Linux kernel or incorporated into other
 * software packages, subject to the following license:
 *
 * Permission is hereby granted, free of charge, to any person obtaining a copy
 * of this source file (the "Software"), to deal in the Software without
 * restriction, including without limitation the rights to use, copy, modify,
 * merge, publish, distribute, sublicense, and/or sell copies of the Software,
 * and to permit persons to whom the Software is furnished to do so, subject to
 * the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
 * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
 * IN THE SOFTWARE.
 */
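/*
 * Overview, summarising the code below: the driver tracks its state in
 * balloon_stats, and the "credit" is target_pages - current_pages.  A
 * positive credit is satisfied by handing ballooned pages back to the
 * guest (increase_reservation()) or, when the balloon is empty, by
 * hotplugging new memory (reserve_additional_memory()).  A negative
 * credit is satisfied by allocating guest pages and returning them to
 * Xen (decrease_reservation()).  All of this runs from the
 * balloon_process() work item, serialised by balloon_mutex.
 */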

#define pr_fmt(fmt) "xen:" KBUILD_MODNAME ": " fmt

#include <linux/cpu.h>
#include <linux/kernel.h>
#include <linux/sched.h>
#include <linux/errno.h>
#include <linux/module.h>
#include <linux/mm.h>
#include <linux/bootmem.h>
#include <linux/pagemap.h>
#include <linux/highmem.h>
#include <linux/mutex.h>
#include <linux/list.h>
#include <linux/gfp.h>
#include <linux/notifier.h>
#include <linux/memory.h>
#include <linux/memory_hotplug.h>
#include <linux/percpu-defs.h>
#include <linux/slab.h>
#include <linux/sysctl.h>

#include <asm/page.h>
#include <asm/pgalloc.h>
#include <asm/pgtable.h>
#include <asm/tlb.h>

#include <asm/xen/hypervisor.h>
#include <asm/xen/hypercall.h>

#include <xen/xen.h>
#include <xen/interface/xen.h>
#include <xen/interface/memory.h>
#include <xen/balloon.h>
#include <xen/features.h>
#include <xen/page.h>

static int xen_hotplug_unpopulated;

#ifdef CONFIG_XEN_BALLOON_MEMORY_HOTPLUG

static int zero;
static int one = 1;

static struct ctl_table balloon_table[] = {
	{
		.procname	= "hotplug_unpopulated",
		.data		= &xen_hotplug_unpopulated,
		.maxlen		= sizeof(int),
		.mode		= 0644,
		.proc_handler	= proc_dointvec_minmax,
		.extra1		= &zero,
		.extra2		= &one,
	},
	{ }
};

static struct ctl_table balloon_root[] = {
	{
		.procname	= "balloon",
		.mode		= 0555,
		.child		= balloon_table,
	},
	{ }
};

static struct ctl_table xen_root[] = {
	{
		.procname	= "xen",
		.mode		= 0555,
		.child		= balloon_root,
	},
	{ }
};

#endif

/*
 * Use one extent per PAGE_SIZE to avoid breaking down the page into
 * multiple frames.
 */
#define EXTENT_ORDER (fls(XEN_PFN_PER_PAGE) - 1)

/*
 * balloon_process() state:
 *
 * BP_DONE: done or nothing to do,
 * BP_WAIT: wait to be rescheduled,
 * BP_EAGAIN: error, go to sleep,
 * BP_ECANCELED: error, balloon operation canceled.
 */

enum bp_state {
	BP_DONE,
	BP_WAIT,
	BP_EAGAIN,
	BP_ECANCELED
};

static DEFINE_MUTEX(balloon_mutex);

struct balloon_stats balloon_stats;
EXPORT_SYMBOL_GPL(balloon_stats);

/* We increase/decrease in batches which fit in a page */
static xen_pfn_t frame_list[PAGE_SIZE / sizeof(xen_pfn_t)];

/* List of ballooned pages, threaded through the mem_map array. */
static LIST_HEAD(ballooned_pages);
static DECLARE_WAIT_QUEUE_HEAD(balloon_wq);

/* Main work function, always executed in process context. */
static void balloon_process(struct work_struct *work);
static DECLARE_DELAYED_WORK(balloon_worker, balloon_process);

/*
 * When ballooning out (allocating memory to return to Xen) we don't really
 * want the kernel to try too hard since that can trigger the oom killer.
 */
#define GFP_BALLOON \
	(GFP_HIGHUSER | __GFP_NOWARN | __GFP_NORETRY | __GFP_NOMEMALLOC)

static void scrub_page(struct page *page)
{
#ifdef CONFIG_XEN_SCRUB_PAGES
	clear_highpage(page);
#endif
}
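/*
 * The helpers below manage the balloon as a list of struct pages threaded
 * through page->lru.  Lowmem pages are kept at the head and highmem pages
 * at the tail, so balloon_retrieve() hands lowmem back first; lowmem is
 * the scarcer resource on highmem configurations.
 */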
/* balloon_append: add the given page to the balloon. */
static void __balloon_append(struct page *page)
{
	/* Lowmem is re-populated first, so highmem pages go at list tail. */
	if (PageHighMem(page)) {
		list_add_tail(&page->lru, &ballooned_pages);
		balloon_stats.balloon_high++;
	} else {
		list_add(&page->lru, &ballooned_pages);
		balloon_stats.balloon_low++;
	}
	wake_up(&balloon_wq);
}

static void balloon_append(struct page *page)
{
	__balloon_append(page);
	adjust_managed_page_count(page, -1);
}

/* balloon_retrieve: rescue a page from the balloon, if it is not empty. */
static struct page *balloon_retrieve(bool require_lowmem)
{
	struct page *page;

	if (list_empty(&ballooned_pages))
		return NULL;

	page = list_entry(ballooned_pages.next, struct page, lru);
	if (require_lowmem && PageHighMem(page))
		return NULL;
	list_del(&page->lru);

	if (PageHighMem(page))
		balloon_stats.balloon_high--;
	else
		balloon_stats.balloon_low--;

	adjust_managed_page_count(page, 1);

	return page;
}

static struct page *balloon_next_page(struct page *page)
{
	struct list_head *next = page->lru.next;

	if (next == &ballooned_pages)
		return NULL;
	return list_entry(next, struct page, lru);
}

static enum bp_state update_schedule(enum bp_state state)
{
	if (state == BP_WAIT)
		return BP_WAIT;

	if (state == BP_ECANCELED)
		return BP_ECANCELED;

	if (state == BP_DONE) {
		balloon_stats.schedule_delay = 1;
		balloon_stats.retry_count = 1;
		return BP_DONE;
	}

	++balloon_stats.retry_count;

	if (balloon_stats.max_retry_count != RETRY_UNLIMITED &&
	    balloon_stats.retry_count > balloon_stats.max_retry_count) {
		balloon_stats.schedule_delay = 1;
		balloon_stats.retry_count = 1;
		return BP_ECANCELED;
	}

	balloon_stats.schedule_delay <<= 1;

	if (balloon_stats.schedule_delay > balloon_stats.max_schedule_delay)
		balloon_stats.schedule_delay = balloon_stats.max_schedule_delay;

	return BP_EAGAIN;
}

#ifdef CONFIG_XEN_BALLOON_MEMORY_HOTPLUG
static struct resource *additional_memory_resource(phys_addr_t size)
{
	struct resource *res;
	int ret;

	res = kzalloc(sizeof(*res), GFP_KERNEL);
	if (!res)
		return NULL;

	res->name = "System RAM";
	res->flags = IORESOURCE_MEM | IORESOURCE_BUSY;

	ret = allocate_resource(&iomem_resource, res,
				size, 0, -1,
				PAGES_PER_SECTION * PAGE_SIZE, NULL, NULL);
	if (ret < 0) {
		pr_err("Cannot allocate new System RAM resource\n");
		kfree(res);
		return NULL;
	}

	return res;
}

static void release_memory_resource(struct resource *resource)
{
	if (!resource)
		return;

	/*
	 * No need to reset region to identity mapped since we now
	 * know that no I/O can be in this region.
	 */
	release_resource(resource);
	kfree(resource);
}
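/*
 * reserve_additional_memory() below sizes hotplug requests in whole
 * sections, matching the granularity of add_memory_resource().  As an
 * illustration: on x86_64, where a memory section is 128 MiB
 * (PAGES_PER_SECTION == 32768 with 4 KiB pages), a credit of a single
 * page is still rounded up to one full 32768-page section.
 */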
static enum bp_state reserve_additional_memory(void)
{
	long credit;
	struct resource *resource;
	int nid, rc;
	unsigned long balloon_hotplug;

	credit = balloon_stats.target_pages + balloon_stats.target_unpopulated
		- balloon_stats.total_pages;

	/*
	 * Already hotplugged enough pages?  Wait for them to be
	 * onlined.
	 */
	if (credit <= 0)
		return BP_WAIT;

	balloon_hotplug = round_up(credit, PAGES_PER_SECTION);

	resource = additional_memory_resource(balloon_hotplug * PAGE_SIZE);
	if (!resource)
		goto err;

	nid = memory_add_physaddr_to_nid(resource->start);

#ifdef CONFIG_XEN_HAVE_PVMMU
	/*
	 * We don't support PV MMU when Linux and Xen are using
	 * different page granularities.
	 */
	BUILD_BUG_ON(XEN_PAGE_SIZE != PAGE_SIZE);

	/*
	 * add_memory() will build page tables for the new memory, so
	 * the p2m must contain invalid entries so that the correct
	 * non-present PTEs will be written.
	 *
	 * If a failure occurs, the original (identity) p2m entries
	 * are not restored since this region is now known not to
	 * conflict with any devices.
	 */
	if (!xen_feature(XENFEAT_auto_translated_physmap)) {
		unsigned long pfn, i;

		pfn = PFN_DOWN(resource->start);
		for (i = 0; i < balloon_hotplug; i++) {
			if (!set_phys_to_machine(pfn + i, INVALID_P2M_ENTRY)) {
				pr_warn("set_phys_to_machine() failed, no memory added\n");
				goto err;
			}
		}
	}
#endif

	rc = add_memory_resource(nid, resource);
	if (rc) {
		pr_warn("Cannot add additional memory (%i)\n", rc);
		goto err;
	}

	balloon_stats.total_pages += balloon_hotplug;

	return BP_WAIT;

 err:
	release_memory_resource(resource);
	return BP_ECANCELED;
}

static void xen_online_page(struct page *page)
{
	__online_page_set_limits(page);

	mutex_lock(&balloon_mutex);

	__balloon_append(page);

	mutex_unlock(&balloon_mutex);
}

static int xen_memory_notifier(struct notifier_block *nb, unsigned long val, void *v)
{
	if (val == MEM_ONLINE)
		schedule_delayed_work(&balloon_worker, 0);

	return NOTIFY_OK;
}

static struct notifier_block xen_memory_nb = {
	.notifier_call = xen_memory_notifier,
	.priority = 0
};
#else
static enum bp_state reserve_additional_memory(void)
{
	balloon_stats.target_pages = balloon_stats.current_pages;
	return BP_ECANCELED;
}
#endif /* CONFIG_XEN_BALLOON_MEMORY_HOTPLUG */

static long current_credit(void)
{
	return balloon_stats.target_pages - balloon_stats.current_pages;
}

static bool balloon_is_inflated(void)
{
	return balloon_stats.balloon_low || balloon_stats.balloon_high;
}
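/*
 * increase_reservation() and decrease_reservation() below both work in
 * batches bounded by frame_list, i.e. at most PAGE_SIZE / sizeof(xen_pfn_t)
 * pages per pass (512 pages, or 2 MiB, on a 64-bit build with 4 KiB pages);
 * balloon_process() keeps calling them until the whole credit is consumed.
 */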
static enum bp_state increase_reservation(unsigned long nr_pages)
{
	int rc;
	unsigned long i;
	struct page *page;
	struct xen_memory_reservation reservation = {
		.address_bits = 0,
		.extent_order = EXTENT_ORDER,
		.domid        = DOMID_SELF
	};

	if (nr_pages > ARRAY_SIZE(frame_list))
		nr_pages = ARRAY_SIZE(frame_list);

	page = list_first_entry_or_null(&ballooned_pages, struct page, lru);
	for (i = 0; i < nr_pages; i++) {
		if (!page) {
			nr_pages = i;
			break;
		}

		/*
		 * XENMEM_populate_physmap requires a PFN based on Xen
		 * granularity.
		 */
		frame_list[i] = page_to_xen_pfn(page);
		page = balloon_next_page(page);
	}

	set_xen_guest_handle(reservation.extent_start, frame_list);
	reservation.nr_extents = nr_pages;
	rc = HYPERVISOR_memory_op(XENMEM_populate_physmap, &reservation);
	if (rc <= 0)
		return BP_EAGAIN;

	for (i = 0; i < rc; i++) {
		page = balloon_retrieve(false);
		BUG_ON(page == NULL);

#ifdef CONFIG_XEN_HAVE_PVMMU
		/*
		 * We don't support PV MMU when Linux and Xen are using
		 * different page granularities.
		 */
		BUILD_BUG_ON(XEN_PAGE_SIZE != PAGE_SIZE);

		if (!xen_feature(XENFEAT_auto_translated_physmap)) {
			unsigned long pfn = page_to_pfn(page);

			set_phys_to_machine(pfn, frame_list[i]);

			/* Link back into the page tables if not highmem. */
			if (!PageHighMem(page)) {
				int ret;

				ret = HYPERVISOR_update_va_mapping(
						(unsigned long)__va(pfn << PAGE_SHIFT),
						mfn_pte(frame_list[i], PAGE_KERNEL),
						0);
				BUG_ON(ret);
			}
		}
#endif

		/* Relinquish the page back to the allocator. */
		__free_reserved_page(page);
	}

	balloon_stats.current_pages += rc;

	return BP_DONE;
}

static enum bp_state decrease_reservation(unsigned long nr_pages, gfp_t gfp)
{
	enum bp_state state = BP_DONE;
	unsigned long i;
	struct page *page, *tmp;
	int ret;
	struct xen_memory_reservation reservation = {
		.address_bits = 0,
		.extent_order = EXTENT_ORDER,
		.domid        = DOMID_SELF
	};
	LIST_HEAD(pages);

	if (nr_pages > ARRAY_SIZE(frame_list))
		nr_pages = ARRAY_SIZE(frame_list);

	for (i = 0; i < nr_pages; i++) {
		page = alloc_page(gfp);
		if (page == NULL) {
			nr_pages = i;
			state = BP_EAGAIN;
			break;
		}
		scrub_page(page);
		list_add(&page->lru, &pages);
	}

	/*
	 * Ensure that ballooned highmem pages don't have kmaps.
	 *
	 * Do this before changing the p2m as kmap_flush_unused()
	 * reads PTEs to obtain pages (and hence needs the original
	 * p2m entry).
	 */
	kmap_flush_unused();

	/*
	 * Set up the frame, update the direct mapping, invalidate the
	 * p2m entry, and add the page to the balloon.
	 */
	i = 0;
	list_for_each_entry_safe(page, tmp, &pages, lru) {
		/* XENMEM_decrease_reservation requires a GFN */
		frame_list[i++] = xen_page_to_gfn(page);

#ifdef CONFIG_XEN_HAVE_PVMMU
		/*
		 * We don't support PV MMU when Linux and Xen are using
		 * different page granularities.
		 */
		BUILD_BUG_ON(XEN_PAGE_SIZE != PAGE_SIZE);

		if (!xen_feature(XENFEAT_auto_translated_physmap)) {
			unsigned long pfn = page_to_pfn(page);

			if (!PageHighMem(page)) {
				ret = HYPERVISOR_update_va_mapping(
						(unsigned long)__va(pfn << PAGE_SHIFT),
						__pte_ma(0), 0);
				BUG_ON(ret);
			}
			__set_phys_to_machine(pfn, INVALID_P2M_ENTRY);
		}
#endif
		list_del(&page->lru);

		balloon_append(page);
	}

	flush_tlb_all();

	set_xen_guest_handle(reservation.extent_start, frame_list);
	reservation.nr_extents = nr_pages;
	ret = HYPERVISOR_memory_op(XENMEM_decrease_reservation, &reservation);
	BUG_ON(ret != nr_pages);

	balloon_stats.current_pages -= nr_pages;

	return state;
}
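/*
 * Error-handling note for the two reservation functions above: on failure
 * update_schedule() doubles balloon_stats.schedule_delay before
 * balloon_process() re-arms the worker, so with the defaults set in
 * balloon_init() the retries land after 2 s, 4 s, 8 s, 16 s and then every
 * 32 s (max_schedule_delay) until an attempt succeeds.
 */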
/*
 * As this is a work item it is guaranteed to run as a single instance only.
 * We may of course race updates of the target counts (which are protected
 * by the balloon lock), or with changes to the Xen hard limit, but we will
 * recover from these in time.
 */
static void balloon_process(struct work_struct *work)
{
	enum bp_state state = BP_DONE;
	long credit;

	do {
		mutex_lock(&balloon_mutex);

		credit = current_credit();

		if (credit > 0) {
			if (balloon_is_inflated())
				state = increase_reservation(credit);
			else
				state = reserve_additional_memory();
		}

		if (credit < 0)
			state = decrease_reservation(-credit, GFP_BALLOON);

		state = update_schedule(state);

		mutex_unlock(&balloon_mutex);

		cond_resched();

	} while (credit && state == BP_DONE);

	/* Schedule more work if there is some still to be done. */
	if (state == BP_EAGAIN)
		schedule_delayed_work(&balloon_worker, balloon_stats.schedule_delay * HZ);
}

/* Resets the Xen limit, sets new target, and kicks off processing. */
void balloon_set_new_target(unsigned long target)
{
	/* No need for lock. Not read-modify-write updates. */
	balloon_stats.target_pages = target;
	schedule_delayed_work(&balloon_worker, 0);
}
EXPORT_SYMBOL_GPL(balloon_set_new_target);

static int add_ballooned_pages(int nr_pages)
{
	enum bp_state st;

	if (xen_hotplug_unpopulated) {
		st = reserve_additional_memory();
		if (st != BP_ECANCELED) {
			mutex_unlock(&balloon_mutex);
			wait_event(balloon_wq,
				   !list_empty(&ballooned_pages));
			mutex_lock(&balloon_mutex);
			return 0;
		}
	}

	st = decrease_reservation(nr_pages, GFP_USER);
	if (st != BP_DONE)
		return -ENOMEM;

	return 0;
}

/**
 * alloc_xenballooned_pages - get pages that have been ballooned out
 * @nr_pages: Number of pages to get
 * @pages: pages returned
 * @return 0 on success, error otherwise
 */
int alloc_xenballooned_pages(int nr_pages, struct page **pages)
{
	int pgno = 0;
	struct page *page;
	int ret;

	mutex_lock(&balloon_mutex);

	balloon_stats.target_unpopulated += nr_pages;

	while (pgno < nr_pages) {
		page = balloon_retrieve(true);
		if (page) {
			pages[pgno++] = page;
#ifdef CONFIG_XEN_HAVE_PVMMU
			/*
			 * We don't support PV MMU when Linux and Xen are
			 * using different page granularities.
			 */
			BUILD_BUG_ON(XEN_PAGE_SIZE != PAGE_SIZE);

			ret = xen_alloc_p2m_entry(page_to_pfn(page));
			if (ret < 0)
				goto out_undo;
#endif
		} else {
			ret = add_ballooned_pages(nr_pages - pgno);
			if (ret < 0)
				goto out_undo;
		}
	}
	mutex_unlock(&balloon_mutex);
	return 0;
 out_undo:
	mutex_unlock(&balloon_mutex);
	free_xenballooned_pages(pgno, pages);
	return ret;
}
EXPORT_SYMBOL(alloc_xenballooned_pages);
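/*
 * Illustrative pairing of the two calls (not from the original source; the
 * count of 16 and the grant-mapping use case are just an example of how a
 * backend driver might obtain frames):
 *
 *	struct page *pages[16];
 *
 *	if (alloc_xenballooned_pages(16, pages) == 0) {
 *		... map foreign/grant frames into pages[] ...
 *		free_xenballooned_pages(16, pages);
 *	}
 */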
/**
 * free_xenballooned_pages - return pages retrieved with alloc_xenballooned_pages
 * @nr_pages: Number of pages
 * @pages: pages to return
 */
void free_xenballooned_pages(int nr_pages, struct page **pages)
{
	int i;

	mutex_lock(&balloon_mutex);

	for (i = 0; i < nr_pages; i++) {
		if (pages[i])
			balloon_append(pages[i]);
	}

	balloon_stats.target_unpopulated -= nr_pages;

	/* The balloon may be too large now. Shrink it if needed. */
	if (current_credit())
		schedule_delayed_work(&balloon_worker, 0);

	mutex_unlock(&balloon_mutex);
}
EXPORT_SYMBOL(free_xenballooned_pages);

static void __init balloon_add_region(unsigned long start_pfn,
				      unsigned long pages)
{
	unsigned long pfn, extra_pfn_end;
	struct page *page;

	/*
	 * If the amount of usable memory has been limited (e.g., with
	 * the 'mem' command line parameter), don't add pages beyond
	 * this limit.
	 */
	extra_pfn_end = min(max_pfn, start_pfn + pages);

	for (pfn = start_pfn; pfn < extra_pfn_end; pfn++) {
		page = pfn_to_page(pfn);
		/*
		 * totalram_pages and totalhigh_pages do not include the
		 * boot-time balloon extension, so don't subtract from them.
		 */
		__balloon_append(page);
	}

	balloon_stats.total_pages += extra_pfn_end - start_pfn;
}

static int __init balloon_init(void)
{
	int i;

	if (!xen_domain())
		return -ENODEV;

	pr_info("Initialising balloon driver\n");

	balloon_stats.current_pages = xen_pv_domain()
		? min(xen_start_info->nr_pages - xen_released_pages, max_pfn)
		: get_num_physpages();
	balloon_stats.target_pages  = balloon_stats.current_pages;
	balloon_stats.balloon_low   = 0;
	balloon_stats.balloon_high  = 0;
	balloon_stats.total_pages   = balloon_stats.current_pages;

	balloon_stats.schedule_delay = 1;
	balloon_stats.max_schedule_delay = 32;
	balloon_stats.retry_count = 1;
	balloon_stats.max_retry_count = RETRY_UNLIMITED;

#ifdef CONFIG_XEN_BALLOON_MEMORY_HOTPLUG
	set_online_page_callback(&xen_online_page);
	register_memory_notifier(&xen_memory_nb);
	register_sysctl_table(xen_root);
#endif

	/*
	 * Initialize the balloon with pages from the extra memory
	 * regions (see arch/x86/xen/setup.c).
	 */
	for (i = 0; i < XEN_EXTRA_MEM_MAX_REGIONS; i++)
		if (xen_extra_mem[i].n_pfns)
			balloon_add_region(xen_extra_mem[i].start_pfn,
					   xen_extra_mem[i].n_pfns);

	return 0;
}

subsys_initcall(balloon_init);

MODULE_LICENSE("GPL");