/*
 * linux/kernel/power/snapshot.c
 *
 * This file provides system snapshot/restore functionality for swsusp.
 *
 * Copyright (C) 1998-2005 Pavel Machek <pavel@ucw.cz>
 * Copyright (C) 2006 Rafael J. Wysocki <rjw@sisk.pl>
 *
 * This file is released under the GPLv2.
 *
 */

#define pr_fmt(fmt) "PM: " fmt

#include <linux/version.h>
#include <linux/module.h>
#include <linux/mm.h>
#include <linux/suspend.h>
#include <linux/delay.h>
#include <linux/bitops.h>
#include <linux/spinlock.h>
#include <linux/kernel.h>
#include <linux/pm.h>
#include <linux/device.h>
#include <linux/init.h>
#include <linux/memblock.h>
#include <linux/nmi.h>
#include <linux/syscalls.h>
#include <linux/console.h>
#include <linux/highmem.h>
#include <linux/list.h>
#include <linux/slab.h>
#include <linux/compiler.h>
#include <linux/ktime.h>
#include <linux/set_memory.h>

#include <linux/uaccess.h>
#include <asm/mmu_context.h>
#include <asm/pgtable.h>
#include <asm/tlbflush.h>
#include <asm/io.h>

#include "power.h"

#if defined(CONFIG_STRICT_KERNEL_RWX) && defined(CONFIG_ARCH_HAS_SET_MEMORY)
static bool hibernate_restore_protection;
static bool hibernate_restore_protection_active;

void enable_restore_image_protection(void)
{
        hibernate_restore_protection = true;
}

static inline void hibernate_restore_protection_begin(void)
{
        hibernate_restore_protection_active = hibernate_restore_protection;
}

static inline void hibernate_restore_protection_end(void)
{
        hibernate_restore_protection_active = false;
}

static inline void hibernate_restore_protect_page(void *page_address)
{
        if (hibernate_restore_protection_active)
                set_memory_ro((unsigned long)page_address, 1);
}

static inline void hibernate_restore_unprotect_page(void *page_address)
{
        if (hibernate_restore_protection_active)
                set_memory_rw((unsigned long)page_address, 1);
}
#else
static inline void hibernate_restore_protection_begin(void) {}
static inline void hibernate_restore_protection_end(void) {}
static inline void hibernate_restore_protect_page(void *page_address) {}
static inline void hibernate_restore_unprotect_page(void *page_address) {}
#endif /* CONFIG_STRICT_KERNEL_RWX && CONFIG_ARCH_HAS_SET_MEMORY */

static int swsusp_page_is_free(struct page *);
static void swsusp_set_page_forbidden(struct page *);
static void swsusp_unset_page_forbidden(struct page *);

/*
 * Number of bytes to reserve for memory allocations made by device drivers
 * from their ->freeze() and ->freeze_noirq() callbacks so that they don't
 * cause image creation to fail (tunable via /sys/power/reserved_size).
 */
unsigned long reserved_size;

void __init hibernate_reserved_size_init(void)
{
        reserved_size = SPARE_PAGES * PAGE_SIZE;
}

/*
 * Preferred image size in bytes (tunable via /sys/power/image_size).
 * When it is set to N, swsusp will do its best to ensure the image
 * size will not exceed N bytes, but if that is impossible, it will
 * try to create the smallest image possible.
 */
unsigned long image_size;

void __init hibernate_image_size_init(void)
{
        image_size = ((totalram_pages() * 2) / 5) * PAGE_SIZE;
}
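/*
 * Illustrative arithmetic (editor's example, not part of the original file):
 * on a hypothetical machine with 1 GiB of RAM and 4 KiB pages,
 * totalram_pages() == 262144, so the default image_size is
 *
 *      ((262144 * 2) / 5) * 4096 == 104857 * 4096 bytes, roughly 410 MiB,
 *
 * i.e. about two fifths of RAM.
 */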
/*
 * List of PBEs needed for restoring the pages that were allocated before
 * the suspend and included in the suspend image, but have also been
 * allocated by the "resume" kernel, so their contents cannot be written
 * directly to their "original" page frames.
 */
struct pbe *restore_pblist;

/* struct linked_page is used to build chains of pages */

#define LINKED_PAGE_DATA_SIZE   (PAGE_SIZE - sizeof(void *))

struct linked_page {
        struct linked_page *next;
        char data[LINKED_PAGE_DATA_SIZE];
} __packed;

/*
 * List of "safe" pages (i.e. pages that were not used by the image kernel
 * before hibernation) that may be used as temporary storage for image kernel
 * memory contents.
 */
static struct linked_page *safe_pages_list;

/* Pointer to an auxiliary buffer (1 page) */
static void *buffer;

#define PG_ANY          0
#define PG_SAFE         1
#define PG_UNSAFE_CLEAR 1
#define PG_UNSAFE_KEEP  0

static unsigned int allocated_unsafe_pages;

/**
 * get_image_page - Allocate a page for a hibernation image.
 * @gfp_mask: GFP mask for the allocation.
 * @safe_needed: Get pages that were not used before hibernation (restore only).
 *
 * During image restoration, for storing the PBE list and the image data, we can
 * only use memory pages that do not conflict with the pages used before
 * hibernation.  The "unsafe" pages have PageNosaveFree set and we count them
 * using allocated_unsafe_pages.
 *
 * Each allocated image page is marked as PageNosave and PageNosaveFree so that
 * swsusp_free() can release it.
 */
static void *get_image_page(gfp_t gfp_mask, int safe_needed)
{
        void *res;

        res = (void *)get_zeroed_page(gfp_mask);
        if (safe_needed)
                while (res && swsusp_page_is_free(virt_to_page(res))) {
                        /* The page is unsafe, mark it for swsusp_free() */
                        swsusp_set_page_forbidden(virt_to_page(res));
                        allocated_unsafe_pages++;
                        res = (void *)get_zeroed_page(gfp_mask);
                }
        if (res) {
                swsusp_set_page_forbidden(virt_to_page(res));
                swsusp_set_page_free(virt_to_page(res));
        }
        return res;
}

static void *__get_safe_page(gfp_t gfp_mask)
{
        if (safe_pages_list) {
                void *ret = safe_pages_list;

                safe_pages_list = safe_pages_list->next;
                memset(ret, 0, PAGE_SIZE);
                return ret;
        }
        return get_image_page(gfp_mask, PG_SAFE);
}

unsigned long get_safe_page(gfp_t gfp_mask)
{
        return (unsigned long)__get_safe_page(gfp_mask);
}

static struct page *alloc_image_page(gfp_t gfp_mask)
{
        struct page *page;

        page = alloc_page(gfp_mask);
        if (page) {
                swsusp_set_page_forbidden(page);
                swsusp_set_page_free(page);
        }
        return page;
}

static void recycle_safe_page(void *page_address)
{
        struct linked_page *lp = page_address;

        lp->next = safe_pages_list;
        safe_pages_list = lp;
}

/**
 * free_image_page - Free a page allocated for hibernation image.
 * @addr: Address of the page to free.
 * @clear_nosave_free: If set, clear the PageNosaveFree bit for the page.
 *
 * The page to free should have been allocated by get_image_page() (page flags
 * set by it are affected).
 */
static inline void free_image_page(void *addr, int clear_nosave_free)
{
        struct page *page;

        BUG_ON(!virt_addr_valid(addr));

        page = virt_to_page(addr);

        swsusp_unset_page_forbidden(page);
        if (clear_nosave_free)
                swsusp_unset_page_free(page);

        __free_page(page);
}

static inline void free_list_of_pages(struct linked_page *list,
                                      int clear_page_nosave)
{
        while (list) {
                struct linked_page *lp = list->next;

                free_image_page(list, clear_page_nosave);
                list = lp;
        }
}
/*
 * struct chain_allocator is used for allocating small objects out of
 * a linked list of pages called 'the chain'.
 *
 * The chain grows each time there is no room for a new object in
 * the current page.  The allocated objects cannot be freed individually.
 * It is only possible to free them all at once, by freeing the entire
 * chain.
 *
 * NOTE: The chain allocator may be inefficient if the allocated objects
 * are not much smaller than PAGE_SIZE.
 */
struct chain_allocator {
        struct linked_page *chain;      /* the chain */
        unsigned int used_space;        /* total size of objects allocated out
                                           of the current page */
        gfp_t gfp_mask;         /* mask for allocating pages */
        int safe_needed;        /* if set, only "safe" pages are allocated */
};

static void chain_init(struct chain_allocator *ca, gfp_t gfp_mask,
                       int safe_needed)
{
        ca->chain = NULL;
        ca->used_space = LINKED_PAGE_DATA_SIZE;
        ca->gfp_mask = gfp_mask;
        ca->safe_needed = safe_needed;
}

static void *chain_alloc(struct chain_allocator *ca, unsigned int size)
{
        void *ret;

        if (LINKED_PAGE_DATA_SIZE - ca->used_space < size) {
                struct linked_page *lp;

                lp = ca->safe_needed ? __get_safe_page(ca->gfp_mask) :
                                        get_image_page(ca->gfp_mask, PG_ANY);
                if (!lp)
                        return NULL;

                lp->next = ca->chain;
                ca->chain = lp;
                ca->used_space = 0;
        }
        ret = ca->chain->data + ca->used_space;
        ca->used_space += size;
        return ret;
}
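/*
 * Illustrative sketch (editor's example, not part of the original file):
 * the typical usage pattern of the chain allocator.  Many small objects
 * are carved out of a growing chain of pages and later released all at
 * once by freeing the whole chain.  The function itself is hypothetical.
 */
static void __maybe_unused chain_allocator_demo(void)
{
        struct chain_allocator ca;
        unsigned int i;

        chain_init(&ca, GFP_KERNEL, PG_ANY);

        for (i = 0; i < 128; i++) {
                struct pbe *p = chain_alloc(&ca, sizeof(struct pbe));

                if (!p)
                        break;          /* out of memory */
                p->orig_address = NULL; /* use the object... */
        }

        /* Objects cannot be freed individually, only the entire chain. */
        free_list_of_pages(ca.chain, PG_UNSAFE_KEEP);
}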
/**
 * Data types related to memory bitmaps.
 *
 * A memory bitmap is a structure consisting of many linked lists of
 * objects.  The main list's elements are of type struct mem_zone_bm_rtree
 * and each of them corresponds to one populated memory zone; it records
 * the PFNs that correspond to the start and end of the represented zone.
 *
 * struct memory_bitmap contains the list of zone bitmap objects, a
 * struct bm_position used for browsing the bitmap, and a pointer to the
 * list of pages used for allocating all of the zone bitmap objects and
 * bitmap block objects.
 *
 * NOTE: It has to be possible to lay out the bitmap in memory
 * using only allocations of order 0.  Additionally, the bitmap is
 * designed to work with an arbitrary number of zones (this is over the
 * top for now, but let's avoid making unnecessary assumptions ;-).
 *
 * The memory bitmap is organized as a radix tree to guarantee fast random
 * access to the bits.  There is one radix tree for each zone (as returned
 * from create_mem_extents).
 *
 * One radix tree is represented by one struct mem_zone_bm_rtree.  There are
 * two linked lists for the nodes of the tree, one for the inner nodes and
 * one for the leaf nodes.  The linked leaf nodes are used for fast linear
 * access of the memory bitmap.
 *
 * struct rtree_node represents one node of the radix tree.
 */

#define BM_END_OF_MAP   (~0UL)

#define BM_BITS_PER_BLOCK       (PAGE_SIZE * BITS_PER_BYTE)
#define BM_BLOCK_SHIFT          (PAGE_SHIFT + 3)
#define BM_BLOCK_MASK           ((1UL << BM_BLOCK_SHIFT) - 1)

/*
 * struct rtree_node is a wrapper struct to link the nodes
 * of the rtree together for easy linear iteration over
 * bits and easy freeing
 */
struct rtree_node {
        struct list_head list;
        unsigned long *data;
};

/*
 * struct mem_zone_bm_rtree represents a bitmap used for one
 * populated memory zone.
 */
struct mem_zone_bm_rtree {
        struct list_head list;          /* Link Zones together         */
        struct list_head nodes;         /* Radix Tree inner nodes      */
        struct list_head leaves;        /* Radix Tree leaves           */
        unsigned long start_pfn;        /* Zone start page frame       */
        unsigned long end_pfn;          /* Zone end page frame + 1     */
        struct rtree_node *rtree;       /* Radix Tree Root             */
        int levels;                     /* Number of Radix Tree Levels */
        unsigned int blocks;            /* Number of Bitmap Blocks     */
};

/* struct bm_position is used for browsing memory bitmaps */

struct bm_position {
        struct mem_zone_bm_rtree *zone;
        struct rtree_node *node;
        unsigned long node_pfn;
        int node_bit;
};

struct memory_bitmap {
        struct list_head zones;
        struct linked_page *p_list;     /* list of pages used to store zone
                                           bitmap objects and bitmap block
                                           objects */
        struct bm_position cur;         /* most recently used bit position */
};

/* Functions that operate on memory bitmaps */

#define BM_ENTRIES_PER_LEVEL    (PAGE_SIZE / sizeof(unsigned long))
#if BITS_PER_LONG == 32
#define BM_RTREE_LEVEL_SHIFT    (PAGE_SHIFT - 2)
#else
#define BM_RTREE_LEVEL_SHIFT    (PAGE_SHIFT - 3)
#endif
#define BM_RTREE_LEVEL_MASK     ((1UL << BM_RTREE_LEVEL_SHIFT) - 1)
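/*
 * Illustrative arithmetic (editor's example, not part of the original file),
 * assuming 4 KiB pages and 64-bit longs:
 *
 *      BM_BITS_PER_BLOCK    = 4096 * 8 = 32768 bits per bitmap page,
 *      BM_BLOCK_SHIFT       = 12 + 3   = 15,
 *      BM_ENTRIES_PER_LEVEL = 4096 / 8 = 512 pointers per inner node,
 *      BM_RTREE_LEVEL_SHIFT = 12 - 3   = 9.
 *
 * One leaf node therefore covers 32768 PFNs (128 MiB of memory), and each
 * inner level multiplies the coverage by 512 (64 GiB for a two-level
 * tree), so even very large zones need only a few levels.
 */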
/**
 * alloc_rtree_node - Allocate a new node and add it to the radix tree.
 *
 * This function is used to allocate inner nodes as well as the
 * leaf nodes of the radix tree.  It also adds the node to the
 * corresponding linked list passed in by the *list parameter.
 */
static struct rtree_node *alloc_rtree_node(gfp_t gfp_mask, int safe_needed,
                                           struct chain_allocator *ca,
                                           struct list_head *list)
{
        struct rtree_node *node;

        node = chain_alloc(ca, sizeof(struct rtree_node));
        if (!node)
                return NULL;

        node->data = get_image_page(gfp_mask, safe_needed);
        if (!node->data)
                return NULL;

        list_add_tail(&node->list, list);

        return node;
}

/**
 * add_rtree_block - Add a new leaf node to the radix tree.
 *
 * The leaf nodes need to be allocated in ascending block order to keep
 * the linked list of leaves sorted.  This is guaranteed by the
 * zone->blocks counter.
 */
static int add_rtree_block(struct mem_zone_bm_rtree *zone, gfp_t gfp_mask,
                           int safe_needed, struct chain_allocator *ca)
{
        struct rtree_node *node, *block, **dst;
        unsigned int levels_needed, block_nr;
        int i;

        block_nr = zone->blocks;
        levels_needed = 0;

        /* How many levels do we need for this block nr? */
        while (block_nr) {
                levels_needed += 1;
                block_nr >>= BM_RTREE_LEVEL_SHIFT;
        }

        /* Make sure the rtree has enough levels */
        for (i = zone->levels; i < levels_needed; i++) {
                node = alloc_rtree_node(gfp_mask, safe_needed, ca,
                                        &zone->nodes);
                if (!node)
                        return -ENOMEM;

                node->data[0] = (unsigned long)zone->rtree;
                zone->rtree = node;
                zone->levels += 1;
        }

        /* Allocate new block */
        block = alloc_rtree_node(gfp_mask, safe_needed, ca, &zone->leaves);
        if (!block)
                return -ENOMEM;

        /* Now walk the rtree to insert the block */
        node = zone->rtree;
        dst = &zone->rtree;
        block_nr = zone->blocks;
        for (i = zone->levels; i > 0; i--) {
                int index;

                if (!node) {
                        node = alloc_rtree_node(gfp_mask, safe_needed, ca,
                                                &zone->nodes);
                        if (!node)
                                return -ENOMEM;
                        *dst = node;
                }

                index = block_nr >> ((i - 1) * BM_RTREE_LEVEL_SHIFT);
                index &= BM_RTREE_LEVEL_MASK;
                dst = (struct rtree_node **)&((*dst)->data[index]);
                node = *dst;
        }

        zone->blocks += 1;
        *dst = block;

        return 0;
}
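/*
 * Illustrative walk (editor's example, not part of the original file),
 * assuming BM_RTREE_LEVEL_SHIFT == 9.  Inserting block_nr 0x25 needs a
 * single level (any block_nr < 512 does), so the loop above computes
 *
 *      index = (0x25 >> 0) & BM_RTREE_LEVEL_MASK = 0x25
 *
 * and stores the new leaf in the root's slot 0x25.  Block number
 * 0x12345 (74565) needs two levels (it is >= 512 but < 512 * 512), and
 * the per-level indices are
 *
 *      level 2: (0x12345 >> 9) & 0x1ff = 0x91
 *      level 1: (0x12345 >> 0) & 0x1ff = 0x145
 */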
static void free_zone_bm_rtree(struct mem_zone_bm_rtree *zone,
                               int clear_nosave_free);

/**
 * create_zone_bm_rtree - Create a radix tree for one zone.
 *
 * Allocates the mem_zone_bm_rtree structure and initializes it.
 * This function also allocates and builds the radix tree for the
 * zone.
 */
static struct mem_zone_bm_rtree *create_zone_bm_rtree(gfp_t gfp_mask,
                                                      int safe_needed,
                                                      struct chain_allocator *ca,
                                                      unsigned long start,
                                                      unsigned long end)
{
        struct mem_zone_bm_rtree *zone;
        unsigned int i, nr_blocks;
        unsigned long pages;

        pages = end - start;
        zone = chain_alloc(ca, sizeof(struct mem_zone_bm_rtree));
        if (!zone)
                return NULL;

        INIT_LIST_HEAD(&zone->nodes);
        INIT_LIST_HEAD(&zone->leaves);
        zone->start_pfn = start;
        zone->end_pfn = end;
        nr_blocks = DIV_ROUND_UP(pages, BM_BITS_PER_BLOCK);

        for (i = 0; i < nr_blocks; i++) {
                if (add_rtree_block(zone, gfp_mask, safe_needed, ca)) {
                        free_zone_bm_rtree(zone, PG_UNSAFE_CLEAR);
                        return NULL;
                }
        }

        return zone;
}

/**
 * free_zone_bm_rtree - Free the memory of the radix tree.
 *
 * Free all node pages of the radix tree.  The mem_zone_bm_rtree
 * structure itself is not freed here nor are the rtree_node
 * structs.
 */
static void free_zone_bm_rtree(struct mem_zone_bm_rtree *zone,
                               int clear_nosave_free)
{
        struct rtree_node *node;

        list_for_each_entry(node, &zone->nodes, list)
                free_image_page(node->data, clear_nosave_free);

        list_for_each_entry(node, &zone->leaves, list)
                free_image_page(node->data, clear_nosave_free);
}
static void memory_bm_position_reset(struct memory_bitmap *bm)
{
        bm->cur.zone = list_entry(bm->zones.next, struct mem_zone_bm_rtree,
                                  list);
        bm->cur.node = list_entry(bm->cur.zone->leaves.next,
                                  struct rtree_node, list);
        bm->cur.node_pfn = 0;
        bm->cur.node_bit = 0;
}

static void memory_bm_free(struct memory_bitmap *bm, int clear_nosave_free);

struct mem_extent {
        struct list_head hook;
        unsigned long start;
        unsigned long end;
};

/**
 * free_mem_extents - Free a list of memory extents.
 * @list: List of extents to free.
 */
static void free_mem_extents(struct list_head *list)
{
        struct mem_extent *ext, *aux;

        list_for_each_entry_safe(ext, aux, list, hook) {
                list_del(&ext->hook);
                kfree(ext);
        }
}

/**
 * create_mem_extents - Create a list of memory extents.
 * @list: List to put the extents into.
 * @gfp_mask: Mask to use for memory allocations.
 *
 * The extents represent contiguous ranges of PFNs.
 */
static int create_mem_extents(struct list_head *list, gfp_t gfp_mask)
{
        struct zone *zone;

        INIT_LIST_HEAD(list);

        for_each_populated_zone(zone) {
                unsigned long zone_start, zone_end;
                struct mem_extent *ext, *cur, *aux;

                zone_start = zone->zone_start_pfn;
                zone_end = zone_end_pfn(zone);

                list_for_each_entry(ext, list, hook)
                        if (zone_start <= ext->end)
                                break;

                if (&ext->hook == list || zone_end < ext->start) {
                        /* New extent is necessary */
                        struct mem_extent *new_ext;

                        new_ext = kzalloc(sizeof(struct mem_extent), gfp_mask);
                        if (!new_ext) {
                                free_mem_extents(list);
                                return -ENOMEM;
                        }
                        new_ext->start = zone_start;
                        new_ext->end = zone_end;
                        list_add_tail(&new_ext->hook, &ext->hook);
                        continue;
                }

                /* Merge this zone's range of PFNs with the existing one */
                if (zone_start < ext->start)
                        ext->start = zone_start;
                if (zone_end > ext->end)
                        ext->end = zone_end;

                /* More merging may be possible */
                cur = ext;
                list_for_each_entry_safe_continue(cur, aux, list, hook) {
                        if (zone_end < cur->start)
                                break;
                        if (zone_end < cur->end)
                                ext->end = cur->end;
                        list_del(&cur->hook);
                        kfree(cur);
                }
        }

        return 0;
}
/**
 * memory_bm_create - Allocate memory for a memory bitmap.
 */
static int memory_bm_create(struct memory_bitmap *bm, gfp_t gfp_mask,
                            int safe_needed)
{
        struct chain_allocator ca;
        struct list_head mem_extents;
        struct mem_extent *ext;
        int error;

        chain_init(&ca, gfp_mask, safe_needed);
        INIT_LIST_HEAD(&bm->zones);

        error = create_mem_extents(&mem_extents, gfp_mask);
        if (error)
                return error;

        list_for_each_entry(ext, &mem_extents, hook) {
                struct mem_zone_bm_rtree *zone;

                zone = create_zone_bm_rtree(gfp_mask, safe_needed, &ca,
                                            ext->start, ext->end);
                if (!zone) {
                        error = -ENOMEM;
                        goto Error;
                }
                list_add_tail(&zone->list, &bm->zones);
        }

        bm->p_list = ca.chain;
        memory_bm_position_reset(bm);
 Exit:
        free_mem_extents(&mem_extents);
        return error;

 Error:
        bm->p_list = ca.chain;
        memory_bm_free(bm, PG_UNSAFE_CLEAR);
        goto Exit;
}

/**
 * memory_bm_free - Free memory occupied by the memory bitmap.
 * @bm: Memory bitmap.
 */
static void memory_bm_free(struct memory_bitmap *bm, int clear_nosave_free)
{
        struct mem_zone_bm_rtree *zone;

        list_for_each_entry(zone, &bm->zones, list)
                free_zone_bm_rtree(zone, clear_nosave_free);

        free_list_of_pages(bm->p_list, clear_nosave_free);

        INIT_LIST_HEAD(&bm->zones);
}

/**
 * memory_bm_find_bit - Find the bit for a given PFN in a memory bitmap.
 *
 * Find the bit in memory bitmap @bm that corresponds to the given PFN.
 * The cur.zone, cur.node and cur.node_pfn members of @bm are updated.
 *
 * Walk the radix tree to find the page containing the bit that represents @pfn
 * and return the position of the bit in @addr and @bit_nr.
 */
static int memory_bm_find_bit(struct memory_bitmap *bm, unsigned long pfn,
                              void **addr, unsigned int *bit_nr)
{
        struct mem_zone_bm_rtree *curr, *zone;
        struct rtree_node *node;
        int i, block_nr;

        zone = bm->cur.zone;

        if (pfn >= zone->start_pfn && pfn < zone->end_pfn)
                goto zone_found;

        zone = NULL;

        /* Find the right zone */
        list_for_each_entry(curr, &bm->zones, list) {
                if (pfn >= curr->start_pfn && pfn < curr->end_pfn) {
                        zone = curr;
                        break;
                }
        }

        if (!zone)
                return -EFAULT;

zone_found:
        /*
         * We have found the zone.  Now walk the radix tree to find the leaf
         * node for our PFN.
         */
        node = bm->cur.node;
        if (((pfn - zone->start_pfn) & ~BM_BLOCK_MASK) == bm->cur.node_pfn)
                goto node_found;

        node = zone->rtree;
        block_nr = (pfn - zone->start_pfn) >> BM_BLOCK_SHIFT;

        for (i = zone->levels; i > 0; i--) {
                int index;

                index = block_nr >> ((i - 1) * BM_RTREE_LEVEL_SHIFT);
                index &= BM_RTREE_LEVEL_MASK;
                BUG_ON(node->data[index] == 0);
                node = (struct rtree_node *)node->data[index];
        }

node_found:
        /* Update last position */
        bm->cur.zone = zone;
        bm->cur.node = node;
        bm->cur.node_pfn = (pfn - zone->start_pfn) & ~BM_BLOCK_MASK;

        /* Set return values */
        *addr = node->data;
        *bit_nr = (pfn - zone->start_pfn) & BM_BLOCK_MASK;

        return 0;
}
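/*
 * Illustrative walk (editor's example, not part of the original file),
 * assuming 4 KiB pages and a hypothetical zone starting at PFN 0x10000:
 * looking up pfn 0x19234 gives
 *
 *      relative pfn = 0x19234 - 0x10000        = 0x9234
 *      block_nr     = 0x9234 >> BM_BLOCK_SHIFT = 0x1    (0x9234 >> 15)
 *      bit_nr       = 0x9234 & BM_BLOCK_MASK   = 0x1234
 *
 * so the bit lives in the zone's second leaf page, at bit 0x1234.
 */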
static void memory_bm_set_bit(struct memory_bitmap *bm, unsigned long pfn)
{
        void *addr;
        unsigned int bit;
        int error;

        error = memory_bm_find_bit(bm, pfn, &addr, &bit);
        BUG_ON(error);
        set_bit(bit, addr);
}

static int mem_bm_set_bit_check(struct memory_bitmap *bm, unsigned long pfn)
{
        void *addr;
        unsigned int bit;
        int error;

        error = memory_bm_find_bit(bm, pfn, &addr, &bit);
        if (!error)
                set_bit(bit, addr);

        return error;
}

static void memory_bm_clear_bit(struct memory_bitmap *bm, unsigned long pfn)
{
        void *addr;
        unsigned int bit;
        int error;

        error = memory_bm_find_bit(bm, pfn, &addr, &bit);
        BUG_ON(error);
        clear_bit(bit, addr);
}

static void memory_bm_clear_current(struct memory_bitmap *bm)
{
        int bit;

        bit = max(bm->cur.node_bit - 1, 0);
        clear_bit(bit, bm->cur.node->data);
}

static int memory_bm_test_bit(struct memory_bitmap *bm, unsigned long pfn)
{
        void *addr;
        unsigned int bit;
        int error;

        error = memory_bm_find_bit(bm, pfn, &addr, &bit);
        BUG_ON(error);
        return test_bit(bit, addr);
}

static bool memory_bm_pfn_present(struct memory_bitmap *bm, unsigned long pfn)
{
        void *addr;
        unsigned int bit;

        return !memory_bm_find_bit(bm, pfn, &addr, &bit);
}

/*
 * rtree_next_node - Jump to the next leaf node.
 *
 * Set the position to the beginning of the next node in the
 * memory bitmap.  This is either the next node in the current
 * zone's radix tree or the first node in the radix tree of the
 * next zone.
 *
 * Return true if there is a next node, false otherwise.
 */
static bool rtree_next_node(struct memory_bitmap *bm)
{
        if (!list_is_last(&bm->cur.node->list, &bm->cur.zone->leaves)) {
                bm->cur.node = list_entry(bm->cur.node->list.next,
                                          struct rtree_node, list);
                bm->cur.node_pfn += BM_BITS_PER_BLOCK;
                bm->cur.node_bit = 0;
                touch_softlockup_watchdog();
                return true;
        }

        /* No more nodes, goto next zone */
        if (!list_is_last(&bm->cur.zone->list, &bm->zones)) {
                bm->cur.zone = list_entry(bm->cur.zone->list.next,
                                          struct mem_zone_bm_rtree, list);
                bm->cur.node = list_entry(bm->cur.zone->leaves.next,
                                          struct rtree_node, list);
                bm->cur.node_pfn = 0;
                bm->cur.node_bit = 0;
                return true;
        }

        /* No more zones */
        return false;
}

/**
 * memory_bm_next_pfn - Find the next set bit in a memory bitmap.
 * @bm: Memory bitmap.
 *
 * Starting from the last returned position this function searches for the next
 * set bit in @bm and returns the PFN represented by it.  If no more bits are
 * set, BM_END_OF_MAP is returned.
 *
 * It is required to run memory_bm_position_reset() before the first call to
 * this function for the given memory bitmap.
 */
static unsigned long memory_bm_next_pfn(struct memory_bitmap *bm)
{
        unsigned long bits, pfn, pages;
        int bit;

        do {
                pages = bm->cur.zone->end_pfn - bm->cur.zone->start_pfn;
                bits = min(pages - bm->cur.node_pfn, BM_BITS_PER_BLOCK);
                bit = find_next_bit(bm->cur.node->data, bits,
                                    bm->cur.node_bit);
                if (bit < bits) {
                        pfn = bm->cur.zone->start_pfn + bm->cur.node_pfn + bit;
                        bm->cur.node_bit = bit + 1;
                        return pfn;
                }
        } while (rtree_next_node(bm));

        return BM_END_OF_MAP;
}
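/*
 * Illustrative sketch (editor's example, not part of the original file):
 * the canonical pattern for walking all set bits in a memory bitmap.
 * The function is hypothetical; the same pattern is used by
 * clear_free_pages() and duplicate_memory_bitmap() below.
 */
static unsigned long __maybe_unused memory_bm_count_bits(struct memory_bitmap *bm)
{
        unsigned long pfn, cnt = 0;

        memory_bm_position_reset(bm);   /* required before the first call */
        for (pfn = memory_bm_next_pfn(bm); pfn != BM_END_OF_MAP;
             pfn = memory_bm_next_pfn(bm))
                cnt++;

        return cnt;
}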
/*
 * This structure represents a range of page frames the contents of which
 * should not be saved during hibernation.
 */
struct nosave_region {
        struct list_head list;
        unsigned long start_pfn;
        unsigned long end_pfn;
};

static LIST_HEAD(nosave_regions);

static void recycle_zone_bm_rtree(struct mem_zone_bm_rtree *zone)
{
        struct rtree_node *node;

        list_for_each_entry(node, &zone->nodes, list)
                recycle_safe_page(node->data);

        list_for_each_entry(node, &zone->leaves, list)
                recycle_safe_page(node->data);
}

static void memory_bm_recycle(struct memory_bitmap *bm)
{
        struct mem_zone_bm_rtree *zone;
        struct linked_page *p_list;

        list_for_each_entry(zone, &bm->zones, list)
                recycle_zone_bm_rtree(zone);

        p_list = bm->p_list;
        while (p_list) {
                struct linked_page *lp = p_list;

                p_list = lp->next;
                recycle_safe_page(lp);
        }
}

/**
 * __register_nosave_region - Register a region of unsaveable memory.
 *
 * Register a range of page frames the contents of which should not be saved
 * during hibernation (to be used in the early initialization code).
 */
void __init __register_nosave_region(unsigned long start_pfn,
                                     unsigned long end_pfn, int use_kmalloc)
{
        struct nosave_region *region;

        if (start_pfn >= end_pfn)
                return;

        if (!list_empty(&nosave_regions)) {
                /* Try to extend the previous region (they should be sorted) */
                region = list_entry(nosave_regions.prev,
                                    struct nosave_region, list);
                if (region->end_pfn == start_pfn) {
                        region->end_pfn = end_pfn;
                        goto Report;
                }
        }
        if (use_kmalloc) {
                /* During init, this shouldn't fail */
                region = kmalloc(sizeof(struct nosave_region), GFP_KERNEL);
                BUG_ON(!region);
        } else {
                /* This allocation cannot fail */
                region = memblock_alloc(sizeof(struct nosave_region),
                                        SMP_CACHE_BYTES);
        }
        region->start_pfn = start_pfn;
        region->end_pfn = end_pfn;
        list_add_tail(&region->list, &nosave_regions);
 Report:
        pr_info("Registered nosave memory: [mem %#010llx-%#010llx]\n",
                (unsigned long long) start_pfn << PAGE_SHIFT,
                ((unsigned long long) end_pfn << PAGE_SHIFT) - 1);
}

/*
 * Set bits in this map correspond to the page frames the contents of which
 * should not be saved during the suspend.
 */
static struct memory_bitmap *forbidden_pages_map;

/* Set bits in this map correspond to free page frames. */
static struct memory_bitmap *free_pages_map;

/*
 * Each page frame allocated for creating the image is marked by setting the
 * corresponding bits in forbidden_pages_map and free_pages_map simultaneously.
 */

void swsusp_set_page_free(struct page *page)
{
        if (free_pages_map)
                memory_bm_set_bit(free_pages_map, page_to_pfn(page));
}

static int swsusp_page_is_free(struct page *page)
{
        return free_pages_map ?
                memory_bm_test_bit(free_pages_map, page_to_pfn(page)) : 0;
}

void swsusp_unset_page_free(struct page *page)
{
        if (free_pages_map)
                memory_bm_clear_bit(free_pages_map, page_to_pfn(page));
}

static void swsusp_set_page_forbidden(struct page *page)
{
        if (forbidden_pages_map)
                memory_bm_set_bit(forbidden_pages_map, page_to_pfn(page));
}

int swsusp_page_is_forbidden(struct page *page)
{
        return forbidden_pages_map ?
                memory_bm_test_bit(forbidden_pages_map, page_to_pfn(page)) : 0;
}

static void swsusp_unset_page_forbidden(struct page *page)
{
        if (forbidden_pages_map)
                memory_bm_clear_bit(forbidden_pages_map, page_to_pfn(page));
}
/**
 * mark_nosave_pages - Mark pages that should not be saved.
 * @bm: Memory bitmap.
 *
 * Set the bits in @bm that correspond to the page frames the contents of which
 * should not be saved.
 */
static void mark_nosave_pages(struct memory_bitmap *bm)
{
        struct nosave_region *region;

        if (list_empty(&nosave_regions))
                return;

        list_for_each_entry(region, &nosave_regions, list) {
                unsigned long pfn;

                pr_debug("Marking nosave pages: [mem %#010llx-%#010llx]\n",
                         (unsigned long long) region->start_pfn << PAGE_SHIFT,
                         ((unsigned long long) region->end_pfn << PAGE_SHIFT)
                                - 1);

                for (pfn = region->start_pfn; pfn < region->end_pfn; pfn++)
                        if (pfn_valid(pfn)) {
                                /*
                                 * It is safe to ignore the result of
                                 * mem_bm_set_bit_check() here, since we won't
                                 * touch the PFNs for which the error is
                                 * returned anyway.
                                 */
                                mem_bm_set_bit_check(bm, pfn);
                        }
        }
}

/**
 * create_basic_memory_bitmaps - Create bitmaps to hold basic page information.
 *
 * Create bitmaps needed for marking page frames that should not be saved and
 * free page frames.  The forbidden_pages_map and free_pages_map pointers are
 * only modified if everything goes well, because we don't want the bits to be
 * touched before both bitmaps are set up.
 */
int create_basic_memory_bitmaps(void)
{
        struct memory_bitmap *bm1, *bm2;
        int error = 0;

        if (forbidden_pages_map && free_pages_map)
                return 0;
        else
                BUG_ON(forbidden_pages_map || free_pages_map);

        bm1 = kzalloc(sizeof(struct memory_bitmap), GFP_KERNEL);
        if (!bm1)
                return -ENOMEM;

        error = memory_bm_create(bm1, GFP_KERNEL, PG_ANY);
        if (error)
                goto Free_first_object;

        bm2 = kzalloc(sizeof(struct memory_bitmap), GFP_KERNEL);
        if (!bm2)
                goto Free_first_bitmap;

        error = memory_bm_create(bm2, GFP_KERNEL, PG_ANY);
        if (error)
                goto Free_second_object;

        forbidden_pages_map = bm1;
        free_pages_map = bm2;
        mark_nosave_pages(forbidden_pages_map);

        pr_debug("Basic memory bitmaps created\n");

        return 0;

 Free_second_object:
        kfree(bm2);
 Free_first_bitmap:
        memory_bm_free(bm1, PG_UNSAFE_CLEAR);
 Free_first_object:
        kfree(bm1);
        return -ENOMEM;
}

/**
 * free_basic_memory_bitmaps - Free memory bitmaps holding basic information.
 *
 * Free memory bitmaps allocated by create_basic_memory_bitmaps().  The
 * auxiliary pointers are necessary so that the bitmaps themselves are not
 * referred to while they are being freed.
 */
void free_basic_memory_bitmaps(void)
{
        struct memory_bitmap *bm1, *bm2;

        if (WARN_ON(!(forbidden_pages_map && free_pages_map)))
                return;

        bm1 = forbidden_pages_map;
        bm2 = free_pages_map;
        forbidden_pages_map = NULL;
        free_pages_map = NULL;
        memory_bm_free(bm1, PG_UNSAFE_CLEAR);
        kfree(bm1);
        memory_bm_free(bm2, PG_UNSAFE_CLEAR);
        kfree(bm2);

        pr_debug("Basic memory bitmaps freed\n");
}

void clear_free_pages(void)
{
#ifdef CONFIG_PAGE_POISONING_ZERO
        struct memory_bitmap *bm = free_pages_map;
        unsigned long pfn;

        if (WARN_ON(!(free_pages_map)))
                return;

        memory_bm_position_reset(bm);
        pfn = memory_bm_next_pfn(bm);
        while (pfn != BM_END_OF_MAP) {
                if (pfn_valid(pfn))
                        clear_highpage(pfn_to_page(pfn));

                pfn = memory_bm_next_pfn(bm);
        }
        memory_bm_position_reset(bm);
        pr_info("free pages cleared after restore\n");
#endif /* PAGE_POISONING_ZERO */
}
/**
 * snapshot_additional_pages - Estimate the number of extra pages needed.
 * @zone: Memory zone to carry out the computation for.
 *
 * Estimate the number of additional pages needed for setting up the
 * hibernation image data structures for @zone (usually, the returned value is
 * greater than the exact number).
 */
unsigned int snapshot_additional_pages(struct zone *zone)
{
        unsigned int rtree, nodes;

        rtree = nodes = DIV_ROUND_UP(zone->spanned_pages, BM_BITS_PER_BLOCK);
        rtree += DIV_ROUND_UP(rtree * sizeof(struct rtree_node),
                              LINKED_PAGE_DATA_SIZE);
        while (nodes > 1) {
                nodes = DIV_ROUND_UP(nodes, BM_ENTRIES_PER_LEVEL);
                rtree += nodes;
        }

        return 2 * rtree;
}
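/*
 * Illustrative arithmetic (editor's example, not part of the original file):
 * for a hypothetical zone spanning 1 GiB (262144 pages of 4 KiB) on a
 * 64-bit machine, snapshot_additional_pages() computes
 *
 *      rtree = nodes = DIV_ROUND_UP(262144, 32768)                 = 8,
 *      rtree += DIV_ROUND_UP(8 * sizeof(struct rtree_node), 4088)  = 9,
 *      nodes = DIV_ROUND_UP(8, 512) = 1, so rtree                  = 10,
 *
 * and returns 2 * 10 = 20 pages: one set of structures per bitmap, for
 * the two bitmaps used during hibernation.
 */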
#ifdef CONFIG_HIGHMEM
/**
 * count_free_highmem_pages - Compute the total number of free highmem pages.
 *
 * The returned number is system-wide.
 */
static unsigned int count_free_highmem_pages(void)
{
        struct zone *zone;
        unsigned int cnt = 0;

        for_each_populated_zone(zone)
                if (is_highmem(zone))
                        cnt += zone_page_state(zone, NR_FREE_PAGES);

        return cnt;
}

/**
 * saveable_highmem_page - Check if a highmem page is saveable.
 *
 * Determine whether a highmem page should be included in a hibernation image.
 *
 * We should save the page if it isn't Nosave or NosaveFree, or Reserved,
 * and it isn't part of a free chunk of pages.
 */
static struct page *saveable_highmem_page(struct zone *zone, unsigned long pfn)
{
        struct page *page;

        if (!pfn_valid(pfn))
                return NULL;

        page = pfn_to_page(pfn);
        if (page_zone(page) != zone)
                return NULL;

        BUG_ON(!PageHighMem(page));

        if (swsusp_page_is_forbidden(page) || swsusp_page_is_free(page) ||
            PageReserved(page))
                return NULL;

        if (page_is_guard(page))
                return NULL;

        return page;
}

/**
 * count_highmem_pages - Compute the total number of saveable highmem pages.
 */
static unsigned int count_highmem_pages(void)
{
        struct zone *zone;
        unsigned int n = 0;

        for_each_populated_zone(zone) {
                unsigned long pfn, max_zone_pfn;

                if (!is_highmem(zone))
                        continue;

                mark_free_pages(zone);
                max_zone_pfn = zone_end_pfn(zone);
                for (pfn = zone->zone_start_pfn; pfn < max_zone_pfn; pfn++)
                        if (saveable_highmem_page(zone, pfn))
                                n++;
        }
        return n;
}
#else
static inline void *saveable_highmem_page(struct zone *z, unsigned long p)
{
        return NULL;
}

/*
 * Stub needed by the !CONFIG_HIGHMEM build: count_highmem_pages() is
 * called unconditionally below.
 */
static unsigned int count_highmem_pages(void) { return 0; }
#endif /* CONFIG_HIGHMEM */

/**
 * saveable_page - Check if the given page is saveable.
 *
 * Determine whether a non-highmem page should be included in a hibernation
 * image.
 *
 * We should save the page if it isn't Nosave, and is not in the range
 * of pages statically defined as 'unsaveable', and it isn't part of
 * a free chunk of pages.
 */
static struct page *saveable_page(struct zone *zone, unsigned long pfn)
{
        struct page *page;

        if (!pfn_valid(pfn))
                return NULL;

        page = pfn_to_page(pfn);
        if (page_zone(page) != zone)
                return NULL;

        BUG_ON(PageHighMem(page));

        if (swsusp_page_is_forbidden(page) || swsusp_page_is_free(page))
                return NULL;

        if (PageReserved(page)
            && (!kernel_page_present(page) || pfn_is_nosave(pfn)))
                return NULL;

        if (page_is_guard(page))
                return NULL;

        return page;
}

/**
 * count_data_pages - Compute the total number of saveable non-highmem pages.
 */
static unsigned int count_data_pages(void)
{
        struct zone *zone;
        unsigned long pfn, max_zone_pfn;
        unsigned int n = 0;

        for_each_populated_zone(zone) {
                if (is_highmem(zone))
                        continue;

                mark_free_pages(zone);
                max_zone_pfn = zone_end_pfn(zone);
                for (pfn = zone->zone_start_pfn; pfn < max_zone_pfn; pfn++)
                        if (saveable_page(zone, pfn))
                                n++;
        }
        return n;
}

/*
 * This is needed because copy_page and memcpy are not usable for copying
 * task structs.
 */
static inline void do_copy_page(long *dst, long *src)
{
        int n;

        for (n = PAGE_SIZE / sizeof(long); n; n--)
                *dst++ = *src++;
}
/**
 * safe_copy_page - Copy a page in a safe way.
 *
 * Check if the page we are going to copy is marked as present in the kernel
 * page tables (this always is the case if CONFIG_DEBUG_PAGEALLOC is not set
 * and in that case kernel_page_present() always returns 'true').
 */
static void safe_copy_page(void *dst, struct page *s_page)
{
        if (kernel_page_present(s_page)) {
                do_copy_page(dst, page_address(s_page));
        } else {
                kernel_map_pages(s_page, 1, 1);
                do_copy_page(dst, page_address(s_page));
                kernel_map_pages(s_page, 1, 0);
        }
}

#ifdef CONFIG_HIGHMEM
static inline struct page *page_is_saveable(struct zone *zone, unsigned long pfn)
{
        return is_highmem(zone) ?
                saveable_highmem_page(zone, pfn) : saveable_page(zone, pfn);
}

static void copy_data_page(unsigned long dst_pfn, unsigned long src_pfn)
{
        struct page *s_page, *d_page;
        void *src, *dst;

        s_page = pfn_to_page(src_pfn);
        d_page = pfn_to_page(dst_pfn);
        if (PageHighMem(s_page)) {
                src = kmap_atomic(s_page);
                dst = kmap_atomic(d_page);
                do_copy_page(dst, src);
                kunmap_atomic(dst);
                kunmap_atomic(src);
        } else {
                if (PageHighMem(d_page)) {
                        /*
                         * The page pointed to by src may contain some kernel
                         * data modified by kmap_atomic()
                         */
                        safe_copy_page(buffer, s_page);
                        dst = kmap_atomic(d_page);
                        copy_page(dst, buffer);
                        kunmap_atomic(dst);
                } else {
                        safe_copy_page(page_address(d_page), s_page);
                }
        }
}
#else
#define page_is_saveable(zone, pfn)     saveable_page(zone, pfn)

static inline void copy_data_page(unsigned long dst_pfn, unsigned long src_pfn)
{
        safe_copy_page(page_address(pfn_to_page(dst_pfn)),
                       pfn_to_page(src_pfn));
}
#endif /* CONFIG_HIGHMEM */

static void copy_data_pages(struct memory_bitmap *copy_bm,
                            struct memory_bitmap *orig_bm)
{
        struct zone *zone;
        unsigned long pfn;

        for_each_populated_zone(zone) {
                unsigned long max_zone_pfn;

                mark_free_pages(zone);
                max_zone_pfn = zone_end_pfn(zone);
                for (pfn = zone->zone_start_pfn; pfn < max_zone_pfn; pfn++)
                        if (page_is_saveable(zone, pfn))
                                memory_bm_set_bit(orig_bm, pfn);
        }
        memory_bm_position_reset(orig_bm);
        memory_bm_position_reset(copy_bm);
        for (;;) {
                pfn = memory_bm_next_pfn(orig_bm);
                if (unlikely(pfn == BM_END_OF_MAP))
                        break;
                copy_data_page(memory_bm_next_pfn(copy_bm), pfn);
        }
}

/* Total number of image pages */
static unsigned int nr_copy_pages;
/* Number of pages needed for saving the original pfns of the image pages */
static unsigned int nr_meta_pages;
/*
 * Numbers of normal and highmem page frames allocated for hibernation image
 * before suspending devices.
 */
static unsigned int alloc_normal, alloc_highmem;
/*
 * Memory bitmap used for marking saveable pages (during hibernation) or
 * hibernation image pages (during restore).
 */
static struct memory_bitmap orig_bm;
/*
 * Memory bitmap used during hibernation for marking allocated page frames that
 * will contain copies of saveable pages.  During restore it is initially used
 * for marking hibernation image pages, but then the set bits from it are
 * duplicated in @orig_bm and it is released.  On highmem systems it is next
 * used for marking "safe" highmem pages, but it has to be reinitialized for
 * this purpose.
 */
static struct memory_bitmap copy_bm;
/**
 * swsusp_free - Free pages allocated for hibernation image.
 *
 * Image pages are allocated before snapshot creation, so they need to be
 * released after resume.
 */
void swsusp_free(void)
{
        unsigned long fb_pfn, fr_pfn;

        if (!forbidden_pages_map || !free_pages_map)
                goto out;

        memory_bm_position_reset(forbidden_pages_map);
        memory_bm_position_reset(free_pages_map);

loop:
        fr_pfn = memory_bm_next_pfn(free_pages_map);
        fb_pfn = memory_bm_next_pfn(forbidden_pages_map);

        /*
         * Find the next bit set in both bitmaps.  This is guaranteed to
         * terminate when fb_pfn == fr_pfn == BM_END_OF_MAP.
         */
        do {
                if (fb_pfn < fr_pfn)
                        fb_pfn = memory_bm_next_pfn(forbidden_pages_map);
                if (fr_pfn < fb_pfn)
                        fr_pfn = memory_bm_next_pfn(free_pages_map);
        } while (fb_pfn != fr_pfn);

        if (fr_pfn != BM_END_OF_MAP && pfn_valid(fr_pfn)) {
                struct page *page = pfn_to_page(fr_pfn);

                memory_bm_clear_current(forbidden_pages_map);
                memory_bm_clear_current(free_pages_map);
                hibernate_restore_unprotect_page(page_address(page));
                __free_page(page);
                goto loop;
        }

out:
        nr_copy_pages = 0;
        nr_meta_pages = 0;
        restore_pblist = NULL;
        buffer = NULL;
        alloc_normal = 0;
        alloc_highmem = 0;
        hibernate_restore_protection_end();
}

/* Helper functions used for the shrinking of memory. */

#define GFP_IMAGE       (GFP_KERNEL | __GFP_NOWARN)

/**
 * preallocate_image_pages - Allocate a number of pages for hibernation image.
 * @nr_pages: Number of page frames to allocate.
 * @mask: GFP flags to use for the allocation.
 *
 * Return value: Number of page frames actually allocated.
 */
static unsigned long preallocate_image_pages(unsigned long nr_pages, gfp_t mask)
{
        unsigned long nr_alloc = 0;

        while (nr_pages > 0) {
                struct page *page;

                page = alloc_image_page(mask);
                if (!page)
                        break;
                memory_bm_set_bit(&copy_bm, page_to_pfn(page));
                if (PageHighMem(page))
                        alloc_highmem++;
                else
                        alloc_normal++;
                nr_pages--;
                nr_alloc++;
        }

        return nr_alloc;
}

static unsigned long preallocate_image_memory(unsigned long nr_pages,
                                              unsigned long avail_normal)
{
        unsigned long alloc;

        if (avail_normal <= alloc_normal)
                return 0;

        alloc = avail_normal - alloc_normal;
        if (nr_pages < alloc)
                alloc = nr_pages;

        return preallocate_image_pages(alloc, GFP_IMAGE);
}

#ifdef CONFIG_HIGHMEM
static unsigned long preallocate_image_highmem(unsigned long nr_pages)
{
        return preallocate_image_pages(nr_pages, GFP_IMAGE | __GFP_HIGHMEM);
}

/**
 * __fraction - Compute (an approximation of) x * (multiplier / base).
 */
static unsigned long __fraction(u64 x, u64 multiplier, u64 base)
{
        x *= multiplier;
        do_div(x, base);
        return (unsigned long)x;
}
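/*
 * Illustrative arithmetic (editor's example, not part of the original file):
 * __fraction(1000, 3, 8) computes 1000 * 3 / 8 = 375.  The multiplication
 * and division are carried out in 64 bits so that the intermediate
 * product cannot overflow on 32-bit systems.
 */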
static unsigned long preallocate_highmem_fraction(unsigned long nr_pages,
                                                  unsigned long highmem,
                                                  unsigned long total)
{
        unsigned long alloc = __fraction(nr_pages, highmem, total);

        return preallocate_image_pages(alloc, GFP_IMAGE | __GFP_HIGHMEM);
}
#else /* CONFIG_HIGHMEM */
static inline unsigned long preallocate_image_highmem(unsigned long nr_pages)
{
        return 0;
}

static inline unsigned long preallocate_highmem_fraction(unsigned long nr_pages,
                                                         unsigned long highmem,
                                                         unsigned long total)
{
        return 0;
}
#endif /* CONFIG_HIGHMEM */

/**
 * free_unnecessary_pages - Release preallocated pages not needed for the image.
 */
static unsigned long free_unnecessary_pages(void)
{
        unsigned long save, to_free_normal, to_free_highmem, free;

        save = count_data_pages();
        if (alloc_normal >= save) {
                to_free_normal = alloc_normal - save;
                save = 0;
        } else {
                to_free_normal = 0;
                save -= alloc_normal;
        }
        save += count_highmem_pages();
        if (alloc_highmem >= save) {
                to_free_highmem = alloc_highmem - save;
        } else {
                to_free_highmem = 0;
                save -= alloc_highmem;
                if (to_free_normal > save)
                        to_free_normal -= save;
                else
                        to_free_normal = 0;
        }
        free = to_free_normal + to_free_highmem;

        memory_bm_position_reset(&copy_bm);

        while (to_free_normal > 0 || to_free_highmem > 0) {
                unsigned long pfn = memory_bm_next_pfn(&copy_bm);
                struct page *page = pfn_to_page(pfn);

                if (PageHighMem(page)) {
                        if (!to_free_highmem)
                                continue;
                        to_free_highmem--;
                        alloc_highmem--;
                } else {
                        if (!to_free_normal)
                                continue;
                        to_free_normal--;
                        alloc_normal--;
                }
                memory_bm_clear_bit(&copy_bm, pfn);
                swsusp_unset_page_forbidden(page);
                swsusp_unset_page_free(page);
                __free_page(page);
        }

        return free;
}

/**
 * minimum_image_size - Estimate the minimum acceptable size of an image.
 * @saveable: Number of saveable pages in the system.
 *
 * We want to avoid attempting to free too much memory too hard, so estimate the
 * minimum acceptable size of a hibernation image to use as the lower limit for
 * preallocating memory.
 *
 * We assume that the minimum image size should be proportional to
 *
 * [number of saveable pages] - [number of pages that can be freed in theory]
 *
 * where the second term is the sum of (1) reclaimable slab pages, (2) active
 * and (3) inactive anonymous pages, (4) active and (5) inactive file pages.
 */
static unsigned long minimum_image_size(unsigned long saveable)
{
        unsigned long size;

        size = global_node_page_state(NR_SLAB_RECLAIMABLE)
                + global_node_page_state(NR_ACTIVE_ANON)
                + global_node_page_state(NR_INACTIVE_ANON)
                + global_node_page_state(NR_ACTIVE_FILE)
                + global_node_page_state(NR_INACTIVE_FILE);

        return saveable <= size ? 0 : saveable - size;
}
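/*
 * Illustrative arithmetic (editor's example with hypothetical numbers,
 * not part of the original file): with 200000 saveable pages and 150000
 * pages reclaimable in theory (the slab + anonymous + file LRU sums
 * above), the estimated minimum image size is 200000 - 150000 = 50000
 * pages; if the reclaimable sum exceeded the saveable count, it would
 * be 0.
 */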
/**
 * hibernate_preallocate_memory - Preallocate memory for hibernation image.
 *
 * To create a hibernation image it is necessary to make a copy of every page
 * frame in use.  We also need a number of page frames to be free during
 * hibernation for allocations made while saving the image and for device
 * drivers, in case they need to allocate memory from their hibernation
 * callbacks (these two numbers are given by PAGES_FOR_IO, which is a rough
 * estimate, and reserved_size divided by PAGE_SIZE, which is tunable through
 * /sys/power/reserved_size, respectively).  To make this happen, we compute
 * the total number of available page frames and allocate at least
 *
 * ([page frames total] + PAGES_FOR_IO + [metadata pages]) / 2
 *  + 2 * DIV_ROUND_UP(reserved_size, PAGE_SIZE)
 *
 * of them, which corresponds to the maximum size of a hibernation image.
 *
 * If image_size is set below the number following from the above formula,
 * the preallocation of memory is continued until the total number of saveable
 * pages in the system is below the requested image size or the minimum
 * acceptable image size returned by minimum_image_size(), whichever is greater.
 */
int hibernate_preallocate_memory(void)
{
        struct zone *zone;
        unsigned long saveable, size, max_size, count, highmem, pages = 0;
        unsigned long alloc, save_highmem, pages_highmem, avail_normal;
        ktime_t start, stop;
        int error;

        pr_info("Preallocating image memory... ");
        start = ktime_get();

        error = memory_bm_create(&orig_bm, GFP_IMAGE, PG_ANY);
        if (error)
                goto err_out;

        error = memory_bm_create(&copy_bm, GFP_IMAGE, PG_ANY);
        if (error)
                goto err_out;

        alloc_normal = 0;
        alloc_highmem = 0;

        /* Count the number of saveable data pages. */
        save_highmem = count_highmem_pages();
        saveable = count_data_pages();

        /*
         * Compute the total number of page frames we can use (count) and the
         * number of pages needed for image metadata (size).
         */
        count = saveable;
        saveable += save_highmem;
        highmem = save_highmem;
        size = 0;
        for_each_populated_zone(zone) {
                size += snapshot_additional_pages(zone);
                if (is_highmem(zone))
                        highmem += zone_page_state(zone, NR_FREE_PAGES);
                else
                        count += zone_page_state(zone, NR_FREE_PAGES);
        }
        avail_normal = count;
        count += highmem;
        count -= totalreserve_pages;

        /* Add number of pages required for page keys (s390 only). */
        size += page_key_additional_pages(saveable);

        /* Compute the maximum number of saveable pages to leave in memory. */
        max_size = (count - (size + PAGES_FOR_IO)) / 2
                        - 2 * DIV_ROUND_UP(reserved_size, PAGE_SIZE);
        /* Compute the desired number of image pages specified by image_size. */
        size = DIV_ROUND_UP(image_size, PAGE_SIZE);
        if (size > max_size)
                size = max_size;
        /*
         * If the desired number of image pages is at least as large as the
         * current number of saveable pages in memory, allocate page frames for
         * the image and we're done.
         */
        if (size >= saveable) {
                pages = preallocate_image_highmem(save_highmem);
                pages += preallocate_image_memory(saveable - pages, avail_normal);
                goto out;
        }

        /* Estimate the minimum size of the image. */
        pages = minimum_image_size(saveable);
        /*
         * To avoid excessive pressure on the normal zone, leave room in it to
         * accommodate an image of the minimum size (unless it's already too
         * small, in which case don't preallocate pages from it at all).
         */
        if (avail_normal > pages)
                avail_normal -= pages;
        else
                avail_normal = 0;
        if (size < pages)
                size = min_t(unsigned long, pages, max_size);

        /*
         * Let the memory management subsystem know that we're going to need a
         * large number of page frames to allocate and make it free some memory.
         * NOTE: If this is not done, performance will be hurt badly in some
         * test cases.
         */
        shrink_all_memory(saveable - size);

        /*
         * The number of saveable pages in memory was too high, so apply some
         * pressure to decrease it.  First, make room for the largest possible
         * image and fail if that doesn't work.  Next, try to decrease the size
         * of the image as much as indicated by 'size' using allocations from
         * highmem and non-highmem zones separately.
         */
        pages_highmem = preallocate_image_highmem(highmem / 2);
        alloc = count - max_size;
        if (alloc > pages_highmem)
                alloc -= pages_highmem;
        else
                alloc = 0;
        pages = preallocate_image_memory(alloc, avail_normal);
        if (pages < alloc) {
                /* We have exhausted non-highmem pages, try highmem. */
                alloc -= pages;
                pages += pages_highmem;
                pages_highmem = preallocate_image_highmem(alloc);
                if (pages_highmem < alloc)
                        goto err_out;
                pages += pages_highmem;
                /*
                 * size is the desired number of saveable pages to leave in
                 * memory, so try to preallocate (all memory - size) pages.
                 */
                alloc = (count - pages) - size;
                pages += preallocate_image_highmem(alloc);
        } else {
                /*
                 * There are approximately max_size saveable pages at this point
                 * and we want to reduce this number down to size.
                 */
                alloc = max_size - size;
                size = preallocate_highmem_fraction(alloc, highmem, count);
                pages_highmem += size;
                alloc -= size;
                size = preallocate_image_memory(alloc, avail_normal);
                pages_highmem += preallocate_image_highmem(alloc - size);
                pages += pages_highmem + size;
        }

        /*
         * We only need as many page frames for the image as there are saveable
         * pages in memory, but we have allocated more.  Release the excessive
         * ones now.
         */
        pages -= free_unnecessary_pages();

 out:
        stop = ktime_get();
        pr_cont("done (allocated %lu pages)\n", pages);
        swsusp_show_speed(start, stop, pages, "Allocated");

        return 0;

 err_out:
        pr_cont("\n");
        swsusp_free();
        return -ENOMEM;
}
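/*
 * Illustrative arithmetic (editor's example with hypothetical numbers,
 * not part of the original file; PAGES_FOR_IO is taken as 1024 purely
 * for illustration): with count = 262144 usable page frames, size = 600
 * metadata pages and reserved_size = 2 MiB (512 pages of 4 KiB),
 *
 *      max_size = (262144 - (600 + 1024)) / 2 - 2 * 512
 *               = 130260 - 1024 = 129236 pages,
 *
 * i.e. roughly half of the usable memory may be left occupied by
 * saveable pages, with the rest preallocated for the image.
 */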
#ifdef CONFIG_HIGHMEM
/**
 * count_pages_for_highmem - Count non-highmem pages needed for copying highmem.
 *
 * Compute the number of non-highmem pages that will be necessary for creating
 * copies of highmem pages.
 */
static unsigned int count_pages_for_highmem(unsigned int nr_highmem)
{
        unsigned int free_highmem = count_free_highmem_pages() + alloc_highmem;

        if (free_highmem >= nr_highmem)
                nr_highmem = 0;
        else
                nr_highmem -= free_highmem;

        return nr_highmem;
}
#else
static unsigned int count_pages_for_highmem(unsigned int nr_highmem) { return 0; }
#endif /* CONFIG_HIGHMEM */

/**
 * enough_free_mem - Check if there is enough free memory for the image.
 */
static int enough_free_mem(unsigned int nr_pages, unsigned int nr_highmem)
{
        struct zone *zone;
        unsigned int free = alloc_normal;

        for_each_populated_zone(zone)
                if (!is_highmem(zone))
                        free += zone_page_state(zone, NR_FREE_PAGES);

        nr_pages += count_pages_for_highmem(nr_highmem);
        pr_debug("Normal pages needed: %u + %u, available pages: %u\n",
                 nr_pages, PAGES_FOR_IO, free);

        return free > nr_pages + PAGES_FOR_IO;
}

#ifdef CONFIG_HIGHMEM
/**
 * get_highmem_buffer - Allocate a buffer for highmem pages.
 *
 * If there are some highmem pages in the hibernation image, we may need a
 * buffer to copy them and/or load their data.
 */
static inline int get_highmem_buffer(int safe_needed)
{
        buffer = get_image_page(GFP_ATOMIC, safe_needed);
        return buffer ? 0 : -ENOMEM;
}

/**
 * alloc_highmem_pages - Allocate some highmem pages for the image.
 *
 * Try to allocate as many pages as needed, but if the number of free highmem
 * pages is less than that, allocate them all.
 */
static inline unsigned int alloc_highmem_pages(struct memory_bitmap *bm,
                                               unsigned int nr_highmem)
{
        unsigned int to_alloc = count_free_highmem_pages();

        if (to_alloc > nr_highmem)
                to_alloc = nr_highmem;

        nr_highmem -= to_alloc;
        while (to_alloc-- > 0) {
                struct page *page;

                page = alloc_image_page(__GFP_HIGHMEM|__GFP_KSWAPD_RECLAIM);
                memory_bm_set_bit(bm, page_to_pfn(page));
        }
        return nr_highmem;
}
#else
static inline int get_highmem_buffer(int safe_needed) { return 0; }

static inline unsigned int alloc_highmem_pages(struct memory_bitmap *bm,
                                               unsigned int n) { return 0; }
#endif /* CONFIG_HIGHMEM */
/**
 * swsusp_alloc - Allocate memory for hibernation image.
 *
 * We first try to allocate as many highmem pages as there are
 * saveable highmem pages in the system.  If that fails, we allocate
 * non-highmem pages for the copies of the remaining highmem ones.
 *
 * In this approach it is likely that the copies of highmem pages will
 * also be located in the high memory, because of the way in which
 * copy_data_pages() works.
 */
static int swsusp_alloc(struct memory_bitmap *copy_bm,
			unsigned int nr_pages, unsigned int nr_highmem)
{
	if (nr_highmem > 0) {
		if (get_highmem_buffer(PG_ANY))
			goto err_out;
		if (nr_highmem > alloc_highmem) {
			nr_highmem -= alloc_highmem;
			nr_pages += alloc_highmem_pages(copy_bm, nr_highmem);
		}
	}
	if (nr_pages > alloc_normal) {
		nr_pages -= alloc_normal;
		while (nr_pages-- > 0) {
			struct page *page;

			page = alloc_image_page(GFP_ATOMIC);
			if (!page)
				goto err_out;
			memory_bm_set_bit(copy_bm, page_to_pfn(page));
		}
	}

	return 0;

 err_out:
	swsusp_free();
	return -ENOMEM;
}

asmlinkage __visible int swsusp_save(void)
{
	unsigned int nr_pages, nr_highmem;

	pr_info("Creating hibernation image:\n");

	drain_local_pages(NULL);
	nr_pages = count_data_pages();
	nr_highmem = count_highmem_pages();
	pr_info("Need to copy %u pages\n", nr_pages + nr_highmem);

	if (!enough_free_mem(nr_pages, nr_highmem)) {
		pr_err("Not enough free memory\n");
		return -ENOMEM;
	}

	if (swsusp_alloc(&copy_bm, nr_pages, nr_highmem)) {
		pr_err("Memory allocation failed\n");
		return -ENOMEM;
	}

	/*
	 * New cold pages may appear while the suspend pagedir is being
	 * allocated.  Kill them.
	 */
	drain_local_pages(NULL);
	copy_data_pages(&copy_bm, &orig_bm);

	/*
	 * End of critical section.  From now on, we can write to memory,
	 * but we should not touch disk.  In particular, this means we must
	 * _not_ touch swap space!  Except, of course, we must write out our
	 * image.
	 */

	nr_pages += nr_highmem;
	nr_copy_pages = nr_pages;
	nr_meta_pages = DIV_ROUND_UP(nr_pages * sizeof(long), PAGE_SIZE);

	pr_info("Hibernation image created (%d pages copied)\n", nr_pages);

	return 0;
}

#ifndef CONFIG_ARCH_HIBERNATION_HEADER
static int init_header_complete(struct swsusp_info *info)
{
	memcpy(&info->uts, init_utsname(), sizeof(struct new_utsname));
	info->version_code = LINUX_VERSION_CODE;
	return 0;
}

static char *check_image_kernel(struct swsusp_info *info)
{
	if (info->version_code != LINUX_VERSION_CODE)
		return "kernel version";
	if (strcmp(info->uts.sysname, init_utsname()->sysname))
		return "system type";
	if (strcmp(info->uts.release, init_utsname()->release))
		return "kernel release";
	if (strcmp(info->uts.version, init_utsname()->version))
		return "version";
	if (strcmp(info->uts.machine, init_utsname()->machine))
		return "machine";
	return NULL;
}
#endif /* CONFIG_ARCH_HIBERNATION_HEADER */

unsigned long snapshot_get_image_size(void)
{
	return nr_copy_pages + nr_meta_pages + 1;
}

static int init_header(struct swsusp_info *info)
{
	memset(info, 0, sizeof(struct swsusp_info));
	info->num_physpages = get_num_physpages();
	info->image_pages = nr_copy_pages;
	info->pages = snapshot_get_image_size();
	info->size = info->pages;
	info->size <<= PAGE_SHIFT;
	return init_header_complete(info);
}
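/*
 * Worked example (illustrative, assuming 4 KiB pages and 8-byte longs):
 * one meta page holds PAGE_SIZE / sizeof(long) == 512 PFNs, so an image
 * of 100000 data pages needs DIV_ROUND_UP(100000, 512) == 196 meta
 * pages.  The hypothetical helper below restates the rule used by
 * swsusp_save() and accounted for by snapshot_get_image_size().
 */
static inline unsigned int meta_pages_for_sketch(unsigned int nr_data_pages)
{
	return DIV_ROUND_UP(nr_data_pages * sizeof(long), PAGE_SIZE);
}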
/**
 * pack_pfns - Prepare PFNs for saving.
 * @buf: Memory buffer to store the PFNs in.
 * @bm: Memory bitmap.
 *
 * PFNs corresponding to set bits in @bm are stored in the area of memory
 * pointed to by @buf (1 page at a time).
 */
static inline void pack_pfns(unsigned long *buf, struct memory_bitmap *bm)
{
	int j;

	for (j = 0; j < PAGE_SIZE / sizeof(long); j++) {
		buf[j] = memory_bm_next_pfn(bm);
		if (unlikely(buf[j] == BM_END_OF_MAP))
			break;
		/* Save page key for data page (s390 only). */
		page_key_read(buf + j);
	}
}

/**
 * snapshot_read_next - Get the address to read the next image page from.
 * @handle: Snapshot handle to be used for the reading.
 *
 * On the first call, @handle should point to a zeroed snapshot_handle
 * structure.  The structure gets populated then and a pointer to it should be
 * passed to this function on every subsequent call.
 *
 * On success, the function returns a positive number.  Then, the caller
 * is allowed to read up to the returned number of bytes from the memory
 * location computed by the data_of() macro.
 *
 * The function returns 0 to indicate the end of the data stream, and negative
 * numbers are returned on errors.  If that happens, the structure pointed to
 * by @handle is not updated and should not be used any more.
 */
int snapshot_read_next(struct snapshot_handle *handle)
{
	if (handle->cur > nr_meta_pages + nr_copy_pages)
		return 0;

	if (!buffer) {
		/* This makes the buffer be freed by swsusp_free() */
		buffer = get_image_page(GFP_ATOMIC, PG_ANY);
		if (!buffer)
			return -ENOMEM;
	}
	if (!handle->cur) {
		int error;

		error = init_header((struct swsusp_info *)buffer);
		if (error)
			return error;
		handle->buffer = buffer;
		memory_bm_position_reset(&orig_bm);
		memory_bm_position_reset(&copy_bm);
	} else if (handle->cur <= nr_meta_pages) {
		clear_page(buffer);
		pack_pfns(buffer, &orig_bm);
	} else {
		struct page *page;

		page = pfn_to_page(memory_bm_next_pfn(&copy_bm));
		if (PageHighMem(page)) {
			/*
			 * Highmem pages are copied to the buffer,
			 * because we can't return with a kmapped
			 * highmem page (we may not be called again).
			 */
			void *kaddr;

			kaddr = kmap_atomic(page);
			copy_page(buffer, kaddr);
			kunmap_atomic(kaddr);
			handle->buffer = buffer;
		} else {
			handle->buffer = page_address(page);
		}
	}
	handle->cur++;
	return PAGE_SIZE;
}

static void duplicate_memory_bitmap(struct memory_bitmap *dst,
				    struct memory_bitmap *src)
{
	unsigned long pfn;

	memory_bm_position_reset(src);
	pfn = memory_bm_next_pfn(src);
	while (pfn != BM_END_OF_MAP) {
		memory_bm_set_bit(dst, pfn);
		pfn = memory_bm_next_pfn(src);
	}
}
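/*
 * Illustrative sketch (not in the original source) of the calling
 * convention documented for snapshot_read_next() above, modeled on how
 * the swap writer drives it.  write_sink() is a hypothetical consumer,
 * not a real kernel function.
 */
static int save_image_sketch(struct snapshot_handle *handle,
			     int (*write_sink)(void *buf, size_t count))
{
	int n;

	while ((n = snapshot_read_next(handle)) > 0) {
		int error = write_sink(data_of(*handle), n);

		if (error)
			return error;
	}
	return n;	/* 0 on end of data, negative on error */
}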
/**
 * mark_unsafe_pages - Mark pages that were used before hibernation.
 *
 * Mark the pages that cannot be used for storing the image during restoration,
 * because they conflict with the pages that had been used before hibernation.
 */
static void mark_unsafe_pages(struct memory_bitmap *bm)
{
	unsigned long pfn;

	/* Clear the "free"/"unsafe" bit for all PFNs */
	memory_bm_position_reset(free_pages_map);
	pfn = memory_bm_next_pfn(free_pages_map);
	while (pfn != BM_END_OF_MAP) {
		memory_bm_clear_current(free_pages_map);
		pfn = memory_bm_next_pfn(free_pages_map);
	}

	/* Mark pages that correspond to the "original" PFNs as "unsafe" */
	duplicate_memory_bitmap(free_pages_map, bm);

	allocated_unsafe_pages = 0;
}

static int check_header(struct swsusp_info *info)
{
	char *reason;

	reason = check_image_kernel(info);
	if (!reason && info->num_physpages != get_num_physpages())
		reason = "memory size";
	if (reason) {
		pr_err("Image mismatch: %s\n", reason);
		return -EPERM;
	}
	return 0;
}

/**
 * load_header - Check the image header and copy the data from it.
 */
static int load_header(struct swsusp_info *info)
{
	int error;

	restore_pblist = NULL;
	error = check_header(info);
	if (!error) {
		nr_copy_pages = info->image_pages;
		nr_meta_pages = info->pages - info->image_pages - 1;
	}
	return error;
}

/**
 * unpack_orig_pfns - Set bits corresponding to given PFNs in a memory bitmap.
 * @buf: Area of memory containing the PFNs.
 * @bm: Memory bitmap.
 *
 * For each element of the array pointed to by @buf (1 page at a time), set the
 * corresponding bit in @bm.
 */
static int unpack_orig_pfns(unsigned long *buf, struct memory_bitmap *bm)
{
	int j;

	for (j = 0; j < PAGE_SIZE / sizeof(long); j++) {
		if (unlikely(buf[j] == BM_END_OF_MAP))
			break;

		/* Extract and buffer page key for data page (s390 only). */
		page_key_memorize(buf + j);

		if (pfn_valid(buf[j]) && memory_bm_pfn_present(bm, buf[j]))
			memory_bm_set_bit(bm, buf[j]);
		else
			return -EFAULT;
	}

	return 0;
}
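/*
 * Illustrative sketch (not in the original source): the meta page
 * layout that pack_pfns() produces and unpack_orig_pfns() consumes is
 * a flat array of PFNs, with BM_END_OF_MAP terminating a partially
 * filled final page.  The hypothetical helper below only restates the
 * terminator rule.
 */
static int validate_pfn_page_sketch(const unsigned long *buf)
{
	int j;

	for (j = 0; j < PAGE_SIZE / sizeof(long); j++) {
		if (buf[j] == BM_END_OF_MAP)
			break;		/* the rest of the page is unused */
		if (!pfn_valid(buf[j]))
			return -EFAULT;	/* PFN outside known memory */
	}
	return 0;
}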
#ifdef CONFIG_HIGHMEM
/*
 * struct highmem_pbe is used for creating the list of highmem pages that
 * should be restored atomically during the resume from disk, because the page
 * frames they have occupied before the suspend are in use.
 */
struct highmem_pbe {
	struct page *copy_page;	/* data is here now */
	struct page *orig_page;	/* data was here before the suspend */
	struct highmem_pbe *next;
};

/*
 * List of highmem PBEs needed for restoring the highmem pages that were
 * allocated before the suspend and included in the suspend image, but have
 * also been allocated by the "resume" kernel, so their contents cannot be
 * written directly to their "original" page frames.
 */
static struct highmem_pbe *highmem_pblist;

/**
 * count_highmem_image_pages - Compute the number of highmem pages in the image.
 * @bm: Memory bitmap.
 *
 * The bits in @bm that correspond to image pages are assumed to be set.
 */
static unsigned int count_highmem_image_pages(struct memory_bitmap *bm)
{
	unsigned long pfn;
	unsigned int cnt = 0;

	memory_bm_position_reset(bm);
	pfn = memory_bm_next_pfn(bm);
	while (pfn != BM_END_OF_MAP) {
		if (PageHighMem(pfn_to_page(pfn)))
			cnt++;

		pfn = memory_bm_next_pfn(bm);
	}
	return cnt;
}

static unsigned int safe_highmem_pages;

static struct memory_bitmap *safe_highmem_bm;

/**
 * prepare_highmem_image - Allocate memory for loading highmem data from image.
 * @bm: Pointer to an uninitialized memory bitmap structure.
 * @nr_highmem_p: Pointer to the number of highmem image pages.
 *
 * Try to allocate as many highmem pages as there are highmem image pages
 * (@nr_highmem_p points to the variable containing the number of highmem image
 * pages).  The pages that are "safe" (ie. will not be overwritten when the
 * hibernation image is restored entirely) have the corresponding bits set in
 * @bm (it must be uninitialized).
 *
 * NOTE: This function should not be called if there are no highmem image pages.
 */
static int prepare_highmem_image(struct memory_bitmap *bm,
				 unsigned int *nr_highmem_p)
{
	unsigned int to_alloc;

	if (memory_bm_create(bm, GFP_ATOMIC, PG_SAFE))
		return -ENOMEM;

	if (get_highmem_buffer(PG_SAFE))
		return -ENOMEM;

	to_alloc = count_free_highmem_pages();
	if (to_alloc > *nr_highmem_p)
		to_alloc = *nr_highmem_p;
	else
		*nr_highmem_p = to_alloc;

	safe_highmem_pages = 0;
	while (to_alloc-- > 0) {
		struct page *page;

		page = alloc_page(__GFP_HIGHMEM);
		if (!swsusp_page_is_free(page)) {
			/* The page is "safe", set its bit in the bitmap */
			memory_bm_set_bit(bm, page_to_pfn(page));
			safe_highmem_pages++;
		}
		/* Mark the page as allocated */
		swsusp_set_page_forbidden(page);
		swsusp_set_page_free(page);
	}
	memory_bm_position_reset(bm);
	safe_highmem_bm = bm;
	return 0;
}

static struct page *last_highmem_page;
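/*
 * Illustrative sketch (not in the original source): the clamp-and-report
 * pattern used by prepare_highmem_image() above.  Take min(wanted, free)
 * and, through the pointer, tell the caller how many pages it will
 * actually get.  The helper below is hypothetical.
 */
static unsigned int clamp_to_free_highmem_sketch(unsigned int *nr_wanted)
{
	unsigned int to_alloc = count_free_highmem_pages();

	if (to_alloc > *nr_wanted)
		to_alloc = *nr_wanted;	/* enough free highmem for all */
	else
		*nr_wanted = to_alloc;	/* report the shortfall */
	return to_alloc;
}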
/**
 * get_highmem_page_buffer - Prepare a buffer to store a highmem image page.
 *
 * For a given highmem image page get a buffer that suspend_write_next() should
 * return to its caller to write to.
 *
 * If the page is to be saved to its "original" page frame or a copy of
 * the page is to be made in the highmem, @buffer is returned.  Otherwise,
 * the copy of the page is to be made in normal memory, so the address of
 * the copy is returned.
 *
 * If @buffer is returned, the caller of suspend_write_next() will write
 * the page's contents to @buffer, so they will have to be copied to the
 * right location on the next call to suspend_write_next() and it is done
 * with the help of copy_last_highmem_page().  For this purpose, if
 * @buffer is returned, @last_highmem_page is set to the page to which
 * the data will have to be copied from @buffer.
 */
static void *get_highmem_page_buffer(struct page *page,
				     struct chain_allocator *ca)
{
	struct highmem_pbe *pbe;
	void *kaddr;

	if (swsusp_page_is_forbidden(page) && swsusp_page_is_free(page)) {
		/*
		 * We have allocated the "original" page frame and we can
		 * use it directly to store the loaded page.
		 */
		last_highmem_page = page;
		return buffer;
	}
	/*
	 * The "original" page frame has not been allocated and we have to
	 * use a "safe" page frame to store the loaded page.
	 */
	pbe = chain_alloc(ca, sizeof(struct highmem_pbe));
	if (!pbe) {
		swsusp_free();
		return ERR_PTR(-ENOMEM);
	}
	pbe->orig_page = page;
	if (safe_highmem_pages > 0) {
		struct page *tmp;

		/* Copy of the page will be stored in high memory */
		kaddr = buffer;
		tmp = pfn_to_page(memory_bm_next_pfn(safe_highmem_bm));
		safe_highmem_pages--;
		last_highmem_page = tmp;
		pbe->copy_page = tmp;
	} else {
		/* Copy of the page will be stored in normal memory */
		kaddr = safe_pages_list;
		safe_pages_list = safe_pages_list->next;
		pbe->copy_page = virt_to_page(kaddr);
	}
	pbe->next = highmem_pblist;
	highmem_pblist = pbe;
	return kaddr;
}

/**
 * copy_last_highmem_page - Copy the most recently loaded highmem image page.
 *
 * Copy the contents of a highmem image page from @buffer, where the caller of
 * snapshot_write_next() has stored them, to the right location represented by
 * @last_highmem_page.
 */
static void copy_last_highmem_page(void)
{
	if (last_highmem_page) {
		void *dst;

		dst = kmap_atomic(last_highmem_page);
		copy_page(dst, buffer);
		kunmap_atomic(dst);
		last_highmem_page = NULL;
	}
}

static inline int last_highmem_page_copied(void)
{
	return !last_highmem_page;
}

static inline void free_highmem_data(void)
{
	if (safe_highmem_bm)
		memory_bm_free(safe_highmem_bm, PG_UNSAFE_CLEAR);

	if (buffer)
		free_image_page(buffer, PG_UNSAFE_CLEAR);
}
#else
static unsigned int count_highmem_image_pages(struct memory_bitmap *bm) { return 0; }

static inline int prepare_highmem_image(struct memory_bitmap *bm,
					unsigned int *nr_highmem_p) { return 0; }

static inline void *get_highmem_page_buffer(struct page *page,
					    struct chain_allocator *ca)
{
	return ERR_PTR(-EINVAL);
}

static inline void copy_last_highmem_page(void) {}
static inline int last_highmem_page_copied(void) { return 1; }
static inline void free_highmem_data(void) {}
#endif /* CONFIG_HIGHMEM */

#define PBES_PER_LINKED_PAGE	(LINKED_PAGE_DATA_SIZE / sizeof(struct pbe))
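/*
 * Worked example (illustrative, assuming 4 KiB pages and 64-bit
 * pointers): LINKED_PAGE_DATA_SIZE is 4096 - 8 = 4088 bytes and a
 * struct pbe holds three pointers, 24 bytes, so PBES_PER_LINKED_PAGE
 * is 4088 / 24 = 170.  Each safe page reserved by prepare_image()
 * below therefore covers up to 170 PBE allocations made through
 * chain_alloc() during restore.
 */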
/**
 * prepare_image - Make room for loading hibernation image.
 * @new_bm: Uninitialized memory bitmap structure.
 * @bm: Memory bitmap with unsafe pages marked.
 *
 * Use @bm to mark the pages that will be overwritten in the process of
 * restoring the system memory state from the suspend image ("unsafe" pages)
 * and allocate memory for the image.
 *
 * The idea is to allocate a new memory bitmap first and then allocate
 * as many pages as needed for image data, but without specifying what those
 * pages will be used for just yet.  Instead, we mark them all as allocated and
 * create a list of "safe" pages to be used later.  On systems with high
 * memory a list of "safe" highmem pages is created too.
 */
static int prepare_image(struct memory_bitmap *new_bm, struct memory_bitmap *bm)
{
	unsigned int nr_pages, nr_highmem;
	struct linked_page *lp;
	int error;

	/* If there is no highmem, the buffer will not be necessary */
	free_image_page(buffer, PG_UNSAFE_CLEAR);
	buffer = NULL;

	nr_highmem = count_highmem_image_pages(bm);
	mark_unsafe_pages(bm);

	error = memory_bm_create(new_bm, GFP_ATOMIC, PG_SAFE);
	if (error)
		goto Free;

	duplicate_memory_bitmap(new_bm, bm);
	memory_bm_free(bm, PG_UNSAFE_KEEP);
	if (nr_highmem > 0) {
		error = prepare_highmem_image(bm, &nr_highmem);
		if (error)
			goto Free;
	}
	/*
	 * Reserve some safe pages for potential later use.
	 *
	 * NOTE: This way we make sure there will be enough safe pages for the
	 * chain_alloc() in get_buffer().  It is a bit wasteful, but
	 * nr_copy_pages cannot be greater than 50% of the memory anyway.
	 *
	 * Also, nr_copy_pages cannot be less than allocated_unsafe_pages.
	 */
	nr_pages = nr_copy_pages - nr_highmem - allocated_unsafe_pages;
	nr_pages = DIV_ROUND_UP(nr_pages, PBES_PER_LINKED_PAGE);
	while (nr_pages > 0) {
		lp = get_image_page(GFP_ATOMIC, PG_SAFE);
		if (!lp) {
			error = -ENOMEM;
			goto Free;
		}
		lp->next = safe_pages_list;
		safe_pages_list = lp;
		nr_pages--;
	}
	/* Preallocate memory for the image */
	nr_pages = nr_copy_pages - nr_highmem - allocated_unsafe_pages;
	while (nr_pages > 0) {
		lp = (struct linked_page *)get_zeroed_page(GFP_ATOMIC);
		if (!lp) {
			error = -ENOMEM;
			goto Free;
		}
		if (!swsusp_page_is_free(virt_to_page(lp))) {
			/* The page is "safe", add it to the list */
			lp->next = safe_pages_list;
			safe_pages_list = lp;
		}
		/* Mark the page as allocated */
		swsusp_set_page_forbidden(virt_to_page(lp));
		swsusp_set_page_free(virt_to_page(lp));
		nr_pages--;
	}
	return 0;

 Free:
	swsusp_free();
	return error;
}

/**
 * get_buffer - Get the address to store the next image data page.
 *
 * Get the address that snapshot_write_next() should return to its caller to
 * write to.
 */
static void *get_buffer(struct memory_bitmap *bm, struct chain_allocator *ca)
{
	struct pbe *pbe;
	struct page *page;
	unsigned long pfn = memory_bm_next_pfn(bm);

	if (pfn == BM_END_OF_MAP)
		return ERR_PTR(-EFAULT);

	page = pfn_to_page(pfn);
	if (PageHighMem(page))
		return get_highmem_page_buffer(page, ca);

	if (swsusp_page_is_forbidden(page) && swsusp_page_is_free(page))
		/*
		 * We have allocated the "original" page frame and we can
		 * use it directly to store the loaded page.
		 */
		return page_address(page);

	/*
	 * The "original" page frame has not been allocated and we have to
	 * use a "safe" page frame to store the loaded page.
	 */
	pbe = chain_alloc(ca, sizeof(struct pbe));
	if (!pbe) {
		swsusp_free();
		return ERR_PTR(-ENOMEM);
	}
	pbe->orig_address = page_address(page);
	pbe->address = safe_pages_list;
	safe_pages_list = safe_pages_list->next;
	pbe->next = restore_pblist;
	restore_pblist = pbe;
	return pbe->address;
}
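/*
 * Illustrative sketch (not in the original source): how the PBE list
 * built by get_buffer() is eventually consumed.  After the image has
 * been loaded, architecture code walks restore_pblist and copies every
 * "safe" copy back to its original page frame; compare the
 * swsusp_arch_resume() implementations.  The helper below is
 * hypothetical.
 */
static void restore_pbe_list_sketch(struct pbe *pblist)
{
	struct pbe *p;

	for (p = pblist; p; p = p->next)
		copy_page(p->orig_address, p->address);
}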
/**
 * snapshot_write_next - Get the address to store the next image page.
 * @handle: Snapshot handle structure to guide the writing.
 *
 * On the first call, @handle should point to a zeroed snapshot_handle
 * structure.  The structure gets populated then and a pointer to it should be
 * passed to this function on every subsequent call.
 *
 * On success, the function returns a positive number.  Then, the caller
 * is allowed to write up to the returned number of bytes to the memory
 * location computed by the data_of() macro.
 *
 * The function returns 0 to indicate the "end of file" condition.  Negative
 * numbers are returned on errors, in which case the structure pointed to by
 * @handle is not updated and should not be used any more.
 */
int snapshot_write_next(struct snapshot_handle *handle)
{
	static struct chain_allocator ca;
	int error = 0;

	/* Check if we have already loaded the entire image */
	if (handle->cur > 1 && handle->cur > nr_meta_pages + nr_copy_pages)
		return 0;

	handle->sync_read = 1;

	if (!handle->cur) {
		if (!buffer)
			/* This makes the buffer be freed by swsusp_free() */
			buffer = get_image_page(GFP_ATOMIC, PG_ANY);

		if (!buffer)
			return -ENOMEM;

		handle->buffer = buffer;
	} else if (handle->cur == 1) {
		error = load_header(buffer);
		if (error)
			return error;

		safe_pages_list = NULL;

		error = memory_bm_create(&copy_bm, GFP_ATOMIC, PG_ANY);
		if (error)
			return error;

		/* Allocate buffer for page keys. */
		error = page_key_alloc(nr_copy_pages);
		if (error)
			return error;

		hibernate_restore_protection_begin();
	} else if (handle->cur <= nr_meta_pages + 1) {
		error = unpack_orig_pfns(buffer, &copy_bm);
		if (error)
			return error;

		if (handle->cur == nr_meta_pages + 1) {
			error = prepare_image(&orig_bm, &copy_bm);
			if (error)
				return error;

			chain_init(&ca, GFP_ATOMIC, PG_SAFE);
			memory_bm_position_reset(&orig_bm);
			restore_pblist = NULL;
			handle->buffer = get_buffer(&orig_bm, &ca);
			handle->sync_read = 0;
			if (IS_ERR(handle->buffer))
				return PTR_ERR(handle->buffer);
		}
	} else {
		copy_last_highmem_page();
		/* Restore page key for data page (s390 only). */
		page_key_write(handle->buffer);
		hibernate_restore_protect_page(handle->buffer);
		handle->buffer = get_buffer(&orig_bm, &ca);
		if (IS_ERR(handle->buffer))
			return PTR_ERR(handle->buffer);
		if (handle->buffer != buffer)
			handle->sync_read = 0;
	}
	handle->cur++;
	return PAGE_SIZE;
}
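/*
 * Illustrative sketch (not in the original source) of the calling
 * convention documented for snapshot_write_next() above, the mirror
 * image of the save loop.  read_source() is a hypothetical producer,
 * not a real kernel function.
 */
static int load_image_sketch(struct snapshot_handle *handle,
			     int (*read_source)(void *buf, size_t count))
{
	int n;

	while ((n = snapshot_write_next(handle)) > 0) {
		int error = read_source(data_of(*handle), n);

		if (error)
			return error;
	}
	if (!n)		/* end of image: flush the last highmem page */
		snapshot_write_finalize(handle);
	return n;
}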
/**
 * snapshot_write_finalize - Complete the loading of a hibernation image.
 *
 * Must be called after the last call to snapshot_write_next() in case the last
 * page in the image happens to be a highmem page and its contents should be
 * stored in highmem.  Additionally, it recycles bitmap memory that's not
 * necessary any more.
 */
void snapshot_write_finalize(struct snapshot_handle *handle)
{
	copy_last_highmem_page();
	/* Restore page key for data page (s390 only). */
	page_key_write(handle->buffer);
	page_key_free();
	hibernate_restore_protect_page(handle->buffer);
	/* Do that only if we have loaded the image entirely */
	if (handle->cur > 1 && handle->cur > nr_meta_pages + nr_copy_pages) {
		memory_bm_recycle(&orig_bm);
		free_highmem_data();
	}
}

int snapshot_image_loaded(struct snapshot_handle *handle)
{
	return !(!nr_copy_pages || !last_highmem_page_copied() ||
		 handle->cur <= nr_meta_pages + nr_copy_pages);
}

#ifdef CONFIG_HIGHMEM
/* Assumes that @buf is ready and points to a "safe" page */
static inline void swap_two_pages_data(struct page *p1, struct page *p2,
				       void *buf)
{
	void *kaddr1, *kaddr2;

	kaddr1 = kmap_atomic(p1);
	kaddr2 = kmap_atomic(p2);
	copy_page(buf, kaddr1);
	copy_page(kaddr1, kaddr2);
	copy_page(kaddr2, buf);
	kunmap_atomic(kaddr2);
	kunmap_atomic(kaddr1);
}

/**
 * restore_highmem - Put highmem image pages into their original locations.
 *
 * For each highmem page that was in use before hibernation and is included in
 * the image, and also has been allocated by the "restore" kernel, swap its
 * current contents with the previous (ie. "before hibernation") ones.
 *
 * If the restore eventually fails, we can call this function once again and
 * restore the highmem state as seen by the restore kernel.
 */
int restore_highmem(void)
{
	struct highmem_pbe *pbe = highmem_pblist;
	void *buf;

	if (!pbe)
		return 0;

	buf = get_image_page(GFP_ATOMIC, PG_SAFE);
	if (!buf)
		return -ENOMEM;

	while (pbe) {
		swap_two_pages_data(pbe->copy_page, pbe->orig_page, buf);
		pbe = pbe->next;
	}
	free_image_page(buf, PG_UNSAFE_CLEAR);
	return 0;
}
#endif /* CONFIG_HIGHMEM */
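/*
 * Illustrative sketch (not in the original source): the bounce-buffer
 * swap performed by swap_two_pages_data() above, reduced to its
 * essentials.  Exchanging two pages through a third preserves both
 * contents with only one page of temporary storage.  The helper below
 * is hypothetical and assumes all three addresses are page-aligned and
 * directly mapped.
 */
static void swap_pages_via_bounce_sketch(void *a, void *b, void *bounce)
{
	copy_page(bounce, a);	/* save A */
	copy_page(a, b);	/* A <- B */
	copy_page(b, bounce);	/* B <- old A */
}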