1 // SPDX-License-Identifier: GPL-2.0-only 2 /* 3 * linux/kernel/power/snapshot.c 4 * 5 * This file provides system snapshot/restore functionality for swsusp. 6 * 7 * Copyright (C) 1998-2005 Pavel Machek <pavel@ucw.cz> 8 * Copyright (C) 2006 Rafael J. Wysocki <rjw@sisk.pl> 9 */ 10 11 #define pr_fmt(fmt) "PM: hibernation: " fmt 12 13 #include <linux/version.h> 14 #include <linux/module.h> 15 #include <linux/mm.h> 16 #include <linux/suspend.h> 17 #include <linux/delay.h> 18 #include <linux/bitops.h> 19 #include <linux/spinlock.h> 20 #include <linux/kernel.h> 21 #include <linux/pm.h> 22 #include <linux/device.h> 23 #include <linux/init.h> 24 #include <linux/memblock.h> 25 #include <linux/nmi.h> 26 #include <linux/syscalls.h> 27 #include <linux/console.h> 28 #include <linux/highmem.h> 29 #include <linux/list.h> 30 #include <linux/slab.h> 31 #include <linux/compiler.h> 32 #include <linux/ktime.h> 33 #include <linux/set_memory.h> 34 35 #include <linux/uaccess.h> 36 #include <asm/mmu_context.h> 37 #include <asm/tlbflush.h> 38 #include <asm/io.h> 39 40 #include "power.h" 41 42 #if defined(CONFIG_STRICT_KERNEL_RWX) && defined(CONFIG_ARCH_HAS_SET_MEMORY) 43 static bool hibernate_restore_protection; 44 static bool hibernate_restore_protection_active; 45 46 void enable_restore_image_protection(void) 47 { 48 hibernate_restore_protection = true; 49 } 50 51 static inline void hibernate_restore_protection_begin(void) 52 { 53 hibernate_restore_protection_active = hibernate_restore_protection; 54 } 55 56 static inline void hibernate_restore_protection_end(void) 57 { 58 hibernate_restore_protection_active = false; 59 } 60 61 static inline void hibernate_restore_protect_page(void *page_address) 62 { 63 if (hibernate_restore_protection_active) 64 set_memory_ro((unsigned long)page_address, 1); 65 } 66 67 static inline void hibernate_restore_unprotect_page(void *page_address) 68 { 69 if (hibernate_restore_protection_active) 70 set_memory_rw((unsigned long)page_address, 1); 71 } 72 #else 73 static inline void hibernate_restore_protection_begin(void) {} 74 static inline void hibernate_restore_protection_end(void) {} 75 static inline void hibernate_restore_protect_page(void *page_address) {} 76 static inline void hibernate_restore_unprotect_page(void *page_address) {} 77 #endif /* CONFIG_STRICT_KERNEL_RWX && CONFIG_ARCH_HAS_SET_MEMORY */ 78 79 80 /* 81 * The calls to set_direct_map_*() should not fail because remapping a page 82 * here means that we only update protection bits in an existing PTE. 83 * It is still worth to have a warning here if something changes and this 84 * will no longer be the case. 
85 */ 86 static inline void hibernate_map_page(struct page *page) 87 { 88 if (IS_ENABLED(CONFIG_ARCH_HAS_SET_DIRECT_MAP)) { 89 int ret = set_direct_map_default_noflush(page); 90 91 if (ret) 92 pr_warn_once("Failed to remap page\n"); 93 } else { 94 debug_pagealloc_map_pages(page, 1); 95 } 96 } 97 98 static inline void hibernate_unmap_page(struct page *page) 99 { 100 if (IS_ENABLED(CONFIG_ARCH_HAS_SET_DIRECT_MAP)) { 101 unsigned long addr = (unsigned long)page_address(page); 102 int ret = set_direct_map_invalid_noflush(page); 103 104 if (ret) 105 pr_warn_once("Failed to remap page\n"); 106 107 flush_tlb_kernel_range(addr, addr + PAGE_SIZE); 108 } else { 109 debug_pagealloc_unmap_pages(page, 1); 110 } 111 } 112 113 static int swsusp_page_is_free(struct page *); 114 static void swsusp_set_page_forbidden(struct page *); 115 static void swsusp_unset_page_forbidden(struct page *); 116 117 /* 118 * Number of bytes to reserve for memory allocations made by device drivers 119 * from their ->freeze() and ->freeze_noirq() callbacks so that they don't 120 * cause image creation to fail (tunable via /sys/power/reserved_size). 121 */ 122 unsigned long reserved_size; 123 124 void __init hibernate_reserved_size_init(void) 125 { 126 reserved_size = SPARE_PAGES * PAGE_SIZE; 127 } 128 129 /* 130 * Preferred image size in bytes (tunable via /sys/power/image_size). 131 * When it is set to N, swsusp will do its best to ensure the image 132 * size will not exceed N bytes, but if that is impossible, it will 133 * try to create the smallest image possible. 134 */ 135 unsigned long image_size; 136 137 void __init hibernate_image_size_init(void) 138 { 139 image_size = ((totalram_pages() * 2) / 5) * PAGE_SIZE; 140 } 141 142 /* 143 * List of PBEs needed for restoring the pages that were allocated before 144 * the suspend and included in the suspend image, but have also been 145 * allocated by the "resume" kernel, so their contents cannot be written 146 * directly to their "original" page frames. 147 */ 148 struct pbe *restore_pblist; 149 150 /* struct linked_page is used to build chains of pages */ 151 152 #define LINKED_PAGE_DATA_SIZE (PAGE_SIZE - sizeof(void *)) 153 154 struct linked_page { 155 struct linked_page *next; 156 char data[LINKED_PAGE_DATA_SIZE]; 157 } __packed; 158 159 /* 160 * List of "safe" pages (ie. pages that were not used by the image kernel 161 * before hibernation) that may be used as temporary storage for image kernel 162 * memory contents. 163 */ 164 static struct linked_page *safe_pages_list; 165 166 /* Pointer to an auxiliary buffer (1 page) */ 167 static void *buffer; 168 169 #define PG_ANY 0 170 #define PG_SAFE 1 171 #define PG_UNSAFE_CLEAR 1 172 #define PG_UNSAFE_KEEP 0 173 174 static unsigned int allocated_unsafe_pages; 175 176 /** 177 * get_image_page - Allocate a page for a hibernation image. 178 * @gfp_mask: GFP mask for the allocation. 179 * @safe_needed: Get pages that were not used before hibernation (restore only) 180 * 181 * During image restoration, for storing the PBE list and the image data, we can 182 * only use memory pages that do not conflict with the pages used before 183 * hibernation. The "unsafe" pages have PageNosaveFree set and we count them 184 * using allocated_unsafe_pages. 185 * 186 * Each allocated image page is marked as PageNosave and PageNosaveFree so that 187 * swsusp_free() can release it. 
188 */ 189 static void *get_image_page(gfp_t gfp_mask, int safe_needed) 190 { 191 void *res; 192 193 res = (void *)get_zeroed_page(gfp_mask); 194 if (safe_needed) 195 while (res && swsusp_page_is_free(virt_to_page(res))) { 196 /* The page is unsafe, mark it for swsusp_free() */ 197 swsusp_set_page_forbidden(virt_to_page(res)); 198 allocated_unsafe_pages++; 199 res = (void *)get_zeroed_page(gfp_mask); 200 } 201 if (res) { 202 swsusp_set_page_forbidden(virt_to_page(res)); 203 swsusp_set_page_free(virt_to_page(res)); 204 } 205 return res; 206 } 207 208 static void *__get_safe_page(gfp_t gfp_mask) 209 { 210 if (safe_pages_list) { 211 void *ret = safe_pages_list; 212 213 safe_pages_list = safe_pages_list->next; 214 memset(ret, 0, PAGE_SIZE); 215 return ret; 216 } 217 return get_image_page(gfp_mask, PG_SAFE); 218 } 219 220 unsigned long get_safe_page(gfp_t gfp_mask) 221 { 222 return (unsigned long)__get_safe_page(gfp_mask); 223 } 224 225 static struct page *alloc_image_page(gfp_t gfp_mask) 226 { 227 struct page *page; 228 229 page = alloc_page(gfp_mask); 230 if (page) { 231 swsusp_set_page_forbidden(page); 232 swsusp_set_page_free(page); 233 } 234 return page; 235 } 236 237 static void recycle_safe_page(void *page_address) 238 { 239 struct linked_page *lp = page_address; 240 241 lp->next = safe_pages_list; 242 safe_pages_list = lp; 243 } 244 245 /** 246 * free_image_page - Free a page allocated for hibernation image. 247 * @addr: Address of the page to free. 248 * @clear_nosave_free: If set, clear the PageNosaveFree bit for the page. 249 * 250 * The page to free should have been allocated by get_image_page() (page flags 251 * set by it are affected). 252 */ 253 static inline void free_image_page(void *addr, int clear_nosave_free) 254 { 255 struct page *page; 256 257 BUG_ON(!virt_addr_valid(addr)); 258 259 page = virt_to_page(addr); 260 261 swsusp_unset_page_forbidden(page); 262 if (clear_nosave_free) 263 swsusp_unset_page_free(page); 264 265 __free_page(page); 266 } 267 268 static inline void free_list_of_pages(struct linked_page *list, 269 int clear_page_nosave) 270 { 271 while (list) { 272 struct linked_page *lp = list->next; 273 274 free_image_page(list, clear_page_nosave); 275 list = lp; 276 } 277 } 278 279 /* 280 * struct chain_allocator is used for allocating small objects out of 281 * a linked list of pages called 'the chain'. 282 * 283 * The chain grows each time when there is no room for a new object in 284 * the current page. The allocated objects cannot be freed individually. 285 * It is only possible to free them all at once, by freeing the entire 286 * chain. 287 * 288 * NOTE: The chain allocator may be inefficient if the allocated objects 289 * are not much smaller than PAGE_SIZE. 290 */ 291 struct chain_allocator { 292 struct linked_page *chain; /* the chain */ 293 unsigned int used_space; /* total size of objects allocated out 294 of the current page */ 295 gfp_t gfp_mask; /* mask for allocating pages */ 296 int safe_needed; /* if set, only "safe" pages are allocated */ 297 }; 298 299 static void chain_init(struct chain_allocator *ca, gfp_t gfp_mask, 300 int safe_needed) 301 { 302 ca->chain = NULL; 303 ca->used_space = LINKED_PAGE_DATA_SIZE; 304 ca->gfp_mask = gfp_mask; 305 ca->safe_needed = safe_needed; 306 } 307 308 static void *chain_alloc(struct chain_allocator *ca, unsigned int size) 309 { 310 void *ret; 311 312 if (LINKED_PAGE_DATA_SIZE - ca->used_space < size) { 313 struct linked_page *lp; 314 315 lp = ca->safe_needed ? 
__get_safe_page(ca->gfp_mask) : 316 get_image_page(ca->gfp_mask, PG_ANY); 317 if (!lp) 318 return NULL; 319 320 lp->next = ca->chain; 321 ca->chain = lp; 322 ca->used_space = 0; 323 } 324 ret = ca->chain->data + ca->used_space; 325 ca->used_space += size; 326 return ret; 327 } 328 329 /* 330 * Data types related to memory bitmaps. 331 * 332 * Memory bitmap is a structure consisting of many linked lists of 333 * objects. The main list's elements are of type struct zone_bitmap 334 * and each of them corresponds to one zone. For each zone bitmap 335 * object there is a list of objects of type struct bm_block that 336 * represent each blocks of bitmap in which information is stored. 337 * 338 * struct memory_bitmap contains a pointer to the main list of zone 339 * bitmap objects, a struct bm_position used for browsing the bitmap, 340 * and a pointer to the list of pages used for allocating all of the 341 * zone bitmap objects and bitmap block objects. 342 * 343 * NOTE: It has to be possible to lay out the bitmap in memory 344 * using only allocations of order 0. Additionally, the bitmap is 345 * designed to work with arbitrary number of zones (this is over the 346 * top for now, but let's avoid making unnecessary assumptions ;-). 347 * 348 * struct zone_bitmap contains a pointer to a list of bitmap block 349 * objects and a pointer to the bitmap block object that has been 350 * most recently used for setting bits. Additionally, it contains the 351 * PFNs that correspond to the start and end of the represented zone. 352 * 353 * struct bm_block contains a pointer to the memory page in which 354 * information is stored (in the form of a block of bitmap) 355 * It also contains the pfns that correspond to the start and end of 356 * the represented memory area. 357 * 358 * The memory bitmap is organized as a radix tree to guarantee fast random 359 * access to the bits. There is one radix tree for each zone (as returned 360 * from create_mem_extents). 361 * 362 * One radix tree is represented by one struct mem_zone_bm_rtree. There are 363 * two linked lists for the nodes of the tree, one for the inner nodes and 364 * one for the leave nodes. The linked leave nodes are used for fast linear 365 * access of the memory bitmap. 366 * 367 * The struct rtree_node represents one node of the radix tree. 368 */ 369 370 #define BM_END_OF_MAP (~0UL) 371 372 #define BM_BITS_PER_BLOCK (PAGE_SIZE * BITS_PER_BYTE) 373 #define BM_BLOCK_SHIFT (PAGE_SHIFT + 3) 374 #define BM_BLOCK_MASK ((1UL << BM_BLOCK_SHIFT) - 1) 375 376 /* 377 * struct rtree_node is a wrapper struct to link the nodes 378 * of the rtree together for easy linear iteration over 379 * bits and easy freeing 380 */ 381 struct rtree_node { 382 struct list_head list; 383 unsigned long *data; 384 }; 385 386 /* 387 * struct mem_zone_bm_rtree represents a bitmap used for one 388 * populated memory zone. 
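 *
 * For a rough sense of scale (illustrative, assuming 4 KiB pages, i.e.
 * PAGE_SHIFT == 12): each leaf node holds one bitmap page of
 * BM_BITS_PER_BLOCK == 32768 bits, so a single leaf covers 32768 page
 * frames, or 128 MiB of physical memory.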
389 */ 390 struct mem_zone_bm_rtree { 391 struct list_head list; /* Link Zones together */ 392 struct list_head nodes; /* Radix Tree inner nodes */ 393 struct list_head leaves; /* Radix Tree leaves */ 394 unsigned long start_pfn; /* Zone start page frame */ 395 unsigned long end_pfn; /* Zone end page frame + 1 */ 396 struct rtree_node *rtree; /* Radix Tree Root */ 397 int levels; /* Number of Radix Tree Levels */ 398 unsigned int blocks; /* Number of Bitmap Blocks */ 399 }; 400 401 /* struct bm_position is used for browsing memory bitmaps */ 402 403 struct bm_position { 404 struct mem_zone_bm_rtree *zone; 405 struct rtree_node *node; 406 unsigned long node_pfn; 407 int node_bit; 408 }; 409 410 struct memory_bitmap { 411 struct list_head zones; 412 struct linked_page *p_list; /* list of pages used to store zone 413 bitmap objects and bitmap block 414 objects */ 415 struct bm_position cur; /* most recently used bit position */ 416 }; 417 418 /* Functions that operate on memory bitmaps */ 419 420 #define BM_ENTRIES_PER_LEVEL (PAGE_SIZE / sizeof(unsigned long)) 421 #if BITS_PER_LONG == 32 422 #define BM_RTREE_LEVEL_SHIFT (PAGE_SHIFT - 2) 423 #else 424 #define BM_RTREE_LEVEL_SHIFT (PAGE_SHIFT - 3) 425 #endif 426 #define BM_RTREE_LEVEL_MASK ((1UL << BM_RTREE_LEVEL_SHIFT) - 1) 427 428 /** 429 * alloc_rtree_node - Allocate a new node and add it to the radix tree. 430 * @gfp_mask: GFP mask for the allocation. 431 * @safe_needed: Get pages not used before hibernation (restore only) 432 * @ca: Pointer to a linked list of pages ("a chain") to allocate from 433 * @list: Radix Tree node to add. 434 * 435 * This function is used to allocate inner nodes as well as the 436 * leave nodes of the radix tree. It also adds the node to the 437 * corresponding linked list passed in by the *list parameter. 438 */ 439 static struct rtree_node *alloc_rtree_node(gfp_t gfp_mask, int safe_needed, 440 struct chain_allocator *ca, 441 struct list_head *list) 442 { 443 struct rtree_node *node; 444 445 node = chain_alloc(ca, sizeof(struct rtree_node)); 446 if (!node) 447 return NULL; 448 449 node->data = get_image_page(gfp_mask, safe_needed); 450 if (!node->data) 451 return NULL; 452 453 list_add_tail(&node->list, list); 454 455 return node; 456 } 457 458 /** 459 * add_rtree_block - Add a new leave node to the radix tree. 460 * 461 * The leave nodes need to be allocated in order to keep the leaves 462 * linked list in order. This is guaranteed by the zone->blocks 463 * counter. 464 */ 465 static int add_rtree_block(struct mem_zone_bm_rtree *zone, gfp_t gfp_mask, 466 int safe_needed, struct chain_allocator *ca) 467 { 468 struct rtree_node *node, *block, **dst; 469 unsigned int levels_needed, block_nr; 470 int i; 471 472 block_nr = zone->blocks; 473 levels_needed = 0; 474 475 /* How many levels do we need for this block nr? 
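	 *
	 * Illustration (assuming a 64-bit kernel with 4 KiB pages, so
	 * BM_RTREE_LEVEL_SHIFT == 9): block_nr 0 needs no levels,
	 * 1..511 needs one level, 512..262143 needs two, and so on.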
*/ 476 while (block_nr) { 477 levels_needed += 1; 478 block_nr >>= BM_RTREE_LEVEL_SHIFT; 479 } 480 481 /* Make sure the rtree has enough levels */ 482 for (i = zone->levels; i < levels_needed; i++) { 483 node = alloc_rtree_node(gfp_mask, safe_needed, ca, 484 &zone->nodes); 485 if (!node) 486 return -ENOMEM; 487 488 node->data[0] = (unsigned long)zone->rtree; 489 zone->rtree = node; 490 zone->levels += 1; 491 } 492 493 /* Allocate new block */ 494 block = alloc_rtree_node(gfp_mask, safe_needed, ca, &zone->leaves); 495 if (!block) 496 return -ENOMEM; 497 498 /* Now walk the rtree to insert the block */ 499 node = zone->rtree; 500 dst = &zone->rtree; 501 block_nr = zone->blocks; 502 for (i = zone->levels; i > 0; i--) { 503 int index; 504 505 if (!node) { 506 node = alloc_rtree_node(gfp_mask, safe_needed, ca, 507 &zone->nodes); 508 if (!node) 509 return -ENOMEM; 510 *dst = node; 511 } 512 513 index = block_nr >> ((i - 1) * BM_RTREE_LEVEL_SHIFT); 514 index &= BM_RTREE_LEVEL_MASK; 515 dst = (struct rtree_node **)&((*dst)->data[index]); 516 node = *dst; 517 } 518 519 zone->blocks += 1; 520 *dst = block; 521 522 return 0; 523 } 524 525 static void free_zone_bm_rtree(struct mem_zone_bm_rtree *zone, 526 int clear_nosave_free); 527 528 /** 529 * create_zone_bm_rtree - Create a radix tree for one zone. 530 * 531 * Allocated the mem_zone_bm_rtree structure and initializes it. 532 * This function also allocated and builds the radix tree for the 533 * zone. 534 */ 535 static struct mem_zone_bm_rtree *create_zone_bm_rtree(gfp_t gfp_mask, 536 int safe_needed, 537 struct chain_allocator *ca, 538 unsigned long start, 539 unsigned long end) 540 { 541 struct mem_zone_bm_rtree *zone; 542 unsigned int i, nr_blocks; 543 unsigned long pages; 544 545 pages = end - start; 546 zone = chain_alloc(ca, sizeof(struct mem_zone_bm_rtree)); 547 if (!zone) 548 return NULL; 549 550 INIT_LIST_HEAD(&zone->nodes); 551 INIT_LIST_HEAD(&zone->leaves); 552 zone->start_pfn = start; 553 zone->end_pfn = end; 554 nr_blocks = DIV_ROUND_UP(pages, BM_BITS_PER_BLOCK); 555 556 for (i = 0; i < nr_blocks; i++) { 557 if (add_rtree_block(zone, gfp_mask, safe_needed, ca)) { 558 free_zone_bm_rtree(zone, PG_UNSAFE_CLEAR); 559 return NULL; 560 } 561 } 562 563 return zone; 564 } 565 566 /** 567 * free_zone_bm_rtree - Free the memory of the radix tree. 568 * 569 * Free all node pages of the radix tree. The mem_zone_bm_rtree 570 * structure itself is not freed here nor are the rtree_node 571 * structs. 572 */ 573 static void free_zone_bm_rtree(struct mem_zone_bm_rtree *zone, 574 int clear_nosave_free) 575 { 576 struct rtree_node *node; 577 578 list_for_each_entry(node, &zone->nodes, list) 579 free_image_page(node->data, clear_nosave_free); 580 581 list_for_each_entry(node, &zone->leaves, list) 582 free_image_page(node->data, clear_nosave_free); 583 } 584 585 static void memory_bm_position_reset(struct memory_bitmap *bm) 586 { 587 bm->cur.zone = list_entry(bm->zones.next, struct mem_zone_bm_rtree, 588 list); 589 bm->cur.node = list_entry(bm->cur.zone->leaves.next, 590 struct rtree_node, list); 591 bm->cur.node_pfn = 0; 592 bm->cur.node_bit = 0; 593 } 594 595 static void memory_bm_free(struct memory_bitmap *bm, int clear_nosave_free); 596 597 struct mem_extent { 598 struct list_head hook; 599 unsigned long start; 600 unsigned long end; 601 }; 602 603 /** 604 * free_mem_extents - Free a list of memory extents. 605 * @list: List of extents to free. 
606 */ 607 static void free_mem_extents(struct list_head *list) 608 { 609 struct mem_extent *ext, *aux; 610 611 list_for_each_entry_safe(ext, aux, list, hook) { 612 list_del(&ext->hook); 613 kfree(ext); 614 } 615 } 616 617 /** 618 * create_mem_extents - Create a list of memory extents. 619 * @list: List to put the extents into. 620 * @gfp_mask: Mask to use for memory allocations. 621 * 622 * The extents represent contiguous ranges of PFNs. 623 */ 624 static int create_mem_extents(struct list_head *list, gfp_t gfp_mask) 625 { 626 struct zone *zone; 627 628 INIT_LIST_HEAD(list); 629 630 for_each_populated_zone(zone) { 631 unsigned long zone_start, zone_end; 632 struct mem_extent *ext, *cur, *aux; 633 634 zone_start = zone->zone_start_pfn; 635 zone_end = zone_end_pfn(zone); 636 637 list_for_each_entry(ext, list, hook) 638 if (zone_start <= ext->end) 639 break; 640 641 if (&ext->hook == list || zone_end < ext->start) { 642 /* New extent is necessary */ 643 struct mem_extent *new_ext; 644 645 new_ext = kzalloc(sizeof(struct mem_extent), gfp_mask); 646 if (!new_ext) { 647 free_mem_extents(list); 648 return -ENOMEM; 649 } 650 new_ext->start = zone_start; 651 new_ext->end = zone_end; 652 list_add_tail(&new_ext->hook, &ext->hook); 653 continue; 654 } 655 656 /* Merge this zone's range of PFNs with the existing one */ 657 if (zone_start < ext->start) 658 ext->start = zone_start; 659 if (zone_end > ext->end) 660 ext->end = zone_end; 661 662 /* More merging may be possible */ 663 cur = ext; 664 list_for_each_entry_safe_continue(cur, aux, list, hook) { 665 if (zone_end < cur->start) 666 break; 667 if (zone_end < cur->end) 668 ext->end = cur->end; 669 list_del(&cur->hook); 670 kfree(cur); 671 } 672 } 673 674 return 0; 675 } 676 677 /** 678 * memory_bm_create - Allocate memory for a memory bitmap. 679 */ 680 static int memory_bm_create(struct memory_bitmap *bm, gfp_t gfp_mask, 681 int safe_needed) 682 { 683 struct chain_allocator ca; 684 struct list_head mem_extents; 685 struct mem_extent *ext; 686 int error; 687 688 chain_init(&ca, gfp_mask, safe_needed); 689 INIT_LIST_HEAD(&bm->zones); 690 691 error = create_mem_extents(&mem_extents, gfp_mask); 692 if (error) 693 return error; 694 695 list_for_each_entry(ext, &mem_extents, hook) { 696 struct mem_zone_bm_rtree *zone; 697 698 zone = create_zone_bm_rtree(gfp_mask, safe_needed, &ca, 699 ext->start, ext->end); 700 if (!zone) { 701 error = -ENOMEM; 702 goto Error; 703 } 704 list_add_tail(&zone->list, &bm->zones); 705 } 706 707 bm->p_list = ca.chain; 708 memory_bm_position_reset(bm); 709 Exit: 710 free_mem_extents(&mem_extents); 711 return error; 712 713 Error: 714 bm->p_list = ca.chain; 715 memory_bm_free(bm, PG_UNSAFE_CLEAR); 716 goto Exit; 717 } 718 719 /** 720 * memory_bm_free - Free memory occupied by the memory bitmap. 721 * @bm: Memory bitmap. 722 */ 723 static void memory_bm_free(struct memory_bitmap *bm, int clear_nosave_free) 724 { 725 struct mem_zone_bm_rtree *zone; 726 727 list_for_each_entry(zone, &bm->zones, list) 728 free_zone_bm_rtree(zone, clear_nosave_free); 729 730 free_list_of_pages(bm->p_list, clear_nosave_free); 731 732 INIT_LIST_HEAD(&bm->zones); 733 } 734 735 /** 736 * memory_bm_find_bit - Find the bit for a given PFN in a memory bitmap. 737 * 738 * Find the bit in memory bitmap @bm that corresponds to the given PFN. 739 * The cur.zone, cur.block and cur.node_pfn members of @bm are updated. 
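 *
 * On success, callers use the returned position directly; for instance,
 * memory_bm_set_bit() below simply does set_bit(bit, addr) with the values
 * filled in here.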
740 * 741 * Walk the radix tree to find the page containing the bit that represents @pfn 742 * and return the position of the bit in @addr and @bit_nr. 743 */ 744 static int memory_bm_find_bit(struct memory_bitmap *bm, unsigned long pfn, 745 void **addr, unsigned int *bit_nr) 746 { 747 struct mem_zone_bm_rtree *curr, *zone; 748 struct rtree_node *node; 749 int i, block_nr; 750 751 zone = bm->cur.zone; 752 753 if (pfn >= zone->start_pfn && pfn < zone->end_pfn) 754 goto zone_found; 755 756 zone = NULL; 757 758 /* Find the right zone */ 759 list_for_each_entry(curr, &bm->zones, list) { 760 if (pfn >= curr->start_pfn && pfn < curr->end_pfn) { 761 zone = curr; 762 break; 763 } 764 } 765 766 if (!zone) 767 return -EFAULT; 768 769 zone_found: 770 /* 771 * We have found the zone. Now walk the radix tree to find the leaf node 772 * for our PFN. 773 */ 774 775 /* 776 * If the zone we wish to scan is the current zone and the 777 * pfn falls into the current node then we do not need to walk 778 * the tree. 779 */ 780 node = bm->cur.node; 781 if (zone == bm->cur.zone && 782 ((pfn - zone->start_pfn) & ~BM_BLOCK_MASK) == bm->cur.node_pfn) 783 goto node_found; 784 785 node = zone->rtree; 786 block_nr = (pfn - zone->start_pfn) >> BM_BLOCK_SHIFT; 787 788 for (i = zone->levels; i > 0; i--) { 789 int index; 790 791 index = block_nr >> ((i - 1) * BM_RTREE_LEVEL_SHIFT); 792 index &= BM_RTREE_LEVEL_MASK; 793 BUG_ON(node->data[index] == 0); 794 node = (struct rtree_node *)node->data[index]; 795 } 796 797 node_found: 798 /* Update last position */ 799 bm->cur.zone = zone; 800 bm->cur.node = node; 801 bm->cur.node_pfn = (pfn - zone->start_pfn) & ~BM_BLOCK_MASK; 802 803 /* Set return values */ 804 *addr = node->data; 805 *bit_nr = (pfn - zone->start_pfn) & BM_BLOCK_MASK; 806 807 return 0; 808 } 809 810 static void memory_bm_set_bit(struct memory_bitmap *bm, unsigned long pfn) 811 { 812 void *addr; 813 unsigned int bit; 814 int error; 815 816 error = memory_bm_find_bit(bm, pfn, &addr, &bit); 817 BUG_ON(error); 818 set_bit(bit, addr); 819 } 820 821 static int mem_bm_set_bit_check(struct memory_bitmap *bm, unsigned long pfn) 822 { 823 void *addr; 824 unsigned int bit; 825 int error; 826 827 error = memory_bm_find_bit(bm, pfn, &addr, &bit); 828 if (!error) 829 set_bit(bit, addr); 830 831 return error; 832 } 833 834 static void memory_bm_clear_bit(struct memory_bitmap *bm, unsigned long pfn) 835 { 836 void *addr; 837 unsigned int bit; 838 int error; 839 840 error = memory_bm_find_bit(bm, pfn, &addr, &bit); 841 BUG_ON(error); 842 clear_bit(bit, addr); 843 } 844 845 static void memory_bm_clear_current(struct memory_bitmap *bm) 846 { 847 int bit; 848 849 bit = max(bm->cur.node_bit - 1, 0); 850 clear_bit(bit, bm->cur.node->data); 851 } 852 853 static int memory_bm_test_bit(struct memory_bitmap *bm, unsigned long pfn) 854 { 855 void *addr; 856 unsigned int bit; 857 int error; 858 859 error = memory_bm_find_bit(bm, pfn, &addr, &bit); 860 BUG_ON(error); 861 return test_bit(bit, addr); 862 } 863 864 static bool memory_bm_pfn_present(struct memory_bitmap *bm, unsigned long pfn) 865 { 866 void *addr; 867 unsigned int bit; 868 869 return !memory_bm_find_bit(bm, pfn, &addr, &bit); 870 } 871 872 /* 873 * rtree_next_node - Jump to the next leaf node. 874 * 875 * Set the position to the beginning of the next node in the 876 * memory bitmap. This is either the next node in the current 877 * zone's radix tree or the first node in the radix tree of the 878 * next zone. 879 * 880 * Return true if there is a next node, false otherwise. 
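 *
 * For reference, the callers below walk a whole bitmap roughly like this
 * (handle_pfn() is a placeholder, not a function in this file):
 *
 *	memory_bm_position_reset(bm);
 *	while ((pfn = memory_bm_next_pfn(bm)) != BM_END_OF_MAP)
 *		handle_pfn(pfn);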
881 */ 882 static bool rtree_next_node(struct memory_bitmap *bm) 883 { 884 if (!list_is_last(&bm->cur.node->list, &bm->cur.zone->leaves)) { 885 bm->cur.node = list_entry(bm->cur.node->list.next, 886 struct rtree_node, list); 887 bm->cur.node_pfn += BM_BITS_PER_BLOCK; 888 bm->cur.node_bit = 0; 889 touch_softlockup_watchdog(); 890 return true; 891 } 892 893 /* No more nodes, goto next zone */ 894 if (!list_is_last(&bm->cur.zone->list, &bm->zones)) { 895 bm->cur.zone = list_entry(bm->cur.zone->list.next, 896 struct mem_zone_bm_rtree, list); 897 bm->cur.node = list_entry(bm->cur.zone->leaves.next, 898 struct rtree_node, list); 899 bm->cur.node_pfn = 0; 900 bm->cur.node_bit = 0; 901 return true; 902 } 903 904 /* No more zones */ 905 return false; 906 } 907 908 /** 909 * memory_bm_next_pfn - Find the next set bit in a memory bitmap. 910 * @bm: Memory bitmap. 911 * 912 * Starting from the last returned position this function searches for the next 913 * set bit in @bm and returns the PFN represented by it. If no more bits are 914 * set, BM_END_OF_MAP is returned. 915 * 916 * It is required to run memory_bm_position_reset() before the first call to 917 * this function for the given memory bitmap. 918 */ 919 static unsigned long memory_bm_next_pfn(struct memory_bitmap *bm) 920 { 921 unsigned long bits, pfn, pages; 922 int bit; 923 924 do { 925 pages = bm->cur.zone->end_pfn - bm->cur.zone->start_pfn; 926 bits = min(pages - bm->cur.node_pfn, BM_BITS_PER_BLOCK); 927 bit = find_next_bit(bm->cur.node->data, bits, 928 bm->cur.node_bit); 929 if (bit < bits) { 930 pfn = bm->cur.zone->start_pfn + bm->cur.node_pfn + bit; 931 bm->cur.node_bit = bit + 1; 932 return pfn; 933 } 934 } while (rtree_next_node(bm)); 935 936 return BM_END_OF_MAP; 937 } 938 939 /* 940 * This structure represents a range of page frames the contents of which 941 * should not be saved during hibernation. 942 */ 943 struct nosave_region { 944 struct list_head list; 945 unsigned long start_pfn; 946 unsigned long end_pfn; 947 }; 948 949 static LIST_HEAD(nosave_regions); 950 951 static void recycle_zone_bm_rtree(struct mem_zone_bm_rtree *zone) 952 { 953 struct rtree_node *node; 954 955 list_for_each_entry(node, &zone->nodes, list) 956 recycle_safe_page(node->data); 957 958 list_for_each_entry(node, &zone->leaves, list) 959 recycle_safe_page(node->data); 960 } 961 962 static void memory_bm_recycle(struct memory_bitmap *bm) 963 { 964 struct mem_zone_bm_rtree *zone; 965 struct linked_page *p_list; 966 967 list_for_each_entry(zone, &bm->zones, list) 968 recycle_zone_bm_rtree(zone); 969 970 p_list = bm->p_list; 971 while (p_list) { 972 struct linked_page *lp = p_list; 973 974 p_list = lp->next; 975 recycle_safe_page(lp); 976 } 977 } 978 979 /** 980 * register_nosave_region - Register a region of unsaveable memory. 981 * 982 * Register a range of page frames the contents of which should not be saved 983 * during hibernation (to be used in the early initialization code). 
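 *
 * Note that @end_pfn is exclusive (the first page frame after the region).
 * A minimal usage sketch (the names nosave_start and nosave_end are
 * illustrative physical addresses, not taken from this file):
 *
 *	register_nosave_region(PHYS_PFN(nosave_start), PHYS_PFN(nosave_end));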
 */
void __init register_nosave_region(unsigned long start_pfn, unsigned long end_pfn)
{
	struct nosave_region *region;

	if (start_pfn >= end_pfn)
		return;

	if (!list_empty(&nosave_regions)) {
		/* Try to extend the previous region (they should be sorted) */
		region = list_entry(nosave_regions.prev,
				    struct nosave_region, list);
		if (region->end_pfn == start_pfn) {
			region->end_pfn = end_pfn;
			goto Report;
		}
	}
	/* This allocation cannot fail */
	region = memblock_alloc(sizeof(struct nosave_region),
				SMP_CACHE_BYTES);
	if (!region)
		panic("%s: Failed to allocate %zu bytes\n", __func__,
		      sizeof(struct nosave_region));
	region->start_pfn = start_pfn;
	region->end_pfn = end_pfn;
	list_add_tail(&region->list, &nosave_regions);
 Report:
	pr_info("Registered nosave memory: [mem %#010llx-%#010llx]\n",
		(unsigned long long) start_pfn << PAGE_SHIFT,
		((unsigned long long) end_pfn << PAGE_SHIFT) - 1);
}

/*
 * Set bits in this map correspond to the page frames the contents of which
 * should not be saved during the suspend.
 */
static struct memory_bitmap *forbidden_pages_map;

/* Set bits in this map correspond to free page frames. */
static struct memory_bitmap *free_pages_map;

/*
 * Each page frame allocated for creating the image is marked by setting the
 * corresponding bits in forbidden_pages_map and free_pages_map simultaneously.
 */

void swsusp_set_page_free(struct page *page)
{
	if (free_pages_map)
		memory_bm_set_bit(free_pages_map, page_to_pfn(page));
}

static int swsusp_page_is_free(struct page *page)
{
	return free_pages_map ?
		memory_bm_test_bit(free_pages_map, page_to_pfn(page)) : 0;
}

void swsusp_unset_page_free(struct page *page)
{
	if (free_pages_map)
		memory_bm_clear_bit(free_pages_map, page_to_pfn(page));
}

static void swsusp_set_page_forbidden(struct page *page)
{
	if (forbidden_pages_map)
		memory_bm_set_bit(forbidden_pages_map, page_to_pfn(page));
}

int swsusp_page_is_forbidden(struct page *page)
{
	return forbidden_pages_map ?
		memory_bm_test_bit(forbidden_pages_map, page_to_pfn(page)) : 0;
}

static void swsusp_unset_page_forbidden(struct page *page)
{
	if (forbidden_pages_map)
		memory_bm_clear_bit(forbidden_pages_map, page_to_pfn(page));
}

/**
 * mark_nosave_pages - Mark pages that should not be saved.
 * @bm: Memory bitmap.
 *
 * Set the bits in @bm that correspond to the page frames the contents of which
 * should not be saved.
 */
static void mark_nosave_pages(struct memory_bitmap *bm)
{
	struct nosave_region *region;

	if (list_empty(&nosave_regions))
		return;

	list_for_each_entry(region, &nosave_regions, list) {
		unsigned long pfn;

		pr_debug("Marking nosave pages: [mem %#010llx-%#010llx]\n",
			 (unsigned long long) region->start_pfn << PAGE_SHIFT,
			 ((unsigned long long) region->end_pfn << PAGE_SHIFT)
				- 1);

		for (pfn = region->start_pfn; pfn < region->end_pfn; pfn++)
			if (pfn_valid(pfn)) {
				/*
				 * It is safe to ignore the result of
				 * mem_bm_set_bit_check() here, since we won't
				 * touch the PFNs for which the error is
				 * returned anyway.
1095 */ 1096 mem_bm_set_bit_check(bm, pfn); 1097 } 1098 } 1099 } 1100 1101 /** 1102 * create_basic_memory_bitmaps - Create bitmaps to hold basic page information. 1103 * 1104 * Create bitmaps needed for marking page frames that should not be saved and 1105 * free page frames. The forbidden_pages_map and free_pages_map pointers are 1106 * only modified if everything goes well, because we don't want the bits to be 1107 * touched before both bitmaps are set up. 1108 */ 1109 int create_basic_memory_bitmaps(void) 1110 { 1111 struct memory_bitmap *bm1, *bm2; 1112 int error = 0; 1113 1114 if (forbidden_pages_map && free_pages_map) 1115 return 0; 1116 else 1117 BUG_ON(forbidden_pages_map || free_pages_map); 1118 1119 bm1 = kzalloc(sizeof(struct memory_bitmap), GFP_KERNEL); 1120 if (!bm1) 1121 return -ENOMEM; 1122 1123 error = memory_bm_create(bm1, GFP_KERNEL, PG_ANY); 1124 if (error) 1125 goto Free_first_object; 1126 1127 bm2 = kzalloc(sizeof(struct memory_bitmap), GFP_KERNEL); 1128 if (!bm2) 1129 goto Free_first_bitmap; 1130 1131 error = memory_bm_create(bm2, GFP_KERNEL, PG_ANY); 1132 if (error) 1133 goto Free_second_object; 1134 1135 forbidden_pages_map = bm1; 1136 free_pages_map = bm2; 1137 mark_nosave_pages(forbidden_pages_map); 1138 1139 pr_debug("Basic memory bitmaps created\n"); 1140 1141 return 0; 1142 1143 Free_second_object: 1144 kfree(bm2); 1145 Free_first_bitmap: 1146 memory_bm_free(bm1, PG_UNSAFE_CLEAR); 1147 Free_first_object: 1148 kfree(bm1); 1149 return -ENOMEM; 1150 } 1151 1152 /** 1153 * free_basic_memory_bitmaps - Free memory bitmaps holding basic information. 1154 * 1155 * Free memory bitmaps allocated by create_basic_memory_bitmaps(). The 1156 * auxiliary pointers are necessary so that the bitmaps themselves are not 1157 * referred to while they are being freed. 1158 */ 1159 void free_basic_memory_bitmaps(void) 1160 { 1161 struct memory_bitmap *bm1, *bm2; 1162 1163 if (WARN_ON(!(forbidden_pages_map && free_pages_map))) 1164 return; 1165 1166 bm1 = forbidden_pages_map; 1167 bm2 = free_pages_map; 1168 forbidden_pages_map = NULL; 1169 free_pages_map = NULL; 1170 memory_bm_free(bm1, PG_UNSAFE_CLEAR); 1171 kfree(bm1); 1172 memory_bm_free(bm2, PG_UNSAFE_CLEAR); 1173 kfree(bm2); 1174 1175 pr_debug("Basic memory bitmaps freed\n"); 1176 } 1177 1178 static void clear_or_poison_free_page(struct page *page) 1179 { 1180 if (page_poisoning_enabled_static()) 1181 __kernel_poison_pages(page, 1); 1182 else if (want_init_on_free()) 1183 clear_highpage(page); 1184 } 1185 1186 void clear_or_poison_free_pages(void) 1187 { 1188 struct memory_bitmap *bm = free_pages_map; 1189 unsigned long pfn; 1190 1191 if (WARN_ON(!(free_pages_map))) 1192 return; 1193 1194 if (page_poisoning_enabled() || want_init_on_free()) { 1195 memory_bm_position_reset(bm); 1196 pfn = memory_bm_next_pfn(bm); 1197 while (pfn != BM_END_OF_MAP) { 1198 if (pfn_valid(pfn)) 1199 clear_or_poison_free_page(pfn_to_page(pfn)); 1200 1201 pfn = memory_bm_next_pfn(bm); 1202 } 1203 memory_bm_position_reset(bm); 1204 pr_info("free pages cleared after restore\n"); 1205 } 1206 } 1207 1208 /** 1209 * snapshot_additional_pages - Estimate the number of extra pages needed. 1210 * @zone: Memory zone to carry out the computation for. 1211 * 1212 * Estimate the number of additional pages needed for setting up a hibernation 1213 * image data structures for @zone (usually, the returned value is greater than 1214 * the exact number). 
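 *
 * Worked example (illustrative, assuming a 64-bit kernel with 4 KiB pages):
 * a zone spanning 4 GiB has 1048576 page frames, which needs 32 leaf bitmap
 * pages, one page for the rtree_node bookkeeping structures and one inner
 * rtree node, i.e. 34 pages per bitmap; the function returns twice that (68),
 * one set for each of the two memory bitmaps used while creating the image.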
1215 */ 1216 unsigned int snapshot_additional_pages(struct zone *zone) 1217 { 1218 unsigned int rtree, nodes; 1219 1220 rtree = nodes = DIV_ROUND_UP(zone->spanned_pages, BM_BITS_PER_BLOCK); 1221 rtree += DIV_ROUND_UP(rtree * sizeof(struct rtree_node), 1222 LINKED_PAGE_DATA_SIZE); 1223 while (nodes > 1) { 1224 nodes = DIV_ROUND_UP(nodes, BM_ENTRIES_PER_LEVEL); 1225 rtree += nodes; 1226 } 1227 1228 return 2 * rtree; 1229 } 1230 1231 /* 1232 * Touch the watchdog for every WD_PAGE_COUNT pages. 1233 */ 1234 #define WD_PAGE_COUNT (128*1024) 1235 1236 static void mark_free_pages(struct zone *zone) 1237 { 1238 unsigned long pfn, max_zone_pfn, page_count = WD_PAGE_COUNT; 1239 unsigned long flags; 1240 unsigned int order, t; 1241 struct page *page; 1242 1243 if (zone_is_empty(zone)) 1244 return; 1245 1246 spin_lock_irqsave(&zone->lock, flags); 1247 1248 max_zone_pfn = zone_end_pfn(zone); 1249 for (pfn = zone->zone_start_pfn; pfn < max_zone_pfn; pfn++) 1250 if (pfn_valid(pfn)) { 1251 page = pfn_to_page(pfn); 1252 1253 if (!--page_count) { 1254 touch_nmi_watchdog(); 1255 page_count = WD_PAGE_COUNT; 1256 } 1257 1258 if (page_zone(page) != zone) 1259 continue; 1260 1261 if (!swsusp_page_is_forbidden(page)) 1262 swsusp_unset_page_free(page); 1263 } 1264 1265 for_each_migratetype_order(order, t) { 1266 list_for_each_entry(page, 1267 &zone->free_area[order].free_list[t], buddy_list) { 1268 unsigned long i; 1269 1270 pfn = page_to_pfn(page); 1271 for (i = 0; i < (1UL << order); i++) { 1272 if (!--page_count) { 1273 touch_nmi_watchdog(); 1274 page_count = WD_PAGE_COUNT; 1275 } 1276 swsusp_set_page_free(pfn_to_page(pfn + i)); 1277 } 1278 } 1279 } 1280 spin_unlock_irqrestore(&zone->lock, flags); 1281 } 1282 1283 #ifdef CONFIG_HIGHMEM 1284 /** 1285 * count_free_highmem_pages - Compute the total number of free highmem pages. 1286 * 1287 * The returned number is system-wide. 1288 */ 1289 static unsigned int count_free_highmem_pages(void) 1290 { 1291 struct zone *zone; 1292 unsigned int cnt = 0; 1293 1294 for_each_populated_zone(zone) 1295 if (is_highmem(zone)) 1296 cnt += zone_page_state(zone, NR_FREE_PAGES); 1297 1298 return cnt; 1299 } 1300 1301 /** 1302 * saveable_highmem_page - Check if a highmem page is saveable. 1303 * 1304 * Determine whether a highmem page should be included in a hibernation image. 1305 * 1306 * We should save the page if it isn't Nosave or NosaveFree, or Reserved, 1307 * and it isn't part of a free chunk of pages. 1308 */ 1309 static struct page *saveable_highmem_page(struct zone *zone, unsigned long pfn) 1310 { 1311 struct page *page; 1312 1313 if (!pfn_valid(pfn)) 1314 return NULL; 1315 1316 page = pfn_to_online_page(pfn); 1317 if (!page || page_zone(page) != zone) 1318 return NULL; 1319 1320 BUG_ON(!PageHighMem(page)); 1321 1322 if (swsusp_page_is_forbidden(page) || swsusp_page_is_free(page)) 1323 return NULL; 1324 1325 if (PageReserved(page) || PageOffline(page)) 1326 return NULL; 1327 1328 if (page_is_guard(page)) 1329 return NULL; 1330 1331 return page; 1332 } 1333 1334 /** 1335 * count_highmem_pages - Compute the total number of saveable highmem pages. 
1336 */ 1337 static unsigned int count_highmem_pages(void) 1338 { 1339 struct zone *zone; 1340 unsigned int n = 0; 1341 1342 for_each_populated_zone(zone) { 1343 unsigned long pfn, max_zone_pfn; 1344 1345 if (!is_highmem(zone)) 1346 continue; 1347 1348 mark_free_pages(zone); 1349 max_zone_pfn = zone_end_pfn(zone); 1350 for (pfn = zone->zone_start_pfn; pfn < max_zone_pfn; pfn++) 1351 if (saveable_highmem_page(zone, pfn)) 1352 n++; 1353 } 1354 return n; 1355 } 1356 #else 1357 static inline void *saveable_highmem_page(struct zone *z, unsigned long p) 1358 { 1359 return NULL; 1360 } 1361 #endif /* CONFIG_HIGHMEM */ 1362 1363 /** 1364 * saveable_page - Check if the given page is saveable. 1365 * 1366 * Determine whether a non-highmem page should be included in a hibernation 1367 * image. 1368 * 1369 * We should save the page if it isn't Nosave, and is not in the range 1370 * of pages statically defined as 'unsaveable', and it isn't part of 1371 * a free chunk of pages. 1372 */ 1373 static struct page *saveable_page(struct zone *zone, unsigned long pfn) 1374 { 1375 struct page *page; 1376 1377 if (!pfn_valid(pfn)) 1378 return NULL; 1379 1380 page = pfn_to_online_page(pfn); 1381 if (!page || page_zone(page) != zone) 1382 return NULL; 1383 1384 BUG_ON(PageHighMem(page)); 1385 1386 if (swsusp_page_is_forbidden(page) || swsusp_page_is_free(page)) 1387 return NULL; 1388 1389 if (PageOffline(page)) 1390 return NULL; 1391 1392 if (PageReserved(page) 1393 && (!kernel_page_present(page) || pfn_is_nosave(pfn))) 1394 return NULL; 1395 1396 if (page_is_guard(page)) 1397 return NULL; 1398 1399 return page; 1400 } 1401 1402 /** 1403 * count_data_pages - Compute the total number of saveable non-highmem pages. 1404 */ 1405 static unsigned int count_data_pages(void) 1406 { 1407 struct zone *zone; 1408 unsigned long pfn, max_zone_pfn; 1409 unsigned int n = 0; 1410 1411 for_each_populated_zone(zone) { 1412 if (is_highmem(zone)) 1413 continue; 1414 1415 mark_free_pages(zone); 1416 max_zone_pfn = zone_end_pfn(zone); 1417 for (pfn = zone->zone_start_pfn; pfn < max_zone_pfn; pfn++) 1418 if (saveable_page(zone, pfn)) 1419 n++; 1420 } 1421 return n; 1422 } 1423 1424 /* 1425 * This is needed, because copy_page and memcpy are not usable for copying 1426 * task structs. 1427 */ 1428 static inline void do_copy_page(long *dst, long *src) 1429 { 1430 int n; 1431 1432 for (n = PAGE_SIZE / sizeof(long); n; n--) 1433 *dst++ = *src++; 1434 } 1435 1436 /** 1437 * safe_copy_page - Copy a page in a safe way. 1438 * 1439 * Check if the page we are going to copy is marked as present in the kernel 1440 * page tables. This always is the case if CONFIG_DEBUG_PAGEALLOC or 1441 * CONFIG_ARCH_HAS_SET_DIRECT_MAP is not set. In that case kernel_page_present() 1442 * always returns 'true'. 1443 */ 1444 static void safe_copy_page(void *dst, struct page *s_page) 1445 { 1446 if (kernel_page_present(s_page)) { 1447 do_copy_page(dst, page_address(s_page)); 1448 } else { 1449 hibernate_map_page(s_page); 1450 do_copy_page(dst, page_address(s_page)); 1451 hibernate_unmap_page(s_page); 1452 } 1453 } 1454 1455 #ifdef CONFIG_HIGHMEM 1456 static inline struct page *page_is_saveable(struct zone *zone, unsigned long pfn) 1457 { 1458 return is_highmem(zone) ? 
1459 saveable_highmem_page(zone, pfn) : saveable_page(zone, pfn); 1460 } 1461 1462 static void copy_data_page(unsigned long dst_pfn, unsigned long src_pfn) 1463 { 1464 struct page *s_page, *d_page; 1465 void *src, *dst; 1466 1467 s_page = pfn_to_page(src_pfn); 1468 d_page = pfn_to_page(dst_pfn); 1469 if (PageHighMem(s_page)) { 1470 src = kmap_atomic(s_page); 1471 dst = kmap_atomic(d_page); 1472 do_copy_page(dst, src); 1473 kunmap_atomic(dst); 1474 kunmap_atomic(src); 1475 } else { 1476 if (PageHighMem(d_page)) { 1477 /* 1478 * The page pointed to by src may contain some kernel 1479 * data modified by kmap_atomic() 1480 */ 1481 safe_copy_page(buffer, s_page); 1482 dst = kmap_atomic(d_page); 1483 copy_page(dst, buffer); 1484 kunmap_atomic(dst); 1485 } else { 1486 safe_copy_page(page_address(d_page), s_page); 1487 } 1488 } 1489 } 1490 #else 1491 #define page_is_saveable(zone, pfn) saveable_page(zone, pfn) 1492 1493 static inline void copy_data_page(unsigned long dst_pfn, unsigned long src_pfn) 1494 { 1495 safe_copy_page(page_address(pfn_to_page(dst_pfn)), 1496 pfn_to_page(src_pfn)); 1497 } 1498 #endif /* CONFIG_HIGHMEM */ 1499 1500 static void copy_data_pages(struct memory_bitmap *copy_bm, 1501 struct memory_bitmap *orig_bm) 1502 { 1503 struct zone *zone; 1504 unsigned long pfn; 1505 1506 for_each_populated_zone(zone) { 1507 unsigned long max_zone_pfn; 1508 1509 mark_free_pages(zone); 1510 max_zone_pfn = zone_end_pfn(zone); 1511 for (pfn = zone->zone_start_pfn; pfn < max_zone_pfn; pfn++) 1512 if (page_is_saveable(zone, pfn)) 1513 memory_bm_set_bit(orig_bm, pfn); 1514 } 1515 memory_bm_position_reset(orig_bm); 1516 memory_bm_position_reset(copy_bm); 1517 for(;;) { 1518 pfn = memory_bm_next_pfn(orig_bm); 1519 if (unlikely(pfn == BM_END_OF_MAP)) 1520 break; 1521 copy_data_page(memory_bm_next_pfn(copy_bm), pfn); 1522 } 1523 } 1524 1525 /* Total number of image pages */ 1526 static unsigned int nr_copy_pages; 1527 /* Number of pages needed for saving the original pfns of the image pages */ 1528 static unsigned int nr_meta_pages; 1529 /* 1530 * Numbers of normal and highmem page frames allocated for hibernation image 1531 * before suspending devices. 1532 */ 1533 static unsigned int alloc_normal, alloc_highmem; 1534 /* 1535 * Memory bitmap used for marking saveable pages (during hibernation) or 1536 * hibernation image pages (during restore) 1537 */ 1538 static struct memory_bitmap orig_bm; 1539 /* 1540 * Memory bitmap used during hibernation for marking allocated page frames that 1541 * will contain copies of saveable pages. During restore it is initially used 1542 * for marking hibernation image pages, but then the set bits from it are 1543 * duplicated in @orig_bm and it is released. On highmem systems it is next 1544 * used for marking "safe" highmem pages, but it has to be reinitialized for 1545 * this purpose. 1546 */ 1547 static struct memory_bitmap copy_bm; 1548 1549 /** 1550 * swsusp_free - Free pages allocated for hibernation image. 1551 * 1552 * Image pages are allocated before snapshot creation, so they need to be 1553 * released after resume. 1554 */ 1555 void swsusp_free(void) 1556 { 1557 unsigned long fb_pfn, fr_pfn; 1558 1559 if (!forbidden_pages_map || !free_pages_map) 1560 goto out; 1561 1562 memory_bm_position_reset(forbidden_pages_map); 1563 memory_bm_position_reset(free_pages_map); 1564 1565 loop: 1566 fr_pfn = memory_bm_next_pfn(free_pages_map); 1567 fb_pfn = memory_bm_next_pfn(forbidden_pages_map); 1568 1569 /* 1570 * Find the next bit set in both bitmaps. 
	 * This is guaranteed to
	 * terminate when fb_pfn == fr_pfn == BM_END_OF_MAP.
	 */
	do {
		if (fb_pfn < fr_pfn)
			fb_pfn = memory_bm_next_pfn(forbidden_pages_map);
		if (fr_pfn < fb_pfn)
			fr_pfn = memory_bm_next_pfn(free_pages_map);
	} while (fb_pfn != fr_pfn);

	if (fr_pfn != BM_END_OF_MAP && pfn_valid(fr_pfn)) {
		struct page *page = pfn_to_page(fr_pfn);

		memory_bm_clear_current(forbidden_pages_map);
		memory_bm_clear_current(free_pages_map);
		hibernate_restore_unprotect_page(page_address(page));
		__free_page(page);
		goto loop;
	}

 out:
	nr_copy_pages = 0;
	nr_meta_pages = 0;
	restore_pblist = NULL;
	buffer = NULL;
	alloc_normal = 0;
	alloc_highmem = 0;
	hibernate_restore_protection_end();
}

/* Helper functions used for the shrinking of memory. */

#define GFP_IMAGE	(GFP_KERNEL | __GFP_NOWARN)

/**
 * preallocate_image_pages - Allocate a number of pages for hibernation image.
 * @nr_pages: Number of page frames to allocate.
 * @mask: GFP flags to use for the allocation.
 *
 * Return value: Number of page frames actually allocated.
 */
static unsigned long preallocate_image_pages(unsigned long nr_pages, gfp_t mask)
{
	unsigned long nr_alloc = 0;

	while (nr_pages > 0) {
		struct page *page;

		page = alloc_image_page(mask);
		if (!page)
			break;
		memory_bm_set_bit(&copy_bm, page_to_pfn(page));
		if (PageHighMem(page))
			alloc_highmem++;
		else
			alloc_normal++;
		nr_pages--;
		nr_alloc++;
	}

	return nr_alloc;
}

static unsigned long preallocate_image_memory(unsigned long nr_pages,
					      unsigned long avail_normal)
{
	unsigned long alloc;

	if (avail_normal <= alloc_normal)
		return 0;

	alloc = avail_normal - alloc_normal;
	if (nr_pages < alloc)
		alloc = nr_pages;

	return preallocate_image_pages(alloc, GFP_IMAGE);
}

#ifdef CONFIG_HIGHMEM
static unsigned long preallocate_image_highmem(unsigned long nr_pages)
{
	return preallocate_image_pages(nr_pages, GFP_IMAGE | __GFP_HIGHMEM);
}

/**
 * __fraction - Compute (an approximation of) x * (multiplier / base).
 */
static unsigned long __fraction(u64 x, u64 multiplier, u64 base)
{
	return div64_u64(x * multiplier, base);
}

static unsigned long preallocate_highmem_fraction(unsigned long nr_pages,
						  unsigned long highmem,
						  unsigned long total)
{
	unsigned long alloc = __fraction(nr_pages, highmem, total);

	return preallocate_image_pages(alloc, GFP_IMAGE | __GFP_HIGHMEM);
}
#else /* CONFIG_HIGHMEM */
static inline unsigned long preallocate_image_highmem(unsigned long nr_pages)
{
	return 0;
}

static inline unsigned long preallocate_highmem_fraction(unsigned long nr_pages,
							  unsigned long highmem,
							  unsigned long total)
{
	return 0;
}
#endif /* CONFIG_HIGHMEM */

/**
 * free_unnecessary_pages - Release preallocated pages not needed for the image.
 */
static unsigned long free_unnecessary_pages(void)
{
	unsigned long save, to_free_normal, to_free_highmem, free;

	save = count_data_pages();
	if (alloc_normal >= save) {
		to_free_normal = alloc_normal - save;
		save = 0;
	} else {
		to_free_normal = 0;
		save -= alloc_normal;
	}
	save += count_highmem_pages();
	if (alloc_highmem >= save) {
		to_free_highmem = alloc_highmem - save;
	} else {
		to_free_highmem = 0;
		save -= alloc_highmem;
		if (to_free_normal > save)
			to_free_normal -= save;
		else
			to_free_normal = 0;
	}
	free = to_free_normal + to_free_highmem;

	memory_bm_position_reset(&copy_bm);

	while (to_free_normal > 0 || to_free_highmem > 0) {
		unsigned long pfn = memory_bm_next_pfn(&copy_bm);
		struct page *page = pfn_to_page(pfn);

		if (PageHighMem(page)) {
			if (!to_free_highmem)
				continue;
			to_free_highmem--;
			alloc_highmem--;
		} else {
			if (!to_free_normal)
				continue;
			to_free_normal--;
			alloc_normal--;
		}
		memory_bm_clear_bit(&copy_bm, pfn);
		swsusp_unset_page_forbidden(page);
		swsusp_unset_page_free(page);
		__free_page(page);
	}

	return free;
}

/**
 * minimum_image_size - Estimate the minimum acceptable size of an image.
 * @saveable: Number of saveable pages in the system.
 *
 * We do not want to try too hard to free memory, so estimate the minimum
 * acceptable size of a hibernation image to use as the lower limit for
 * preallocating memory.
 *
 * We assume that the minimum image size should be proportional to
 *
 * [number of saveable pages] - [number of pages that can be freed in theory]
 *
 * where the second term is the sum of (1) reclaimable slab pages, (2) active
 * and (3) inactive anonymous pages, (4) active and (5) inactive file pages.
 */
static unsigned long minimum_image_size(unsigned long saveable)
{
	unsigned long size;

	size = global_node_page_state_pages(NR_SLAB_RECLAIMABLE_B)
		+ global_node_page_state(NR_ACTIVE_ANON)
		+ global_node_page_state(NR_INACTIVE_ANON)
		+ global_node_page_state(NR_ACTIVE_FILE)
		+ global_node_page_state(NR_INACTIVE_FILE);

	return saveable <= size ? 0 : saveable - size;
}

/**
 * hibernate_preallocate_memory - Preallocate memory for hibernation image.
 *
 * To create a hibernation image it is necessary to make a copy of every page
 * frame in use. We also need a number of page frames to be free during
 * hibernation for allocations made while saving the image and for device
 * drivers, in case they need to allocate memory from their hibernation
 * callbacks (these two numbers are given by PAGES_FOR_IO, which is a rough
 * estimate, and reserved_size divided by PAGE_SIZE, which is tunable through
 * /sys/power/reserved_size, respectively). To make this happen, we compute the
 * total number of available page frames and allocate at least
 *
 * ([page frames total] - PAGES_FOR_IO - [metadata pages]) / 2
 *  - 2 * DIV_ROUND_UP(reserved_size, PAGE_SIZE)
 *
 * of them, which corresponds to the maximum size of a hibernation image.
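 *
 * Worked example (purely illustrative numbers): with about 2000000 usable
 * page frames (roughly 8 GiB with 4 KiB pages) and only a few thousand pages
 * taken by PAGES_FOR_IO, metadata and reserved_size, the formula above gives
 * a maximum image size a little below 1000000 page frames, i.e. slightly
 * less than half of RAM.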
 *
 * If image_size is set below the number following from the above formula,
 * the preallocation of memory is continued until the total number of saveable
 * pages in the system is below the requested image size or the minimum
 * acceptable image size returned by minimum_image_size(), whichever is greater.
 */
int hibernate_preallocate_memory(void)
{
	struct zone *zone;
	unsigned long saveable, size, max_size, count, highmem, pages = 0;
	unsigned long alloc, save_highmem, pages_highmem, avail_normal;
	ktime_t start, stop;
	int error;

	pr_info("Preallocating image memory\n");
	start = ktime_get();

	error = memory_bm_create(&orig_bm, GFP_IMAGE, PG_ANY);
	if (error) {
		pr_err("Cannot allocate original bitmap\n");
		goto err_out;
	}

	error = memory_bm_create(&copy_bm, GFP_IMAGE, PG_ANY);
	if (error) {
		pr_err("Cannot allocate copy bitmap\n");
		goto err_out;
	}

	alloc_normal = 0;
	alloc_highmem = 0;

	/* Count the number of saveable data pages. */
	save_highmem = count_highmem_pages();
	saveable = count_data_pages();

	/*
	 * Compute the total number of page frames we can use (count) and the
	 * number of pages needed for image metadata (size).
	 */
	count = saveable;
	saveable += save_highmem;
	highmem = save_highmem;
	size = 0;
	for_each_populated_zone(zone) {
		size += snapshot_additional_pages(zone);
		if (is_highmem(zone))
			highmem += zone_page_state(zone, NR_FREE_PAGES);
		else
			count += zone_page_state(zone, NR_FREE_PAGES);
	}
	avail_normal = count;
	count += highmem;
	count -= totalreserve_pages;

	/* Compute the maximum number of saveable pages to leave in memory. */
	max_size = (count - (size + PAGES_FOR_IO)) / 2
			- 2 * DIV_ROUND_UP(reserved_size, PAGE_SIZE);
	/* Compute the desired number of image pages specified by image_size. */
	size = DIV_ROUND_UP(image_size, PAGE_SIZE);
	if (size > max_size)
		size = max_size;
	/*
	 * If the desired number of image pages is at least as large as the
	 * current number of saveable pages in memory, allocate page frames for
	 * the image and we're done.
	 */
	if (size >= saveable) {
		pages = preallocate_image_highmem(save_highmem);
		pages += preallocate_image_memory(saveable - pages, avail_normal);
		goto out;
	}

	/* Estimate the minimum size of the image. */
	pages = minimum_image_size(saveable);
	/*
	 * To avoid excessive pressure on the normal zone, leave room in it to
	 * accommodate an image of the minimum size (unless it's already too
	 * small, in which case don't preallocate pages from it at all).
	 */
	if (avail_normal > pages)
		avail_normal -= pages;
	else
		avail_normal = 0;
	if (size < pages)
		size = min_t(unsigned long, pages, max_size);

	/*
	 * Let the memory management subsystem know that we're going to need a
	 * large number of page frames to allocate and make it free some memory.
	 * NOTE: If this is not done, performance will be hurt badly in some
	 * test cases.
	 */
	shrink_all_memory(saveable - size);

	/*
	 * The number of saveable pages in memory was too high, so apply some
	 * pressure to decrease it. First, make room for the largest possible
	 * image and fail if that doesn't work.
	 * Next, try to decrease the size
	 * of the image as much as indicated by 'size' using allocations from
	 * highmem and non-highmem zones separately.
	 */
	pages_highmem = preallocate_image_highmem(highmem / 2);
	alloc = count - max_size;
	if (alloc > pages_highmem)
		alloc -= pages_highmem;
	else
		alloc = 0;
	pages = preallocate_image_memory(alloc, avail_normal);
	if (pages < alloc) {
		/* We have exhausted non-highmem pages, try highmem. */
		alloc -= pages;
		pages += pages_highmem;
		pages_highmem = preallocate_image_highmem(alloc);
		if (pages_highmem < alloc) {
			pr_err("Image allocation is %lu pages short\n",
				alloc - pages_highmem);
			goto err_out;
		}
		pages += pages_highmem;
		/*
		 * size is the desired number of saveable pages to leave in
		 * memory, so try to preallocate (all memory - size) pages.
		 */
		alloc = (count - pages) - size;
		pages += preallocate_image_highmem(alloc);
	} else {
		/*
		 * There are approximately max_size saveable pages at this point
		 * and we want to reduce this number down to size.
		 */
		alloc = max_size - size;
		size = preallocate_highmem_fraction(alloc, highmem, count);
		pages_highmem += size;
		alloc -= size;
		size = preallocate_image_memory(alloc, avail_normal);
		pages_highmem += preallocate_image_highmem(alloc - size);
		pages += pages_highmem + size;
	}

	/*
	 * We only need as many page frames for the image as there are saveable
	 * pages in memory, but we have allocated more. Release the excessive
	 * ones now.
	 */
	pages -= free_unnecessary_pages();

 out:
	stop = ktime_get();
	pr_info("Allocated %lu pages for snapshot\n", pages);
	swsusp_show_speed(start, stop, pages, "Allocated");

	return 0;

 err_out:
	swsusp_free();
	return -ENOMEM;
}

#ifdef CONFIG_HIGHMEM
/**
 * count_pages_for_highmem - Count non-highmem pages needed for copying highmem.
 *
 * Compute the number of non-highmem pages that will be necessary for creating
 * copies of highmem pages.
 */
static unsigned int count_pages_for_highmem(unsigned int nr_highmem)
{
	unsigned int free_highmem = count_free_highmem_pages() + alloc_highmem;

	if (free_highmem >= nr_highmem)
		nr_highmem = 0;
	else
		nr_highmem -= free_highmem;

	return nr_highmem;
}
#else
static unsigned int count_pages_for_highmem(unsigned int nr_highmem) { return 0; }
#endif /* CONFIG_HIGHMEM */

/**
 * enough_free_mem - Check if there is enough free memory for the image.
 */
static int enough_free_mem(unsigned int nr_pages, unsigned int nr_highmem)
{
	struct zone *zone;
	unsigned int free = alloc_normal;

	for_each_populated_zone(zone)
		if (!is_highmem(zone))
			free += zone_page_state(zone, NR_FREE_PAGES);

	nr_pages += count_pages_for_highmem(nr_highmem);
	pr_debug("Normal pages needed: %u + %u, available pages: %u\n",
		 nr_pages, PAGES_FOR_IO, free);

	return free > nr_pages + PAGES_FOR_IO;
}

#ifdef CONFIG_HIGHMEM
/**
 * get_highmem_buffer - Allocate a buffer for highmem pages.
 *
 * If there are some highmem pages in the hibernation image, we may need a
 * buffer to copy them and/or load their data.
1988 */
1989 static inline int get_highmem_buffer(int safe_needed)
1990 {
1991 buffer = get_image_page(GFP_ATOMIC, safe_needed);
1992 return buffer ? 0 : -ENOMEM;
1993 }
1994 
1995 /**
1996 * alloc_highmem_pages - Allocate some highmem pages for the image.
1997 *
1998 * Try to allocate as many pages as needed, but if the number of free highmem
1999 * pages is less than that, allocate them all.
2000 */
2001 static inline unsigned int alloc_highmem_pages(struct memory_bitmap *bm,
2002 unsigned int nr_highmem)
2003 {
2004 unsigned int to_alloc = count_free_highmem_pages();
2005 
2006 if (to_alloc > nr_highmem)
2007 to_alloc = nr_highmem;
2008 
2009 nr_highmem -= to_alloc;
2010 while (to_alloc-- > 0) {
2011 struct page *page;
2012 
2013 page = alloc_image_page(__GFP_HIGHMEM|__GFP_KSWAPD_RECLAIM);
2014 memory_bm_set_bit(bm, page_to_pfn(page));
2015 }
2016 return nr_highmem;
2017 }
2018 #else
2019 static inline int get_highmem_buffer(int safe_needed) { return 0; }
2020 
2021 static inline unsigned int alloc_highmem_pages(struct memory_bitmap *bm,
2022 unsigned int n) { return 0; }
2023 #endif /* CONFIG_HIGHMEM */
2024 
2025 /**
2026 * swsusp_alloc - Allocate memory for hibernation image.
2027 *
2028 * We first try to allocate as many highmem pages as there are
2029 * saveable highmem pages in the system. If that fails, we allocate
2030 * non-highmem pages for the copies of the remaining highmem ones.
2031 *
2032 * In this approach it is likely that the copies of highmem pages will
2033 * also be located in the high memory, because of the way in which
2034 * copy_data_pages() works.
2035 */
2036 static int swsusp_alloc(struct memory_bitmap *copy_bm,
2037 unsigned int nr_pages, unsigned int nr_highmem)
2038 {
2039 if (nr_highmem > 0) {
2040 if (get_highmem_buffer(PG_ANY))
2041 goto err_out;
2042 if (nr_highmem > alloc_highmem) {
2043 nr_highmem -= alloc_highmem;
2044 nr_pages += alloc_highmem_pages(copy_bm, nr_highmem);
2045 }
2046 }
2047 if (nr_pages > alloc_normal) {
2048 nr_pages -= alloc_normal;
2049 while (nr_pages-- > 0) {
2050 struct page *page;
2051 
2052 page = alloc_image_page(GFP_ATOMIC);
2053 if (!page)
2054 goto err_out;
2055 memory_bm_set_bit(copy_bm, page_to_pfn(page));
2056 }
2057 }
2058 
2059 return 0;
2060 
2061 err_out:
2062 swsusp_free();
2063 return -ENOMEM;
2064 }
2065 
2066 asmlinkage __visible int swsusp_save(void)
2067 {
2068 unsigned int nr_pages, nr_highmem;
2069 
2070 pr_info("Creating image:\n");
2071 
2072 drain_local_pages(NULL);
2073 nr_pages = count_data_pages();
2074 nr_highmem = count_highmem_pages();
2075 pr_info("Need to copy %u pages\n", nr_pages + nr_highmem);
2076 
2077 if (!enough_free_mem(nr_pages, nr_highmem)) {
2078 pr_err("Not enough free memory\n");
2079 return -ENOMEM;
2080 }
2081 
2082 if (swsusp_alloc(&copy_bm, nr_pages, nr_highmem)) {
2083 pr_err("Memory allocation failed\n");
2084 return -ENOMEM;
2085 }
2086 
2087 /*
2088 * During allocation of the suspend pagedir, new cold pages may appear.
2089 * Kill them.
2090 */
2091 drain_local_pages(NULL);
2092 copy_data_pages(&copy_bm, &orig_bm);
2093 
2094 /*
2095 * End of critical section. From now on, we can write to memory,
2096 * but we should not touch disk. This especially means we must _not_
2097 * touch swap space! Except we must write out our image of course.
2098 */ 2099 2100 nr_pages += nr_highmem; 2101 nr_copy_pages = nr_pages; 2102 nr_meta_pages = DIV_ROUND_UP(nr_pages * sizeof(long), PAGE_SIZE); 2103 2104 pr_info("Image created (%d pages copied)\n", nr_pages); 2105 2106 return 0; 2107 } 2108 2109 #ifndef CONFIG_ARCH_HIBERNATION_HEADER 2110 static int init_header_complete(struct swsusp_info *info) 2111 { 2112 memcpy(&info->uts, init_utsname(), sizeof(struct new_utsname)); 2113 info->version_code = LINUX_VERSION_CODE; 2114 return 0; 2115 } 2116 2117 static const char *check_image_kernel(struct swsusp_info *info) 2118 { 2119 if (info->version_code != LINUX_VERSION_CODE) 2120 return "kernel version"; 2121 if (strcmp(info->uts.sysname,init_utsname()->sysname)) 2122 return "system type"; 2123 if (strcmp(info->uts.release,init_utsname()->release)) 2124 return "kernel release"; 2125 if (strcmp(info->uts.version,init_utsname()->version)) 2126 return "version"; 2127 if (strcmp(info->uts.machine,init_utsname()->machine)) 2128 return "machine"; 2129 return NULL; 2130 } 2131 #endif /* CONFIG_ARCH_HIBERNATION_HEADER */ 2132 2133 unsigned long snapshot_get_image_size(void) 2134 { 2135 return nr_copy_pages + nr_meta_pages + 1; 2136 } 2137 2138 static int init_header(struct swsusp_info *info) 2139 { 2140 memset(info, 0, sizeof(struct swsusp_info)); 2141 info->num_physpages = get_num_physpages(); 2142 info->image_pages = nr_copy_pages; 2143 info->pages = snapshot_get_image_size(); 2144 info->size = info->pages; 2145 info->size <<= PAGE_SHIFT; 2146 return init_header_complete(info); 2147 } 2148 2149 /** 2150 * pack_pfns - Prepare PFNs for saving. 2151 * @bm: Memory bitmap. 2152 * @buf: Memory buffer to store the PFNs in. 2153 * 2154 * PFNs corresponding to set bits in @bm are stored in the area of memory 2155 * pointed to by @buf (1 page at a time). 2156 */ 2157 static inline void pack_pfns(unsigned long *buf, struct memory_bitmap *bm) 2158 { 2159 int j; 2160 2161 for (j = 0; j < PAGE_SIZE / sizeof(long); j++) { 2162 buf[j] = memory_bm_next_pfn(bm); 2163 if (unlikely(buf[j] == BM_END_OF_MAP)) 2164 break; 2165 } 2166 } 2167 2168 /** 2169 * snapshot_read_next - Get the address to read the next image page from. 2170 * @handle: Snapshot handle to be used for the reading. 2171 * 2172 * On the first call, @handle should point to a zeroed snapshot_handle 2173 * structure. The structure gets populated then and a pointer to it should be 2174 * passed to this function every next time. 2175 * 2176 * On success, the function returns a positive number. Then, the caller 2177 * is allowed to read up to the returned number of bytes from the memory 2178 * location computed by the data_of() macro. 2179 * 2180 * The function returns 0 to indicate the end of the data stream condition, 2181 * and negative numbers are returned on errors. If that happens, the structure 2182 * pointed to by @handle is not updated and should not be used any more. 
2183 */
2184 int snapshot_read_next(struct snapshot_handle *handle)
2185 {
2186 if (handle->cur > nr_meta_pages + nr_copy_pages)
2187 return 0;
2188 
2189 if (!buffer) {
2190 /* This makes the buffer be freed by swsusp_free() */
2191 buffer = get_image_page(GFP_ATOMIC, PG_ANY);
2192 if (!buffer)
2193 return -ENOMEM;
2194 }
2195 if (!handle->cur) {
2196 int error;
2197 
2198 error = init_header((struct swsusp_info *)buffer);
2199 if (error)
2200 return error;
2201 handle->buffer = buffer;
2202 memory_bm_position_reset(&orig_bm);
2203 memory_bm_position_reset(&copy_bm);
2204 } else if (handle->cur <= nr_meta_pages) {
2205 clear_page(buffer);
2206 pack_pfns(buffer, &orig_bm);
2207 } else {
2208 struct page *page;
2209 
2210 page = pfn_to_page(memory_bm_next_pfn(&copy_bm));
2211 if (PageHighMem(page)) {
2212 /*
2213 * Highmem pages are copied to the buffer,
2214 * because we can't return with a kmapped
2215 * highmem page (we may not be called again).
2216 */
2217 void *kaddr;
2218 
2219 kaddr = kmap_atomic(page);
2220 copy_page(buffer, kaddr);
2221 kunmap_atomic(kaddr);
2222 handle->buffer = buffer;
2223 } else {
2224 handle->buffer = page_address(page);
2225 }
2226 }
2227 handle->cur++;
2228 return PAGE_SIZE;
2229 }
2230 
2231 static void duplicate_memory_bitmap(struct memory_bitmap *dst,
2232 struct memory_bitmap *src)
2233 {
2234 unsigned long pfn;
2235 
2236 memory_bm_position_reset(src);
2237 pfn = memory_bm_next_pfn(src);
2238 while (pfn != BM_END_OF_MAP) {
2239 memory_bm_set_bit(dst, pfn);
2240 pfn = memory_bm_next_pfn(src);
2241 }
2242 }
2243 
2244 /**
2245 * mark_unsafe_pages - Mark pages that were used before hibernation.
2246 *
2247 * Mark the pages that cannot be used for storing the image during restoration,
2248 * because they conflict with the pages that had been used before hibernation.
2249 */
2250 static void mark_unsafe_pages(struct memory_bitmap *bm)
2251 {
2252 unsigned long pfn;
2253 
2254 /* Clear the "free"/"unsafe" bit for all PFNs */
2255 memory_bm_position_reset(free_pages_map);
2256 pfn = memory_bm_next_pfn(free_pages_map);
2257 while (pfn != BM_END_OF_MAP) {
2258 memory_bm_clear_current(free_pages_map);
2259 pfn = memory_bm_next_pfn(free_pages_map);
2260 }
2261 
2262 /* Mark pages that correspond to the "original" PFNs as "unsafe" */
2263 duplicate_memory_bitmap(free_pages_map, bm);
2264 
2265 allocated_unsafe_pages = 0;
2266 }
2267 
2268 static int check_header(struct swsusp_info *info)
2269 {
2270 const char *reason;
2271 
2272 reason = check_image_kernel(info);
2273 if (!reason && info->num_physpages != get_num_physpages())
2274 reason = "memory size";
2275 if (reason) {
2276 pr_err("Image mismatch: %s\n", reason);
2277 return -EPERM;
2278 }
2279 return 0;
2280 }
2281 
2282 /**
2283 * load_header - Check the image header and copy the data from it.
2284 */
2285 static int load_header(struct swsusp_info *info)
2286 {
2287 int error;
2288 
2289 restore_pblist = NULL;
2290 error = check_header(info);
2291 if (!error) {
2292 nr_copy_pages = info->image_pages;
2293 nr_meta_pages = info->pages - info->image_pages - 1;
2294 }
2295 return error;
2296 }
2297 
2298 /**
2299 * unpack_orig_pfns - Set bits corresponding to given PFNs in a memory bitmap.
2300 * @bm: Memory bitmap.
2301 * @buf: Area of memory containing the PFNs.
2302 *
2303 * For each element of the array pointed to by @buf (1 page at a time), set the
2304 * corresponding bit in @bm.
2305 */ 2306 static int unpack_orig_pfns(unsigned long *buf, struct memory_bitmap *bm) 2307 { 2308 int j; 2309 2310 for (j = 0; j < PAGE_SIZE / sizeof(long); j++) { 2311 if (unlikely(buf[j] == BM_END_OF_MAP)) 2312 break; 2313 2314 if (pfn_valid(buf[j]) && memory_bm_pfn_present(bm, buf[j])) { 2315 memory_bm_set_bit(bm, buf[j]); 2316 } else { 2317 if (!pfn_valid(buf[j])) 2318 pr_err(FW_BUG "Memory map mismatch at 0x%llx after hibernation\n", 2319 (unsigned long long)PFN_PHYS(buf[j])); 2320 return -EFAULT; 2321 } 2322 } 2323 2324 return 0; 2325 } 2326 2327 #ifdef CONFIG_HIGHMEM 2328 /* 2329 * struct highmem_pbe is used for creating the list of highmem pages that 2330 * should be restored atomically during the resume from disk, because the page 2331 * frames they have occupied before the suspend are in use. 2332 */ 2333 struct highmem_pbe { 2334 struct page *copy_page; /* data is here now */ 2335 struct page *orig_page; /* data was here before the suspend */ 2336 struct highmem_pbe *next; 2337 }; 2338 2339 /* 2340 * List of highmem PBEs needed for restoring the highmem pages that were 2341 * allocated before the suspend and included in the suspend image, but have 2342 * also been allocated by the "resume" kernel, so their contents cannot be 2343 * written directly to their "original" page frames. 2344 */ 2345 static struct highmem_pbe *highmem_pblist; 2346 2347 /** 2348 * count_highmem_image_pages - Compute the number of highmem pages in the image. 2349 * @bm: Memory bitmap. 2350 * 2351 * The bits in @bm that correspond to image pages are assumed to be set. 2352 */ 2353 static unsigned int count_highmem_image_pages(struct memory_bitmap *bm) 2354 { 2355 unsigned long pfn; 2356 unsigned int cnt = 0; 2357 2358 memory_bm_position_reset(bm); 2359 pfn = memory_bm_next_pfn(bm); 2360 while (pfn != BM_END_OF_MAP) { 2361 if (PageHighMem(pfn_to_page(pfn))) 2362 cnt++; 2363 2364 pfn = memory_bm_next_pfn(bm); 2365 } 2366 return cnt; 2367 } 2368 2369 static unsigned int safe_highmem_pages; 2370 2371 static struct memory_bitmap *safe_highmem_bm; 2372 2373 /** 2374 * prepare_highmem_image - Allocate memory for loading highmem data from image. 2375 * @bm: Pointer to an uninitialized memory bitmap structure. 2376 * @nr_highmem_p: Pointer to the number of highmem image pages. 2377 * 2378 * Try to allocate as many highmem pages as there are highmem image pages 2379 * (@nr_highmem_p points to the variable containing the number of highmem image 2380 * pages). The pages that are "safe" (ie. will not be overwritten when the 2381 * hibernation image is restored entirely) have the corresponding bits set in 2382 * @bm (it must be uninitialized). 2383 * 2384 * NOTE: This function should not be called if there are no highmem image pages. 
2385 */
2386 static int prepare_highmem_image(struct memory_bitmap *bm,
2387 unsigned int *nr_highmem_p)
2388 {
2389 unsigned int to_alloc;
2390 
2391 if (memory_bm_create(bm, GFP_ATOMIC, PG_SAFE))
2392 return -ENOMEM;
2393 
2394 if (get_highmem_buffer(PG_SAFE))
2395 return -ENOMEM;
2396 
2397 to_alloc = count_free_highmem_pages();
2398 if (to_alloc > *nr_highmem_p)
2399 to_alloc = *nr_highmem_p;
2400 else
2401 *nr_highmem_p = to_alloc;
2402 
2403 safe_highmem_pages = 0;
2404 while (to_alloc-- > 0) {
2405 struct page *page;
2406 
2407 page = alloc_page(__GFP_HIGHMEM);
2408 if (!swsusp_page_is_free(page)) {
2409 /* The page is "safe", set its bit in the bitmap */
2410 memory_bm_set_bit(bm, page_to_pfn(page));
2411 safe_highmem_pages++;
2412 }
2413 /* Mark the page as allocated */
2414 swsusp_set_page_forbidden(page);
2415 swsusp_set_page_free(page);
2416 }
2417 memory_bm_position_reset(bm);
2418 safe_highmem_bm = bm;
2419 return 0;
2420 }
2421 
2422 static struct page *last_highmem_page;
2423 
2424 /**
2425 * get_highmem_page_buffer - Prepare a buffer to store a highmem image page.
2426 *
2427 * For a given highmem image page get a buffer that snapshot_write_next() should
2428 * return to its caller to write to.
2429 *
2430 * If the page is to be saved to its "original" page frame or a copy of
2431 * the page is to be made in highmem, @buffer is returned. Otherwise,
2432 * the copy of the page is to be made in normal memory, so the address of
2433 * the copy is returned.
2434 *
2435 * If @buffer is returned, the caller of snapshot_write_next() will write
2436 * the page's contents to @buffer, so they will have to be copied to the
2437 * right location on the next call to snapshot_write_next() and it is done
2438 * with the help of copy_last_highmem_page(). For this purpose, if
2439 * @buffer is returned, @last_highmem_page is set to the page to which
2440 * the data will have to be copied from @buffer.
2441 */
2442 static void *get_highmem_page_buffer(struct page *page,
2443 struct chain_allocator *ca)
2444 {
2445 struct highmem_pbe *pbe;
2446 void *kaddr;
2447 
2448 if (swsusp_page_is_forbidden(page) && swsusp_page_is_free(page)) {
2449 /*
2450 * We have allocated the "original" page frame and we can
2451 * use it directly to store the loaded page.
2452 */
2453 last_highmem_page = page;
2454 return buffer;
2455 }
2456 /*
2457 * The "original" page frame has not been allocated and we have to
2458 * use a "safe" page frame to store the loaded page.
2459 */
2460 pbe = chain_alloc(ca, sizeof(struct highmem_pbe));
2461 if (!pbe) {
2462 swsusp_free();
2463 return ERR_PTR(-ENOMEM);
2464 }
2465 pbe->orig_page = page;
2466 if (safe_highmem_pages > 0) {
2467 struct page *tmp;
2468 
2469 /* Copy of the page will be stored in high memory */
2470 kaddr = buffer;
2471 tmp = pfn_to_page(memory_bm_next_pfn(safe_highmem_bm));
2472 safe_highmem_pages--;
2473 last_highmem_page = tmp;
2474 pbe->copy_page = tmp;
2475 } else {
2476 /* Copy of the page will be stored in normal memory */
2477 kaddr = safe_pages_list;
2478 safe_pages_list = safe_pages_list->next;
2479 pbe->copy_page = virt_to_page(kaddr);
2480 }
2481 pbe->next = highmem_pblist;
2482 highmem_pblist = pbe;
2483 return kaddr;
2484 }
2485 
2486 /**
2487 * copy_last_highmem_page - Copy the most recent highmem image page.
2488 *
2489 * Copy the contents of a highmem image page from @buffer, where the caller of
2490 * snapshot_write_next() has stored them, to the right location represented by
2491 * @last_highmem_page.
2492 */
2493 static void copy_last_highmem_page(void)
2494 {
2495 if (last_highmem_page) {
2496 void *dst;
2497 
2498 dst = kmap_atomic(last_highmem_page);
2499 copy_page(dst, buffer);
2500 kunmap_atomic(dst);
2501 last_highmem_page = NULL;
2502 }
2503 }
2504 
2505 static inline int last_highmem_page_copied(void)
2506 {
2507 return !last_highmem_page;
2508 }
2509 
2510 static inline void free_highmem_data(void)
2511 {
2512 if (safe_highmem_bm)
2513 memory_bm_free(safe_highmem_bm, PG_UNSAFE_CLEAR);
2514 
2515 if (buffer)
2516 free_image_page(buffer, PG_UNSAFE_CLEAR);
2517 }
2518 #else
2519 static unsigned int count_highmem_image_pages(struct memory_bitmap *bm) { return 0; }
2520 
2521 static inline int prepare_highmem_image(struct memory_bitmap *bm,
2522 unsigned int *nr_highmem_p) { return 0; }
2523 
2524 static inline void *get_highmem_page_buffer(struct page *page,
2525 struct chain_allocator *ca)
2526 {
2527 return ERR_PTR(-EINVAL);
2528 }
2529 
2530 static inline void copy_last_highmem_page(void) {}
2531 static inline int last_highmem_page_copied(void) { return 1; }
2532 static inline void free_highmem_data(void) {}
2533 #endif /* CONFIG_HIGHMEM */
2534 
2535 #define PBES_PER_LINKED_PAGE (LINKED_PAGE_DATA_SIZE / sizeof(struct pbe))
2536 
2537 /**
2538 * prepare_image - Make room for loading hibernation image.
2539 * @new_bm: Uninitialized memory bitmap structure.
2540 * @bm: Memory bitmap with unsafe pages marked.
2541 *
2542 * Use @bm to mark the pages that will be overwritten in the process of
2543 * restoring the system memory state from the suspend image ("unsafe" pages)
2544 * and allocate memory for the image.
2545 *
2546 * The idea is to allocate a new memory bitmap first and then allocate
2547 * as many pages as needed for image data, but without specifying what those
2548 * pages will be used for just yet. Instead, we mark them all as allocated and
2549 * create a list of "safe" pages to be used later. On systems with high
2550 * memory a list of "safe" highmem pages is created too.
2551 */
2552 static int prepare_image(struct memory_bitmap *new_bm, struct memory_bitmap *bm)
2553 {
2554 unsigned int nr_pages, nr_highmem;
2555 struct linked_page *lp;
2556 int error;
2557 
2558 /* If there is no highmem, the buffer will not be necessary */
2559 free_image_page(buffer, PG_UNSAFE_CLEAR);
2560 buffer = NULL;
2561 
2562 nr_highmem = count_highmem_image_pages(bm);
2563 mark_unsafe_pages(bm);
2564 
2565 error = memory_bm_create(new_bm, GFP_ATOMIC, PG_SAFE);
2566 if (error)
2567 goto Free;
2568 
2569 duplicate_memory_bitmap(new_bm, bm);
2570 memory_bm_free(bm, PG_UNSAFE_KEEP);
2571 if (nr_highmem > 0) {
2572 error = prepare_highmem_image(bm, &nr_highmem);
2573 if (error)
2574 goto Free;
2575 }
2576 /*
2577 * Reserve some safe pages for potential later use.
2578 *
2579 * NOTE: This way we make sure there will be enough safe pages for the
2580 * chain_alloc() in get_buffer(). It is a bit wasteful, but
2581 * nr_copy_pages cannot be greater than 50% of the memory anyway.
2582 *
2583 * Also, nr_copy_pages cannot be less than allocated_unsafe_pages.
2584 */ 2585 nr_pages = nr_copy_pages - nr_highmem - allocated_unsafe_pages; 2586 nr_pages = DIV_ROUND_UP(nr_pages, PBES_PER_LINKED_PAGE); 2587 while (nr_pages > 0) { 2588 lp = get_image_page(GFP_ATOMIC, PG_SAFE); 2589 if (!lp) { 2590 error = -ENOMEM; 2591 goto Free; 2592 } 2593 lp->next = safe_pages_list; 2594 safe_pages_list = lp; 2595 nr_pages--; 2596 } 2597 /* Preallocate memory for the image */ 2598 nr_pages = nr_copy_pages - nr_highmem - allocated_unsafe_pages; 2599 while (nr_pages > 0) { 2600 lp = (struct linked_page *)get_zeroed_page(GFP_ATOMIC); 2601 if (!lp) { 2602 error = -ENOMEM; 2603 goto Free; 2604 } 2605 if (!swsusp_page_is_free(virt_to_page(lp))) { 2606 /* The page is "safe", add it to the list */ 2607 lp->next = safe_pages_list; 2608 safe_pages_list = lp; 2609 } 2610 /* Mark the page as allocated */ 2611 swsusp_set_page_forbidden(virt_to_page(lp)); 2612 swsusp_set_page_free(virt_to_page(lp)); 2613 nr_pages--; 2614 } 2615 return 0; 2616 2617 Free: 2618 swsusp_free(); 2619 return error; 2620 } 2621 2622 /** 2623 * get_buffer - Get the address to store the next image data page. 2624 * 2625 * Get the address that snapshot_write_next() should return to its caller to 2626 * write to. 2627 */ 2628 static void *get_buffer(struct memory_bitmap *bm, struct chain_allocator *ca) 2629 { 2630 struct pbe *pbe; 2631 struct page *page; 2632 unsigned long pfn = memory_bm_next_pfn(bm); 2633 2634 if (pfn == BM_END_OF_MAP) 2635 return ERR_PTR(-EFAULT); 2636 2637 page = pfn_to_page(pfn); 2638 if (PageHighMem(page)) 2639 return get_highmem_page_buffer(page, ca); 2640 2641 if (swsusp_page_is_forbidden(page) && swsusp_page_is_free(page)) 2642 /* 2643 * We have allocated the "original" page frame and we can 2644 * use it directly to store the loaded page. 2645 */ 2646 return page_address(page); 2647 2648 /* 2649 * The "original" page frame has not been allocated and we have to 2650 * use a "safe" page frame to store the loaded page. 2651 */ 2652 pbe = chain_alloc(ca, sizeof(struct pbe)); 2653 if (!pbe) { 2654 swsusp_free(); 2655 return ERR_PTR(-ENOMEM); 2656 } 2657 pbe->orig_address = page_address(page); 2658 pbe->address = safe_pages_list; 2659 safe_pages_list = safe_pages_list->next; 2660 pbe->next = restore_pblist; 2661 restore_pblist = pbe; 2662 return pbe->address; 2663 } 2664 2665 /** 2666 * snapshot_write_next - Get the address to store the next image page. 2667 * @handle: Snapshot handle structure to guide the writing. 2668 * 2669 * On the first call, @handle should point to a zeroed snapshot_handle 2670 * structure. The structure gets populated then and a pointer to it should be 2671 * passed to this function every next time. 2672 * 2673 * On success, the function returns a positive number. Then, the caller 2674 * is allowed to write up to the returned number of bytes to the memory 2675 * location computed by the data_of() macro. 2676 * 2677 * The function returns 0 to indicate the "end of file" condition. Negative 2678 * numbers are returned on errors, in which cases the structure pointed to by 2679 * @handle is not updated and should not be used any more. 
2680 */
2681 int snapshot_write_next(struct snapshot_handle *handle)
2682 {
2683 static struct chain_allocator ca;
2684 int error = 0;
2685 
2686 /* Check if we have already loaded the entire image */
2687 if (handle->cur > 1 && handle->cur > nr_meta_pages + nr_copy_pages)
2688 return 0;
2689 
2690 handle->sync_read = 1;
2691 
2692 if (!handle->cur) {
2693 if (!buffer)
2694 /* This makes the buffer be freed by swsusp_free() */
2695 buffer = get_image_page(GFP_ATOMIC, PG_ANY);
2696 
2697 if (!buffer)
2698 return -ENOMEM;
2699 
2700 handle->buffer = buffer;
2701 } else if (handle->cur == 1) {
2702 error = load_header(buffer);
2703 if (error)
2704 return error;
2705 
2706 safe_pages_list = NULL;
2707 
2708 error = memory_bm_create(&copy_bm, GFP_ATOMIC, PG_ANY);
2709 if (error)
2710 return error;
2711 
2712 hibernate_restore_protection_begin();
2713 } else if (handle->cur <= nr_meta_pages + 1) {
2714 error = unpack_orig_pfns(buffer, &copy_bm);
2715 if (error)
2716 return error;
2717 
2718 if (handle->cur == nr_meta_pages + 1) {
2719 error = prepare_image(&orig_bm, &copy_bm);
2720 if (error)
2721 return error;
2722 
2723 chain_init(&ca, GFP_ATOMIC, PG_SAFE);
2724 memory_bm_position_reset(&orig_bm);
2725 restore_pblist = NULL;
2726 handle->buffer = get_buffer(&orig_bm, &ca);
2727 handle->sync_read = 0;
2728 if (IS_ERR(handle->buffer))
2729 return PTR_ERR(handle->buffer);
2730 }
2731 } else {
2732 copy_last_highmem_page();
2733 hibernate_restore_protect_page(handle->buffer);
2734 handle->buffer = get_buffer(&orig_bm, &ca);
2735 if (IS_ERR(handle->buffer))
2736 return PTR_ERR(handle->buffer);
2737 if (handle->buffer != buffer)
2738 handle->sync_read = 0;
2739 }
2740 handle->cur++;
2741 return PAGE_SIZE;
2742 }
2743 
2744 /**
2745 * snapshot_write_finalize - Complete the loading of a hibernation image.
2746 *
2747 * Must be called after the last call to snapshot_write_next() in case the last
2748 * page in the image happens to be a highmem page and its contents should be
2749 * stored in highmem. Additionally, it recycles bitmap memory that's not
2750 * necessary any more.
2751 */
2752 void snapshot_write_finalize(struct snapshot_handle *handle)
2753 {
2754 copy_last_highmem_page();
2755 hibernate_restore_protect_page(handle->buffer);
2756 /* Do that only if we have loaded the image entirely */
2757 if (handle->cur > 1 && handle->cur > nr_meta_pages + nr_copy_pages) {
2758 memory_bm_recycle(&orig_bm);
2759 free_highmem_data();
2760 }
2761 }
2762 
2763 int snapshot_image_loaded(struct snapshot_handle *handle)
2764 {
2765 return !(!nr_copy_pages || !last_highmem_page_copied() ||
2766 handle->cur <= nr_meta_pages + nr_copy_pages);
2767 }
2768 
2769 #ifdef CONFIG_HIGHMEM
2770 /* Assumes that @buf is ready and points to a "safe" page */
2771 static inline void swap_two_pages_data(struct page *p1, struct page *p2,
2772 void *buf)
2773 {
2774 void *kaddr1, *kaddr2;
2775 
2776 kaddr1 = kmap_atomic(p1);
2777 kaddr2 = kmap_atomic(p2);
2778 copy_page(buf, kaddr1);
2779 copy_page(kaddr1, kaddr2);
2780 copy_page(kaddr2, buf);
2781 kunmap_atomic(kaddr2);
2782 kunmap_atomic(kaddr1);
2783 }
2784 
2785 /**
2786 * restore_highmem - Put highmem image pages into their original locations.
2787 *
2788 * For each highmem page that was in use before hibernation and is included in
2789 * the image, and also has been allocated by the "restore" kernel, swap its
2790 * current contents with the previous (ie. "before hibernation") ones.
2791 * 2792 * If the restore eventually fails, we can call this function once again and 2793 * restore the highmem state as seen by the restore kernel. 2794 */ 2795 int restore_highmem(void) 2796 { 2797 struct highmem_pbe *pbe = highmem_pblist; 2798 void *buf; 2799 2800 if (!pbe) 2801 return 0; 2802 2803 buf = get_image_page(GFP_ATOMIC, PG_SAFE); 2804 if (!buf) 2805 return -ENOMEM; 2806 2807 while (pbe) { 2808 swap_two_pages_data(pbe->copy_page, pbe->orig_page, buf); 2809 pbe = pbe->next; 2810 } 2811 free_image_page(buf, PG_UNSAFE_CLEAR); 2812 return 0; 2813 } 2814 #endif /* CONFIG_HIGHMEM */ 2815
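
/*
 * Editorial illustration (not part of the original file): a minimal sketch of
 * how a caller such as the swap-writing code is expected to drive
 * snapshot_read_next() through a zeroed snapshot_handle, as described in the
 * kernel-doc above.  The write_fn callback and the function name are
 * hypothetical stand-ins for the real block I/O path.
 */
static int __maybe_unused save_image_sketch(int (*write_fn)(void *buf))
{
	struct snapshot_handle snapshot;
	int ret;

	/* Start with a zeroed handle; the first page handed out is the header. */
	memset(&snapshot, 0, sizeof(snapshot));

	/* Each successful call exposes one page at data_of(snapshot). */
	while ((ret = snapshot_read_next(&snapshot)) > 0) {
		ret = write_fn(data_of(snapshot));
		if (ret)
			break;
	}

	/* 0 means the whole image has been handed out, negative is an error. */
	return ret;
}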
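
/*
 * Likewise an editorial sketch of the restore direction: pages read from the
 * image are fed into snapshot_write_next(), the load is completed with
 * snapshot_write_finalize(), and snapshot_image_loaded() confirms that the
 * entire image was consumed.  The read_fn callback and the function name are
 * hypothetical and only illustrate the calling protocol documented above.
 */
static int __maybe_unused load_image_sketch(int (*read_fn)(void *buf))
{
	struct snapshot_handle snapshot;
	int ret;

	memset(&snapshot, 0, sizeof(snapshot));

	/* Ask where to put the next page, then read the image data into it. */
	for (;;) {
		ret = snapshot_write_next(&snapshot);
		if (ret <= 0)
			break;
		ret = read_fn(data_of(snapshot));
		if (ret)
			break;
	}

	if (!ret) {
		snapshot_write_finalize(&snapshot);
		if (!snapshot_image_loaded(&snapshot))
			ret = -ENODATA;
	}
	return ret;
}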