/*
 * linux/kernel/power/snapshot.c
 *
 * This file provides system snapshot/restore functionality for swsusp.
 *
 * Copyright (C) 1998-2005 Pavel Machek <pavel@ucw.cz>
 * Copyright (C) 2006 Rafael J. Wysocki <rjw@sisk.pl>
 *
 * This file is released under the GPLv2.
 *
 */

#include <linux/version.h>
#include <linux/module.h>
#include <linux/mm.h>
#include <linux/suspend.h>
#include <linux/delay.h>
#include <linux/bitops.h>
#include <linux/spinlock.h>
#include <linux/kernel.h>
#include <linux/pm.h>
#include <linux/device.h>
#include <linux/init.h>
#include <linux/bootmem.h>
#include <linux/nmi.h>
#include <linux/syscalls.h>
#include <linux/console.h>
#include <linux/highmem.h>
#include <linux/list.h>
#include <linux/slab.h>
#include <linux/compiler.h>
#include <linux/ktime.h>

#include <linux/uaccess.h>
#include <asm/mmu_context.h>
#include <asm/pgtable.h>
#include <asm/tlbflush.h>
#include <asm/io.h>
#ifdef CONFIG_STRICT_KERNEL_RWX
#include <asm/set_memory.h>
#endif

#include "power.h"

#ifdef CONFIG_STRICT_KERNEL_RWX
static bool hibernate_restore_protection;
static bool hibernate_restore_protection_active;

void enable_restore_image_protection(void)
{
	hibernate_restore_protection = true;
}

static inline void hibernate_restore_protection_begin(void)
{
	hibernate_restore_protection_active = hibernate_restore_protection;
}

static inline void hibernate_restore_protection_end(void)
{
	hibernate_restore_protection_active = false;
}

static inline void hibernate_restore_protect_page(void *page_address)
{
	if (hibernate_restore_protection_active)
		set_memory_ro((unsigned long)page_address, 1);
}

static inline void hibernate_restore_unprotect_page(void *page_address)
{
	if (hibernate_restore_protection_active)
		set_memory_rw((unsigned long)page_address, 1);
}
#else
static inline void hibernate_restore_protection_begin(void) {}
static inline void hibernate_restore_protection_end(void) {}
static inline void hibernate_restore_protect_page(void *page_address) {}
static inline void hibernate_restore_unprotect_page(void *page_address) {}
#endif /* CONFIG_STRICT_KERNEL_RWX */

static int swsusp_page_is_free(struct page *);
static void swsusp_set_page_forbidden(struct page *);
static void swsusp_unset_page_forbidden(struct page *);

/*
 * Number of bytes to reserve for memory allocations made by device drivers
 * from their ->freeze() and ->freeze_noirq() callbacks so that they don't
 * cause image creation to fail (tunable via /sys/power/reserved_size).
 */
unsigned long reserved_size;

void __init hibernate_reserved_size_init(void)
{
	reserved_size = SPARE_PAGES * PAGE_SIZE;
}

/*
 * Preferred image size in bytes (tunable via /sys/power/image_size).
 * When it is set to N, swsusp will do its best to ensure the image
 * size will not exceed N bytes, but if that is impossible, it will
 * try to create the smallest image possible.
 */
unsigned long image_size;

void __init hibernate_image_size_init(void)
{
	image_size = ((totalram_pages * 2) / 5) * PAGE_SIZE;
}

/*
 * List of PBEs needed for restoring the pages that were allocated before
 * the suspend and included in the suspend image, but have also been
 * allocated by the "resume" kernel, so their contents cannot be written
 * directly to their "original" page frames.
 */
struct pbe *restore_pblist;

/* struct linked_page is used to build chains of pages */

#define LINKED_PAGE_DATA_SIZE	(PAGE_SIZE - sizeof(void *))

struct linked_page {
	struct linked_page *next;
	char data[LINKED_PAGE_DATA_SIZE];
} __packed;

/*
 * List of "safe" pages (ie. pages that were not used by the image kernel
 * before hibernation) that may be used as temporary storage for image kernel
 * memory contents.
 */
static struct linked_page *safe_pages_list;

/* Pointer to an auxiliary buffer (1 page) */
static void *buffer;

#define PG_ANY		0
#define PG_SAFE		1
#define PG_UNSAFE_CLEAR	1
#define PG_UNSAFE_KEEP	0

static unsigned int allocated_unsafe_pages;

/**
 * get_image_page - Allocate a page for a hibernation image.
 * @gfp_mask: GFP mask for the allocation.
 * @safe_needed: Get pages that were not used before hibernation (restore only)
 *
 * During image restoration, for storing the PBE list and the image data, we can
 * only use memory pages that do not conflict with the pages used before
 * hibernation.  The "unsafe" pages have PageNosaveFree set and we count them
 * using allocated_unsafe_pages.
 *
 * Each allocated image page is marked as PageNosave and PageNosaveFree so that
 * swsusp_free() can release it.
 */
static void *get_image_page(gfp_t gfp_mask, int safe_needed)
{
	void *res;

	res = (void *)get_zeroed_page(gfp_mask);
	if (safe_needed)
		while (res && swsusp_page_is_free(virt_to_page(res))) {
			/* The page is unsafe, mark it for swsusp_free() */
			swsusp_set_page_forbidden(virt_to_page(res));
			allocated_unsafe_pages++;
			res = (void *)get_zeroed_page(gfp_mask);
		}
	if (res) {
		swsusp_set_page_forbidden(virt_to_page(res));
		swsusp_set_page_free(virt_to_page(res));
	}
	return res;
}

static void *__get_safe_page(gfp_t gfp_mask)
{
	if (safe_pages_list) {
		void *ret = safe_pages_list;

		safe_pages_list = safe_pages_list->next;
		memset(ret, 0, PAGE_SIZE);
		return ret;
	}
	return get_image_page(gfp_mask, PG_SAFE);
}

unsigned long get_safe_page(gfp_t gfp_mask)
{
	return (unsigned long)__get_safe_page(gfp_mask);
}

static struct page *alloc_image_page(gfp_t gfp_mask)
{
	struct page *page;

	page = alloc_page(gfp_mask);
	if (page) {
		swsusp_set_page_forbidden(page);
		swsusp_set_page_free(page);
	}
	return page;
}

static void recycle_safe_page(void *page_address)
{
	struct linked_page *lp = page_address;

	lp->next = safe_pages_list;
	safe_pages_list = lp;
}

/**
 * free_image_page - Free a page allocated for hibernation image.
 * @addr: Address of the page to free.
 * @clear_nosave_free: If set, clear the PageNosaveFree bit for the page.
 *
 * The page to free should have been allocated by get_image_page() (page flags
 * set by it are affected).
 */
static inline void free_image_page(void *addr, int clear_nosave_free)
{
	struct page *page;

	BUG_ON(!virt_addr_valid(addr));

	page = virt_to_page(addr);

	swsusp_unset_page_forbidden(page);
	if (clear_nosave_free)
		swsusp_unset_page_free(page);

	__free_page(page);
}

static inline void free_list_of_pages(struct linked_page *list,
				      int clear_page_nosave)
{
	while (list) {
		struct linked_page *lp = list->next;

		free_image_page(list, clear_page_nosave);
		list = lp;
	}
}

/*
 * struct chain_allocator is used for allocating small objects out of
 * a linked list of pages called 'the chain'.
 *
 * The chain grows whenever there is no room for a new object in the
 * current page.  The allocated objects cannot be freed individually.
 * It is only possible to free them all at once, by freeing the entire
 * chain.
 *
 * NOTE: The chain allocator may be inefficient if the allocated objects
 * are not much smaller than PAGE_SIZE.
 */
struct chain_allocator {
	struct linked_page *chain;	/* the chain */
	unsigned int used_space;	/* total size of objects allocated out
					   of the current page */
	gfp_t gfp_mask;		/* mask for allocating pages */
	int safe_needed;	/* if set, only "safe" pages are allocated */
};

static void chain_init(struct chain_allocator *ca, gfp_t gfp_mask,
		       int safe_needed)
{
	ca->chain = NULL;
	ca->used_space = LINKED_PAGE_DATA_SIZE;
	ca->gfp_mask = gfp_mask;
	ca->safe_needed = safe_needed;
}

static void *chain_alloc(struct chain_allocator *ca, unsigned int size)
{
	void *ret;

	if (LINKED_PAGE_DATA_SIZE - ca->used_space < size) {
		struct linked_page *lp;

		lp = ca->safe_needed ? __get_safe_page(ca->gfp_mask) :
					get_image_page(ca->gfp_mask, PG_ANY);
		if (!lp)
			return NULL;

		lp->next = ca->chain;
		ca->chain = lp;
		ca->used_space = 0;
	}
	ret = ca->chain->data + ca->used_space;
	ca->used_space += size;
	return ret;
}
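
/*
 * Example of the chain allocator arithmetic (illustrative, assuming 4 KiB
 * pages on a 64-bit kernel): LINKED_PAGE_DATA_SIZE is 4096 - 8 = 4088 bytes,
 * so one page of the chain holds 4088 / 24 = 170 of the 24-byte struct
 * rtree_node objects allocated below; the 171st call to chain_alloc() links
 * a fresh page in front of the chain.  Typical usage:
 *
 *	struct chain_allocator ca;
 *
 *	chain_init(&ca, GFP_KERNEL, PG_ANY);
 *	node = chain_alloc(&ca, sizeof(struct rtree_node));
 */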

/**
 * Data types related to memory bitmaps.
 *
 * Memory bitmap is a structure consisting of many linked lists of
 * objects.  The main list's elements are of type struct zone_bitmap
 * and each of them corresponds to one zone.  For each zone bitmap
 * object there is a list of objects of type struct bm_block that
 * represent each block of the bitmap in which information is stored.
 *
 * struct memory_bitmap contains a pointer to the main list of zone
 * bitmap objects, a struct bm_position used for browsing the bitmap,
 * and a pointer to the list of pages used for allocating all of the
 * zone bitmap objects and bitmap block objects.
 *
 * NOTE: It has to be possible to lay out the bitmap in memory
 * using only allocations of order 0.  Additionally, the bitmap is
 * designed to work with arbitrary number of zones (this is over the
 * top for now, but let's avoid making unnecessary assumptions ;-).
 *
 * struct zone_bitmap contains a pointer to a list of bitmap block
 * objects and a pointer to the bitmap block object that has been
 * most recently used for setting bits.  Additionally, it contains the
 * PFNs that correspond to the start and end of the represented zone.
 *
 * struct bm_block contains a pointer to the memory page in which
 * information is stored (in the form of a block of bitmap).
 * It also contains the pfns that correspond to the start and end of
 * the represented memory area.
 *
 * The memory bitmap is organized as a radix tree to guarantee fast random
 * access to the bits.  There is one radix tree for each zone (as returned
 * from create_mem_extents).
 *
 * One radix tree is represented by one struct mem_zone_bm_rtree.  There are
 * two linked lists for the nodes of the tree, one for the inner nodes and
 * one for the leaf nodes.  The linked leaf nodes are used for fast linear
 * access of the memory bitmap.
 *
 * The struct rtree_node represents one node of the radix tree.
 */

#define BM_END_OF_MAP	(~0UL)

#define BM_BITS_PER_BLOCK	(PAGE_SIZE * BITS_PER_BYTE)
#define BM_BLOCK_SHIFT		(PAGE_SHIFT + 3)
#define BM_BLOCK_MASK		((1UL << BM_BLOCK_SHIFT) - 1)

/*
 * struct rtree_node is a wrapper struct to link the nodes
 * of the rtree together for easy linear iteration over
 * bits and easy freeing
 */
struct rtree_node {
	struct list_head list;
	unsigned long *data;
};

/*
 * struct mem_zone_bm_rtree represents a bitmap used for one
 * populated memory zone.
 */
struct mem_zone_bm_rtree {
	struct list_head list;		/* Link Zones together         */
	struct list_head nodes;		/* Radix Tree inner nodes      */
	struct list_head leaves;	/* Radix Tree leaves           */
	unsigned long start_pfn;	/* Zone start page frame       */
	unsigned long end_pfn;		/* Zone end page frame + 1     */
	struct rtree_node *rtree;	/* Radix Tree Root             */
	int levels;			/* Number of Radix Tree Levels */
	unsigned int blocks;		/* Number of Bitmap Blocks     */
};

/* struct bm_position is used for browsing memory bitmaps */

struct bm_position {
	struct mem_zone_bm_rtree *zone;
	struct rtree_node *node;
	unsigned long node_pfn;
	int node_bit;
};

struct memory_bitmap {
	struct list_head zones;
	struct linked_page *p_list;	/* list of pages used to store zone
					   bitmap objects and bitmap block
					   objects */
	struct bm_position cur;	/* most recently used bit position */
};

/* Functions that operate on memory bitmaps */

#define BM_ENTRIES_PER_LEVEL	(PAGE_SIZE / sizeof(unsigned long))
#if BITS_PER_LONG == 32
#define BM_RTREE_LEVEL_SHIFT	(PAGE_SHIFT - 2)
#else
#define BM_RTREE_LEVEL_SHIFT	(PAGE_SHIFT - 3)
#endif
#define BM_RTREE_LEVEL_MASK	((1UL << BM_RTREE_LEVEL_SHIFT) - 1)
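
/*
 * Worked example of the geometry defined above, assuming x86-64 with 4 KiB
 * pages: BM_BLOCK_SHIFT is 15, so one bitmap page (leaf node) covers
 * 2^15 = 32768 page frames, i.e. 128 MiB of memory.  BM_RTREE_LEVEL_SHIFT
 * is 9, so an inner node holds 512 pointers and a single-level tree
 * addresses 512 * 128 MiB = 64 GiB per zone; a second level is only needed
 * beyond that.
 */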

/**
 * alloc_rtree_node - Allocate a new node and add it to the radix tree.
 *
 * This function is used to allocate inner nodes as well as the
 * leaf nodes of the radix tree.  It also adds the node to the
 * corresponding linked list passed in by the *list parameter.
 */
static struct rtree_node *alloc_rtree_node(gfp_t gfp_mask, int safe_needed,
					   struct chain_allocator *ca,
					   struct list_head *list)
{
	struct rtree_node *node;

	node = chain_alloc(ca, sizeof(struct rtree_node));
	if (!node)
		return NULL;

	node->data = get_image_page(gfp_mask, safe_needed);
	if (!node->data)
		return NULL;

	list_add_tail(&node->list, list);

	return node;
}

/**
 * add_rtree_block - Add a new leaf node to the radix tree.
 *
 * The leaf nodes need to be allocated in order to keep the leaves
 * linked list in order.  This is guaranteed by the zone->blocks
 * counter.
 */
static int add_rtree_block(struct mem_zone_bm_rtree *zone, gfp_t gfp_mask,
			   int safe_needed, struct chain_allocator *ca)
{
	struct rtree_node *node, *block, **dst;
	unsigned int levels_needed, block_nr;
	int i;

	block_nr = zone->blocks;
	levels_needed = 0;

	/* How many levels do we need for this block nr? */
	while (block_nr) {
		levels_needed += 1;
		block_nr >>= BM_RTREE_LEVEL_SHIFT;
	}

	/* Make sure the rtree has enough levels */
	for (i = zone->levels; i < levels_needed; i++) {
		node = alloc_rtree_node(gfp_mask, safe_needed, ca,
					&zone->nodes);
		if (!node)
			return -ENOMEM;

		node->data[0] = (unsigned long)zone->rtree;
		zone->rtree = node;
		zone->levels += 1;
	}

	/* Allocate new block */
	block = alloc_rtree_node(gfp_mask, safe_needed, ca, &zone->leaves);
	if (!block)
		return -ENOMEM;

	/* Now walk the rtree to insert the block */
	node = zone->rtree;
	dst = &zone->rtree;
	block_nr = zone->blocks;
	for (i = zone->levels; i > 0; i--) {
		int index;

		if (!node) {
			node = alloc_rtree_node(gfp_mask, safe_needed, ca,
						&zone->nodes);
			if (!node)
				return -ENOMEM;
			*dst = node;
		}

		index = block_nr >> ((i - 1) * BM_RTREE_LEVEL_SHIFT);
		index &= BM_RTREE_LEVEL_MASK;
		dst = (struct rtree_node **)&((*dst)->data[index]);
		node = *dst;
	}

	zone->blocks += 1;
	*dst = block;

	return 0;
}
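
/*
 * Example of the insertion walk above, assuming BM_RTREE_LEVEL_SHIFT == 9:
 * adding block number 1000 to a two-level tree uses index 1000 >> 9 = 1 in
 * the root node and index 1000 & BM_RTREE_LEVEL_MASK = 488 one level below,
 * where the pointer to the new leaf is stored; memory_bm_find_bit() performs
 * the same walk when looking bits up.
 */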

static void free_zone_bm_rtree(struct mem_zone_bm_rtree *zone,
			       int clear_nosave_free);

/**
 * create_zone_bm_rtree - Create a radix tree for one zone.
 *
 * Allocates the mem_zone_bm_rtree structure and initializes it.
 * This function also allocates and builds the radix tree for the
 * zone.
 */
static struct mem_zone_bm_rtree *create_zone_bm_rtree(gfp_t gfp_mask,
						      int safe_needed,
						      struct chain_allocator *ca,
						      unsigned long start,
						      unsigned long end)
{
	struct mem_zone_bm_rtree *zone;
	unsigned int i, nr_blocks;
	unsigned long pages;

	pages = end - start;
	zone = chain_alloc(ca, sizeof(struct mem_zone_bm_rtree));
	if (!zone)
		return NULL;

	INIT_LIST_HEAD(&zone->nodes);
	INIT_LIST_HEAD(&zone->leaves);
	zone->start_pfn = start;
	zone->end_pfn = end;
	nr_blocks = DIV_ROUND_UP(pages, BM_BITS_PER_BLOCK);

	for (i = 0; i < nr_blocks; i++) {
		if (add_rtree_block(zone, gfp_mask, safe_needed, ca)) {
			free_zone_bm_rtree(zone, PG_UNSAFE_CLEAR);
			return NULL;
		}
	}

	return zone;
}

/**
 * free_zone_bm_rtree - Free the memory of the radix tree.
 *
 * Free all node pages of the radix tree.  The mem_zone_bm_rtree
 * structure itself is not freed here nor are the rtree_node
 * structs.
 */
static void free_zone_bm_rtree(struct mem_zone_bm_rtree *zone,
			       int clear_nosave_free)
{
	struct rtree_node *node;

	list_for_each_entry(node, &zone->nodes, list)
		free_image_page(node->data, clear_nosave_free);

	list_for_each_entry(node, &zone->leaves, list)
		free_image_page(node->data, clear_nosave_free);
}

static void memory_bm_position_reset(struct memory_bitmap *bm)
{
	bm->cur.zone = list_entry(bm->zones.next, struct mem_zone_bm_rtree,
				  list);
	bm->cur.node = list_entry(bm->cur.zone->leaves.next,
				  struct rtree_node, list);
	bm->cur.node_pfn = 0;
	bm->cur.node_bit = 0;
}

static void memory_bm_free(struct memory_bitmap *bm, int clear_nosave_free);

struct mem_extent {
	struct list_head hook;
	unsigned long start;
	unsigned long end;
};

/**
 * free_mem_extents - Free a list of memory extents.
 * @list: List of extents to free.
 */
static void free_mem_extents(struct list_head *list)
{
	struct mem_extent *ext, *aux;

	list_for_each_entry_safe(ext, aux, list, hook) {
		list_del(&ext->hook);
		kfree(ext);
	}
}

/**
 * create_mem_extents - Create a list of memory extents.
 * @list: List to put the extents into.
 * @gfp_mask: Mask to use for memory allocations.
 *
 * The extents represent contiguous ranges of PFNs.
 */
static int create_mem_extents(struct list_head *list, gfp_t gfp_mask)
{
	struct zone *zone;

	INIT_LIST_HEAD(list);

	for_each_populated_zone(zone) {
		unsigned long zone_start, zone_end;
		struct mem_extent *ext, *cur, *aux;

		zone_start = zone->zone_start_pfn;
		zone_end = zone_end_pfn(zone);

		list_for_each_entry(ext, list, hook)
			if (zone_start <= ext->end)
				break;

		if (&ext->hook == list || zone_end < ext->start) {
			/* New extent is necessary */
			struct mem_extent *new_ext;

			new_ext = kzalloc(sizeof(struct mem_extent), gfp_mask);
			if (!new_ext) {
				free_mem_extents(list);
				return -ENOMEM;
			}
			new_ext->start = zone_start;
			new_ext->end = zone_end;
			list_add_tail(&new_ext->hook, &ext->hook);
			continue;
		}

		/* Merge this zone's range of PFNs with the existing one */
		if (zone_start < ext->start)
			ext->start = zone_start;
		if (zone_end > ext->end)
			ext->end = zone_end;

		/* More merging may be possible */
		cur = ext;
		list_for_each_entry_safe_continue(cur, aux, list, hook) {
			if (zone_end < cur->start)
				break;
			if (zone_end < cur->end)
				ext->end = cur->end;
			list_del(&cur->hook);
			kfree(cur);
		}
	}

	return 0;
}
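
/*
 * For example (hypothetical PFN ranges): if one populated zone spans PFNs
 * [0, 1000) and another spans [500, 2000), the loop above merges them into
 * a single extent [0, 2000), so each extent, and hence each radix tree
 * created from it, covers a disjoint range of page frames.
 */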

/**
 * memory_bm_create - Allocate memory for a memory bitmap.
 */
static int memory_bm_create(struct memory_bitmap *bm, gfp_t gfp_mask,
			    int safe_needed)
{
	struct chain_allocator ca;
	struct list_head mem_extents;
	struct mem_extent *ext;
	int error;

	chain_init(&ca, gfp_mask, safe_needed);
	INIT_LIST_HEAD(&bm->zones);

	error = create_mem_extents(&mem_extents, gfp_mask);
	if (error)
		return error;

	list_for_each_entry(ext, &mem_extents, hook) {
		struct mem_zone_bm_rtree *zone;

		zone = create_zone_bm_rtree(gfp_mask, safe_needed, &ca,
					    ext->start, ext->end);
		if (!zone) {
			error = -ENOMEM;
			goto Error;
		}
		list_add_tail(&zone->list, &bm->zones);
	}

	bm->p_list = ca.chain;
	memory_bm_position_reset(bm);
 Exit:
	free_mem_extents(&mem_extents);
	return error;

 Error:
	bm->p_list = ca.chain;
	memory_bm_free(bm, PG_UNSAFE_CLEAR);
	goto Exit;
}

/**
 * memory_bm_free - Free memory occupied by the memory bitmap.
 * @bm: Memory bitmap.
 */
static void memory_bm_free(struct memory_bitmap *bm, int clear_nosave_free)
{
	struct mem_zone_bm_rtree *zone;

	list_for_each_entry(zone, &bm->zones, list)
		free_zone_bm_rtree(zone, clear_nosave_free);

	free_list_of_pages(bm->p_list, clear_nosave_free);

	INIT_LIST_HEAD(&bm->zones);
}

/**
 * memory_bm_find_bit - Find the bit for a given PFN in a memory bitmap.
 *
 * Find the bit in memory bitmap @bm that corresponds to the given PFN.
 * The cur.zone, cur.node and cur.node_pfn members of @bm are updated.
 *
 * Walk the radix tree to find the page containing the bit that represents @pfn
 * and return the position of the bit in @addr and @bit_nr.
 */
static int memory_bm_find_bit(struct memory_bitmap *bm, unsigned long pfn,
			      void **addr, unsigned int *bit_nr)
{
	struct mem_zone_bm_rtree *curr, *zone;
	struct rtree_node *node;
	int i, block_nr;

	zone = bm->cur.zone;

	if (pfn >= zone->start_pfn && pfn < zone->end_pfn)
		goto zone_found;

	zone = NULL;

	/* Find the right zone */
	list_for_each_entry(curr, &bm->zones, list) {
		if (pfn >= curr->start_pfn && pfn < curr->end_pfn) {
			zone = curr;
			break;
		}
	}

	if (!zone)
		return -EFAULT;

zone_found:
	/*
	 * We have found the zone.  Now walk the radix tree to find the leaf
	 * node for our PFN.
	 */
	node = bm->cur.node;
	if (((pfn - zone->start_pfn) & ~BM_BLOCK_MASK) == bm->cur.node_pfn)
		goto node_found;

	node = zone->rtree;
	block_nr = (pfn - zone->start_pfn) >> BM_BLOCK_SHIFT;

	for (i = zone->levels; i > 0; i--) {
		int index;

		index = block_nr >> ((i - 1) * BM_RTREE_LEVEL_SHIFT);
		index &= BM_RTREE_LEVEL_MASK;
		BUG_ON(node->data[index] == 0);
		node = (struct rtree_node *)node->data[index];
	}

node_found:
	/* Update last position */
	bm->cur.zone = zone;
	bm->cur.node = node;
	bm->cur.node_pfn = (pfn - zone->start_pfn) & ~BM_BLOCK_MASK;

	/* Set return values */
	*addr = node->data;
	*bit_nr = (pfn - zone->start_pfn) & BM_BLOCK_MASK;

	return 0;
}

static void memory_bm_set_bit(struct memory_bitmap *bm, unsigned long pfn)
{
	void *addr;
	unsigned int bit;
	int error;

	error = memory_bm_find_bit(bm, pfn, &addr, &bit);
	BUG_ON(error);
	set_bit(bit, addr);
}

static int mem_bm_set_bit_check(struct memory_bitmap *bm, unsigned long pfn)
{
	void *addr;
	unsigned int bit;
	int error;

	error = memory_bm_find_bit(bm, pfn, &addr, &bit);
	if (!error)
		set_bit(bit, addr);

	return error;
}

static void memory_bm_clear_bit(struct memory_bitmap *bm, unsigned long pfn)
{
	void *addr;
	unsigned int bit;
	int error;

	error = memory_bm_find_bit(bm, pfn, &addr, &bit);
	BUG_ON(error);
	clear_bit(bit, addr);
}

static void memory_bm_clear_current(struct memory_bitmap *bm)
{
	int bit;

	bit = max(bm->cur.node_bit - 1, 0);
	clear_bit(bit, bm->cur.node->data);
}

static int memory_bm_test_bit(struct memory_bitmap *bm, unsigned long pfn)
{
	void *addr;
	unsigned int bit;
	int error;

	error = memory_bm_find_bit(bm, pfn, &addr, &bit);
	BUG_ON(error);
	return test_bit(bit, addr);
}

static bool memory_bm_pfn_present(struct memory_bitmap *bm, unsigned long pfn)
{
	void *addr;
	unsigned int bit;

	return !memory_bm_find_bit(bm, pfn, &addr, &bit);
}

/*
 * rtree_next_node - Jump to the next leaf node.
 *
 * Set the position to the beginning of the next node in the
 * memory bitmap.  This is either the next node in the current
 * zone's radix tree or the first node in the radix tree of the
 * next zone.
 *
 * Return true if there is a next node, false otherwise.
 */
static bool rtree_next_node(struct memory_bitmap *bm)
{
	if (!list_is_last(&bm->cur.node->list, &bm->cur.zone->leaves)) {
		bm->cur.node = list_entry(bm->cur.node->list.next,
					  struct rtree_node, list);
		bm->cur.node_pfn += BM_BITS_PER_BLOCK;
		bm->cur.node_bit = 0;
		touch_softlockup_watchdog();
		return true;
	}

	/* No more nodes, goto next zone */
	if (!list_is_last(&bm->cur.zone->list, &bm->zones)) {
		bm->cur.zone = list_entry(bm->cur.zone->list.next,
					  struct mem_zone_bm_rtree, list);
		bm->cur.node = list_entry(bm->cur.zone->leaves.next,
					  struct rtree_node, list);
		bm->cur.node_pfn = 0;
		bm->cur.node_bit = 0;
		return true;
	}

	/* No more zones */
	return false;
}
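
/*
 * Sketch of the canonical iteration pattern over a memory bitmap with the
 * iterator defined below (do_something() is a placeholder; compare
 * clear_free_pages() and duplicate_memory_bitmap() later in this file):
 *
 *	memory_bm_position_reset(bm);
 *	for (pfn = memory_bm_next_pfn(bm); pfn != BM_END_OF_MAP;
 *	     pfn = memory_bm_next_pfn(bm))
 *		do_something(pfn);
 */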

/**
 * memory_bm_next_pfn - Find the next set bit in a memory bitmap.
 * @bm: Memory bitmap.
 *
 * Starting from the last returned position this function searches for the next
 * set bit in @bm and returns the PFN represented by it.  If no more bits are
 * set, BM_END_OF_MAP is returned.
 *
 * It is required to run memory_bm_position_reset() before the first call to
 * this function for the given memory bitmap.
 */
static unsigned long memory_bm_next_pfn(struct memory_bitmap *bm)
{
	unsigned long bits, pfn, pages;
	int bit;

	do {
		pages = bm->cur.zone->end_pfn - bm->cur.zone->start_pfn;
		bits = min(pages - bm->cur.node_pfn, BM_BITS_PER_BLOCK);
		bit = find_next_bit(bm->cur.node->data, bits,
				    bm->cur.node_bit);
		if (bit < bits) {
			pfn = bm->cur.zone->start_pfn + bm->cur.node_pfn + bit;
			bm->cur.node_bit = bit + 1;
			return pfn;
		}
	} while (rtree_next_node(bm));

	return BM_END_OF_MAP;
}

/*
 * This structure represents a range of page frames the contents of which
 * should not be saved during hibernation.
 */
struct nosave_region {
	struct list_head list;
	unsigned long start_pfn;
	unsigned long end_pfn;
};

static LIST_HEAD(nosave_regions);

static void recycle_zone_bm_rtree(struct mem_zone_bm_rtree *zone)
{
	struct rtree_node *node;

	list_for_each_entry(node, &zone->nodes, list)
		recycle_safe_page(node->data);

	list_for_each_entry(node, &zone->leaves, list)
		recycle_safe_page(node->data);
}

static void memory_bm_recycle(struct memory_bitmap *bm)
{
	struct mem_zone_bm_rtree *zone;
	struct linked_page *p_list;

	list_for_each_entry(zone, &bm->zones, list)
		recycle_zone_bm_rtree(zone);

	p_list = bm->p_list;
	while (p_list) {
		struct linked_page *lp = p_list;

		p_list = lp->next;
		recycle_safe_page(lp);
	}
}

/**
 * __register_nosave_region - Register a region of unsaveable memory.
 *
 * Register a range of page frames the contents of which should not be saved
 * during hibernation (to be used in the early initialization code).
 */
void __init __register_nosave_region(unsigned long start_pfn,
				     unsigned long end_pfn, int use_kmalloc)
{
	struct nosave_region *region;

	if (start_pfn >= end_pfn)
		return;

	if (!list_empty(&nosave_regions)) {
		/* Try to extend the previous region (they should be sorted) */
		region = list_entry(nosave_regions.prev,
				    struct nosave_region, list);
		if (region->end_pfn == start_pfn) {
			region->end_pfn = end_pfn;
			goto Report;
		}
	}
	if (use_kmalloc) {
		/* During init, this shouldn't fail */
		region = kmalloc(sizeof(struct nosave_region), GFP_KERNEL);
		BUG_ON(!region);
	} else {
		/* This allocation cannot fail */
		region = memblock_virt_alloc(sizeof(struct nosave_region), 0);
	}
	region->start_pfn = start_pfn;
	region->end_pfn = end_pfn;
	list_add_tail(&region->list, &nosave_regions);
 Report:
	printk(KERN_INFO "PM: Registered nosave memory: [mem %#010llx-%#010llx]\n",
	       (unsigned long long) start_pfn << PAGE_SHIFT,
	       ((unsigned long long) end_pfn << PAGE_SHIFT) - 1);
}

/*
 * Set bits in this map correspond to the page frames the contents of which
 * should not be saved during the suspend.
 */
static struct memory_bitmap *forbidden_pages_map;

/* Set bits in this map correspond to free page frames. */
static struct memory_bitmap *free_pages_map;

/*
 * Each page frame allocated for creating the image is marked by setting the
 * corresponding bits in forbidden_pages_map and free_pages_map simultaneously.
 */

void swsusp_set_page_free(struct page *page)
{
	if (free_pages_map)
		memory_bm_set_bit(free_pages_map, page_to_pfn(page));
}

static int swsusp_page_is_free(struct page *page)
{
	return free_pages_map ?
		memory_bm_test_bit(free_pages_map, page_to_pfn(page)) : 0;
}

void swsusp_unset_page_free(struct page *page)
{
	if (free_pages_map)
		memory_bm_clear_bit(free_pages_map, page_to_pfn(page));
}

static void swsusp_set_page_forbidden(struct page *page)
{
	if (forbidden_pages_map)
		memory_bm_set_bit(forbidden_pages_map, page_to_pfn(page));
}

int swsusp_page_is_forbidden(struct page *page)
{
	return forbidden_pages_map ?
		memory_bm_test_bit(forbidden_pages_map, page_to_pfn(page)) : 0;
}

static void swsusp_unset_page_forbidden(struct page *page)
{
	if (forbidden_pages_map)
		memory_bm_clear_bit(forbidden_pages_map, page_to_pfn(page));
}

/**
 * mark_nosave_pages - Mark pages that should not be saved.
 * @bm: Memory bitmap.
 *
 * Set the bits in @bm that correspond to the page frames the contents of which
 * should not be saved.
 */
static void mark_nosave_pages(struct memory_bitmap *bm)
{
	struct nosave_region *region;

	if (list_empty(&nosave_regions))
		return;

	list_for_each_entry(region, &nosave_regions, list) {
		unsigned long pfn;

		pr_debug("PM: Marking nosave pages: [mem %#010llx-%#010llx]\n",
			 (unsigned long long) region->start_pfn << PAGE_SHIFT,
			 ((unsigned long long) region->end_pfn << PAGE_SHIFT)
				- 1);

		for (pfn = region->start_pfn; pfn < region->end_pfn; pfn++)
			if (pfn_valid(pfn)) {
				/*
				 * It is safe to ignore the result of
				 * mem_bm_set_bit_check() here, since we won't
				 * touch the PFNs for which the error is
				 * returned anyway.
				 */
				mem_bm_set_bit_check(bm, pfn);
			}
	}
}
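
/*
 * Note that a page frame whose bit is set in *both* bitmaps is one that was
 * allocated for the image itself: get_image_page() and alloc_image_page()
 * set the two bits together, and swsusp_free() later releases exactly those
 * PFNs that are set in both forbidden_pages_map and free_pages_map.
 */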

/**
 * create_basic_memory_bitmaps - Create bitmaps to hold basic page information.
 *
 * Create bitmaps needed for marking page frames that should not be saved and
 * free page frames.  The forbidden_pages_map and free_pages_map pointers are
 * only modified if everything goes well, because we don't want the bits to be
 * touched before both bitmaps are set up.
 */
int create_basic_memory_bitmaps(void)
{
	struct memory_bitmap *bm1, *bm2;
	int error = 0;

	if (forbidden_pages_map && free_pages_map)
		return 0;
	else
		BUG_ON(forbidden_pages_map || free_pages_map);

	bm1 = kzalloc(sizeof(struct memory_bitmap), GFP_KERNEL);
	if (!bm1)
		return -ENOMEM;

	error = memory_bm_create(bm1, GFP_KERNEL, PG_ANY);
	if (error)
		goto Free_first_object;

	bm2 = kzalloc(sizeof(struct memory_bitmap), GFP_KERNEL);
	if (!bm2)
		goto Free_first_bitmap;

	error = memory_bm_create(bm2, GFP_KERNEL, PG_ANY);
	if (error)
		goto Free_second_object;

	forbidden_pages_map = bm1;
	free_pages_map = bm2;
	mark_nosave_pages(forbidden_pages_map);

	pr_debug("PM: Basic memory bitmaps created\n");

	return 0;

 Free_second_object:
	kfree(bm2);
 Free_first_bitmap:
	memory_bm_free(bm1, PG_UNSAFE_CLEAR);
 Free_first_object:
	kfree(bm1);
	return -ENOMEM;
}

/**
 * free_basic_memory_bitmaps - Free memory bitmaps holding basic information.
 *
 * Free memory bitmaps allocated by create_basic_memory_bitmaps().  The
 * auxiliary pointers are necessary so that the bitmaps themselves are not
 * referred to while they are being freed.
 */
void free_basic_memory_bitmaps(void)
{
	struct memory_bitmap *bm1, *bm2;

	if (WARN_ON(!(forbidden_pages_map && free_pages_map)))
		return;

	bm1 = forbidden_pages_map;
	bm2 = free_pages_map;
	forbidden_pages_map = NULL;
	free_pages_map = NULL;
	memory_bm_free(bm1, PG_UNSAFE_CLEAR);
	kfree(bm1);
	memory_bm_free(bm2, PG_UNSAFE_CLEAR);
	kfree(bm2);

	pr_debug("PM: Basic memory bitmaps freed\n");
}

void clear_free_pages(void)
{
#ifdef CONFIG_PAGE_POISONING_ZERO
	struct memory_bitmap *bm = free_pages_map;
	unsigned long pfn;

	if (WARN_ON(!(free_pages_map)))
		return;

	memory_bm_position_reset(bm);
	pfn = memory_bm_next_pfn(bm);
	while (pfn != BM_END_OF_MAP) {
		if (pfn_valid(pfn))
			clear_highpage(pfn_to_page(pfn));

		pfn = memory_bm_next_pfn(bm);
	}
	memory_bm_position_reset(bm);
	pr_info("PM: free pages cleared after restore\n");
#endif /* PAGE_POISONING_ZERO */
}

/**
 * snapshot_additional_pages - Estimate the number of extra pages needed.
 * @zone: Memory zone to carry out the computation for.
 *
 * Estimate the number of additional pages needed for setting up the
 * hibernation image data structures for @zone (usually, the returned value is
 * greater than the exact number).
 */
unsigned int snapshot_additional_pages(struct zone *zone)
{
	unsigned int rtree, nodes;

	rtree = nodes = DIV_ROUND_UP(zone->spanned_pages, BM_BITS_PER_BLOCK);
	rtree += DIV_ROUND_UP(rtree * sizeof(struct rtree_node),
			      LINKED_PAGE_DATA_SIZE);
	while (nodes > 1) {
		nodes = DIV_ROUND_UP(nodes, BM_ENTRIES_PER_LEVEL);
		rtree += nodes;
	}

	return 2 * rtree;
}
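
/*
 * Worked example, assuming a 64-bit kernel with 4 KiB pages: for a zone
 * spanning 1 GiB (262144 page frames), DIV_ROUND_UP(262144, 32768) = 8
 * bitmap pages are needed.  The eight struct rtree_node wrappers fit into a
 * single linked page and the eight leaves hang off one inner node, so
 * rtree = 8 + 1 + 1 = 10 and the estimate returned is 2 * 10 = 20 pages
 * (doubled, presumably to cover a pair of bitmaps such as orig_bm and
 * copy_bm).
 */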

#ifdef CONFIG_HIGHMEM
/**
 * count_free_highmem_pages - Compute the total number of free highmem pages.
 *
 * The returned number is system-wide.
 */
static unsigned int count_free_highmem_pages(void)
{
	struct zone *zone;
	unsigned int cnt = 0;

	for_each_populated_zone(zone)
		if (is_highmem(zone))
			cnt += zone_page_state(zone, NR_FREE_PAGES);

	return cnt;
}

/**
 * saveable_highmem_page - Check if a highmem page is saveable.
 *
 * Determine whether a highmem page should be included in a hibernation image.
 *
 * We should save the page if it isn't Nosave or NosaveFree, or Reserved,
 * and it isn't part of a free chunk of pages.
 */
static struct page *saveable_highmem_page(struct zone *zone, unsigned long pfn)
{
	struct page *page;

	if (!pfn_valid(pfn))
		return NULL;

	page = pfn_to_page(pfn);
	if (page_zone(page) != zone)
		return NULL;

	BUG_ON(!PageHighMem(page));

	if (swsusp_page_is_forbidden(page) || swsusp_page_is_free(page) ||
	    PageReserved(page))
		return NULL;

	if (page_is_guard(page))
		return NULL;

	return page;
}

/**
 * count_highmem_pages - Compute the total number of saveable highmem pages.
 */
static unsigned int count_highmem_pages(void)
{
	struct zone *zone;
	unsigned int n = 0;

	for_each_populated_zone(zone) {
		unsigned long pfn, max_zone_pfn;

		if (!is_highmem(zone))
			continue;

		mark_free_pages(zone);
		max_zone_pfn = zone_end_pfn(zone);
		for (pfn = zone->zone_start_pfn; pfn < max_zone_pfn; pfn++)
			if (saveable_highmem_page(zone, pfn))
				n++;
	}
	return n;
}
#else
static inline void *saveable_highmem_page(struct zone *z, unsigned long p)
{
	return NULL;
}
#endif /* CONFIG_HIGHMEM */

/**
 * saveable_page - Check if the given page is saveable.
 *
 * Determine whether a non-highmem page should be included in a hibernation
 * image.
 *
 * We should save the page if it isn't Nosave, and is not in the range
 * of pages statically defined as 'unsaveable', and it isn't part of
 * a free chunk of pages.
 */
static struct page *saveable_page(struct zone *zone, unsigned long pfn)
{
	struct page *page;

	if (!pfn_valid(pfn))
		return NULL;

	page = pfn_to_page(pfn);
	if (page_zone(page) != zone)
		return NULL;

	BUG_ON(PageHighMem(page));

	if (swsusp_page_is_forbidden(page) || swsusp_page_is_free(page))
		return NULL;

	if (PageReserved(page)
	    && (!kernel_page_present(page) || pfn_is_nosave(pfn)))
		return NULL;

	if (page_is_guard(page))
		return NULL;

	return page;
}

/**
 * count_data_pages - Compute the total number of saveable non-highmem pages.
 */
static unsigned int count_data_pages(void)
{
	struct zone *zone;
	unsigned long pfn, max_zone_pfn;
	unsigned int n = 0;

	for_each_populated_zone(zone) {
		if (is_highmem(zone))
			continue;

		mark_free_pages(zone);
		max_zone_pfn = zone_end_pfn(zone);
		for (pfn = zone->zone_start_pfn; pfn < max_zone_pfn; pfn++)
			if (saveable_page(zone, pfn))
				n++;
	}
	return n;
}

/*
 * This is needed, because copy_page and memcpy are not usable for copying
 * task structs.
 */
static inline void do_copy_page(long *dst, long *src)
{
	int n;

	for (n = PAGE_SIZE / sizeof(long); n; n--)
		*dst++ = *src++;
}

/**
 * safe_copy_page - Copy a page in a safe way.
 *
 * Check if the page we are going to copy is marked as present in the kernel
 * page tables (this always is the case if CONFIG_DEBUG_PAGEALLOC is not set
 * and in that case kernel_page_present() always returns 'true').
 */
static void safe_copy_page(void *dst, struct page *s_page)
{
	if (kernel_page_present(s_page)) {
		do_copy_page(dst, page_address(s_page));
	} else {
		kernel_map_pages(s_page, 1, 1);
		do_copy_page(dst, page_address(s_page));
		kernel_map_pages(s_page, 1, 0);
	}
}

#ifdef CONFIG_HIGHMEM
static inline struct page *page_is_saveable(struct zone *zone, unsigned long pfn)
{
	return is_highmem(zone) ?
		saveable_highmem_page(zone, pfn) : saveable_page(zone, pfn);
}

static void copy_data_page(unsigned long dst_pfn, unsigned long src_pfn)
{
	struct page *s_page, *d_page;
	void *src, *dst;

	s_page = pfn_to_page(src_pfn);
	d_page = pfn_to_page(dst_pfn);
	if (PageHighMem(s_page)) {
		src = kmap_atomic(s_page);
		dst = kmap_atomic(d_page);
		do_copy_page(dst, src);
		kunmap_atomic(dst);
		kunmap_atomic(src);
	} else {
		if (PageHighMem(d_page)) {
			/*
			 * The page pointed to by src may contain some kernel
			 * data modified by kmap_atomic()
			 */
			safe_copy_page(buffer, s_page);
			dst = kmap_atomic(d_page);
			copy_page(dst, buffer);
			kunmap_atomic(dst);
		} else {
			safe_copy_page(page_address(d_page), s_page);
		}
	}
}
#else
#define page_is_saveable(zone, pfn)	saveable_page(zone, pfn)

static inline void copy_data_page(unsigned long dst_pfn, unsigned long src_pfn)
{
	safe_copy_page(page_address(pfn_to_page(dst_pfn)),
		       pfn_to_page(src_pfn));
}
#endif /* CONFIG_HIGHMEM */

static void copy_data_pages(struct memory_bitmap *copy_bm,
			    struct memory_bitmap *orig_bm)
{
	struct zone *zone;
	unsigned long pfn;

	for_each_populated_zone(zone) {
		unsigned long max_zone_pfn;

		mark_free_pages(zone);
		max_zone_pfn = zone_end_pfn(zone);
		for (pfn = zone->zone_start_pfn; pfn < max_zone_pfn; pfn++)
			if (page_is_saveable(zone, pfn))
				memory_bm_set_bit(orig_bm, pfn);
	}
	memory_bm_position_reset(orig_bm);
	memory_bm_position_reset(copy_bm);
	for (;;) {
		pfn = memory_bm_next_pfn(orig_bm);
		if (unlikely(pfn == BM_END_OF_MAP))
			break;
		copy_data_page(memory_bm_next_pfn(copy_bm), pfn);
	}
}

/* Total number of image pages */
static unsigned int nr_copy_pages;
/* Number of pages needed for saving the original pfns of the image pages */
static unsigned int nr_meta_pages;
/*
 * Numbers of normal and highmem page frames allocated for hibernation image
 * before suspending devices.
 */
static unsigned int alloc_normal, alloc_highmem;
/*
 * Memory bitmap used for marking saveable pages (during hibernation) or
 * hibernation image pages (during restore)
 */
static struct memory_bitmap orig_bm;
/*
 * Memory bitmap used during hibernation for marking allocated page frames that
 * will contain copies of saveable pages.  During restore it is initially used
 * for marking hibernation image pages, but then the set bits from it are
 * duplicated in @orig_bm and it is released.  On highmem systems it is next
 * used for marking "safe" highmem pages, but it has to be reinitialized for
 * this purpose.
 */
static struct memory_bitmap copy_bm;

/**
 * swsusp_free - Free pages allocated for hibernation image.
 *
 * Image pages are allocated before snapshot creation, so they need to be
 * released after resume.
 */
void swsusp_free(void)
{
	unsigned long fb_pfn, fr_pfn;

	if (!forbidden_pages_map || !free_pages_map)
		goto out;

	memory_bm_position_reset(forbidden_pages_map);
	memory_bm_position_reset(free_pages_map);

loop:
	fr_pfn = memory_bm_next_pfn(free_pages_map);
	fb_pfn = memory_bm_next_pfn(forbidden_pages_map);

	/*
	 * Find the next bit set in both bitmaps.  This is guaranteed to
	 * terminate when fb_pfn == fr_pfn == BM_END_OF_MAP.
	 */
	do {
		if (fb_pfn < fr_pfn)
			fb_pfn = memory_bm_next_pfn(forbidden_pages_map);
		if (fr_pfn < fb_pfn)
			fr_pfn = memory_bm_next_pfn(free_pages_map);
	} while (fb_pfn != fr_pfn);

	if (fr_pfn != BM_END_OF_MAP && pfn_valid(fr_pfn)) {
		struct page *page = pfn_to_page(fr_pfn);

		memory_bm_clear_current(forbidden_pages_map);
		memory_bm_clear_current(free_pages_map);
		hibernate_restore_unprotect_page(page_address(page));
		__free_page(page);
		goto loop;
	}

out:
	nr_copy_pages = 0;
	nr_meta_pages = 0;
	restore_pblist = NULL;
	buffer = NULL;
	alloc_normal = 0;
	alloc_highmem = 0;
	hibernate_restore_protection_end();
}

/* Helper functions used for the shrinking of memory. */

#define GFP_IMAGE	(GFP_KERNEL | __GFP_NOWARN)

/**
 * preallocate_image_pages - Allocate a number of pages for hibernation image.
 * @nr_pages: Number of page frames to allocate.
 * @mask: GFP flags to use for the allocation.
 *
 * Return value: Number of page frames actually allocated
 */
static unsigned long preallocate_image_pages(unsigned long nr_pages, gfp_t mask)
{
	unsigned long nr_alloc = 0;

	while (nr_pages > 0) {
		struct page *page;

		page = alloc_image_page(mask);
		if (!page)
			break;
		memory_bm_set_bit(&copy_bm, page_to_pfn(page));
		if (PageHighMem(page))
			alloc_highmem++;
		else
			alloc_normal++;
		nr_pages--;
		nr_alloc++;
	}

	return nr_alloc;
}

static unsigned long preallocate_image_memory(unsigned long nr_pages,
					      unsigned long avail_normal)
{
	unsigned long alloc;

	if (avail_normal <= alloc_normal)
		return 0;

	alloc = avail_normal - alloc_normal;
	if (nr_pages < alloc)
		alloc = nr_pages;

	return preallocate_image_pages(alloc, GFP_IMAGE);
}

#ifdef CONFIG_HIGHMEM
static unsigned long preallocate_image_highmem(unsigned long nr_pages)
{
	return preallocate_image_pages(nr_pages, GFP_IMAGE | __GFP_HIGHMEM);
}

/**
 * __fraction - Compute (an approximation of) x * (multiplier / base).
 */
static unsigned long __fraction(u64 x, u64 multiplier, u64 base)
{
	x *= multiplier;
	do_div(x, base);
	return (unsigned long)x;
}

static unsigned long preallocate_highmem_fraction(unsigned long nr_pages,
						  unsigned long highmem,
						  unsigned long total)
{
	unsigned long alloc = __fraction(nr_pages, highmem, total);

	return preallocate_image_pages(alloc, GFP_IMAGE | __GFP_HIGHMEM);
}
#else /* CONFIG_HIGHMEM */
static inline unsigned long preallocate_image_highmem(unsigned long nr_pages)
{
	return 0;
}

static inline unsigned long preallocate_highmem_fraction(unsigned long nr_pages,
							 unsigned long highmem,
							 unsigned long total)
{
	return 0;
}
#endif /* CONFIG_HIGHMEM */

/**
 * free_unnecessary_pages - Release preallocated pages not needed for the image.
 */
static unsigned long free_unnecessary_pages(void)
{
	unsigned long save, to_free_normal, to_free_highmem, free;

	save = count_data_pages();
	if (alloc_normal >= save) {
		to_free_normal = alloc_normal - save;
		save = 0;
	} else {
		to_free_normal = 0;
		save -= alloc_normal;
	}
	save += count_highmem_pages();
	if (alloc_highmem >= save) {
		to_free_highmem = alloc_highmem - save;
	} else {
		to_free_highmem = 0;
		save -= alloc_highmem;
		if (to_free_normal > save)
			to_free_normal -= save;
		else
			to_free_normal = 0;
	}
	free = to_free_normal + to_free_highmem;

	memory_bm_position_reset(&copy_bm);

	while (to_free_normal > 0 || to_free_highmem > 0) {
		unsigned long pfn = memory_bm_next_pfn(&copy_bm);
		struct page *page = pfn_to_page(pfn);

		if (PageHighMem(page)) {
			if (!to_free_highmem)
				continue;
			to_free_highmem--;
			alloc_highmem--;
		} else {
			if (!to_free_normal)
				continue;
			to_free_normal--;
			alloc_normal--;
		}
		memory_bm_clear_bit(&copy_bm, pfn);
		swsusp_unset_page_forbidden(page);
		swsusp_unset_page_free(page);
		__free_page(page);
	}

	return free;
}

/**
 * minimum_image_size - Estimate the minimum acceptable size of an image.
 * @saveable: Number of saveable pages in the system.
 *
 * We want to avoid attempting to free too much memory too hard, so estimate the
 * minimum acceptable size of a hibernation image to use as the lower limit for
 * preallocating memory.
 *
 * We assume that the minimum image size should be proportional to
 *
 * [number of saveable pages] - [number of pages that can be freed in theory]
 *
 * where the second term is the sum of (1) reclaimable slab pages, (2) active
 * and (3) inactive anonymous pages, (4) active and (5) inactive file pages,
 * minus mapped file pages.
 */
static unsigned long minimum_image_size(unsigned long saveable)
{
	unsigned long size;

	size = global_page_state(NR_SLAB_RECLAIMABLE)
		+ global_node_page_state(NR_ACTIVE_ANON)
		+ global_node_page_state(NR_INACTIVE_ANON)
		+ global_node_page_state(NR_ACTIVE_FILE)
		+ global_node_page_state(NR_INACTIVE_FILE)
		- global_node_page_state(NR_FILE_MAPPED);

	return saveable <= size ? 0 : saveable - size;
}
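
/*
 * For instance, with 100000 saveable pages and the above reclaimable
 * counters summing to 60000 pages, the estimated minimum image size is
 * 40000 pages; if the counters sum to 100000 or more, the lower limit
 * simply becomes 0.
 */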

/**
 * hibernate_preallocate_memory - Preallocate memory for hibernation image.
 *
 * To create a hibernation image it is necessary to make a copy of every page
 * frame in use.  We also need a number of page frames to be free during
 * hibernation for allocations made while saving the image and for device
 * drivers, in case they need to allocate memory from their hibernation
 * callbacks (these two numbers are given by PAGES_FOR_IO, which is a rough
 * estimate, and reserved_size divided by PAGE_SIZE, which is tunable through
 * /sys/power/reserved_size, respectively).  To make this happen, we compute
 * the total number of available page frames and allocate at least
 *
 * ([page frames total] + PAGES_FOR_IO + [metadata pages]) / 2
 *  + 2 * DIV_ROUND_UP(reserved_size, PAGE_SIZE)
 *
 * of them, which corresponds to the maximum size of a hibernation image.
 *
 * If image_size is set below the number following from the above formula,
 * the preallocation of memory is continued until the total number of saveable
 * pages in the system is below the requested image size or the minimum
 * acceptable image size returned by minimum_image_size(), whichever is greater.
 */
int hibernate_preallocate_memory(void)
{
	struct zone *zone;
	unsigned long saveable, size, max_size, count, highmem, pages = 0;
	unsigned long alloc, save_highmem, pages_highmem, avail_normal;
	ktime_t start, stop;
	int error;

	printk(KERN_INFO "PM: Preallocating image memory... ");
	start = ktime_get();

	error = memory_bm_create(&orig_bm, GFP_IMAGE, PG_ANY);
	if (error)
		goto err_out;

	error = memory_bm_create(&copy_bm, GFP_IMAGE, PG_ANY);
	if (error)
		goto err_out;

	alloc_normal = 0;
	alloc_highmem = 0;

	/* Count the number of saveable data pages. */
	save_highmem = count_highmem_pages();
	saveable = count_data_pages();

	/*
	 * Compute the total number of page frames we can use (count) and the
	 * number of pages needed for image metadata (size).
	 */
	count = saveable;
	saveable += save_highmem;
	highmem = save_highmem;
	size = 0;
	for_each_populated_zone(zone) {
		size += snapshot_additional_pages(zone);
		if (is_highmem(zone))
			highmem += zone_page_state(zone, NR_FREE_PAGES);
		else
			count += zone_page_state(zone, NR_FREE_PAGES);
	}
	avail_normal = count;
	count += highmem;
	count -= totalreserve_pages;

	/* Add number of pages required for page keys (s390 only). */
	size += page_key_additional_pages(saveable);

	/* Compute the maximum number of saveable pages to leave in memory. */
	max_size = (count - (size + PAGES_FOR_IO)) / 2
			- 2 * DIV_ROUND_UP(reserved_size, PAGE_SIZE);
	/* Compute the desired number of image pages specified by image_size. */
	size = DIV_ROUND_UP(image_size, PAGE_SIZE);
	if (size > max_size)
		size = max_size;
	/*
	 * If the desired number of image pages is at least as large as the
	 * current number of saveable pages in memory, allocate page frames for
	 * the image and we're done.
	 */
	if (size >= saveable) {
		pages = preallocate_image_highmem(save_highmem);
		pages += preallocate_image_memory(saveable - pages, avail_normal);
		goto out;
	}
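
	/*
	 * Illustrative numbers for the max_size computation above: with
	 * count = 1000000 usable page frames, size + PAGES_FOR_IO = 3024
	 * pages of metadata and I/O reserve, and reserved_size = 1 MiB
	 * (256 pages, assuming 4 KiB pages),
	 * max_size = (1000000 - 3024) / 2 - 2 * 256 = 497976 image pages.
	 */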

	/* Estimate the minimum size of the image. */
	pages = minimum_image_size(saveable);
	/*
	 * To avoid excessive pressure on the normal zone, leave room in it to
	 * accommodate an image of the minimum size (unless it's already too
	 * small, in which case don't preallocate pages from it at all).
	 */
	if (avail_normal > pages)
		avail_normal -= pages;
	else
		avail_normal = 0;
	if (size < pages)
		size = min_t(unsigned long, pages, max_size);

	/*
	 * Let the memory management subsystem know that we're going to need a
	 * large number of page frames to allocate and make it free some memory.
	 * NOTE: If this is not done, performance will be hurt badly in some
	 * test cases.
	 */
	shrink_all_memory(saveable - size);

	/*
	 * The number of saveable pages in memory was too high, so apply some
	 * pressure to decrease it.  First, make room for the largest possible
	 * image and fail if that doesn't work.  Next, try to decrease the size
	 * of the image as much as indicated by 'size' using allocations from
	 * highmem and non-highmem zones separately.
	 */
	pages_highmem = preallocate_image_highmem(highmem / 2);
	alloc = count - max_size;
	if (alloc > pages_highmem)
		alloc -= pages_highmem;
	else
		alloc = 0;
	pages = preallocate_image_memory(alloc, avail_normal);
	if (pages < alloc) {
		/* We have exhausted non-highmem pages, try highmem. */
		alloc -= pages;
		pages += pages_highmem;
		pages_highmem = preallocate_image_highmem(alloc);
		if (pages_highmem < alloc)
			goto err_out;
		pages += pages_highmem;
		/*
		 * size is the desired number of saveable pages to leave in
		 * memory, so try to preallocate (all memory - size) pages.
		 */
		alloc = (count - pages) - size;
		pages += preallocate_image_highmem(alloc);
	} else {
		/*
		 * There are approximately max_size saveable pages at this point
		 * and we want to reduce this number down to size.
		 */
		alloc = max_size - size;
		size = preallocate_highmem_fraction(alloc, highmem, count);
		pages_highmem += size;
		alloc -= size;
		size = preallocate_image_memory(alloc, avail_normal);
		pages_highmem += preallocate_image_highmem(alloc - size);
		pages += pages_highmem + size;
	}

	/*
	 * We only need as many page frames for the image as there are saveable
	 * pages in memory, but we have allocated more.  Release the excessive
	 * ones now.
	 */
	pages -= free_unnecessary_pages();

 out:
	stop = ktime_get();
	printk(KERN_CONT "done (allocated %lu pages)\n", pages);
	swsusp_show_speed(start, stop, pages, "Allocated");

	return 0;

 err_out:
	printk(KERN_CONT "\n");
	swsusp_free();
	return -ENOMEM;
}

#ifdef CONFIG_HIGHMEM
/**
 * count_pages_for_highmem - Count non-highmem pages needed for copying highmem.
 *
 * Compute the number of non-highmem pages that will be necessary for creating
 * copies of highmem pages.
 */
static unsigned int count_pages_for_highmem(unsigned int nr_highmem)
{
	unsigned int free_highmem = count_free_highmem_pages() + alloc_highmem;

	if (free_highmem >= nr_highmem)
		nr_highmem = 0;
	else
		nr_highmem -= free_highmem;

	return nr_highmem;
}
#else
static unsigned int count_pages_for_highmem(unsigned int nr_highmem) { return 0; }
#endif /* CONFIG_HIGHMEM */

/**
 * enough_free_mem - Check if there is enough free memory for the image.
 */
static int enough_free_mem(unsigned int nr_pages, unsigned int nr_highmem)
{
	struct zone *zone;
	unsigned int free = alloc_normal;

	for_each_populated_zone(zone)
		if (!is_highmem(zone))
			free += zone_page_state(zone, NR_FREE_PAGES);

	nr_pages += count_pages_for_highmem(nr_highmem);
	pr_debug("PM: Normal pages needed: %u + %u, available pages: %u\n",
		 nr_pages, PAGES_FOR_IO, free);

	return free > nr_pages + PAGES_FOR_IO;
}

#ifdef CONFIG_HIGHMEM
/**
 * get_highmem_buffer - Allocate a buffer for highmem pages.
 *
 * If there are some highmem pages in the hibernation image, we may need a
 * buffer to copy them and/or load their data.
 */
static inline int get_highmem_buffer(int safe_needed)
{
	buffer = get_image_page(GFP_ATOMIC | __GFP_COLD, safe_needed);
	return buffer ? 0 : -ENOMEM;
}

/**
 * alloc_highmem_pages - Allocate some highmem pages for the image.
 *
 * Try to allocate as many pages as needed, but if the number of free highmem
 * pages is less than that, allocate them all.
 */
static inline unsigned int alloc_highmem_pages(struct memory_bitmap *bm,
					       unsigned int nr_highmem)
{
	unsigned int to_alloc = count_free_highmem_pages();

	if (to_alloc > nr_highmem)
		to_alloc = nr_highmem;

	nr_highmem -= to_alloc;
	while (to_alloc-- > 0) {
		struct page *page;

		page = alloc_image_page(__GFP_HIGHMEM|__GFP_KSWAPD_RECLAIM);
		memory_bm_set_bit(bm, page_to_pfn(page));
	}
	return nr_highmem;
}
#else
static inline int get_highmem_buffer(int safe_needed) { return 0; }

static inline unsigned int alloc_highmem_pages(struct memory_bitmap *bm,
					       unsigned int n) { return 0; }
#endif /* CONFIG_HIGHMEM */

/**
 * swsusp_alloc - Allocate memory for hibernation image.
 *
 * We first try to allocate as many highmem pages as there are
 * saveable highmem pages in the system.  If that fails, we allocate
 * non-highmem pages for the copies of the remaining highmem ones.
 *
 * In this approach it is likely that the copies of highmem pages will
 * also be located in the high memory, because of the way in which
 * copy_data_pages() works.
/**
 * swsusp_alloc - Allocate memory for hibernation image.
 *
 * We first try to allocate as many highmem pages as there are
 * saveable highmem pages in the system. If that fails, we allocate
 * non-highmem pages for the copies of the remaining highmem ones.
 *
 * In this approach it is likely that the copies of highmem pages will
 * also be located in the high memory, because of the way in which
 * copy_data_pages() works.
 */
static int swsusp_alloc(struct memory_bitmap *orig_bm,
			struct memory_bitmap *copy_bm,
			unsigned int nr_pages, unsigned int nr_highmem)
{
	if (nr_highmem > 0) {
		if (get_highmem_buffer(PG_ANY))
			goto err_out;
		if (nr_highmem > alloc_highmem) {
			nr_highmem -= alloc_highmem;
			nr_pages += alloc_highmem_pages(copy_bm, nr_highmem);
		}
	}
	if (nr_pages > alloc_normal) {
		nr_pages -= alloc_normal;
		while (nr_pages-- > 0) {
			struct page *page;

			page = alloc_image_page(GFP_ATOMIC | __GFP_COLD);
			if (!page)
				goto err_out;
			memory_bm_set_bit(copy_bm, page_to_pfn(page));
		}
	}

	return 0;

 err_out:
	swsusp_free();
	return -ENOMEM;
}

asmlinkage __visible int swsusp_save(void)
{
	unsigned int nr_pages, nr_highmem;

	printk(KERN_INFO "PM: Creating hibernation image:\n");

	drain_local_pages(NULL);
	nr_pages = count_data_pages();
	nr_highmem = count_highmem_pages();
	printk(KERN_INFO "PM: Need to copy %u pages\n", nr_pages + nr_highmem);

	if (!enough_free_mem(nr_pages, nr_highmem)) {
		printk(KERN_ERR "PM: Not enough free memory\n");
		return -ENOMEM;
	}

	if (swsusp_alloc(&orig_bm, &copy_bm, nr_pages, nr_highmem)) {
		printk(KERN_ERR "PM: Memory allocation failed\n");
		return -ENOMEM;
	}

	/*
	 * During allocation of the suspend pagedir, new cold pages may appear.
	 * Kill them.
	 */
	drain_local_pages(NULL);
	copy_data_pages(&copy_bm, &orig_bm);

	/*
	 * End of critical section. From now on, we can write to memory,
	 * but we should not touch disk. This specially means we must _not_
	 * touch swap space! Except we must write out our image of course.
	 */

	nr_pages += nr_highmem;
	nr_copy_pages = nr_pages;
	nr_meta_pages = DIV_ROUND_UP(nr_pages * sizeof(long), PAGE_SIZE);

	printk(KERN_INFO "PM: Hibernation image created (%d pages copied)\n",
	       nr_pages);

	return 0;
}

#ifndef CONFIG_ARCH_HIBERNATION_HEADER
static int init_header_complete(struct swsusp_info *info)
{
	memcpy(&info->uts, init_utsname(), sizeof(struct new_utsname));
	info->version_code = LINUX_VERSION_CODE;
	return 0;
}

static char *check_image_kernel(struct swsusp_info *info)
{
	if (info->version_code != LINUX_VERSION_CODE)
		return "kernel version";
	if (strcmp(info->uts.sysname, init_utsname()->sysname))
		return "system type";
	if (strcmp(info->uts.release, init_utsname()->release))
		return "kernel release";
	if (strcmp(info->uts.version, init_utsname()->version))
		return "version";
	if (strcmp(info->uts.machine, init_utsname()->machine))
		return "machine";
	return NULL;
}
#endif /* CONFIG_ARCH_HIBERNATION_HEADER */

unsigned long snapshot_get_image_size(void)
{
	return nr_copy_pages + nr_meta_pages + 1;
}

static int init_header(struct swsusp_info *info)
{
	memset(info, 0, sizeof(struct swsusp_info));
	info->num_physpages = get_num_physpages();
	info->image_pages = nr_copy_pages;
	info->pages = snapshot_get_image_size();
	info->size = info->pages;
	info->size <<= PAGE_SHIFT;
	return init_header_complete(info);
}
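/*
 * Image layout sketch (follows from snapshot_get_image_size() and
 * init_header() above): the stream produced by snapshot_read_next() consists
 * of one header page (struct swsusp_info), nr_meta_pages pages of packed
 * PFNs and nr_copy_pages pages of data. For example, an image of 100000
 * copied pages on a 64-bit system with 4 KiB pages needs
 * DIV_ROUND_UP(100000 * 8, 4096) = 196 meta pages, so info->pages is
 * 100000 + 196 + 1 = 100197 (figures illustrative only).
 */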
/**
 * pack_pfns - Prepare PFNs for saving.
 * @buf: Memory buffer to store the PFNs in.
 * @bm: Memory bitmap.
 *
 * PFNs corresponding to set bits in @bm are stored in the area of memory
 * pointed to by @buf (1 page at a time).
 */
static inline void pack_pfns(unsigned long *buf, struct memory_bitmap *bm)
{
	int j;

	for (j = 0; j < PAGE_SIZE / sizeof(long); j++) {
		buf[j] = memory_bm_next_pfn(bm);
		if (unlikely(buf[j] == BM_END_OF_MAP))
			break;
		/* Save page key for data page (s390 only). */
		page_key_read(buf + j);
	}
}

/**
 * snapshot_read_next - Get the address to read the next image page from.
 * @handle: Snapshot handle to be used for the reading.
 *
 * On the first call, @handle should point to a zeroed snapshot_handle
 * structure. The structure gets populated then and a pointer to it should be
 * passed to this function every next time.
 *
 * On success, the function returns a positive number. Then, the caller
 * is allowed to read up to the returned number of bytes from the memory
 * location computed by the data_of() macro.
 *
 * The function returns 0 to indicate the end of the data stream condition,
 * and negative numbers are returned on errors. If that happens, the structure
 * pointed to by @handle is not updated and should not be used any more.
 */
int snapshot_read_next(struct snapshot_handle *handle)
{
	if (handle->cur > nr_meta_pages + nr_copy_pages)
		return 0;

	if (!buffer) {
		/* This makes the buffer be freed by swsusp_free() */
		buffer = get_image_page(GFP_ATOMIC, PG_ANY);
		if (!buffer)
			return -ENOMEM;
	}
	if (!handle->cur) {
		int error;

		error = init_header((struct swsusp_info *)buffer);
		if (error)
			return error;
		handle->buffer = buffer;
		memory_bm_position_reset(&orig_bm);
		memory_bm_position_reset(&copy_bm);
	} else if (handle->cur <= nr_meta_pages) {
		clear_page(buffer);
		pack_pfns(buffer, &orig_bm);
	} else {
		struct page *page;

		page = pfn_to_page(memory_bm_next_pfn(&copy_bm));
		if (PageHighMem(page)) {
			/*
			 * Highmem pages are copied to the buffer,
			 * because we can't return with a kmapped
			 * highmem page (we may not be called again).
			 */
			void *kaddr;

			kaddr = kmap_atomic(page);
			copy_page(buffer, kaddr);
			kunmap_atomic(kaddr);
			handle->buffer = buffer;
		} else {
			handle->buffer = page_address(page);
		}
	}
	handle->cur++;
	return PAGE_SIZE;
}

static void duplicate_memory_bitmap(struct memory_bitmap *dst,
				    struct memory_bitmap *src)
{
	unsigned long pfn;

	memory_bm_position_reset(src);
	pfn = memory_bm_next_pfn(src);
	while (pfn != BM_END_OF_MAP) {
		memory_bm_set_bit(dst, pfn);
		pfn = memory_bm_next_pfn(src);
	}
}
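/*
 * Usage sketch (hedged; the actual consumers live elsewhere, e.g. the swap
 * writer, and error handling is omitted): a writer of the image stream
 * drives snapshot_read_next() until it returns 0, saving one page per call:
 *
 *	struct snapshot_handle handle;
 *	int ret;
 *
 *	memset(&handle, 0, sizeof(handle));
 *	while ((ret = snapshot_read_next(&handle)) > 0)
 *		write_page_to_storage(data_of(handle));  // hypothetical helper
 *	// ret == 0: end of stream; ret < 0: error code
 */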
/**
 * mark_unsafe_pages - Mark pages that were used before hibernation.
 *
 * Mark the pages that cannot be used for storing the image during restoration,
 * because they conflict with the pages that had been used before hibernation.
 */
static void mark_unsafe_pages(struct memory_bitmap *bm)
{
	unsigned long pfn;

	/* Clear the "free"/"unsafe" bit for all PFNs */
	memory_bm_position_reset(free_pages_map);
	pfn = memory_bm_next_pfn(free_pages_map);
	while (pfn != BM_END_OF_MAP) {
		memory_bm_clear_current(free_pages_map);
		pfn = memory_bm_next_pfn(free_pages_map);
	}

	/* Mark pages that correspond to the "original" PFNs as "unsafe" */
	duplicate_memory_bitmap(free_pages_map, bm);

	allocated_unsafe_pages = 0;
}

static int check_header(struct swsusp_info *info)
{
	char *reason;

	reason = check_image_kernel(info);
	if (!reason && info->num_physpages != get_num_physpages())
		reason = "memory size";
	if (reason) {
		printk(KERN_ERR "PM: Image mismatch: %s\n", reason);
		return -EPERM;
	}
	return 0;
}

/**
 * load_header - Check the image header and copy the data from it.
 */
static int load_header(struct swsusp_info *info)
{
	int error;

	restore_pblist = NULL;
	error = check_header(info);
	if (!error) {
		nr_copy_pages = info->image_pages;
		nr_meta_pages = info->pages - info->image_pages - 1;
	}
	return error;
}

/**
 * unpack_orig_pfns - Set bits corresponding to given PFNs in a memory bitmap.
 * @buf: Area of memory containing the PFNs.
 * @bm: Memory bitmap.
 *
 * For each element of the array pointed to by @buf (1 page at a time), set the
 * corresponding bit in @bm.
 */
static int unpack_orig_pfns(unsigned long *buf, struct memory_bitmap *bm)
{
	int j;

	for (j = 0; j < PAGE_SIZE / sizeof(long); j++) {
		if (unlikely(buf[j] == BM_END_OF_MAP))
			break;

		/* Extract and buffer page key for data page (s390 only). */
		page_key_memorize(buf + j);

		if (pfn_valid(buf[j]) && memory_bm_pfn_present(bm, buf[j]))
			memory_bm_set_bit(bm, buf[j]);
		else
			return -EFAULT;
	}

	return 0;
}

#ifdef CONFIG_HIGHMEM
/*
 * struct highmem_pbe is used for creating the list of highmem pages that
 * should be restored atomically during the resume from disk, because the page
 * frames they have occupied before the suspend are in use.
 */
struct highmem_pbe {
	struct page *copy_page;	/* data is here now */
	struct page *orig_page;	/* data was here before the suspend */
	struct highmem_pbe *next;
};

/*
 * List of highmem PBEs needed for restoring the highmem pages that were
 * allocated before the suspend and included in the suspend image, but have
 * also been allocated by the "resume" kernel, so their contents cannot be
 * written directly to their "original" page frames.
 */
static struct highmem_pbe *highmem_pblist;
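/*
 * For reference, the non-highmem analogue of this list is restore_pblist
 * (declared near the top of this file). After the image has been loaded,
 * architecture-specific resume code walks it to move every page that could
 * not be loaded in place to its "original" frame; schematically (a sketch,
 * not the actual arch code):
 *
 *	struct pbe *pbe;
 *
 *	for (pbe = restore_pblist; pbe; pbe = pbe->next)
 *		copy_page(pbe->orig_address, pbe->address);
 */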
/**
 * count_highmem_image_pages - Compute the number of highmem pages in the image.
 * @bm: Memory bitmap.
 *
 * The bits in @bm that correspond to image pages are assumed to be set.
 */
static unsigned int count_highmem_image_pages(struct memory_bitmap *bm)
{
	unsigned long pfn;
	unsigned int cnt = 0;

	memory_bm_position_reset(bm);
	pfn = memory_bm_next_pfn(bm);
	while (pfn != BM_END_OF_MAP) {
		if (PageHighMem(pfn_to_page(pfn)))
			cnt++;

		pfn = memory_bm_next_pfn(bm);
	}
	return cnt;
}

static unsigned int safe_highmem_pages;

static struct memory_bitmap *safe_highmem_bm;

/**
 * prepare_highmem_image - Allocate memory for loading highmem data from image.
 * @bm: Pointer to an uninitialized memory bitmap structure.
 * @nr_highmem_p: Pointer to the number of highmem image pages.
 *
 * Try to allocate as many highmem pages as there are highmem image pages
 * (@nr_highmem_p points to the variable containing the number of highmem image
 * pages). The pages that are "safe" (ie. will not be overwritten when the
 * hibernation image is restored entirely) have the corresponding bits set in
 * @bm (it must be uninitialized).
 *
 * NOTE: This function should not be called if there are no highmem image pages.
 */
static int prepare_highmem_image(struct memory_bitmap *bm,
				 unsigned int *nr_highmem_p)
{
	unsigned int to_alloc;

	if (memory_bm_create(bm, GFP_ATOMIC, PG_SAFE))
		return -ENOMEM;

	if (get_highmem_buffer(PG_SAFE))
		return -ENOMEM;

	to_alloc = count_free_highmem_pages();
	if (to_alloc > *nr_highmem_p)
		to_alloc = *nr_highmem_p;
	else
		*nr_highmem_p = to_alloc;

	safe_highmem_pages = 0;
	while (to_alloc-- > 0) {
		struct page *page;

		page = alloc_page(__GFP_HIGHMEM);
		if (!swsusp_page_is_free(page)) {
			/* The page is "safe", set its bit in the bitmap */
			memory_bm_set_bit(bm, page_to_pfn(page));
			safe_highmem_pages++;
		}
		/* Mark the page as allocated */
		swsusp_set_page_forbidden(page);
		swsusp_set_page_free(page);
	}
	memory_bm_position_reset(bm);
	safe_highmem_bm = bm;
	return 0;
}

static struct page *last_highmem_page;
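/*
 * Worked example (illustrative figures only): if the image contains
 * *nr_highmem_p = 500 highmem pages but only 300 highmem pages are free,
 * prepare_highmem_image() allocates all 300 and trims *nr_highmem_p to 300;
 * of those, only the frames that do not collide with the image's "original"
 * pages (say 220) are counted in safe_highmem_pages and flagged in @bm. The
 * loaded highmem pages that cannot go to their original frames will be
 * tracked via highmem PBEs and put in place by restore_highmem() at the end
 * of resume.
 */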
/**
 * get_highmem_page_buffer - Prepare a buffer to store a highmem image page.
 *
 * For a given highmem image page get a buffer that suspend_write_next() should
 * return to its caller to write to.
 *
 * If the page is to be saved to its "original" page frame or a copy of
 * the page is to be made in the highmem, @buffer is returned. Otherwise,
 * the copy of the page is to be made in normal memory, so the address of
 * the copy is returned.
 *
 * If @buffer is returned, the caller of suspend_write_next() will write
 * the page's contents to @buffer, so they will have to be copied to the
 * right location on the next call to suspend_write_next() and it is done
 * with the help of copy_last_highmem_page(). For this purpose, if
 * @buffer is returned, @last_highmem_page is set to the page to which
 * the data will have to be copied from @buffer.
 */
static void *get_highmem_page_buffer(struct page *page,
				     struct chain_allocator *ca)
{
	struct highmem_pbe *pbe;
	void *kaddr;

	if (swsusp_page_is_forbidden(page) && swsusp_page_is_free(page)) {
		/*
		 * We have allocated the "original" page frame and we can
		 * use it directly to store the loaded page.
		 */
		last_highmem_page = page;
		return buffer;
	}
	/*
	 * The "original" page frame has not been allocated and we have to
	 * use a "safe" page frame to store the loaded page.
	 */
	pbe = chain_alloc(ca, sizeof(struct highmem_pbe));
	if (!pbe) {
		swsusp_free();
		return ERR_PTR(-ENOMEM);
	}
	pbe->orig_page = page;
	if (safe_highmem_pages > 0) {
		struct page *tmp;

		/* Copy of the page will be stored in high memory */
		kaddr = buffer;
		tmp = pfn_to_page(memory_bm_next_pfn(safe_highmem_bm));
		safe_highmem_pages--;
		last_highmem_page = tmp;
		pbe->copy_page = tmp;
	} else {
		/* Copy of the page will be stored in normal memory */
		kaddr = safe_pages_list;
		safe_pages_list = safe_pages_list->next;
		pbe->copy_page = virt_to_page(kaddr);
	}
	pbe->next = highmem_pblist;
	highmem_pblist = pbe;
	return kaddr;
}

/**
 * copy_last_highmem_page - Copy the most recent highmem image page.
 *
 * Copy the contents of a highmem image from @buffer, where the caller of
 * snapshot_write_next() has stored them, to the right location represented by
 * @last_highmem_page.
 */
static void copy_last_highmem_page(void)
{
	if (last_highmem_page) {
		void *dst;

		dst = kmap_atomic(last_highmem_page);
		copy_page(dst, buffer);
		kunmap_atomic(dst);
		last_highmem_page = NULL;
	}
}

static inline int last_highmem_page_copied(void)
{
	return !last_highmem_page;
}

static inline void free_highmem_data(void)
{
	if (safe_highmem_bm)
		memory_bm_free(safe_highmem_bm, PG_UNSAFE_CLEAR);

	if (buffer)
		free_image_page(buffer, PG_UNSAFE_CLEAR);
}
#else
static unsigned int count_highmem_image_pages(struct memory_bitmap *bm) { return 0; }

static inline int prepare_highmem_image(struct memory_bitmap *bm,
					unsigned int *nr_highmem_p) { return 0; }

static inline void *get_highmem_page_buffer(struct page *page,
					    struct chain_allocator *ca)
{
	return ERR_PTR(-EINVAL);
}

static inline void copy_last_highmem_page(void) {}
static inline int last_highmem_page_copied(void) { return 1; }
static inline void free_highmem_data(void) {}
#endif /* CONFIG_HIGHMEM */

#define PBES_PER_LINKED_PAGE (LINKED_PAGE_DATA_SIZE / sizeof(struct pbe))
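/*
 * Worked example (assuming 4 KiB pages and 8-byte pointers): struct pbe
 * holds three pointers, i.e. 24 bytes, and LINKED_PAGE_DATA_SIZE is
 * 4096 - 8 = 4088 bytes, so PBES_PER_LINKED_PAGE evaluates to
 * 4088 / 24 = 170 PBEs per linked page. This is the figure behind the
 * DIV_ROUND_UP() reservation in prepare_image() below.
 */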
/**
 * prepare_image - Make room for loading hibernation image.
 * @new_bm: Uninitialized memory bitmap structure.
 * @bm: Memory bitmap with unsafe pages marked.
 *
 * Use @bm to mark the pages that will be overwritten in the process of
 * restoring the system memory state from the suspend image ("unsafe" pages)
 * and allocate memory for the image.
 *
 * The idea is to allocate a new memory bitmap first and then allocate
 * as many pages as needed for image data, but without specifying what those
 * pages will be used for just yet. Instead, we mark them all as allocated and
 * create a list of "safe" pages to be used later. On systems with high
 * memory a list of "safe" highmem pages is created too.
 */
static int prepare_image(struct memory_bitmap *new_bm, struct memory_bitmap *bm)
{
	unsigned int nr_pages, nr_highmem;
	struct linked_page *lp;
	int error;

	/* If there is no highmem, the buffer will not be necessary */
	free_image_page(buffer, PG_UNSAFE_CLEAR);
	buffer = NULL;

	nr_highmem = count_highmem_image_pages(bm);
	mark_unsafe_pages(bm);

	error = memory_bm_create(new_bm, GFP_ATOMIC, PG_SAFE);
	if (error)
		goto Free;

	duplicate_memory_bitmap(new_bm, bm);
	memory_bm_free(bm, PG_UNSAFE_KEEP);
	if (nr_highmem > 0) {
		error = prepare_highmem_image(bm, &nr_highmem);
		if (error)
			goto Free;
	}
	/*
	 * Reserve some safe pages for potential later use.
	 *
	 * NOTE: This way we make sure there will be enough safe pages for the
	 * chain_alloc() in get_buffer(). It is a bit wasteful, but
	 * nr_copy_pages cannot be greater than 50% of the memory anyway.
	 *
	 * nr_copy_pages cannot be less than allocated_unsafe_pages either.
	 */
	nr_pages = nr_copy_pages - nr_highmem - allocated_unsafe_pages;
	nr_pages = DIV_ROUND_UP(nr_pages, PBES_PER_LINKED_PAGE);
	while (nr_pages > 0) {
		lp = get_image_page(GFP_ATOMIC, PG_SAFE);
		if (!lp) {
			error = -ENOMEM;
			goto Free;
		}
		lp->next = safe_pages_list;
		safe_pages_list = lp;
		nr_pages--;
	}
	/* Preallocate memory for the image */
	nr_pages = nr_copy_pages - nr_highmem - allocated_unsafe_pages;
	while (nr_pages > 0) {
		lp = (struct linked_page *)get_zeroed_page(GFP_ATOMIC);
		if (!lp) {
			error = -ENOMEM;
			goto Free;
		}
		if (!swsusp_page_is_free(virt_to_page(lp))) {
			/* The page is "safe", add it to the list */
			lp->next = safe_pages_list;
			safe_pages_list = lp;
		}
		/* Mark the page as allocated */
		swsusp_set_page_forbidden(virt_to_page(lp));
		swsusp_set_page_free(virt_to_page(lp));
		nr_pages--;
	}
	return 0;

 Free:
	swsusp_free();
	return error;
}

/**
 * get_buffer - Get the address to store the next image data page.
 *
 * Get the address that snapshot_write_next() should return to its caller to
 * write to.
 */
static void *get_buffer(struct memory_bitmap *bm, struct chain_allocator *ca)
{
	struct pbe *pbe;
	struct page *page;
	unsigned long pfn = memory_bm_next_pfn(bm);

	if (pfn == BM_END_OF_MAP)
		return ERR_PTR(-EFAULT);

	page = pfn_to_page(pfn);
	if (PageHighMem(page))
		return get_highmem_page_buffer(page, ca);

	if (swsusp_page_is_forbidden(page) && swsusp_page_is_free(page))
		/*
		 * We have allocated the "original" page frame and we can
		 * use it directly to store the loaded page.
		 */
		return page_address(page);

	/*
	 * The "original" page frame has not been allocated and we have to
	 * use a "safe" page frame to store the loaded page.
	 */
	pbe = chain_alloc(ca, sizeof(struct pbe));
	if (!pbe) {
		swsusp_free();
		return ERR_PTR(-ENOMEM);
	}
	pbe->orig_address = page_address(page);
	pbe->address = safe_pages_list;
	safe_pages_list = safe_pages_list->next;
	pbe->next = restore_pblist;
	restore_pblist = pbe;
	return pbe->address;
}
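/*
 * Worked example for prepare_image() above (illustrative figures only,
 * 64-bit, 4 KiB pages): with nr_copy_pages = 100000, of which
 * nr_highmem = 20000, and allocated_unsafe_pages = 5000, it reserves
 * DIV_ROUND_UP(75000, 170) = 442 safe pages for the PBE chains and then
 * preallocates another 75000 page frames; only the "safe" ones among them
 * end up on safe_pages_list, which get_buffer() then pops as PBEs are
 * created, while the "unsafe" ones are merely marked as allocated so that
 * swsusp_free() can reclaim them.
 */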
/**
 * snapshot_write_next - Get the address to store the next image page.
 * @handle: Snapshot handle structure to guide the writing.
 *
 * On the first call, @handle should point to a zeroed snapshot_handle
 * structure. The structure gets populated then and a pointer to it should be
 * passed to this function every next time.
 *
 * On success, the function returns a positive number. Then, the caller
 * is allowed to write up to the returned number of bytes to the memory
 * location computed by the data_of() macro.
 *
 * The function returns 0 to indicate the "end of file" condition. Negative
 * numbers are returned on errors, in which cases the structure pointed to by
 * @handle is not updated and should not be used any more.
 */
int snapshot_write_next(struct snapshot_handle *handle)
{
	static struct chain_allocator ca;
	int error = 0;

	/* Check if we have already loaded the entire image */
	if (handle->cur > 1 && handle->cur > nr_meta_pages + nr_copy_pages)
		return 0;

	handle->sync_read = 1;

	if (!handle->cur) {
		if (!buffer)
			/* This makes the buffer be freed by swsusp_free() */
			buffer = get_image_page(GFP_ATOMIC, PG_ANY);

		if (!buffer)
			return -ENOMEM;

		handle->buffer = buffer;
	} else if (handle->cur == 1) {
		error = load_header(buffer);
		if (error)
			return error;

		safe_pages_list = NULL;

		error = memory_bm_create(&copy_bm, GFP_ATOMIC, PG_ANY);
		if (error)
			return error;

		/* Allocate buffer for page keys. */
		error = page_key_alloc(nr_copy_pages);
		if (error)
			return error;

		hibernate_restore_protection_begin();
	} else if (handle->cur <= nr_meta_pages + 1) {
		error = unpack_orig_pfns(buffer, &copy_bm);
		if (error)
			return error;

		if (handle->cur == nr_meta_pages + 1) {
			error = prepare_image(&orig_bm, &copy_bm);
			if (error)
				return error;

			chain_init(&ca, GFP_ATOMIC, PG_SAFE);
			memory_bm_position_reset(&orig_bm);
			restore_pblist = NULL;
			handle->buffer = get_buffer(&orig_bm, &ca);
			handle->sync_read = 0;
			if (IS_ERR(handle->buffer))
				return PTR_ERR(handle->buffer);
		}
	} else {
		copy_last_highmem_page();
		/* Restore page key for data page (s390 only). */
		page_key_write(handle->buffer);
		hibernate_restore_protect_page(handle->buffer);
		handle->buffer = get_buffer(&orig_bm, &ca);
		if (IS_ERR(handle->buffer))
			return PTR_ERR(handle->buffer);
		if (handle->buffer != buffer)
			handle->sync_read = 0;
	}
	handle->cur++;
	return PAGE_SIZE;
}
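/*
 * Usage sketch (hedged; the real loaders live elsewhere, e.g. the swap
 * reader, and error handling is omitted): a loader of the image stream
 * alternates snapshot_write_next() with filling data_of(handle), then
 * finalizes and checks completeness:
 *
 *	struct snapshot_handle handle;
 *	int ret;
 *
 *	memset(&handle, 0, sizeof(handle));
 *	while ((ret = snapshot_write_next(&handle)) > 0)
 *		read_page_from_storage(data_of(handle));  // hypothetical helper
 *	snapshot_write_finalize(&handle);
 *	if (ret < 0 || !snapshot_image_loaded(&handle))
 *		ret = -ENODATA;  // treat a partial image as an error
 */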
/**
 * snapshot_write_finalize - Complete the loading of a hibernation image.
 *
 * Must be called after the last call to snapshot_write_next() in case the last
 * page in the image happens to be a highmem page and its contents should be
 * stored in highmem. Additionally, it recycles bitmap memory that's not
 * necessary any more.
 */
void snapshot_write_finalize(struct snapshot_handle *handle)
{
	copy_last_highmem_page();
	/* Restore page key for data page (s390 only). */
	page_key_write(handle->buffer);
	page_key_free();
	hibernate_restore_protect_page(handle->buffer);
	/* Do that only if we have loaded the image entirely */
	if (handle->cur > 1 && handle->cur > nr_meta_pages + nr_copy_pages) {
		memory_bm_recycle(&orig_bm);
		free_highmem_data();
	}
}

int snapshot_image_loaded(struct snapshot_handle *handle)
{
	return !(!nr_copy_pages || !last_highmem_page_copied() ||
		 handle->cur <= nr_meta_pages + nr_copy_pages);
}

#ifdef CONFIG_HIGHMEM
/* Assumes that @buf is ready and points to a "safe" page */
static inline void swap_two_pages_data(struct page *p1, struct page *p2,
				       void *buf)
{
	void *kaddr1, *kaddr2;

	kaddr1 = kmap_atomic(p1);
	kaddr2 = kmap_atomic(p2);
	copy_page(buf, kaddr1);
	copy_page(kaddr1, kaddr2);
	copy_page(kaddr2, buf);
	kunmap_atomic(kaddr2);
	kunmap_atomic(kaddr1);
}

/**
 * restore_highmem - Put highmem image pages into their original locations.
 *
 * For each highmem page that was in use before hibernation and is included in
 * the image, and also has been allocated by the "restore" kernel, swap its
 * current contents with the previous (ie. "before hibernation") ones.
 *
 * If the restore eventually fails, we can call this function once again and
 * restore the highmem state as seen by the restore kernel.
 */
int restore_highmem(void)
{
	struct highmem_pbe *pbe = highmem_pblist;
	void *buf;

	if (!pbe)
		return 0;

	buf = get_image_page(GFP_ATOMIC, PG_SAFE);
	if (!buf)
		return -ENOMEM;

	while (pbe) {
		swap_two_pages_data(pbe->copy_page, pbe->orig_page, buf);
		pbe = pbe->next;
	}
	free_image_page(buf, PG_UNSAFE_CLEAR);
	return 0;
}
#endif /* CONFIG_HIGHMEM */