Lines matching "bm" in kernel/power/snapshot.c

1 // SPDX-License-Identifier: GPL-2.0-only
7 * Copyright (C) 1998-2005 Pavel Machek <pavel@ucw.cz>
119 * from their ->freeze() and ->freeze_noirq() callbacks so that they don't
152 #define LINKED_PAGE_DATA_SIZE (PAGE_SIZE - sizeof(void *))
177 * get_image_page - Allocate a page for a hibernation image.
213 safe_pages_list = safe_pages_list->next; in __get_safe_page()
241 lp->next = safe_pages_list; in recycle_safe_page()
246 * free_image_page - Free a page allocated for hibernation image.
272 struct linked_page *lp = list->next; in free_list_of_pages()
302 ca->chain = NULL; in chain_init()
303 ca->used_space = LINKED_PAGE_DATA_SIZE; in chain_init()
304 ca->gfp_mask = gfp_mask; in chain_init()
305 ca->safe_needed = safe_needed; in chain_init()
312 if (LINKED_PAGE_DATA_SIZE - ca->used_space < size) { in chain_alloc()
315 lp = ca->safe_needed ? __get_safe_page(ca->gfp_mask) : in chain_alloc()
316 get_image_page(ca->gfp_mask, PG_ANY); in chain_alloc()
320 lp->next = ca->chain; in chain_alloc()
321 ca->chain = lp; in chain_alloc()
322 ca->used_space = 0; in chain_alloc()
324 ret = ca->chain->data + ca->used_space; in chain_alloc()
325 ca->used_space += size; in chain_alloc()
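Taken together, lines 302-325 are the whole chain allocator: a bump allocator over a list of pages that is only ever torn down wholesale. A minimal userspace model, with malloc() standing in for get_image_page()/__get_safe_page() and all other names illustrative rather than from the source:

#include <stdlib.h>

#define PAGE_SIZE 4096
#define LINKED_PAGE_DATA_SIZE (PAGE_SIZE - sizeof(void *))

struct linked_page {
        struct linked_page *next;
        char data[LINKED_PAGE_DATA_SIZE];
};

struct chain_allocator {
        struct linked_page *chain;      /* list of pages we carve from */
        unsigned int used_space;        /* bytes used in the head page */
};

/* Initialize with used_space = LINKED_PAGE_DATA_SIZE, as chain_init()
 * does, so the very first request pulls in a fresh page. */
static void *chain_alloc(struct chain_allocator *ca, unsigned int size)
{
        if (LINKED_PAGE_DATA_SIZE - ca->used_space < size) {
                struct linked_page *lp = malloc(sizeof(*lp));

                if (!lp)
                        return NULL;
                lp->next = ca->chain;
                ca->chain = lp;
                ca->used_space = 0;
        }
        void *ret = ca->chain->data + ca->used_space;
        ca->used_space += size;
        return ret;
}

Nothing is ever freed individually; free_list_of_pages() (line 272) walks lp->next and releases whole pages at once.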
345 * designed to work with an arbitrary number of zones (this is over the
346 * top for now, but let's avoid making unnecessary assumptions ;-).
374 #define BM_BLOCK_MASK ((1UL << BM_BLOCK_SHIFT) - 1)
423 #define BM_RTREE_LEVEL_SHIFT (PAGE_SHIFT - 2)
425 #define BM_RTREE_LEVEL_SHIFT (PAGE_SHIFT - 3)
427 #define BM_RTREE_LEVEL_MASK ((1UL << BM_RTREE_LEVEL_SHIFT) - 1)
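On a 64-bit machine with 4 KiB pages these constants give BM_RTREE_LEVEL_SHIFT = 9, so each rtree node page holds 2^9 = 512 child pointers, while each leaf page covers PAGE_SIZE * 8 = 32768 PFNs. A quick standalone sanity check, assuming those (typical) values:

#include <stdio.h>

#define PAGE_SHIFT 12
#define PAGE_SIZE (1UL << PAGE_SHIFT)
#define BM_RTREE_LEVEL_SHIFT (PAGE_SHIFT - 3)   /* 8-byte pointer slots */
#define BM_BITS_PER_BLOCK (PAGE_SIZE * 8)       /* bits per leaf page */

int main(void)
{
        unsigned long slots = 1UL << BM_RTREE_LEVEL_SHIFT;

        /* one level of indirection already spans 512 * 32768 PFNs,
         * i.e. 64 GiB of RAM at 4 KiB per page */
        printf("slots/node=%lu pfns/leaf=%lu pfns/one-level=%lu\n",
               slots, BM_BITS_PER_BLOCK, slots * BM_BITS_PER_BLOCK);
        return 0;
}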
430 * alloc_rtree_node - Allocate a new node and add it to the radix tree.
450 node->data = get_image_page(gfp_mask, safe_needed); in alloc_rtree_node()
451 if (!node->data) in alloc_rtree_node()
454 list_add_tail(&node->list, list); in alloc_rtree_node()
460 * add_rtree_block - Add a new leaf node to the radix tree.
463 * linked list in order. This is guaranteed by the zone->blocks
473 block_nr = zone->blocks; in add_rtree_block()
483 for (i = zone->levels; i < levels_needed; i++) { in add_rtree_block()
485 &zone->nodes); in add_rtree_block()
487 return -ENOMEM; in add_rtree_block()
489 node->data[0] = (unsigned long)zone->rtree; in add_rtree_block()
490 zone->rtree = node; in add_rtree_block()
491 zone->levels += 1; in add_rtree_block()
495 block = alloc_rtree_node(gfp_mask, safe_needed, ca, &zone->leaves); in add_rtree_block()
497 return -ENOMEM; in add_rtree_block()
500 node = zone->rtree; in add_rtree_block()
501 dst = &zone->rtree; in add_rtree_block()
502 block_nr = zone->blocks; in add_rtree_block()
503 for (i = zone->levels; i > 0; i--) { in add_rtree_block()
508 &zone->nodes); in add_rtree_block()
510 return -ENOMEM; in add_rtree_block()
514 index = block_nr >> ((i - 1) * BM_RTREE_LEVEL_SHIFT); in add_rtree_block()
516 dst = (struct rtree_node **)&((*dst)->data[index]); in add_rtree_block()
520 zone->blocks += 1; in add_rtree_block()
530 * create_zone_bm_rtree - Create a radix tree for one zone.
546 pages = end - start; in create_zone_bm_rtree()
551 INIT_LIST_HEAD(&zone->nodes); in create_zone_bm_rtree()
552 INIT_LIST_HEAD(&zone->leaves); in create_zone_bm_rtree()
553 zone->start_pfn = start; in create_zone_bm_rtree()
554 zone->end_pfn = end; in create_zone_bm_rtree()
568 * free_zone_bm_rtree - Free the memory of the radix tree.
579 list_for_each_entry(node, &zone->nodes, list) in free_zone_bm_rtree()
580 free_image_page(node->data, clear_nosave_free); in free_zone_bm_rtree()
582 list_for_each_entry(node, &zone->leaves, list) in free_zone_bm_rtree()
583 free_image_page(node->data, clear_nosave_free); in free_zone_bm_rtree()
586 static void memory_bm_position_reset(struct memory_bitmap *bm) in memory_bm_position_reset() argument
588 bm->cur.zone = list_entry(bm->zones.next, struct mem_zone_bm_rtree, in memory_bm_position_reset()
590 bm->cur.node = list_entry(bm->cur.zone->leaves.next, in memory_bm_position_reset()
592 bm->cur.node_pfn = 0; in memory_bm_position_reset()
593 bm->cur.cur_pfn = BM_END_OF_MAP; in memory_bm_position_reset()
594 bm->cur.node_bit = 0; in memory_bm_position_reset()
597 static void memory_bm_free(struct memory_bitmap *bm, int clear_nosave_free);
606 * free_mem_extents - Free a list of memory extents.
614 list_del(&ext->hook); in free_mem_extents()
620 * create_mem_extents - Create a list of memory extents.
636 zone_start = zone->zone_start_pfn; in create_mem_extents()
640 if (zone_start <= ext->end) in create_mem_extents()
643 if (&ext->hook == list || zone_end < ext->start) { in create_mem_extents()
650 return -ENOMEM; in create_mem_extents()
652 new_ext->start = zone_start; in create_mem_extents()
653 new_ext->end = zone_end; in create_mem_extents()
654 list_add_tail(&new_ext->hook, &ext->hook); in create_mem_extents()
659 if (zone_start < ext->start) in create_mem_extents()
660 ext->start = zone_start; in create_mem_extents()
661 if (zone_end > ext->end) in create_mem_extents()
662 ext->end = zone_end; in create_mem_extents()
667 if (zone_end < cur->start) in create_mem_extents()
669 if (zone_end < cur->end) in create_mem_extents()
670 ext->end = cur->end; in create_mem_extents()
671 list_del(&cur->hook); in create_mem_extents()
680 * memory_bm_create - Allocate memory for a memory bitmap.
682 static int memory_bm_create(struct memory_bitmap *bm, gfp_t gfp_mask, in memory_bm_create() argument
691 INIT_LIST_HEAD(&bm->zones); in memory_bm_create()
701 ext->start, ext->end); in memory_bm_create()
703 error = -ENOMEM; in memory_bm_create()
706 list_add_tail(&zone->list, &bm->zones); in memory_bm_create()
709 bm->p_list = ca.chain; in memory_bm_create()
710 memory_bm_position_reset(bm); in memory_bm_create()
716 bm->p_list = ca.chain; in memory_bm_create()
717 memory_bm_free(bm, PG_UNSAFE_CLEAR); in memory_bm_create()
722 * memory_bm_free - Free memory occupied by the memory bitmap.
723 * @bm: Memory bitmap.
725 static void memory_bm_free(struct memory_bitmap *bm, int clear_nosave_free) in memory_bm_free() argument
729 list_for_each_entry(zone, &bm->zones, list) in memory_bm_free()
732 free_list_of_pages(bm->p_list, clear_nosave_free); in memory_bm_free()
734 INIT_LIST_HEAD(&bm->zones); in memory_bm_free()
738 * memory_bm_find_bit - Find the bit for a given PFN in a memory bitmap.
740 * Find the bit in memory bitmap @bm that corresponds to the given PFN.
741 * The cur.zone, cur.node and cur.node_pfn members of @bm are updated.
746 static int memory_bm_find_bit(struct memory_bitmap *bm, unsigned long pfn, in memory_bm_find_bit() argument
753 zone = bm->cur.zone; in memory_bm_find_bit()
755 if (pfn >= zone->start_pfn && pfn < zone->end_pfn) in memory_bm_find_bit()
761 list_for_each_entry(curr, &bm->zones, list) { in memory_bm_find_bit()
762 if (pfn >= curr->start_pfn && pfn < curr->end_pfn) { in memory_bm_find_bit()
769 return -EFAULT; in memory_bm_find_bit()
782 node = bm->cur.node; in memory_bm_find_bit()
783 if (zone == bm->cur.zone && in memory_bm_find_bit()
784 ((pfn - zone->start_pfn) & ~BM_BLOCK_MASK) == bm->cur.node_pfn) in memory_bm_find_bit()
787 node = zone->rtree; in memory_bm_find_bit()
788 block_nr = (pfn - zone->start_pfn) >> BM_BLOCK_SHIFT; in memory_bm_find_bit()
790 for (i = zone->levels; i > 0; i--) { in memory_bm_find_bit()
793 index = block_nr >> ((i - 1) * BM_RTREE_LEVEL_SHIFT); in memory_bm_find_bit()
795 BUG_ON(node->data[index] == 0); in memory_bm_find_bit()
796 node = (struct rtree_node *)node->data[index]; in memory_bm_find_bit()
801 bm->cur.zone = zone; in memory_bm_find_bit()
802 bm->cur.node = node; in memory_bm_find_bit()
803 bm->cur.node_pfn = (pfn - zone->start_pfn) & ~BM_BLOCK_MASK; in memory_bm_find_bit()
804 bm->cur.cur_pfn = pfn; in memory_bm_find_bit()
807 *addr = node->data; in memory_bm_find_bit()
808 *bit_nr = (pfn - zone->start_pfn) & BM_BLOCK_MASK; in memory_bm_find_bit()
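Behind the caching fast paths, the mapping memory_bm_find_bit() computes is pure shift-and-mask arithmetic. A sketch with the tree walk elided (BM_BLOCK_SHIFT is PAGE_SHIFT + 3 in the kernel; the helper name here is made up):

#define PAGE_SHIFT 12
#define BM_BLOCK_SHIFT (PAGE_SHIFT + 3)            /* PAGE_SIZE * 8 bits/leaf */
#define BM_BLOCK_MASK ((1UL << BM_BLOCK_SHIFT) - 1)

/* Split a PFN into (leaf block number, bit within that leaf). */
static void pfn_to_bit(unsigned long pfn, unsigned long zone_start_pfn,
                       unsigned long *block_nr, unsigned long *bit_nr)
{
        unsigned long off = pfn - zone_start_pfn;

        *block_nr = off >> BM_BLOCK_SHIFT;
        *bit_nr = off & BM_BLOCK_MASK;
}

The loop at line 790 then resolves the block number level by level, taking BM_RTREE_LEVEL_SHIFT bits of it per level as the index into node->data.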
813 static void memory_bm_set_bit(struct memory_bitmap *bm, unsigned long pfn) in memory_bm_set_bit() argument
819 error = memory_bm_find_bit(bm, pfn, &addr, &bit); in memory_bm_set_bit()
824 static int mem_bm_set_bit_check(struct memory_bitmap *bm, unsigned long pfn) in mem_bm_set_bit_check() argument
830 error = memory_bm_find_bit(bm, pfn, &addr, &bit); in mem_bm_set_bit_check()
837 static void memory_bm_clear_bit(struct memory_bitmap *bm, unsigned long pfn) in memory_bm_clear_bit() argument
843 error = memory_bm_find_bit(bm, pfn, &addr, &bit); in memory_bm_clear_bit()
848 static void memory_bm_clear_current(struct memory_bitmap *bm) in memory_bm_clear_current() argument
852 bit = max(bm->cur.node_bit - 1, 0); in memory_bm_clear_current()
853 clear_bit(bit, bm->cur.node->data); in memory_bm_clear_current()
856 static unsigned long memory_bm_get_current(struct memory_bitmap *bm) in memory_bm_get_current() argument
858 return bm->cur.cur_pfn; in memory_bm_get_current()
861 static int memory_bm_test_bit(struct memory_bitmap *bm, unsigned long pfn) in memory_bm_test_bit() argument
867 error = memory_bm_find_bit(bm, pfn, &addr, &bit); in memory_bm_test_bit()
872 static bool memory_bm_pfn_present(struct memory_bitmap *bm, unsigned long pfn) in memory_bm_pfn_present() argument
877 return !memory_bm_find_bit(bm, pfn, &addr, &bit); in memory_bm_pfn_present()
881 * rtree_next_node - Jump to the next leaf node.
890 static bool rtree_next_node(struct memory_bitmap *bm) in rtree_next_node() argument
892 if (!list_is_last(&bm->cur.node->list, &bm->cur.zone->leaves)) { in rtree_next_node()
893 bm->cur.node = list_entry(bm->cur.node->list.next, in rtree_next_node()
895 bm->cur.node_pfn += BM_BITS_PER_BLOCK; in rtree_next_node()
896 bm->cur.node_bit = 0; in rtree_next_node()
902 if (!list_is_last(&bm->cur.zone->list, &bm->zones)) { in rtree_next_node()
903 bm->cur.zone = list_entry(bm->cur.zone->list.next, in rtree_next_node()
905 bm->cur.node = list_entry(bm->cur.zone->leaves.next, in rtree_next_node()
907 bm->cur.node_pfn = 0; in rtree_next_node()
908 bm->cur.node_bit = 0; in rtree_next_node()
917 * memory_bm_next_pfn - Find the next set bit in a memory bitmap.
918 * @bm: Memory bitmap.
921 * set bit in @bm and returns the PFN represented by it. If no more bits are
927 static unsigned long memory_bm_next_pfn(struct memory_bitmap *bm) in memory_bm_next_pfn() argument
933 pages = bm->cur.zone->end_pfn - bm->cur.zone->start_pfn; in memory_bm_next_pfn()
934 bits = min(pages - bm->cur.node_pfn, BM_BITS_PER_BLOCK); in memory_bm_next_pfn()
935 bit = find_next_bit(bm->cur.node->data, bits, in memory_bm_next_pfn()
936 bm->cur.node_bit); in memory_bm_next_pfn()
938 pfn = bm->cur.zone->start_pfn + bm->cur.node_pfn + bit; in memory_bm_next_pfn()
939 bm->cur.node_bit = bit + 1; in memory_bm_next_pfn()
940 bm->cur.cur_pfn = pfn; in memory_bm_next_pfn()
943 } while (rtree_next_node(bm)); in memory_bm_next_pfn()
945 bm->cur.cur_pfn = BM_END_OF_MAP; in memory_bm_next_pfn()
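memory_bm_position_reset() plus memory_bm_next_pfn() is the bitmap's whole iteration API; every consumer, including clear_or_poison_free_pages() at line 1198 below, is a variation of this loop (schematic, built only on the declarations shown above):

static void for_each_set_pfn(struct memory_bitmap *bm,
                             void (*fn)(unsigned long pfn))
{
        unsigned long pfn;

        memory_bm_position_reset(bm);
        for (pfn = memory_bm_next_pfn(bm); pfn != BM_END_OF_MAP;
             pfn = memory_bm_next_pfn(bm))
                fn(pfn);
}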
965 list_for_each_entry(node, &zone->nodes, list) in recycle_zone_bm_rtree()
966 recycle_safe_page(node->data); in recycle_zone_bm_rtree()
968 list_for_each_entry(node, &zone->leaves, list) in recycle_zone_bm_rtree()
969 recycle_safe_page(node->data); in recycle_zone_bm_rtree()
972 static void memory_bm_recycle(struct memory_bitmap *bm) in memory_bm_recycle() argument
977 list_for_each_entry(zone, &bm->zones, list) in memory_bm_recycle()
980 p_list = bm->p_list; in memory_bm_recycle()
984 p_list = lp->next; in memory_bm_recycle()
990 * register_nosave_region - Register a region of unsaveable memory.
1006 if (region->end_pfn == start_pfn) { in register_nosave_region()
1007 region->end_pfn = end_pfn; in register_nosave_region()
1017 region->start_pfn = start_pfn; in register_nosave_region()
1018 region->end_pfn = end_pfn; in register_nosave_region()
1019 list_add_tail(&region->list, &nosave_regions); in register_nosave_region()
1021 pr_info("Registered nosave memory: [mem %#010llx-%#010llx]\n", in register_nosave_region()
1023 ((unsigned long long) end_pfn << PAGE_SHIFT) - 1); in register_nosave_region()
1077 * mark_nosave_pages - Mark pages that should not be saved.
1078 * @bm: Memory bitmap.
1080 * Set the bits in @bm that correspond to the page frames the contents of which
1083 static void mark_nosave_pages(struct memory_bitmap *bm) in mark_nosave_pages() argument
1093 pr_debug("Marking nosave pages: [mem %#010llx-%#010llx]\n", in mark_nosave_pages()
1094 (unsigned long long) region->start_pfn << PAGE_SHIFT, in mark_nosave_pages()
1095 ((unsigned long long) region->end_pfn << PAGE_SHIFT) in mark_nosave_pages()
1096 - 1); in mark_nosave_pages()
1098 for (pfn = region->start_pfn; pfn < region->end_pfn; pfn++) in mark_nosave_pages()
1106 mem_bm_set_bit_check(bm, pfn); in mark_nosave_pages()
1112 * create_basic_memory_bitmaps - Create bitmaps to hold basic page information.
1131 return -ENOMEM; in create_basic_memory_bitmaps()
1159 return -ENOMEM; in create_basic_memory_bitmaps()
1163 * free_basic_memory_bitmaps - Free memory bitmaps holding basic information.
1198 struct memory_bitmap *bm = free_pages_map; in clear_or_poison_free_pages() local
1205 memory_bm_position_reset(bm); in clear_or_poison_free_pages()
1206 pfn = memory_bm_next_pfn(bm); in clear_or_poison_free_pages()
1211 pfn = memory_bm_next_pfn(bm); in clear_or_poison_free_pages()
1213 memory_bm_position_reset(bm); in clear_or_poison_free_pages()
1219 * snapshot_additional_pages - Estimate the number of extra pages needed.
1230 rtree = nodes = DIV_ROUND_UP(zone->spanned_pages, BM_BITS_PER_BLOCK); in snapshot_additional_pages()
1256 spin_lock_irqsave(&zone->lock, flags); in mark_free_pages()
1259 for (pfn = zone->zone_start_pfn; pfn < max_zone_pfn; pfn++) in mark_free_pages()
1263 if (!--page_count) { in mark_free_pages()
1277 &zone->free_area[order].free_list[t], buddy_list) { in mark_free_pages()
1282 if (!--page_count) { in mark_free_pages()
1290 spin_unlock_irqrestore(&zone->lock, flags); in mark_free_pages()
1295 * count_free_highmem_pages - Compute the total number of free highmem pages.
1297 * The returned number is system-wide.
1312 * saveable_highmem_page - Check if a highmem page is saveable.
1345 * count_highmem_pages - Compute the total number of saveable highmem pages.
1360 for (pfn = zone->zone_start_pfn; pfn < max_zone_pfn; pfn++) in count_highmem_pages()
1374 * saveable_page - Check if the given page is saveable.
1376 * Determine whether a non-highmem page should be included in a hibernation
1413 * count_data_pages - Compute the total number of saveable non-highmem pages.
1427 for (pfn = zone->zone_start_pfn; pfn < max_zone_pfn; pfn++) in count_data_pages()
1444 for (n = PAGE_SIZE / sizeof(long); n; n--) { in do_copy_page()
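The line-1444 loop copies a page one long at a time, which also lets the same pass notice an all-zero page (the basis for the nr_zero_pages accounting at line 2152 below). A plausible userspace rendering, assuming the zero-detecting variant:

#define PAGE_SIZE 4096

/* Copy a page word by word; return nonzero if the source was all zeroes. */
static int copy_page_test_zero(long *dst, const long *src)
{
        int zeros_only = 1;
        unsigned int n;

        for (n = PAGE_SIZE / sizeof(long); n; n--) {
                *dst = *src++;
                if (*dst++)
                        zeros_only = 0;
        }
        return zeros_only;
}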
1452 * safe_copy_page - Copy a page in a safe way.
1540 for (pfn = zone->zone_start_pfn; pfn < max_zone_pfn; pfn++) in copy_data_pages()
1593 * swsusp_free - Free pages allocated for hibernation image.
1649 * preallocate_image_pages - Allocate a number of pages for hibernation image.
1670 nr_pages--; in preallocate_image_pages()
1685 alloc = avail_normal - alloc_normal; in preallocate_image_memory()
1699 * __fraction - Compute (an approximation of) x * (multiplier / base).
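The point of __fraction() is ordering: multiply into 64 bits first, divide last, so the ratio is not destroyed by integer truncation. A minimal model (the kernel uses do_div(); plain C99 division here):

#include <stdint.h>

static unsigned long fraction(uint64_t x, uint64_t multiplier, uint64_t base)
{
        return (unsigned long)(x * multiplier / base);
}

Dividing first, as in x * (multiplier / base), would round multiplier / base down to an integer, usually to 0.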
1729 * free_unnecessary_pages - Release preallocated pages not needed for the image.
1737 to_free_normal = alloc_normal - save; in free_unnecessary_pages()
1741 save -= alloc_normal; in free_unnecessary_pages()
1745 to_free_highmem = alloc_highmem - save; in free_unnecessary_pages()
1748 save -= alloc_highmem; in free_unnecessary_pages()
1750 to_free_normal -= save; in free_unnecessary_pages()
1765 to_free_highmem--; in free_unnecessary_pages()
1766 alloc_highmem--; in free_unnecessary_pages()
1770 to_free_normal--; in free_unnecessary_pages()
1771 alloc_normal--; in free_unnecessary_pages()
1783 * minimum_image_size - Estimate the minimum acceptable size of an image.
1792 * [number of saveable pages] - [number of pages that can be freed in theory]
1807 return saveable <= size ? 0 : saveable - size; in minimum_image_size()
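In other words, whatever the VM cannot reclaim must fit in the image. With illustrative numbers (not from the source):

        unsigned long saveable = 1800000;        /* saveable pages counted */
        unsigned long reclaimable = 1500000;     /* caches the VM could drop */
        unsigned long min_image = saveable <= reclaimable ?
                                  0 : saveable - reclaimable;
        /* -> 300000 pages, about 1.1 GiB with 4 KiB pages */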
1811 * hibernate_preallocate_memory - Preallocate memory for hibernation image.
1822 * ([page frames total] - PAGES_FOR_IO - [metadata pages]) / 2
1823 * - 2 * DIV_ROUND_UP(reserved_size, PAGE_SIZE)
1886 count -= totalreserve_pages; in hibernate_preallocate_memory()
1889 max_size = (count - (size + PAGES_FOR_IO)) / 2 in hibernate_preallocate_memory()
1890 - 2 * DIV_ROUND_UP(reserved_size, PAGE_SIZE); in hibernate_preallocate_memory()
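Plugging made-up but plausible numbers into the line 1889-1890 bound (PAGES_FOR_IO shown here as 1024, i.e. 4 MiB of 4 KiB pages, as an assumption):

#include <stdio.h>

#define PAGES_FOR_IO 1024                        /* assumed value */
#define DIV_ROUND_UP(n, d) (((n) + (d) - 1) / (d))

int main(void)
{
        unsigned long count = 1000000;           /* usable page frames */
        unsigned long size = 2000;               /* metadata pages */
        unsigned long reserved_bytes = 50 << 20; /* reserved_size */
        unsigned long max_size;

        max_size = (count - (size + PAGES_FOR_IO)) / 2
                   - 2 * DIV_ROUND_UP(reserved_bytes, 4096);
        printf("max image size: %lu pages\n", max_size);  /* 472888 */
        return 0;
}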
1902 pages += preallocate_image_memory(saveable - pages, avail_normal); in hibernate_preallocate_memory()
1914 avail_normal -= pages; in hibernate_preallocate_memory()
1926 shrink_all_memory(saveable - size); in hibernate_preallocate_memory()
1931 * image and fail if that doesn't work. Next, try to decrease the size in hibernate_preallocate_memory()
1933 * highmem and non-highmem zones separately. in hibernate_preallocate_memory()
1936 alloc = count - max_size; in hibernate_preallocate_memory()
1938 alloc -= pages_highmem; in hibernate_preallocate_memory()
1943 /* We have exhausted non-highmem pages, try highmem. */ in hibernate_preallocate_memory()
1944 alloc -= pages; in hibernate_preallocate_memory()
1949 alloc - pages_highmem); in hibernate_preallocate_memory()
1955 * memory, so try to preallocate (all memory - size) pages. in hibernate_preallocate_memory()
1957 alloc = (count - pages) - size; in hibernate_preallocate_memory()
1964 alloc = max_size - size; in hibernate_preallocate_memory()
1967 alloc -= size; in hibernate_preallocate_memory()
1969 pages_highmem += preallocate_image_highmem(alloc - size); in hibernate_preallocate_memory()
1978 pages -= free_unnecessary_pages(); in hibernate_preallocate_memory()
1989 return -ENOMEM; in hibernate_preallocate_memory()
1994 * count_pages_for_highmem - Count non-highmem pages needed for copying highmem.
1996 * Compute the number of non-highmem pages that will be necessary for creating
2006 nr_highmem -= free_highmem; in count_pages_for_highmem()
2015 * enough_free_mem - Check if there is enough free memory for the image.
2035 * get_highmem_buffer - Allocate a buffer for highmem pages.
2043 return buffer ? 0 : -ENOMEM; in get_highmem_buffer()
2047 * alloc_highmem_pages - Allocate some highmem pages for the image.
2052 static inline unsigned int alloc_highmem_pages(struct memory_bitmap *bm, in alloc_highmem_pages() argument
2060 nr_highmem -= to_alloc; in alloc_highmem_pages()
2061 while (to_alloc-- > 0) { in alloc_highmem_pages()
2065 memory_bm_set_bit(bm, page_to_pfn(page)); in alloc_highmem_pages()
2072 static inline unsigned int alloc_highmem_pages(struct memory_bitmap *bm, in alloc_highmem_pages() argument
2077 * swsusp_alloc - Allocate memory for hibernation image.
2081 * non-highmem pages for the copies of the remaining highmem ones.
2094 nr_highmem -= alloc_highmem; in swsusp_alloc()
2099 nr_pages -= alloc_normal; in swsusp_alloc()
2100 while (nr_pages-- > 0) { in swsusp_alloc()
2114 return -ENOMEM; in swsusp_alloc()
2130 return -ENOMEM; in swsusp_save()
2135 return -ENOMEM; in swsusp_save()
2152 nr_zero_pages = nr_pages - nr_copy_pages; in swsusp_save()
2163 memcpy(&info->uts, init_utsname(), sizeof(struct new_utsname)); in init_header_complete()
2164 info->version_code = LINUX_VERSION_CODE; in init_header_complete()
2170 if (info->version_code != LINUX_VERSION_CODE) in check_image_kernel()
2172 if (strcmp(info->uts.sysname, init_utsname()->sysname)) in check_image_kernel()
2174 if (strcmp(info->uts.release, init_utsname()->release)) in check_image_kernel()
2176 if (strcmp(info->uts.version, init_utsname()->version)) in check_image_kernel()
2178 if (strcmp(info->uts.machine, init_utsname()->machine)) in check_image_kernel()
2192 info->num_physpages = get_num_physpages(); in init_header()
2193 info->image_pages = nr_copy_pages; in init_header()
2194 info->pages = snapshot_get_image_size(); in init_header()
2195 info->size = info->pages; in init_header()
2196 info->size <<= PAGE_SHIFT; in init_header()
2200 #define ENCODED_PFN_ZERO_FLAG ((unsigned long)1 << (BITS_PER_LONG - 1))
2204 * pack_pfns - Prepare PFNs for saving.
2205 * @bm: Memory bitmap.
2209 * PFNs corresponding to set bits in @bm are stored in the area of memory
2214 static inline void pack_pfns(unsigned long *buf, struct memory_bitmap *bm, in pack_pfns() argument
2220 buf[j] = memory_bm_next_pfn(bm); in pack_pfns()
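The encoding is just the top bit of each saved PFN: if the original page was all zeroes, set ENCODED_PFN_ZERO_FLAG and skip storing its data. The body of the line 2220 loop amounts to the following (zero_bm is shorthand for the zero-page bitmap argument, per kernels with zero-page elision):

        buf[j] = memory_bm_next_pfn(bm);
        if (memory_bm_test_bit(zero_bm, buf[j]))
                buf[j] |= ENCODED_PFN_ZERO_FLAG;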
2229 * snapshot_read_next - Get the address to read the next image page from.
2246 if (handle->cur > nr_meta_pages + nr_copy_pages) in snapshot_read_next()
2253 return -ENOMEM; in snapshot_read_next()
2255 if (!handle->cur) { in snapshot_read_next()
2261 handle->buffer = buffer; in snapshot_read_next()
2264 } else if (handle->cur <= nr_meta_pages) { in snapshot_read_next()
2282 handle->buffer = buffer; in snapshot_read_next()
2284 handle->buffer = page_address(page); in snapshot_read_next()
2287 handle->cur++; in snapshot_read_next()
2305 * mark_unsafe_pages - Mark pages that were used before hibernation.
2310 static void mark_unsafe_pages(struct memory_bitmap *bm) in mark_unsafe_pages() argument
2323 duplicate_memory_bitmap(free_pages_map, bm); in mark_unsafe_pages()
2333 if (!reason && info->num_physpages != get_num_physpages()) in check_header()
2337 return -EPERM; in check_header()
2343 * load_header - Check the image header and copy the data from it.
2352 nr_copy_pages = info->image_pages; in load_header()
2353 nr_meta_pages = info->pages - info->image_pages - 1; in load_header()
2359 * unpack_orig_pfns - Set bits corresponding to given PFNs in a memory bitmap.
2360 * @bm: Memory bitmap.
2365 * corresponding bit in @bm. If the page was originally populated with only
2368 static int unpack_orig_pfns(unsigned long *buf, struct memory_bitmap *bm, in unpack_orig_pfns() argument
2381 if (pfn_valid(decoded_pfn) && memory_bm_pfn_present(bm, decoded_pfn)) { in unpack_orig_pfns()
2382 memory_bm_set_bit(bm, decoded_pfn); in unpack_orig_pfns()
2391 return -EFAULT; in unpack_orig_pfns()
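The restore side masks the flag back off (ENCODED_PFN_MASK is presumably the complement of ENCODED_PFN_ZERO_FLAG): decoded_pfn carries the frame number, and the flag says the page needs no data from the image stream. Schematically:

        unsigned long decoded_pfn = buf[j] & ENCODED_PFN_MASK;
        int zero = !!(buf[j] & ENCODED_PFN_ZERO_FLAG);
        /* zero pages are marked in a zero bitmap and consume no
         * data page from the image */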
2419 * count_highmem_image_pages - Compute the number of highmem pages in the image.
2420 * @bm: Memory bitmap.
2422 * The bits in @bm that correspond to image pages are assumed to be set.
2424 static unsigned int count_highmem_image_pages(struct memory_bitmap *bm) in count_highmem_image_pages() argument
2429 memory_bm_position_reset(bm); in count_highmem_image_pages()
2430 pfn = memory_bm_next_pfn(bm); in count_highmem_image_pages()
2435 pfn = memory_bm_next_pfn(bm); in count_highmem_image_pages()
2445 * prepare_highmem_image - Allocate memory for loading highmem data from image.
2446 * @bm: Pointer to an uninitialized memory bitmap structure.
2453 * @bm (it must be uninitialized).
2457 static int prepare_highmem_image(struct memory_bitmap *bm, in prepare_highmem_image() argument
2462 if (memory_bm_create(bm, GFP_ATOMIC, PG_SAFE)) in prepare_highmem_image()
2463 return -ENOMEM; in prepare_highmem_image()
2466 return -ENOMEM; in prepare_highmem_image()
2475 while (to_alloc-- > 0) { in prepare_highmem_image()
2481 memory_bm_set_bit(bm, page_to_pfn(page)); in prepare_highmem_image()
2488 memory_bm_position_reset(bm); in prepare_highmem_image()
2489 safe_highmem_bm = bm; in prepare_highmem_image()
2496 * get_highmem_page_buffer - Prepare a buffer to store a highmem image page.
2534 return ERR_PTR(-ENOMEM); in get_highmem_page_buffer()
2536 pbe->orig_page = page; in get_highmem_page_buffer()
2543 safe_highmem_pages--; in get_highmem_page_buffer()
2545 pbe->copy_page = tmp; in get_highmem_page_buffer()
2548 kaddr = __get_safe_page(ca->gfp_mask); in get_highmem_page_buffer()
2550 return ERR_PTR(-ENOMEM); in get_highmem_page_buffer()
2551 pbe->copy_page = virt_to_page(kaddr); in get_highmem_page_buffer()
2553 pbe->next = highmem_pblist; in get_highmem_page_buffer()
2559 * copy_last_highmem_page - Copy the most recent highmem image page.
2591 static unsigned int count_highmem_image_pages(struct memory_bitmap *bm) { return 0; } in count_highmem_image_pages() argument
2593 static inline int prepare_highmem_image(struct memory_bitmap *bm, in prepare_highmem_image() argument
2599 return ERR_PTR(-EINVAL); in get_highmem_page_buffer()
2610 * prepare_image - Make room for loading hibernation image.
2612 * @bm: Memory bitmap with unsafe pages marked.
2615 * Use @bm to mark the pages that will be overwritten in the process of
2628 static int prepare_image(struct memory_bitmap *new_bm, struct memory_bitmap *bm, in prepare_image() argument
2640 nr_highmem = count_highmem_image_pages(bm); in prepare_image()
2641 mark_unsafe_pages(bm); in prepare_image()
2647 duplicate_memory_bitmap(new_bm, bm); in prepare_image()
2648 memory_bm_free(bm, PG_UNSAFE_KEEP); in prepare_image()
2668 error = prepare_highmem_image(bm, &nr_highmem); in prepare_image()
2681 nr_pages = (nr_zero_pages + nr_copy_pages) - nr_highmem - allocated_unsafe_pages; in prepare_image()
2686 error = -ENOMEM; in prepare_image()
2689 lp->next = safe_pages_list; in prepare_image()
2691 nr_pages--; in prepare_image()
2694 nr_pages = (nr_zero_pages + nr_copy_pages) - nr_highmem - allocated_unsafe_pages; in prepare_image()
2698 error = -ENOMEM; in prepare_image()
2703 lp->next = safe_pages_list; in prepare_image()
2709 nr_pages--; in prepare_image()
2719 * get_buffer - Get the address to store the next image data page.
2724 static void *get_buffer(struct memory_bitmap *bm, struct chain_allocator *ca) in get_buffer() argument
2728 unsigned long pfn = memory_bm_next_pfn(bm); in get_buffer()
2731 return ERR_PTR(-EFAULT); in get_buffer()
2751 return ERR_PTR(-ENOMEM); in get_buffer()
2753 pbe->orig_address = page_address(page); in get_buffer()
2754 pbe->address = __get_safe_page(ca->gfp_mask); in get_buffer()
2755 if (!pbe->address) in get_buffer()
2756 return ERR_PTR(-ENOMEM); in get_buffer()
2757 pbe->next = restore_pblist; in get_buffer()
2759 return pbe->address; in get_buffer()
2763 * snapshot_write_next - Get the address to store the next image page.
2785 if (handle->cur > 1 && handle->cur > nr_meta_pages + nr_copy_pages + nr_zero_pages) in snapshot_write_next()
2788 if (!handle->cur) { in snapshot_write_next()
2794 return -ENOMEM; in snapshot_write_next()
2796 handle->buffer = buffer; in snapshot_write_next()
2797 } else if (handle->cur == 1) { in snapshot_write_next()
2815 } else if (handle->cur <= nr_meta_pages + 1) { in snapshot_write_next()
2820 if (handle->cur == nr_meta_pages + 1) { in snapshot_write_next()
2829 handle->buffer = get_buffer(&orig_bm, &ca); in snapshot_write_next()
2830 if (IS_ERR(handle->buffer)) in snapshot_write_next()
2831 return PTR_ERR(handle->buffer); in snapshot_write_next()
2835 hibernate_restore_protect_page(handle->buffer); in snapshot_write_next()
2836 handle->buffer = get_buffer(&orig_bm, &ca); in snapshot_write_next()
2837 if (IS_ERR(handle->buffer)) in snapshot_write_next()
2838 return PTR_ERR(handle->buffer); in snapshot_write_next()
2840 handle->sync_read = (handle->buffer == buffer); in snapshot_write_next()
2841 handle->cur++; in snapshot_write_next()
2844 if (handle->cur > nr_meta_pages + 1 && in snapshot_write_next()
2846 memset(handle->buffer, 0, PAGE_SIZE); in snapshot_write_next()
2854 * snapshot_write_finalize - Complete the loading of a hibernation image.
2864 hibernate_restore_protect_page(handle->buffer); in snapshot_write_finalize()
2866 if (handle->cur > 1 && handle->cur > nr_meta_pages + nr_copy_pages + nr_zero_pages) { in snapshot_write_finalize()
2875 handle->cur <= nr_meta_pages + nr_copy_pages + nr_zero_pages); in snapshot_image_loaded()
2895 * restore_highmem - Put highmem image pages into their original locations.
2914 return -ENOMEM; in restore_highmem()
2917 swap_two_pages_data(pbe->copy_page, pbe->orig_page, buf); in restore_highmem()
2918 pbe = pbe->next; in restore_highmem()
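With only one spare page (whose allocation failure is the -ENOMEM at line 2914), putting each highmem page back in place is a three-way swap; schematically, with memcpy() standing in for the kmap_atomic()/copy_page() sequence the kernel uses:

#include <string.h>

/* Exchange two page-sized buffers through a third. */
static void swap_pages(void *a, void *b, void *scratch, size_t page_size)
{
        memcpy(scratch, a, page_size);
        memcpy(a, b, page_size);
        memcpy(b, scratch, page_size);
}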