Searched refs: zone_start_pfn (results 1 – 15 of 15), sorted by relevance. Each hit below lists the source line number, the matching code, and the enclosing function.
/openbmc/linux/arch/x86/mm/
highmem_32.c
    19    unsigned long zone_start_pfn, zone_end_pfn;              (in set_highmem_pages_init(), local)
    24    zone_start_pfn = zone->zone_start_pfn;                   (in set_highmem_pages_init())
    25    zone_end_pfn = zone_start_pfn + zone->spanned_pages;     (in set_highmem_pages_init())
    29    zone->name, nid, zone_start_pfn, zone_end_pfn);          (in set_highmem_pages_init())
    31    add_highpages_with_active_regions(nid, zone_start_pfn,   (in set_highmem_pages_init())
/openbmc/linux/mm/
mm_init.c
    914   unsigned long zone_start_pfn = zone->zone_start_pfn;                 (in memmap_init_zone_range(), local)
    915   unsigned long zone_end_pfn = zone_start_pfn + zone->spanned_pages;   (in memmap_init_zone_range())
    918   start_pfn = clamp(start_pfn, zone_start_pfn, zone_end_pfn);          (in memmap_init_zone_range())
    919   end_pfn = clamp(end_pfn, zone_start_pfn, zone_end_pfn);              (in memmap_init_zone_range())
    1119  unsigned long *zone_start_pfn,                                       (in adjust_zone_range_for_zone_movable(), argument)
    1126  *zone_start_pfn = zone_movable_pfn[nid];                             (in adjust_zone_range_for_zone_movable())
    1132  *zone_start_pfn < zone_movable_pfn[nid] &&                           (in adjust_zone_range_for_zone_movable())
    1137  } else if (*zone_start_pfn >= zone_movable_pfn[nid])                 (in adjust_zone_range_for_zone_movable())
    1138  *zone_start_pfn = *zone_end_pfn;                                     (in adjust_zone_range_for_zone_movable())
    1178  unsigned long zone_start_pfn,                                        (in zone_absent_pages_in_node(), argument)
    [all …]
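The memmap_init_zone_range() hits (914-919) show the recurring idiom of deriving the zone's end from zone_start_pfn + spanned_pages and then clamping an externally supplied PFN range to that span. Below is a minimal, self-contained sketch of the same clamp pattern; the function name clamp_range_to_zone() and its parameters are illustrative, not the kernel's.

    /*
     * Illustrative sketch of the clamp idiom in the mm_init.c hits above;
     * not kernel code.  Only the names zone_start_pfn and spanned_pages
     * come from the listing.
     */
    void clamp_range_to_zone(unsigned long zone_start_pfn,
                             unsigned long spanned_pages,
                             unsigned long *start_pfn, unsigned long *end_pfn)
    {
        unsigned long zone_end_pfn = zone_start_pfn + spanned_pages;

        /* Trim [*start_pfn, *end_pfn) so it never leaves the zone span. */
        if (*start_pfn < zone_start_pfn)
            *start_pfn = zone_start_pfn;
        if (*start_pfn > zone_end_pfn)
            *start_pfn = zone_end_pfn;
        if (*end_pfn < zone_start_pfn)
            *end_pfn = zone_start_pfn;
        if (*end_pfn > zone_end_pfn)
            *end_pfn = zone_end_pfn;
    }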
compaction.c
    226   zone->compact_cached_migrate_pfn[0] = zone->zone_start_pfn;            (in reset_cached_positions())
    227   zone->compact_cached_migrate_pfn[1] = zone->zone_start_pfn;            (in reset_cached_positions())
    335   block_pfn = max(block_pfn, zone->zone_start_pfn);                      (in __reset_isolation_pfn())
    378   unsigned long migrate_pfn = zone->zone_start_pfn;                      (in __reset_isolation_suitable())
    730   if (block_start_pfn < cc->zone->zone_start_pfn)                        (in isolate_freepages_range())
    731   block_start_pfn = cc->zone->zone_start_pfn;                            (in isolate_freepages_range())
    941   low_pfn == cc->zone->zone_start_pfn)) {                                (in isolate_migratepages_block())
    1295  if (block_start_pfn < cc->zone->zone_start_pfn)                        (in isolate_migratepages_range())
    1296  block_start_pfn = cc->zone->zone_start_pfn;                            (in isolate_migratepages_range())
    1436  start_pfn = max(pageblock_start_pfn(pfn), cc->zone->zone_start_pfn);   (in fast_isolate_around())
    [all …]
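Several of the compaction hits (335, 730-731, 1295-1296, 1436) repeat the same guard: rounding a PFN down to its pageblock can land below the zone's first PFN, so the result is pulled back up to zone_start_pfn. A hedged sketch of that guard follows; the pageblock order value and both helper names are illustrative, not the kernel's.

    /* Illustrative only: a stand-in pageblock size, not the kernel's config. */
    #define EXAMPLE_PAGEBLOCK_ORDER  9
    #define EXAMPLE_PAGEBLOCK_PAGES  (1UL << EXAMPLE_PAGEBLOCK_ORDER)

    /* Round a PFN down to the first PFN of its pageblock. */
    unsigned long example_pageblock_start(unsigned long pfn)
    {
        return pfn & ~(EXAMPLE_PAGEBLOCK_PAGES - 1);
    }

    /*
     * Mirrors the guard seen at compaction.c:730-731 and 1295-1296: never
     * start scanning below the zone's first PFN, even when the enclosing
     * pageblock begins before it.
     */
    unsigned long example_clamp_block_start(unsigned long pfn,
                                            unsigned long zone_start_pfn)
    {
        unsigned long block_start_pfn = example_pageblock_start(pfn);

        if (block_start_pfn < zone_start_pfn)
            block_start_pfn = zone_start_pfn;
        return block_start_pfn;
    }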
shuffle.c
    83    unsigned long start_pfn = z->zone_start_pfn;   (in __shuffle_zone())
    112   j = z->zone_start_pfn +                        (in __shuffle_zone())
memory_hotplug.c
    465   if (zone->zone_start_pfn == start_pfn) {                          (in shrink_zone_span())
    476   zone->zone_start_pfn = pfn;                                       (in shrink_zone_span())
    478   zone->zone_start_pfn = 0;                                         (in shrink_zone_span())
    488   pfn = find_biggest_section_pfn(nid, zone, zone->zone_start_pfn,   (in shrink_zone_span())
    491   zone->spanned_pages = pfn - zone->zone_start_pfn + 1;             (in shrink_zone_span())
    493   zone->zone_start_pfn = 0;                                         (in shrink_zone_span())
    512   node_start_pfn = zone->zone_start_pfn;                            (in update_pgdat_span())
    519   if (zone->zone_start_pfn < node_start_pfn)                        (in update_pgdat_span())
    520   node_start_pfn = zone->zone_start_pfn;                            (in update_pgdat_span())
    709   if (zone_is_empty(zone) || start_pfn < zone->zone_start_pfn)      (in resize_zone_range())
    [all …]
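The shrink_zone_span() hits (465-493) show the two outcomes when memory is offlined at the start of a zone: either zone_start_pfn is advanced to the first PFN that still has memory, or the zone is marked empty by zeroing its start and span. The sketch below reproduces only that start-of-zone case under stated assumptions; struct zone_span and the first_present_pfn parameter are stand-ins (the kernel derives the latter by scanning memory sections), not kernel APIs.

    /*
     * Illustrative sketch of the start-of-zone shrink case in the
     * shrink_zone_span() hits above; not the kernel's implementation.
     * first_present_pfn is assumed to be the first PFN in the zone that
     * still has memory after the offlined range was removed, or the zone
     * end if nothing remains.
     */
    struct zone_span {
        unsigned long zone_start_pfn;
        unsigned long spanned_pages;
    };

    void shrink_zone_start(struct zone_span *zone,
                           unsigned long removed_start_pfn,
                           unsigned long first_present_pfn)
    {
        unsigned long zone_end_pfn = zone->zone_start_pfn + zone->spanned_pages;

        /* Only handles the case where the removed range is at the zone's start. */
        if (zone->zone_start_pfn != removed_start_pfn)
            return;

        if (first_present_pfn < zone_end_pfn) {
            /* Advance the start past the removed range (cf. line 476). */
            zone->spanned_pages = zone_end_pfn - first_present_pfn;
            zone->zone_start_pfn = first_present_pfn;
        } else {
            /* The whole zone is gone (cf. lines 478 and 493). */
            zone->zone_start_pfn = 0;
            zone->spanned_pages = 0;
        }
    }

The update_pgdat_span() hits (512-520) then recompute the node's start from the smallest remaining zone_start_pfn in the same spirit.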
page_owner.c
    288   pfn = zone->zone_start_pfn;                   (in pagetypeinfo_showmixedcount_print())
    623   unsigned long pfn = zone->zone_start_pfn;     (in init_pages_in_zone())
page_isolation.c
    330   zone->zone_start_pfn);                        (in isolate_single_pageblock())
vmstat.c
    1555  unsigned long start_pfn = zone->zone_start_pfn;   (in pagetypeinfo_showblockcount_print())
    1757  zone->zone_start_pfn);                            (in zoneinfo_show_print())
memblock.c
    1364  if (zone->zone_start_pfn < epfn && spfn < epfn) {   (in __next_mem_pfn_range_in_zone())
    1372  *out_spfn = max(zone->zone_start_pfn, spfn);        (in __next_mem_pfn_range_in_zone())
kmemleak.c
    1580  unsigned long start_pfn = zone->zone_start_pfn;     (in kmemleak_scan())
page_alloc.c
    358   pfn = pfn - pageblock_start_pfn(page_zone(page)->zone_start_pfn);   (in pfn_to_bitidx())
    450   start_pfn = zone->zone_start_pfn;                                   (in page_outside_zone_boundaries())
    6328  pfn = ALIGN(zone->zone_start_pfn, nr_pages);                        (in alloc_contig_pages())
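The pfn_to_bitidx() hit (358) computes a pageblock index relative to the pageblock-aligned start of the zone, and the alloc_contig_pages() hit (6328) aligns its search start upward from zone_start_pfn. A small sketch of that relative-offset arithmetic follows; the pageblock order value and helper names are illustrative, not the kernel's.

    /* Illustrative only: a stand-in pageblock size. */
    #define SKETCH_PAGEBLOCK_ORDER  9
    #define SKETCH_PAGEBLOCK_PAGES  (1UL << SKETCH_PAGEBLOCK_ORDER)

    /* Align a PFN down to its pageblock boundary. */
    unsigned long sketch_pageblock_start_pfn(unsigned long pfn)
    {
        return pfn & ~(SKETCH_PAGEBLOCK_PAGES - 1);
    }

    /*
     * Index of the pageblock containing pfn, counted from the
     * pageblock-aligned start of the zone, in the spirit of
     * page_alloc.c:358.
     */
    unsigned long sketch_pageblock_index(unsigned long pfn,
                                         unsigned long zone_start_pfn)
    {
        return (pfn - sketch_pageblock_start_pfn(zone_start_pfn))
                >> SKETCH_PAGEBLOCK_ORDER;
    }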
huge_memory.c
    2936  for (pfn = zone->zone_start_pfn; pfn < max_zone_pfn; pfn++) {   (in split_huge_pages_all())
/openbmc/linux/include/linux/
mmzone.h
    857   unsigned long zone_start_pfn;                                     (member)
    1024  return zone->zone_start_pfn + zone->spanned_pages;                (in zone_end_pfn())
    1029  return zone->zone_start_pfn <= pfn && pfn < zone_end_pfn(zone);   (in zone_spans_pfn())
    1163  start_pfn + nr_pages <= zone->zone_start_pfn)                     (in zone_intersects())
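The mmzone.h hits are the definition site: zone_start_pfn is the struct zone member (857) from which the zone's exclusive end PFN (1024) and the span test (1029) are derived, i.e. a zone covers the half-open range [zone_start_pfn, zone_start_pfn + spanned_pages). Below is a minimal user-space sketch of exactly that relationship; the simplified struct stands in for the kernel's struct zone, and only the two field names come from the listing.

    #include <stdbool.h>
    #include <stdio.h>

    /* Simplified stand-in for struct zone: only the two fields visible in
     * the mmzone.h hits are modeled. */
    struct zone {
        unsigned long zone_start_pfn;   /* first PFN the zone spans */
        unsigned long spanned_pages;    /* PFNs spanned, holes included */
    };

    /* Mirrors the hit at mmzone.h:1024: the end PFN is exclusive. */
    static unsigned long zone_end_pfn(const struct zone *zone)
    {
        return zone->zone_start_pfn + zone->spanned_pages;
    }

    /* Mirrors the hit at mmzone.h:1029: half-open interval membership. */
    static bool zone_spans_pfn(const struct zone *zone, unsigned long pfn)
    {
        return zone->zone_start_pfn <= pfn && pfn < zone_end_pfn(zone);
    }

    int main(void)
    {
        struct zone z = { .zone_start_pfn = 0x1000, .spanned_pages = 0x800 };

        printf("end pfn:      %#lx\n", zone_end_pfn(&z));           /* 0x1800 */
        printf("spans 0x17ff: %d\n", zone_spans_pfn(&z, 0x17ff));   /* 1 */
        printf("spans 0x1800: %d\n", zone_spans_pfn(&z, 0x1800));   /* 0 */
        return 0;
    }

The zone_intersects() hit (1163) uses the same bounds to reject ranges that end at or before zone_start_pfn.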
/openbmc/linux/arch/arm64/kernel/
hibernate.c
    267   for (pfn = zone->zone_start_pfn; pfn < max_zone_pfn; pfn++) {   (in swsusp_mte_save_tags())
/openbmc/linux/kernel/power/
snapshot.c
    636   zone_start = zone->zone_start_pfn;                             (in create_mem_extents())
    1259  for (pfn = zone->zone_start_pfn; pfn < max_zone_pfn; pfn++)   (in mark_free_pages())
    1360  for (pfn = zone->zone_start_pfn; pfn < max_zone_pfn; pfn++)   (in count_highmem_pages())
    1427  for (pfn = zone->zone_start_pfn; pfn < max_zone_pfn; pfn++)   (in count_data_pages())
    1540  for (pfn = zone->zone_start_pfn; pfn < max_zone_pfn; pfn++)   (in copy_data_pages())
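The hibernation hits (mark_free_pages(), count_highmem_pages(), count_data_pages(), copy_data_pages()), like the huge_memory.c and hibernate.c loops above, share one walk shape: start at zone_start_pfn and iterate to the zone's end PFN, checking each PFN individually because a zone's span can contain holes. A hedged sketch of that walk follows; struct zone_span and the match callback are stand-ins rather than kernel APIs (the kernel performs checks such as pfn_valid() on each PFN).

    /* Illustrative zone walk matching the loop shape in the hits above;
     * not kernel code. */
    struct zone_span {
        unsigned long zone_start_pfn;
        unsigned long spanned_pages;
    };

    unsigned long count_matching_pfns(const struct zone_span *zone,
                                      int (*match)(unsigned long pfn))
    {
        unsigned long max_zone_pfn = zone->zone_start_pfn + zone->spanned_pages;
        unsigned long pfn, count = 0;

        /* Same shape as "for (pfn = zone->zone_start_pfn; pfn < max_zone_pfn; pfn++)". */
        for (pfn = zone->zone_start_pfn; pfn < max_zone_pfn; pfn++)
            if (match(pfn))
                count++;

        return count;
    }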