Lines matching refs:zone (identifier cross-references from the hibernation snapshot code, kernel/power/snapshot.c). Each entry reads: <source line> <code> in <function>(), with member/argument/local tags where applicable.

404 struct mem_zone_bm_rtree *zone; member
466 static int add_rtree_block(struct mem_zone_bm_rtree *zone, gfp_t gfp_mask, in add_rtree_block() argument
473 block_nr = zone->blocks; in add_rtree_block()
483 for (i = zone->levels; i < levels_needed; i++) { in add_rtree_block()
485 &zone->nodes); in add_rtree_block()
489 node->data[0] = (unsigned long)zone->rtree; in add_rtree_block()
490 zone->rtree = node; in add_rtree_block()
491 zone->levels += 1; in add_rtree_block()
495 block = alloc_rtree_node(gfp_mask, safe_needed, ca, &zone->leaves); in add_rtree_block()
500 node = zone->rtree; in add_rtree_block()
501 dst = &zone->rtree; in add_rtree_block()
502 block_nr = zone->blocks; in add_rtree_block()
503 for (i = zone->levels; i > 0; i--) { in add_rtree_block()
508 &zone->nodes); in add_rtree_block()
520 zone->blocks += 1; in add_rtree_block()
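
The add_rtree_block() hits above cover the two phases of growing a zone's radix tree: lines 483-491 push new levels above the current root until the tree is deep enough for the next block number, and lines 500-520 walk back down from the root, allocating missing interior nodes, to link the new bitmap leaf. Below is a minimal user-space sketch of the per-level slot arithmetic that descent relies on; the shift and mask values are assumptions modelled on the kernel's BM_RTREE_LEVEL_SHIFT/BM_RTREE_LEVEL_MASK for 4 KiB pages, not taken from the listing:

#include <stdio.h>

/* Assumed node geometry: a 4 KiB node holds 512 eight-byte slots. */
#define RTREE_LEVEL_SHIFT 9
#define RTREE_LEVEL_MASK  ((1UL << RTREE_LEVEL_SHIFT) - 1)

/* Slot used at tree level 'level' (1 = lowest) for a given leaf block. */
static unsigned int rtree_slot(unsigned long block_nr, unsigned int level)
{
        return (block_nr >> ((level - 1) * RTREE_LEVEL_SHIFT)) & RTREE_LEVEL_MASK;
}

int main(void)
{
        unsigned long block_nr = 123456;        /* hypothetical zone->blocks value */
        unsigned int i, levels = 3;

        /* Same descent order as 'for (i = zone->levels; i > 0; i--)'. */
        for (i = levels; i > 0; i--)
                printf("level %u -> slot %u\n", i, rtree_slot(block_nr, i));
        return 0;
}
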
526 static void free_zone_bm_rtree(struct mem_zone_bm_rtree *zone,
542 struct mem_zone_bm_rtree *zone; in create_zone_bm_rtree() local
547 zone = chain_alloc(ca, sizeof(struct mem_zone_bm_rtree)); in create_zone_bm_rtree()
548 if (!zone) in create_zone_bm_rtree()
551 INIT_LIST_HEAD(&zone->nodes); in create_zone_bm_rtree()
552 INIT_LIST_HEAD(&zone->leaves); in create_zone_bm_rtree()
553 zone->start_pfn = start; in create_zone_bm_rtree()
554 zone->end_pfn = end; in create_zone_bm_rtree()
558 if (add_rtree_block(zone, gfp_mask, safe_needed, ca)) { in create_zone_bm_rtree()
559 free_zone_bm_rtree(zone, PG_UNSAFE_CLEAR); in create_zone_bm_rtree()
564 return zone; in create_zone_bm_rtree()
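
create_zone_bm_rtree() chain-allocates the zone descriptor, initializes the node and leaf lists and the pfn span (lines 551-554), then calls add_rtree_block() once per bitmap block, unwinding with free_zone_bm_rtree() on failure (lines 558-559). A sketch of the block-count arithmetic that loop presumably runs over, assuming one 4 KiB page of bits per leaf:

#include <stdio.h>

#define DIV_ROUND_UP(n, d)      (((n) + (d) - 1) / (d))
#define BM_BITS_PER_BLOCK       (4096 * 8)      /* assumption: one page of bits */

int main(void)
{
        unsigned long start = 0x1000, end = 0x140000;   /* hypothetical zone span */

        /* One add_rtree_block() call per leaf needed to give each pfn a bit. */
        printf("%lu blocks\n", DIV_ROUND_UP(end - start, BM_BITS_PER_BLOCK));
        return 0;
}
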
574 static void free_zone_bm_rtree(struct mem_zone_bm_rtree *zone, in free_zone_bm_rtree() argument
579 list_for_each_entry(node, &zone->nodes, list) in free_zone_bm_rtree()
582 list_for_each_entry(node, &zone->leaves, list) in free_zone_bm_rtree()
588 bm->cur.zone = list_entry(bm->zones.next, struct mem_zone_bm_rtree, in memory_bm_position_reset()
590 bm->cur.node = list_entry(bm->cur.zone->leaves.next, in memory_bm_position_reset()
628 struct zone *zone; in create_mem_extents() local
632 for_each_populated_zone(zone) { in create_mem_extents()
636 zone_start = zone->zone_start_pfn; in create_mem_extents()
637 zone_end = zone_end_pfn(zone); in create_mem_extents()
698 struct mem_zone_bm_rtree *zone; in memory_bm_create() local
700 zone = create_zone_bm_rtree(gfp_mask, safe_needed, &ca, in memory_bm_create()
702 if (!zone) { in memory_bm_create()
706 list_add_tail(&zone->list, &bm->zones); in memory_bm_create()
727 struct mem_zone_bm_rtree *zone; in memory_bm_free() local
729 list_for_each_entry(zone, &bm->zones, list) in memory_bm_free()
730 free_zone_bm_rtree(zone, clear_nosave_free); in memory_bm_free()
749 struct mem_zone_bm_rtree *curr, *zone; in memory_bm_find_bit() local
753 zone = bm->cur.zone; in memory_bm_find_bit()
755 if (pfn >= zone->start_pfn && pfn < zone->end_pfn) in memory_bm_find_bit()
758 zone = NULL; in memory_bm_find_bit()
763 zone = curr; in memory_bm_find_bit()
768 if (!zone) in memory_bm_find_bit()
783 if (zone == bm->cur.zone && in memory_bm_find_bit()
784 ((pfn - zone->start_pfn) & ~BM_BLOCK_MASK) == bm->cur.node_pfn) in memory_bm_find_bit()
787 node = zone->rtree; in memory_bm_find_bit()
788 block_nr = (pfn - zone->start_pfn) >> BM_BLOCK_SHIFT; in memory_bm_find_bit()
790 for (i = zone->levels; i > 0; i--) { in memory_bm_find_bit()
801 bm->cur.zone = zone; in memory_bm_find_bit()
803 bm->cur.node_pfn = (pfn - zone->start_pfn) & ~BM_BLOCK_MASK; in memory_bm_find_bit()
808 *bit_nr = (pfn - zone->start_pfn) & BM_BLOCK_MASK; in memory_bm_find_bit()
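
memory_bm_find_bit() resolves a pfn in three steps: try the cached zone first (lines 753-755), otherwise search bm->zones for one whose span contains the pfn (lines 758-768), then skip the tree walk entirely when the pfn lands in the cached leaf (lines 783-784); only on a cache miss does it descend the radix tree (lines 787-790) and refresh the cursor (lines 801-803). A user-space sketch of the pfn-to-(leaf, bit) split and the leaf-cache test, with BM_BLOCK_SHIFT assumed to be 15 (a 4 KiB bitmap page holds 32768 bits):

#include <stdbool.h>
#include <stdio.h>

/* Assumption: 4 KiB bitmap leaves -> 4096 * 8 = 32768 bits per leaf. */
#define BM_BLOCK_SHIFT  15
#define BM_BLOCK_MASK   ((1UL << BM_BLOCK_SHIFT) - 1)

struct cursor {
        unsigned long zone_start_pfn;   /* start of the cached zone */
        unsigned long node_pfn;         /* zone-relative pfn of cached leaf */
};

/* Mirrors the test on line 784: does pfn land in the cached leaf? */
static bool leaf_cached(const struct cursor *cur, unsigned long pfn)
{
        return ((pfn - cur->zone_start_pfn) & ~BM_BLOCK_MASK) == cur->node_pfn;
}

int main(void)
{
        unsigned long zone_start = 0x1000, pfn = 0x9876;        /* hypothetical */
        unsigned long block_nr = (pfn - zone_start) >> BM_BLOCK_SHIFT; /* line 788 */
        unsigned long bit_nr = (pfn - zone_start) & BM_BLOCK_MASK;     /* line 808 */
        struct cursor cur = { zone_start, (pfn - zone_start) & ~BM_BLOCK_MASK };

        printf("pfn 0x%lx -> leaf %lu, bit %lu, cached-next=%d\n",
               pfn, block_nr, bit_nr, leaf_cached(&cur, pfn + 1));
        return 0;
}
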
892 if (!list_is_last(&bm->cur.node->list, &bm->cur.zone->leaves)) { in rtree_next_node()
902 if (!list_is_last(&bm->cur.zone->list, &bm->zones)) { in rtree_next_node()
903 bm->cur.zone = list_entry(bm->cur.zone->list.next, in rtree_next_node()
905 bm->cur.node = list_entry(bm->cur.zone->leaves.next, in rtree_next_node()
933 pages = bm->cur.zone->end_pfn - bm->cur.zone->start_pfn; in memory_bm_next_pfn()
938 pfn = bm->cur.zone->start_pfn + bm->cur.node_pfn + bit; in memory_bm_next_pfn()
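
rtree_next_node() advances the cursor leaf-by-leaf within the current zone (line 892) and then zone-by-zone across bm->zones (lines 902-905); memory_bm_next_pfn() scans the current leaf for the next set bit, bounded by the zone's span (line 933), and rebuilds the absolute pfn as start_pfn + node_pfn + bit (line 938). A sketch of that bit scan over a flat stand-in for one leaf, under the same 32768-bits-per-leaf assumption:

#include <stdio.h>

#define BITS_PER_LEAF   (4096 * 8)      /* assumption, matches BM_BLOCK_SHIFT = 15 */

/* Next set bit in leaf[] at or after 'from'; BITS_PER_LEAF if none. */
static unsigned long next_bit(const unsigned long *leaf, unsigned long from)
{
        for (; from < BITS_PER_LEAF; from++)
                if (leaf[from / (8 * sizeof(unsigned long))] &
                    (1UL << (from % (8 * sizeof(unsigned long)))))
                        break;
        return from;
}

int main(void)
{
        unsigned long leaf[BITS_PER_LEAF / (8 * sizeof(unsigned long))] = { 0 };
        unsigned long start_pfn = 0x1000, node_pfn = 0, bit;    /* hypothetical cursor */

        leaf[3] = 1UL << 7;     /* pretend bit 3*64+7 = 199 was set */
        bit = next_bit(leaf, 0);
        if (bit < BITS_PER_LEAF)
                printf("next pfn = 0x%lx\n", start_pfn + node_pfn + bit); /* line 938 */
        return 0;
}
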
961 static void recycle_zone_bm_rtree(struct mem_zone_bm_rtree *zone) in recycle_zone_bm_rtree() argument
965 list_for_each_entry(node, &zone->nodes, list) in recycle_zone_bm_rtree()
968 list_for_each_entry(node, &zone->leaves, list) in recycle_zone_bm_rtree()
974 struct mem_zone_bm_rtree *zone; in memory_bm_recycle() local
977 list_for_each_entry(zone, &bm->zones, list) in memory_bm_recycle()
978 recycle_zone_bm_rtree(zone); in memory_bm_recycle()
1226 unsigned int snapshot_additional_pages(struct zone *zone) in snapshot_additional_pages() argument
1230 rtree = nodes = DIV_ROUND_UP(zone->spanned_pages, BM_BITS_PER_BLOCK); in snapshot_additional_pages()
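
snapshot_additional_pages() estimates the bitmap overhead for a zone: one leaf per BM_BITS_PER_BLOCK spanned pages (line 1230), plus the interior nodes above them, doubled because two bitmaps per zone are maintained. A hedged model of that arithmetic under the 4 KiB-page assumptions used above; the real function also adds chain-allocator packing overhead that this sketch omits:

#include <stdio.h>

#define DIV_ROUND_UP(n, d)      (((n) + (d) - 1) / (d))
#define BM_BITS_PER_BLOCK       (4096 * 8)      /* bits per bitmap leaf (assumed) */
#define ENTRIES_PER_LEVEL       (4096 / 8)      /* slots per interior node (assumed) */

static unsigned int extra_pages(unsigned long spanned_pages)
{
        unsigned int rtree, nodes;

        rtree = nodes = DIV_ROUND_UP(spanned_pages, BM_BITS_PER_BLOCK);
        while (nodes > 1) {             /* add interior levels up to the root */
                nodes = DIV_ROUND_UP(nodes, ENTRIES_PER_LEVEL);
                rtree += nodes;
        }
        return 2 * rtree;               /* two bitmaps per zone */
}

int main(void)
{
        /* 4 GiB zone: 2^20 pfns -> 32 leaves + 1 node, doubled = 66 pages. */
        printf("%u\n", extra_pages(1UL << 20));
        return 0;
}
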
1246 static void mark_free_pages(struct zone *zone) in mark_free_pages() argument
1253 if (zone_is_empty(zone)) in mark_free_pages()
1256 spin_lock_irqsave(&zone->lock, flags); in mark_free_pages()
1258 max_zone_pfn = zone_end_pfn(zone); in mark_free_pages()
1259 for (pfn = zone->zone_start_pfn; pfn < max_zone_pfn; pfn++) in mark_free_pages()
1268 if (page_zone(page) != zone) in mark_free_pages()
1277 &zone->free_area[order].free_list[t], buddy_list) { in mark_free_pages()
1290 spin_unlock_irqrestore(&zone->lock, flags); in mark_free_pages()
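
mark_free_pages() runs under zone->lock: it first clears the free flag for every page that belongs to the zone (the checks around lines 1259-1268 skip pfn holes and pages owned by other zones), then re-marks everything sitting on the buddy free lists (line 1277), where a block of order N stands for 2^N consecutive pfns. A sketch of that order expansion, with a hypothetical mark() helper standing in for swsusp_set_page_free():

#include <stdio.h>

/* Hypothetical stand-in for swsusp_set_page_free(pfn_to_page(pfn)). */
static void mark(unsigned long pfn)
{
        printf("free pfn 0x%lx\n", pfn);
}

/* A buddy block of 'order' starting at 'pfn' covers 2^order pages. */
static void mark_buddy_block(unsigned long pfn, unsigned int order)
{
        unsigned long i;

        for (i = 0; i < (1UL << order); i++)
                mark(pfn + i);
}

int main(void)
{
        mark_buddy_block(0x2000, 2);    /* order-2 block -> 4 consecutive pfns */
        return 0;
}
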
1301 struct zone *zone; in count_free_highmem_pages() local
1304 for_each_populated_zone(zone) in count_free_highmem_pages()
1305 if (is_highmem(zone)) in count_free_highmem_pages()
1306 cnt += zone_page_state(zone, NR_FREE_PAGES); in count_free_highmem_pages()
1319 static struct page *saveable_highmem_page(struct zone *zone, unsigned long pfn) in saveable_highmem_page() argument
1327 if (!page || page_zone(page) != zone) in saveable_highmem_page()
1349 struct zone *zone; in count_highmem_pages() local
1352 for_each_populated_zone(zone) { in count_highmem_pages()
1355 if (!is_highmem(zone)) in count_highmem_pages()
1358 mark_free_pages(zone); in count_highmem_pages()
1359 max_zone_pfn = zone_end_pfn(zone); in count_highmem_pages()
1360 for (pfn = zone->zone_start_pfn; pfn < max_zone_pfn; pfn++) in count_highmem_pages()
1361 if (saveable_highmem_page(zone, pfn)) in count_highmem_pages()
1367 static inline void *saveable_highmem_page(struct zone *z, unsigned long p) in saveable_highmem_page()
1383 static struct page *saveable_page(struct zone *zone, unsigned long pfn) in saveable_page() argument
1391 if (!page || page_zone(page) != zone) in saveable_page()
1417 struct zone *zone; in count_data_pages() local
1421 for_each_populated_zone(zone) { in count_data_pages()
1422 if (is_highmem(zone)) in count_data_pages()
1425 mark_free_pages(zone); in count_data_pages()
1426 max_zone_pfn = zone_end_pfn(zone); in count_data_pages()
1427 for (pfn = zone->zone_start_pfn; pfn < max_zone_pfn; pfn++) in count_data_pages()
1428 if (saveable_page(zone, pfn)) in count_data_pages()
1475 static inline struct page *page_is_saveable(struct zone *zone, unsigned long pfn) in page_is_saveable() argument
1477 return is_highmem(zone) ? in page_is_saveable()
1478 saveable_highmem_page(zone, pfn) : saveable_page(zone, pfn); in page_is_saveable()
1512 #define page_is_saveable(zone, pfn) saveable_page(zone, pfn) argument
1532 struct zone *zone; in copy_data_pages() local
1535 for_each_populated_zone(zone) { in copy_data_pages()
1538 mark_free_pages(zone); in copy_data_pages()
1539 max_zone_pfn = zone_end_pfn(zone); in copy_data_pages()
1540 for (pfn = zone->zone_start_pfn; pfn < max_zone_pfn; pfn++) in copy_data_pages()
1541 if (page_is_saveable(zone, pfn)) in copy_data_pages()
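
copy_data_pages() repeats the per-zone scan once more: it re-marks free pages, then for every pfn whose page is saveable (line 1541) sets the corresponding bit in the source bitmap; the copying phase that follows walks the source and destination bitmaps in lockstep. A sketch of such a lockstep walk, with a hypothetical next_pfn() iterator and END sentinel standing in for memory_bm_next_pfn() and its end-of-map marker:

#include <stdio.h>

#define END     (~0UL)  /* stand-in for the kernel's end-of-map value */

/* Hypothetical iterator: yields pfns from a sentinel-terminated list. */
static unsigned long next_pfn(const unsigned long **pos)
{
        return *(*pos)++;
}

int main(void)
{
        static const unsigned long orig[] = { 0x100, 0x2a0, 0x377, END };
        static const unsigned long copy[] = { 0x900, 0x901, 0x902, END };
        const unsigned long *o = orig, *c = copy;
        unsigned long pfn;

        /* Lockstep walk: each saveable pfn is paired with one copy frame. */
        while ((pfn = next_pfn(&o)) != END)
                printf("copy pfn 0x%lx -> frame 0x%lx\n", pfn, next_pfn(&c));
        return 0;
}
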
1834 struct zone *zone; in hibernate_preallocate_memory() local
1877 for_each_populated_zone(zone) { in hibernate_preallocate_memory()
1878 size += snapshot_additional_pages(zone); in hibernate_preallocate_memory()
1879 if (is_highmem(zone)) in hibernate_preallocate_memory()
1880 highmem += zone_page_state(zone, NR_FREE_PAGES); in hibernate_preallocate_memory()
1882 count += zone_page_state(zone, NR_FREE_PAGES); in hibernate_preallocate_memory()
2019 struct zone *zone; in enough_free_mem() local
2022 for_each_populated_zone(zone) in enough_free_mem()
2023 if (!is_highmem(zone)) in enough_free_mem()
2024 free += zone_page_state(zone, NR_FREE_PAGES); in enough_free_mem()