/openbmc/linux/include/linux/

  oom.h
      13: struct zonelist;
      31: struct zonelist *zonelist;                                  (struct member)

  mmzone.h
    1214: struct zonelist {                                           (struct definition)
    1279: struct zonelist node_zonelists[MAX_ZONELISTS];
    1654: static inline struct zoneref *first_zones_zonelist(struct zonelist *zonelist,   (in first_zones_zonelist(), argument)
    1658: return next_zones_zonelist(zonelist->_zonerefs,             (in first_zones_zonelist())
    1701: struct zonelist *zonelist;                                  (in movable_only_nodes(), local)
    1714: zonelist = &NODE_DATA(nid)->node_zonelists[ZONELIST_FALLBACK];   (in movable_only_nodes())
    1715: z = first_zones_zonelist(zonelist, ZONE_NORMAL, nodes);     (in movable_only_nodes())

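The mmzone.h hits above cover both the zonelist definition and the first_zones_zonelist() helper. A minimal sketch of how a caller resolves the first eligible zone from a node's fallback zonelist, modeled on the movable_only_nodes() hits (kernel context only; the name first_fallback_zone and the ZONE_NORMAL/nodemask choices are illustrative, not from the source):

    #include <linux/mmzone.h>
    #include <linux/nodemask.h>

    /* Illustrative helper: return the first zone that node `nid`'s fallback
     * zonelist offers at or below ZONE_NORMAL, optionally restricted to
     * the nodes in `nodes`. */
    static struct zone *first_fallback_zone(int nid, nodemask_t *nodes)
    {
    	struct zonelist *zonelist;
    	struct zoneref *z;

    	/* Each online node keeps its ordered zonelists in its pg_data_t. */
    	zonelist = &NODE_DATA(nid)->node_zonelists[ZONELIST_FALLBACK];
    	z = first_zones_zonelist(zonelist, ZONE_NORMAL, nodes);

    	return z->zone;		/* NULL if no zone in the list qualifies */
    }

As the 1658 hit shows, first_zones_zonelist() is just a wrapper that hands the first zoneref (zonelist->_zonerefs) to next_zones_zonelist().
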
  swap.h
     404: extern unsigned long try_to_free_pages(struct zonelist *zonelist, int order,

  gfp.h
     165: static inline struct zonelist *node_zonelist(int nid, gfp_t flags)   (in node_zonelist())

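node_zonelist() is how most callers in this listing (slab.c, slub.c, hugetlb.c, sysrq.c) obtain the zonelist to walk: it selects one of the node's node_zonelists[] based on the gfp flags. A hedged usage sketch (walk_node_zonelist is an illustrative name, not a kernel symbol):

    #include <linux/gfp.h>
    #include <linux/mmzone.h>

    /* Illustrative: visit every zone that an allocation with `gfp_mask`
     * starting on node `nid` could fall back to, in zonelist order. */
    static void walk_node_zonelist(int nid, gfp_t gfp_mask)
    {
    	struct zonelist *zonelist = node_zonelist(nid, gfp_mask);
    	struct zoneref *z;
    	struct zone *zone;

    	/* gfp_zone() caps the walk at the highest zone type the mask allows. */
    	for_each_zone_zonelist(zone, z, zonelist, gfp_zone(gfp_mask)) {
    		/* inspect or try to allocate from `zone` here */
    	}
    }

On NUMA builds, __GFP_THISNODE makes node_zonelist() return the node's no-fallback zonelist instead of the fallback one.
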
/openbmc/linux/Documentation/translations/zh_CN/mm/

  numa.rst  (Chinese translation; hits rendered in English)
      57: ... builds an ordered "zonelist" [from one or more of them].  The zonelist specifies which zones/nodes to visit when the selected zone/node cannot satisfy the allocation request ...
      63: ... represent relatively scarce resources.  Linux chose a default Node ordered zonelist.  This means that, when falling back in order of NUMA dist[ance] ...

/openbmc/linux/Documentation/mm/

  numa.rst
      74: an ordered "zonelist".  A zonelist specifies the zones/nodes to visit when a
      84: a default Node ordered zonelist.  This means it tries to fallback to other zones
      89: Linux will attempt to allocate from the first node in the appropriate zonelist
      92: nodes' zones in the selected zonelist looking for the first zone in the list
     120: zonelist--will not be the node itself.  Rather, it will be the node that the

/openbmc/linux/mm/

  page_alloc.c
    1943: struct zonelist *zonelist = ac->zonelist;                   (in unreserve_highatomic_pageblock(), local)
    1951: for_each_zone_zonelist_nodemask(zone, z, zonelist, ac->highest_zoneidx,   (in unreserve_highatomic_pageblock())
    3066: * get_page_from_freelist goes through the zonelist trying to allocate
    3081: * Scan zonelist, looking for a zone with enough free.       (in get_page_from_freelist())
    3297: .zonelist = ac->zonelist,                                   (in __alloc_pages_may_oom())
    3318: * Go through the zonelist yet one more time, keep very high watermark   (in __alloc_pages_may_oom())
    3539: for_each_zone_zonelist_nodemask(zone, z, ac->zonelist,      (in should_compact_retry())
    4791: struct zonelist *zonelist = node_zonelist(numa_node_id(), GFP_KERNEL);   (in nr_free_zone_pages(), local)
    6319: struct zonelist *zonelist;                                  (in alloc_contig_pages(), local)
     ...  (further matches not shown)
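
The get_page_from_freelist() hits show the core pattern: scan the zonelist for the first zone whose watermark still holds. A simplified, illustrative sketch of that shape (pick_zone is a made-up name; the real function applies many more checks):

    #include <linux/mmzone.h>
    #include <linux/nodemask.h>

    /* Illustrative only: return the first zone in `zonelist` (restricted by
     * `highest_zoneidx`/`nodemask`) whose low watermark would still be met
     * by an order-`order` allocation. */
    static struct zone *pick_zone(struct zonelist *zonelist,
    			      enum zone_type highest_zoneidx,
    			      nodemask_t *nodemask, unsigned int order)
    {
    	struct zoneref *z;
    	struct zone *zone;

    	for_each_zone_zonelist_nodemask(zone, z, zonelist, highest_zoneidx,
    					nodemask) {
    		if (zone_watermark_ok(zone, order, low_wmark_pages(zone),
    				      highest_zoneidx, 0))
    			return zone;
    	}
    	return NULL;	/* caller falls back to reclaim/compaction/OOM */
    }
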
  vmscan.c
    6721: static void shrink_zones(struct zonelist *zonelist, struct scan_control *sc)   (in shrink_zones(), argument)
    6742: for_each_zone_zonelist_nodemask(zone, z, zonelist,          (in shrink_zones())
    6844: static unsigned long do_try_to_free_pages(struct zonelist *zonelist,   (in do_try_to_free_pages(), argument)
    6862: shrink_zones(zonelist, sc);                                 (in do_try_to_free_pages())
    6879: for_each_zone_zonelist_nodemask(zone, z, zonelist, sc->reclaim_idx,   (in do_try_to_free_pages())
    6982: static bool throttle_direct_reclaim(gfp_t gfp_mask, struct zonelist *zonelist,   (in throttle_direct_reclaim(), argument)
    7020: for_each_zone_zonelist_nodemask(zone, z, zonelist,          (in throttle_direct_reclaim())
    7062: unsigned long try_to_free_pages(struct zonelist *zonelist, int order,   (in try_to_free_pages(), argument)
    7091: if (throttle_direct_reclaim(sc.gfp_mask, zonelist, nodemask))   (in try_to_free_pages())
    7097: nr_reclaimed = do_try_to_free_pages(zonelist, &sc);         (in try_to_free_pages())
     ...  (further matches not shown)

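try_to_free_pages() is the direct-reclaim entry point declared in the swap.h hit above; it is handed the same zonelist the failed allocation was walking. A hedged sketch of the calling shape (reclaim_for_alloc is an illustrative wrapper, not a kernel function; in the real kernel the call is made from the __alloc_pages slow path):

    #include <linux/swap.h>
    #include <linux/gfp.h>
    #include <linux/nodemask.h>

    /* Illustrative: run direct reclaim against the zonelist an allocation
     * on node `nid` would use, returning the number of pages reclaimed. */
    static unsigned long reclaim_for_alloc(int nid, gfp_t gfp_mask,
    				       int order, nodemask_t *nodemask)
    {
    	struct zonelist *zonelist = node_zonelist(nid, gfp_mask);

    	/* May throttle first, then walks the zonelist via shrink_zones(). */
    	return try_to_free_pages(zonelist, order, gfp_mask, nodemask);
    }
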
  oom_kill.c
     272: if (!oc->zonelist)                                          (in constrained_alloc())
     296: for_each_zone_zonelist_nodemask(zone, z, oc->zonelist,      (in constrained_alloc())

  internal.h
     288: struct zonelist *zonelist;                                  (struct member)

  mm_init.c
      51: struct zonelist *zonelist;                                  (in mminit_verify_zonelist(), local)
      60: zonelist = &pgdat->node_zonelists[listid];                  (in mminit_verify_zonelist())
      71: for_each_zone_zonelist(zone, z, zonelist, zoneid)           (in mminit_verify_zonelist())

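mminit_verify_zonelist() walks the freshly built zonelists and prints them for debugging. A small sketch in the same spirit (dump_node_zonelists is an illustrative name; the real function iterates every list id and zone id):

    #include <linux/mmzone.h>
    #include <linux/printk.h>

    /* Illustrative: print every zone reachable through a node's fallback
     * zonelist, in the order the allocator would try them. */
    static void dump_node_zonelists(int nid)
    {
    	struct zonelist *zonelist =
    		&NODE_DATA(nid)->node_zonelists[ZONELIST_FALLBACK];
    	struct zoneref *z;
    	struct zone *zone;

    	for_each_zone_zonelist(zone, z, zonelist, MAX_NR_ZONES - 1)
    		pr_info("node %d fallback: %s on node %d\n",
    			nid, zone->name, zone_to_nid(zone));
    }
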
  mempolicy.c
    1935: struct zonelist *zonelist;                                  (in mempolicy_slab_node(), local)
    1937: zonelist = &NODE_DATA(node)->node_zonelists[ZONELIST_FALLBACK];   (in mempolicy_slab_node())
    1938: z = first_zones_zonelist(zonelist, highest_zoneidx,         (in mempolicy_slab_node())

  slab.c
    3052: struct zonelist *zonelist;                                  (in fallback_alloc(), local)
    3066: zonelist = node_zonelist(mempolicy_slab_node(), flags);     (in fallback_alloc())
    3073: for_each_zone_zonelist(zone, z, zonelist, highest_zoneidx) {   (in fallback_alloc())

  compaction.c
    2360: for_each_zone_zonelist_nodemask(zone, z, ac->zonelist,      (in compaction_zonelist_suitable())
    2697: for_each_zone_zonelist_nodemask(zone, z, ac->zonelist,      (in try_to_compact_pages())

  slub.c
    2343: struct zonelist *zonelist;                                  (in get_any_partial(), local)
    2374: zonelist = node_zonelist(mempolicy_slab_node(), pc->flags); (in get_any_partial())
    2375: for_each_zone_zonelist(zone, z, zonelist, highest_zoneidx) {   (in get_any_partial())

  hugetlb.c
    1359: struct zonelist *zonelist;                                  (in dequeue_hugetlb_folio_nodemask(), local)
    1364: zonelist = node_zonelist(nid, gfp_mask);                    (in dequeue_hugetlb_folio_nodemask())
    1368: for_each_zone_zonelist_nodemask(zone, z, zonelist, gfp_zone(gfp_mask), nmask) {   (in dequeue_hugetlb_folio_nodemask())

  memcontrol.c
    1727: .zonelist = NULL,                                           (in mem_cgroup_out_of_memory())

/openbmc/linux/drivers/tty/

  sysrq.c
     389: .zonelist = node_zonelist(first_memory_node, gfp_mask),     (in moom_callback())

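The sysrq hit initializes struct oom_control (the zonelist member shown by the oom.h hit at the top of this listing) before invoking the OOM killer. A hedged sketch of that setup (trigger_manual_oom is an illustrative name; the field list is abridged):

    #include <linux/oom.h>
    #include <linux/gfp.h>
    #include <linux/mutex.h>
    #include <linux/nodemask.h>

    /* Illustrative: force an OOM-killer pass, as sysrq's moom_callback() does. */
    static void trigger_manual_oom(void)
    {
    	const gfp_t gfp_mask = GFP_KERNEL;
    	struct oom_control oc = {
    		.zonelist = node_zonelist(first_memory_node, gfp_mask),
    		.nodemask = NULL,
    		.gfp_mask = gfp_mask,
    		.order	  = -1,		/* -1: explicitly requested OOM */
    	};

    	/* out_of_memory() expects oom_lock to be held. */
    	if (mutex_trylock(&oom_lock)) {
    		out_of_memory(&oc);
    		mutex_unlock(&oom_lock);
    	}
    }
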
/openbmc/linux/Documentation/admin-guide/sysctl/

  vm.rst
     648: In non-NUMA case, a zonelist for GFP_KERNEL is ordered as following.
     654: Assume 2 node NUMA and below is zonelist of Node(0)'s GFP_KERNEL::

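These lines come from the numa_zonelist_order section of vm.rst. Paraphrasing that section from memory (assuming a node that has ZONE_NORMAL and ZONE_DMA), the orderings it contrasts look roughly like:

    Non-NUMA:    Node(0) ZONE_NORMAL -> Node(0) ZONE_DMA

    2-node NUMA, Node(0)'s GFP_KERNEL zonelist:
      Node order: Node(0) ZONE_NORMAL -> Node(0) ZONE_DMA -> Node(1) ZONE_NORMAL
      Zone order: Node(0) ZONE_NORMAL -> Node(1) ZONE_NORMAL -> Node(0) ZONE_DMA

Node order keeps all of the local node's zones ahead of remote nodes; zone order keeps scarce low zones such as ZONE_DMA at the back even if that means going off-node first.
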
/openbmc/linux/Documentation/admin-guide/mm/

  numa_memory_policy.rst
     234: node zonelist.

/openbmc/linux/Documentation/admin-guide/

  kernel-parameters.txt
    4031: numa_zonelist_order= [KNL, BOOT] Select zonelist order for NUMA.