```diff
--- vmalloc.c (b7d90e7a5ea8d64e668d5685925900d33d3884d5)
+++ vmalloc.c (c00b6b9610991c042ff4c3153daaa3ea8522c210)
 // SPDX-License-Identifier: GPL-2.0-only
 /*
  * Copyright (C) 1993 Linus Torvalds
  * Support of BIGMEM added by Gerhard Wichert, Siemens AG, July 1999
  * SMP-safe vmalloc/vfree/ioremap, Tigran Aivazian <tigran@veritas.com>, May 2000
  * Major rework to support vmap/vunmap, Christoph Hellwig, SGI, August 2002
  * Numa awareness, Christoph Lameter, SGI, June 2005
  * Improving global KVA allocator, Uladzislau Rezki, Sony, May 2019

 --- 2829 unchanged lines hidden ---

 	int i;

 	/*
 	 * For order-0 pages we make use of bulk allocator, if
 	 * the page array is partly or not at all populated due
 	 * to fails, fallback to a single page allocator that is
 	 * more permissive.
 	 */
-	if (!order && nid != NUMA_NO_NODE) {
+	if (!order) {
 		while (nr_allocated < nr_pages) {
 			unsigned int nr, nr_pages_request;

 			/*
 			 * A maximum allowed request is hard-coded and is 100
 			 * pages per call. That is done in order to prevent a
 			 * long preemption off scenario in the bulk-allocator
 			 * so the range is [1:100].
 			 */
 			nr_pages_request = min(100U, nr_pages - nr_allocated);

-			nr = alloc_pages_bulk_array_node(gfp, nid,
-				nr_pages_request, pages + nr_allocated);
+			/* memory allocation should consider mempolicy, we can't
+			 * wrongly use nearest node when nid == NUMA_NO_NODE,
+			 * otherwise memory may be allocated in only one node,
+			 * but mempolcy want to alloc memory by interleaving.
+			 */
+			if (IS_ENABLED(CONFIG_NUMA) && nid == NUMA_NO_NODE)
+				nr = alloc_pages_bulk_array_mempolicy(gfp,
+							nr_pages_request,
+							pages + nr_allocated);
+
+			else
+				nr = alloc_pages_bulk_array_node(gfp, nid,
+							nr_pages_request,
+							pages + nr_allocated);

 			nr_allocated += nr;
 			cond_resched();

 			/*
 			 * If zero or pages were obtained partly,
 			 * fallback to a single page allocator.
 			 */
 			if (nr != nr_pages_request)
 				break;
 		}
-	} else if (order)
+	} else
 		/*
 		 * Compound pages required for remap_vmalloc_page if
 		 * high-order pages.
 		 */
 		gfp |= __GFP_COMP;

 	/* High-order pages or fallback path if "bulk" fails. */

 --- 1126 unchanged lines hidden ---
```
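
In short: the old code skipped the bulk path whenever `nid == NUMA_NO_NODE`, while the new code keeps the bulk path and selects `alloc_pages_bulk_array_mempolicy()` in that case, so a task's NUMA mempolicy (e.g. interleave) is honored instead of every page coming from the nearest node. The chunk-and-fallback shape of the loop is unchanged. As a minimal userspace sketch of that shape — not kernel code: `bulk_alloc()` here is a hypothetical stand-in for the `alloc_pages_bulk_array_*()` family, and its random early return simulates partial bulk failure:

```c
/* Minimal userspace analog of the order-0 bulk path above.  bulk_alloc()
 * is a hypothetical stand-in for the kernel's alloc_pages_bulk_array_*()
 * calls; nothing here is a real kernel API.
 */
#include <stdio.h>
#include <stdlib.h>

#define PAGE_SZ 4096

/* Populate up to "want" slots of pages[], return how many succeeded. */
static unsigned int bulk_alloc(unsigned int want, void **pages)
{
	unsigned int i;

	for (i = 0; i < want; i++) {
		if (rand() % 64 == 0)	/* simulated transient bulk failure */
			break;
		pages[i] = malloc(PAGE_SZ);
		if (!pages[i])
			break;
	}
	return i;
}

int main(void)
{
	enum { NR_PAGES = 1024 };
	static void *pages[NR_PAGES];
	unsigned int nr_allocated = 0;

	while (nr_allocated < NR_PAGES) {
		/* Cap each request at 100 pages, mirroring the [1:100]
		 * range the kernel uses to bound the time spent inside
		 * one bulk call. */
		unsigned int nr, nr_pages_request = NR_PAGES - nr_allocated;

		if (nr_pages_request > 100)
			nr_pages_request = 100;

		nr = bulk_alloc(nr_pages_request, pages + nr_allocated);
		nr_allocated += nr;

		/* Short delivery: stop and take the single-page path. */
		if (nr != nr_pages_request)
			break;
	}

	/* Fallback, one page at a time (the "more permissive" path). */
	while (nr_allocated < NR_PAGES) {
		void *p = malloc(PAGE_SZ);

		if (!p)
			break;
		pages[nr_allocated++] = p;
	}

	printf("allocated %u of %u pages\n", nr_allocated, NR_PAGES);

	while (nr_allocated)
		free(pages[--nr_allocated]);
	return 0;
}
```

The design point the kernel comments circle around: the bulk allocator is fast but allowed to under-deliver, so the caller, not the allocator, owns the retry and fallback decision.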
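For the mempolicy side of the change, it may help to see where such a policy comes from. Below is a sketch, assuming libnuma is installed (link with `-lnuma`), of a task setting an interleave policy; this is the situation the new `alloc_pages_bulk_array_mempolicy()` call is meant to respect, where the old code would have allocated all order-0 vmalloc pages from one node. Nothing here calls into vmalloc directly.

```c
/* Sketch, assuming libnuma (link with -lnuma): set the interleave
 * mempolicy whose handling the patch fixes.  The policy applies to
 * later allocations made on this task's behalf.
 */
#include <numa.h>
#include <stdio.h>

int main(void)
{
	if (numa_available() < 0) {
		fprintf(stderr, "NUMA not supported on this system\n");
		return 1;
	}

	/* Interleave this task's allocations across all allowed nodes.
	 * Before the patch, the kernel's order-0 vmalloc bulk path
	 * ignored this when nid == NUMA_NO_NODE and used the nearest
	 * node; after it, the bulk path observes the policy. */
	numa_set_interleave_mask(numa_all_nodes_ptr);

	printf("interleave policy set across %d configured node(s)\n",
	       numa_num_configured_nodes());
	return 0;
}
```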