--- vmalloc.c (23689e91fb22c15b84ac6c22ad9942039792f3af)
+++ vmalloc.c (f6e39794f4b6da7ca9b77f2f9ad11fd6f0ac83e5)
 // SPDX-License-Identifier: GPL-2.0-only
 /*
  * Copyright (C) 1993 Linus Torvalds
  * Support of BIGMEM added by Gerhard Wichert, Siemens AG, July 1999
  * SMP-safe vmalloc/vfree/ioremap, Tigran Aivazian <tigran@veritas.com>, May 2000
  * Major rework to support vmap/vunmap, Christoph Hellwig, SGI, August 2002
  * Numa awareness, Christoph Lameter, SGI, June 2005
  * Improving global KVA allocator, Uladzislau Rezki, Sony, May 2019

--- 2228 unchanged lines hidden ---

 		return NULL;
 	}

 	/*
 	 * Mark the pages as accessible, now that they are mapped.
 	 * With hardware tag-based KASAN, marking is skipped for
 	 * non-VM_ALLOC mappings, see __kasan_unpoison_vmalloc().
 	 */
-	mem = kasan_unpoison_vmalloc(mem, size, KASAN_VMALLOC_NONE);
+	mem = kasan_unpoison_vmalloc(mem, size, KASAN_VMALLOC_PROT_NORMAL);

 	return mem;
 }
 EXPORT_SYMBOL(vm_map_ram);

 static struct vm_struct *vmlist __initdata;

 static inline unsigned int vm_area_page_order(struct vm_struct *vm)

--- 222 unchanged lines hidden ---
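The hunk above, in vm_map_ram(), is the first of several sites where KASAN_VMALLOC_NONE is replaced by KASAN_VMALLOC_PROT_NORMAL: the call now records that the mapping uses ordinary PAGE_KERNEL protections, which is the information the tag-based KASAN modes need when deciding whether an area may be tagged, while KASAN_VMALLOC_NONE is left to mean "no information at all". As a reading aid, a minimal sketch of the flag set after this patch; the authoritative definitions live in include/linux/kasan.h, and the exact bit values shown here are an assumption:

	/* Sketch of kasan_vmalloc_flags_t after this patch (assumed values;
	 * see include/linux/kasan.h for the real definitions). */
	typedef unsigned int kasan_vmalloc_flags_t;

	#define KASAN_VMALLOC_NONE		0x00u	/* no information about the mapping */
	#define KASAN_VMALLOC_INIT		0x01u	/* KASAN must also zero the memory */
	#define KASAN_VMALLOC_VM_ALLOC		0x02u	/* backed by vmalloc() itself (VM_ALLOC) */
	#define KASAN_VMALLOC_PROT_NORMAL	0x04u	/* mapped with PAGE_KERNEL protections */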

 	 * best-effort approach, as they can be mapped outside of vmalloc code.
 	 * For VM_ALLOC mappings, the pages are marked as accessible after
 	 * getting mapped in __vmalloc_node_range().
 	 * With hardware tag-based KASAN, marking is skipped for
 	 * non-VM_ALLOC mappings, see __kasan_unpoison_vmalloc().
 	 */
 	if (!(flags & VM_ALLOC))
 		area->addr = kasan_unpoison_vmalloc(area->addr, requested_size,
-						    KASAN_VMALLOC_NONE);
+						    KASAN_VMALLOC_PROT_NORMAL);

 	return area;
 }

 struct vm_struct *__get_vm_area_caller(unsigned long size, unsigned long flags,
 				       unsigned long start, unsigned long end,
 				       const void *caller)
 {

--- 593 unchanged lines hidden ---
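The same substitution in __get_vm_area_node() above covers the non-VM_ALLOC path: areas whose pages vmalloc does not allocate itself, such as a vmap() of caller-supplied pages, are unpoisoned here on a best-effort basis, as the comment notes. A hypothetical caller for orientation (the pages array is assumed to be filled in elsewhere):

	/* Hypothetical vmap() user that reaches the !VM_ALLOC branch above. */
	struct page *pages[4];	/* obtained by the caller, e.g. via alloc_page() */

	void *va = vmap(pages, ARRAY_SIZE(pages), VM_MAP, PAGE_KERNEL);
	if (va) {
		/* va was unpoisoned with KASAN_VMALLOC_PROT_NORMAL above */
		memset(va, 0, 4 * PAGE_SIZE);
		vunmap(va);
	}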

  */
 void *__vmalloc_node_range(unsigned long size, unsigned long align,
 			unsigned long start, unsigned long end, gfp_t gfp_mask,
 			pgprot_t prot, unsigned long vm_flags, int node,
 			const void *caller)
 {
 	struct vm_struct *area;
 	void *ret;
-	kasan_vmalloc_flags_t kasan_flags;
+	kasan_vmalloc_flags_t kasan_flags = KASAN_VMALLOC_NONE;
 	unsigned long real_size = size;
 	unsigned long real_align = align;
 	unsigned int shift = PAGE_SHIFT;

 	if (WARN_ON_ONCE(!size))
 		return NULL;

 	if ((size >> PAGE_SHIFT) > totalram_pages()) {

--- 36 unchanged lines hidden ---

 			real_size, (nofail) ? ". Retrying." : "");
 		if (nofail) {
 			schedule_timeout_uninterruptible(1);
 			goto again;
 		}
 		goto fail;
 	}

-	/* Prepare arguments for __vmalloc_area_node(). */
-	if (kasan_hw_tags_enabled() &&
-	    pgprot_val(prot) == pgprot_val(PAGE_KERNEL)) {
-		/*
-		 * Modify protection bits to allow tagging.
-		 * This must be done before mapping in __vmalloc_area_node().
-		 */
-		prot = arch_vmap_pgprot_tagged(prot);
-
-		/*
-		 * Skip page_alloc poisoning and zeroing for physical pages
-		 * backing VM_ALLOC mapping. Memory is instead poisoned and
-		 * zeroed by kasan_unpoison_vmalloc().
-		 */
-		gfp_mask |= __GFP_SKIP_KASAN_UNPOISON | __GFP_SKIP_ZERO;
+	/*
+	 * Prepare arguments for __vmalloc_area_node() and
+	 * kasan_unpoison_vmalloc().
+	 */
+	if (pgprot_val(prot) == pgprot_val(PAGE_KERNEL)) {
+		if (kasan_hw_tags_enabled()) {
+			/*
+			 * Modify protection bits to allow tagging.
+			 * This must be done before mapping.
+			 */
+			prot = arch_vmap_pgprot_tagged(prot);
+
+			/*
+			 * Skip page_alloc poisoning and zeroing for physical
+			 * pages backing VM_ALLOC mapping. Memory is instead
+			 * poisoned and zeroed by kasan_unpoison_vmalloc().
+			 */
+			gfp_mask |= __GFP_SKIP_KASAN_UNPOISON | __GFP_SKIP_ZERO;
+		}
+
+		/* Take note that the mapping is PAGE_KERNEL. */
+		kasan_flags |= KASAN_VMALLOC_PROT_NORMAL;
 	}

 	/* Allocate physical pages and map them into vmalloc space. */
 	ret = __vmalloc_area_node(area, gfp_mask, prot, shift, node);
 	if (!ret)
 		goto fail;

 	/*
 	 * Mark the pages as accessible, now that they are mapped.
 	 * The init condition should match the one in post_alloc_hook()
 	 * (except for the should_skip_init() check) to make sure that memory
 	 * is initialized under the same conditions regardless of the enabled
 	 * KASAN mode.
+	 * Tag-based KASAN modes only assign tags to normal non-executable
+	 * allocations, see __kasan_unpoison_vmalloc().
 	 */
-	kasan_flags = KASAN_VMALLOC_VM_ALLOC;
+	kasan_flags |= KASAN_VMALLOC_VM_ALLOC;
 	if (!want_init_on_free() && want_init_on_alloc(gfp_mask))
 		kasan_flags |= KASAN_VMALLOC_INIT;
+	/* KASAN_VMALLOC_PROT_NORMAL already set if required. */
 	area->addr = kasan_unpoison_vmalloc(area->addr, real_size, kasan_flags);

 	/*
 	 * In this function, newly allocated vm_struct has VM_UNINITIALIZED
 	 * flag. It means that vm_struct is not fully initialized.
 	 * Now, it is fully initialized, so remove this flag here.
 	 */
 	clear_vm_uninitialized_flag(area);
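Net effect in __vmalloc_node_range(): kasan_flags now starts at KASAN_VMALLOC_NONE, the PAGE_KERNEL check is hoisted out of the kasan_hw_tags_enabled() test so KASAN_VMALLOC_PROT_NORMAL is recorded for every normal-protection mapping, and the allocation-time bits are ORed in afterwards. Distilled from the two hunks above (a reading aid, not a verbatim excerpt):

	kasan_vmalloc_flags_t kasan_flags = KASAN_VMALLOC_NONE;

	if (pgprot_val(prot) == pgprot_val(PAGE_KERNEL)) {
		if (kasan_hw_tags_enabled()) {
			/* HW_TAGS will tag the mapping; let KASAN, not
			 * page_alloc, do the poisoning and zeroing. */
			prot = arch_vmap_pgprot_tagged(prot);
			gfp_mask |= __GFP_SKIP_KASAN_UNPOISON | __GFP_SKIP_ZERO;
		}
		kasan_flags |= KASAN_VMALLOC_PROT_NORMAL;	/* normal protections */
	}

	/* ... pages allocated and mapped by __vmalloc_area_node() ... */

	kasan_flags |= KASAN_VMALLOC_VM_ALLOC;		/* a real vmalloc() area */
	if (!want_init_on_free() && want_init_on_alloc(gfp_mask))
		kasan_flags |= KASAN_VMALLOC_INIT;	/* zero via KASAN */
	area->addr = kasan_unpoison_vmalloc(area->addr, real_size, kasan_flags);

So only normal, non-executable VM_ALLOC mappings end up with both VM_ALLOC and PROT_NORMAL set, which is the combination the tag-based modes require before assigning tags, per the comment added above.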

--- 689 unchanged lines hidden ---

 	/*
 	 * Mark allocated areas as accessible. Do it now as a best-effort
 	 * approach, as they can be mapped outside of vmalloc code.
 	 * With hardware tag-based KASAN, marking is skipped for
 	 * non-VM_ALLOC mappings, see __kasan_unpoison_vmalloc().
 	 */
 	for (area = 0; area < nr_vms; area++)
 		vms[area]->addr = kasan_unpoison_vmalloc(vms[area]->addr,
-					vms[area]->size,
-					KASAN_VMALLOC_NONE);
+					vms[area]->size, KASAN_VMALLOC_PROT_NORMAL);

 	kfree(vas);
 	return vms;

 recovery:
 	/*
 	 * Remove previously allocated areas. There is no
 	 * need in removing these areas from the busy tree,

--- 247 unchanged lines hidden ---
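The last hunk applies the same change to pcpu_get_vm_areas(), which hands out vmalloc-space areas that the percpu allocator then maps itself; unpoisoning with KASAN_VMALLOC_PROT_NORMAL is again best-effort, since the mapping happens outside vmalloc code. A hypothetical two-group request, loosely mirroring how mm/percpu-vm.c uses this API (the offsets and sizes are invented for illustration):

	/* Hypothetical caller; values chosen only to show the shape of the API. */
	const unsigned long offsets[] = { 0, SZ_1M };
	const size_t sizes[] = { 8 * PAGE_SIZE, 8 * PAGE_SIZE };
	struct vm_struct **vms;

	vms = pcpu_get_vm_areas(offsets, sizes, 2, PAGE_SIZE);
	if (vms) {
		/* each vms[i]->addr was unpoisoned with KASAN_VMALLOC_PROT_NORMAL */
		pcpu_free_vm_areas(vms, 2);
	}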