module.c: 7ae9fb1b7ecbb5d85d07857943f677fd1a559b18 -> 8339f7d8e178d9c933f437d14be0a5fd1359f53d
 // SPDX-License-Identifier: GPL-2.0-only
 /*
  * AArch64 loadable module support.
  *
  * Copyright (C) 2012 ARM Limited
  *
  * Author: Will Deacon <will.deacon@arm.com>
  */

--- 18 unchanged lines hidden ---

 	u64 module_alloc_end = module_alloc_base + MODULES_VSIZE;
 	gfp_t gfp_mask = GFP_KERNEL;
 	void *p;

 	/* Silence the initial allocation */
 	if (IS_ENABLED(CONFIG_ARM64_MODULE_PLTS))
 		gfp_mask |= __GFP_NOWARN;

-	if (IS_ENABLED(CONFIG_KASAN_GENERIC) ||
-	    IS_ENABLED(CONFIG_KASAN_SW_TAGS))
-		/* don't exceed the static module region - see below */
-		module_alloc_end = MODULES_END;
-
 	p = __vmalloc_node_range(size, MODULE_ALIGN, module_alloc_base,
-				 module_alloc_end, gfp_mask, PAGE_KERNEL, VM_DEFER_KMEMLEAK,
+				 module_alloc_end, gfp_mask, PAGE_KERNEL, 0,
 				 NUMA_NO_NODE, __builtin_return_address(0));

-	if (!p && IS_ENABLED(CONFIG_ARM64_MODULE_PLTS) &&
-	    (IS_ENABLED(CONFIG_KASAN_VMALLOC) ||
-	     (!IS_ENABLED(CONFIG_KASAN_GENERIC) &&
-	      !IS_ENABLED(CONFIG_KASAN_SW_TAGS))))
-		/*
-		 * KASAN without KASAN_VMALLOC can only deal with module
-		 * allocations being served from the reserved module region,
-		 * since the remainder of the vmalloc region is already
-		 * backed by zero shadow pages, and punching holes into it
-		 * is non-trivial. Since the module region is not randomized
-		 * when KASAN is enabled without KASAN_VMALLOC, it is even
-		 * less likely that the module region gets exhausted, so we
-		 * can simply omit this fallback in that case.
-		 */
+	if (!p && IS_ENABLED(CONFIG_ARM64_MODULE_PLTS)) {
 		p = __vmalloc_node_range(size, MODULE_ALIGN, module_alloc_base,
 					 module_alloc_base + SZ_2G, GFP_KERNEL,
 					 PAGE_KERNEL, 0, NUMA_NO_NODE,
 					 __builtin_return_address(0));
+	}

 	if (p && (kasan_alloc_module_shadow(p, size, gfp_mask) < 0)) {
 		vfree(p);
 		return NULL;
 	}

 	/* Memory is intended to be executable, reset the pointer tag. */
 	return kasan_reset_tag(p);

--- 454 unchanged lines hidden ---
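For readability, the resulting module_alloc() is consolidated below from the hunks above. This is a sketch rather than the verbatim file: the function signature and the include list are assumed (the diff view hides the surrounding lines), and module_alloc_base is initialized elsewhere in the arm64 code. The net effect of the change is that the first attempt no longer clamps module_alloc_end for KASAN (and drops the VM_DEFER_KMEMLEAK flag), and the 2 GiB fallback now applies whenever CONFIG_ARM64_MODULE_PLTS is enabled, with no KASAN conditions.

/*
 * Sketch of module_alloc() as it reads after this change, consolidated
 * from the hunks above. The signature and includes are assumed; the
 * diff view does not show them.
 */
#include <linux/gfp.h>
#include <linux/kasan.h>
#include <linux/moduleloader.h>
#include <linux/sizes.h>
#include <linux/vmalloc.h>

void *module_alloc(unsigned long size)
{
	u64 module_alloc_end = module_alloc_base + MODULES_VSIZE;
	gfp_t gfp_mask = GFP_KERNEL;
	void *p;

	/* Silence the initial allocation; with PLTs we can retry below. */
	if (IS_ENABLED(CONFIG_ARM64_MODULE_PLTS))
		gfp_mask |= __GFP_NOWARN;

	/* First attempt: the default module region above module_alloc_base. */
	p = __vmalloc_node_range(size, MODULE_ALIGN, module_alloc_base,
				 module_alloc_end, gfp_mask, PAGE_KERNEL, 0,
				 NUMA_NO_NODE, __builtin_return_address(0));

	/*
	 * Fallback: retry in a 2 GiB window above the base. Module PLTs
	 * bridge calls that exceed direct branch range, so the wider
	 * window is only used when CONFIG_ARM64_MODULE_PLTS is enabled.
	 */
	if (!p && IS_ENABLED(CONFIG_ARM64_MODULE_PLTS)) {
		p = __vmalloc_node_range(size, MODULE_ALIGN, module_alloc_base,
					 module_alloc_base + SZ_2G, GFP_KERNEL,
					 PAGE_KERNEL, 0, NUMA_NO_NODE,
					 __builtin_return_address(0));
	}

	/* A failed shadow allocation fails the whole module allocation. */
	if (p && (kasan_alloc_module_shadow(p, size, gfp_mask) < 0)) {
		vfree(p);
		return NULL;
	}

	/* Memory is intended to be executable, reset the pointer tag. */
	return kasan_reset_tag(p);
}

Note that the SZ_2G bound on the fallback window is carried over unchanged from the old code; only its guard condition is simplified by this commit.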