// SPDX-License-Identifier: GPL-2.0-only
/*
 * Based on arch/arm/mm/mmap.c
 *
 * Copyright (C) 2012 ARM Ltd.
 */

#include <linux/elf.h>
#include <linux/fs.h>
#include <linux/memblock.h>
#include <linux/mm.h>
#include <linux/mman.h>
#include <linux/export.h>
#include <linux/shm.h>
#include <linux/sched/signal.h>
#include <linux/sched/mm.h>
#include <linux/io.h>
#include <linux/personality.h>
#include <linux/random.h>

#include <asm/cputype.h>

/*
 * Leave enough space between the mmap area and the stack to honour ulimit in
 * the face of randomisation.
 */
#define MIN_GAP (SZ_128M)
#define MAX_GAP (STACK_TOP/6*5)

/* Decide whether the legacy (bottom-up) mmap layout should be used. */
static int mmap_is_legacy(struct rlimit *rlim_stack)
{
	if (current->personality & ADDR_COMPAT_LAYOUT)
		return 1;

	if (rlim_stack->rlim_cur == RLIM_INFINITY)
		return 1;

	return sysctl_legacy_va_layout;
}

/* Generate a page-aligned random offset used to randomise the mmap base. */
unsigned long arch_mmap_rnd(void)
{
	unsigned long rnd;

#ifdef CONFIG_COMPAT
	if (test_thread_flag(TIF_32BIT))
		rnd = get_random_long() & ((1UL << mmap_rnd_compat_bits) - 1);
	else
#endif
		rnd = get_random_long() & ((1UL << mmap_rnd_bits) - 1);
	return rnd << PAGE_SHIFT;
}

static unsigned long mmap_base(unsigned long rnd, struct rlimit *rlim_stack)
{
	unsigned long gap = rlim_stack->rlim_cur;
	unsigned long pad = (STACK_RND_MASK << PAGE_SHIFT) + stack_guard_gap;

	/* Values close to RLIM_INFINITY can overflow. */
	if (gap + pad > gap)
		gap += pad;

	if (gap < MIN_GAP)
		gap = MIN_GAP;
	else if (gap > MAX_GAP)
		gap = MAX_GAP;

	return PAGE_ALIGN(STACK_TOP - gap - rnd);
}

/*
 * This function, called very early during the creation of a new process VM
 * image, sets up which VM layout function to use:
 */
void arch_pick_mmap_layout(struct mm_struct *mm, struct rlimit *rlim_stack)
{
	unsigned long random_factor = 0UL;

	if (current->flags & PF_RANDOMIZE)
		random_factor = arch_mmap_rnd();

	/*
	 * Fall back to the standard layout if the personality bit is set, or
	 * if the expected stack growth is unlimited:
	 */
	if (mmap_is_legacy(rlim_stack)) {
		mm->mmap_base = TASK_UNMAPPED_BASE + random_factor;
		mm->get_unmapped_area = arch_get_unmapped_area;
	} else {
		mm->mmap_base = mmap_base(random_factor, rlim_stack);
		mm->get_unmapped_area = arch_get_unmapped_area_topdown;
	}
}

/*
 * You really shouldn't be using read() or write() on /dev/mem. This might go
 * away in the future.
 */
int valid_phys_addr_range(phys_addr_t addr, size_t size)
{
	/*
	 * Check whether addr is covered by a memory region without the
	 * MEMBLOCK_NOMAP attribute, and whether that region covers the
	 * entire range. In theory, this could lead to false negatives
	 * if the range is covered by distinct but adjacent memory regions
	 * that only differ in other attributes. However, few such
	 * attributes have been defined, and it is debatable whether it
	 * follows that /dev/mem read() calls should be able to traverse
	 * such boundaries.
	 */
	return memblock_is_region_memory(addr, size) &&
	       memblock_is_map_memory(addr);
}
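
/*
 * A minimal sketch of how the read()/write() side is expected to use the
 * helper above, assuming a caller modelled on the generic /dev/mem
 * character device (drivers/char/mem.c); the names below are illustrative
 * only and are not part of this file:
 *
 *	static ssize_t example_read_mem(phys_addr_t p, size_t count)
 *	{
 *		// Refuse ranges that are not fully backed by mapped RAM.
 *		if (!valid_phys_addr_range(p, count))
 *			return -EFAULT;
 *
 *		// ... map the physical range and copy it to userspace ...
 *		return count;
 *	}
 */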

/*
 * Do not allow /dev/mem mappings beyond the supported physical range.
 */
int valid_mmap_phys_addr_range(unsigned long pfn, size_t size)
{
	return !(((pfn << PAGE_SHIFT) + size) & ~PHYS_MASK);
}

#ifdef CONFIG_STRICT_DEVMEM

#include <linux/ioport.h>

/*
 * devmem_is_allowed() checks whether /dev/mem access to a certain address
 * is allowed. The argument is a physical page frame number. We mimic x86
 * here by disallowing access to system RAM as well as device-exclusive
 * MMIO regions. This effectively disables read()/write() on /dev/mem.
 */
int devmem_is_allowed(unsigned long pfn)
{
	if (iomem_is_exclusive(pfn << PAGE_SHIFT))
		return 0;
	if (!page_is_ram(pfn))
		return 1;
	return 0;
}

#endif
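
/*
 * A minimal sketch of how the mmap() side is expected to consult the
 * helpers above, assuming a caller modelled on the generic /dev/mem
 * character device (drivers/char/mem.c); the names below are illustrative
 * only and are not part of this file:
 *
 *	static int example_mmap_mem(struct vm_area_struct *vma)
 *	{
 *		size_t size = vma->vm_end - vma->vm_start;
 *
 *		// Refuse mappings that extend past the supported
 *		// physical address range.
 *		if (!valid_mmap_phys_addr_range(vma->vm_pgoff, size))
 *			return -EINVAL;
 *
 *		// With CONFIG_STRICT_DEVMEM, each page in the range is
 *		// additionally vetted with devmem_is_allowed(); a zero
 *		// return denies the mapping.
 *
 *		return remap_pfn_range(vma, vma->vm_start, vma->vm_pgoff,
 *				       size, vma->vm_page_prot);
 *	}
 */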