// SPDX-License-Identifier: GPL-2.0
/*
 * Copyright (C) 2020-2022 Loongson Technology Corporation Limited
 */
#include <linux/export.h>
#include <linux/io.h>
#include <linux/memblock.h>
#include <linux/mm.h>
#include <linux/mman.h>

unsigned long shm_align_mask = PAGE_SIZE - 1;	/* Sane caches */
EXPORT_SYMBOL(shm_align_mask);

/*
 * Round addr up to the next aliasing boundary, then add the colour offset
 * that keeps (addr - (pgoff << PAGE_SHIFT)) colour-aligned.
 */
#define COLOUR_ALIGN(addr, pgoff)				\
	((((addr) + shm_align_mask) & ~shm_align_mask) +	\
	 (((pgoff) << PAGE_SHIFT) & shm_align_mask))

enum mmap_allocation_direction {UP, DOWN};

/*
 * Find a free region for a new mapping, searching bottom-up (UP) or
 * top-down (DOWN) while honouring cache-colouring constraints for
 * shared mappings.
 */
static unsigned long arch_get_unmapped_area_common(struct file *filp,
	unsigned long addr0, unsigned long len, unsigned long pgoff,
	unsigned long flags, enum mmap_allocation_direction dir)
{
	struct mm_struct *mm = current->mm;
	struct vm_area_struct *vma;
	unsigned long addr = addr0;
	int do_color_align;
	struct vm_unmapped_area_info info;

	if (unlikely(len > TASK_SIZE))
		return -ENOMEM;

	if (flags & MAP_FIXED) {
		/* Even MAP_FIXED mappings must reside within TASK_SIZE */
		if (TASK_SIZE - len < addr)
			return -EINVAL;

		/*
		 * We do not accept a shared mapping if it would violate
		 * cache aliasing constraints.
		 */
		if ((flags & MAP_SHARED) &&
		    ((addr - (pgoff << PAGE_SHIFT)) & shm_align_mask))
			return -EINVAL;
		return addr;
	}

	do_color_align = 0;
	if (filp || (flags & MAP_SHARED))
		do_color_align = 1;

	/* requesting a specific address */
	if (addr) {
		if (do_color_align)
			addr = COLOUR_ALIGN(addr, pgoff);
		else
			addr = PAGE_ALIGN(addr);

		vma = find_vma(mm, addr);
		if (TASK_SIZE - len >= addr &&
		    (!vma || addr + len <= vm_start_gap(vma)))
			return addr;
	}

	info.length = len;
	info.align_mask = do_color_align ? (PAGE_MASK & shm_align_mask) : 0;
	info.align_offset = pgoff << PAGE_SHIFT;

	if (dir == DOWN) {
		info.flags = VM_UNMAPPED_AREA_TOPDOWN;
		info.low_limit = PAGE_SIZE;
		info.high_limit = mm->mmap_base;
		addr = vm_unmapped_area(&info);

		/* A page-aligned result is success; errnos have low bits set */
		if (!(addr & ~PAGE_MASK))
			return addr;

		/*
		 * A failed mmap() very likely causes application failure,
		 * so fall back to the bottom-up function here. This scenario
		 * can happen with large stack limits and large mmap()
		 * allocations.
		 */
	}

	info.flags = 0;
	info.low_limit = mm->mmap_base;
	info.high_limit = TASK_SIZE;
	return vm_unmapped_area(&info);
}

unsigned long arch_get_unmapped_area(struct file *filp, unsigned long addr0,
	unsigned long len, unsigned long pgoff, unsigned long flags)
{
	return arch_get_unmapped_area_common(filp,
			addr0, len, pgoff, flags, UP);
}

/*
 * There is no need to export this but sched.h declares the function as
 * extern so making it static here results in an error.
 */
unsigned long arch_get_unmapped_area_topdown(struct file *filp,
	unsigned long addr0, unsigned long len, unsigned long pgoff,
	unsigned long flags)
{
	return arch_get_unmapped_area_common(filp,
			addr0, len, pgoff, flags, DOWN);
}

/*
 * Returns non-zero if kaddr is a kernel virtual address in the
 * direct-mapped range that is backed by a valid page frame.
 */
int __virt_addr_valid(volatile void *kaddr)
{
	unsigned long vaddr = (unsigned long)kaddr;

	if ((vaddr < PAGE_OFFSET) || (vaddr >= vm_map_base))
		return 0;

	return pfn_valid(PFN_DOWN(PHYSADDR(kaddr)));
}
EXPORT_SYMBOL_GPL(__virt_addr_valid);
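/*
 * Illustration only (not part of the kernel file above): a minimal,
 * self-contained userspace sketch of the COLOUR_ALIGN arithmetic. With
 * shm_align_mask set to PAGE_SIZE - 1 as above, the macro degenerates to
 * PAGE_ALIGN, so this sketch assumes a hypothetical 64 KiB aliasing
 * granule (with the LoongArch default 16 KiB page size) purely to make
 * the colouring visible. All demo_* names are invented for the example.
 */
#include <stdio.h>

#define DEMO_PAGE_SHIFT		14UL		/* 16 KiB pages */
#define DEMO_SHM_ALIGN_MASK	(0x10000UL - 1)	/* hypothetical 64 KiB granule */

static unsigned long demo_colour_align(unsigned long addr, unsigned long pgoff)
{
	/* Same shape as COLOUR_ALIGN: round up, then add the colour offset */
	return ((addr + DEMO_SHM_ALIGN_MASK) & ~DEMO_SHM_ALIGN_MASK) +
	       ((pgoff << DEMO_PAGE_SHIFT) & DEMO_SHM_ALIGN_MASK);
}

int main(void)
{
	unsigned long addr = 0x120004321UL;
	unsigned long pgoff = 3;
	unsigned long aligned = demo_colour_align(addr, pgoff);

	printf("hint 0x%lx, pgoff %lu -> 0x%lx\n", addr, pgoff, aligned);
	/* The invariant the MAP_FIXED path checks: residue must be zero */
	printf("colour residue: 0x%lx\n",
	       (aligned - (pgoff << DEMO_PAGE_SHIFT)) & DEMO_SHM_ALIGN_MASK);
	return 0;
}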