// SPDX-License-Identifier: GPL-2.0-only
/*
 * ARC700 mmap
 *
 * (started from arm version - for VIPT alias handling)
 *
 * Copyright (C) 2013 Synopsys, Inc. (www.synopsys.com)
 */

#include <linux/fs.h>
#include <linux/mm.h>
#include <linux/mman.h>
#include <linux/sched/mm.h>

#include <asm/cacheflush.h>

#define COLOUR_ALIGN(addr, pgoff)			\
	((((addr) + SHMLBA - 1) & ~(SHMLBA - 1)) +	\
	 (((pgoff) << PAGE_SHIFT) & (SHMLBA - 1)))

/*
 * Ensure that shared mappings are correctly aligned to avoid aliasing
 * issues with VIPT caches: a specific page of an object must always be
 * mapped at a multiple of SHMLBA bytes.
 */
unsigned long
arch_get_unmapped_area(struct file *filp, unsigned long addr,
		unsigned long len, unsigned long pgoff, unsigned long flags)
{
	struct mm_struct *mm = current->mm;
	struct vm_area_struct *vma;
	int do_align = 0;
	int aliasing = cache_is_vipt_aliasing();
	struct vm_unmapped_area_info info;

	/*
	 * We only need to do colour alignment if D cache aliases.
	 */
	if (aliasing)
		do_align = filp || (flags & MAP_SHARED);

	/*
	 * We enforce the MAP_FIXED case.
	 */
	if (flags & MAP_FIXED) {
		if (aliasing && flags & MAP_SHARED &&
		    (addr - (pgoff << PAGE_SHIFT)) & (SHMLBA - 1))
			return -EINVAL;
		return addr;
	}

	if (len > TASK_SIZE)
		return -ENOMEM;

	if (addr) {
		if (do_align)
			addr = COLOUR_ALIGN(addr, pgoff);
		else
			addr = PAGE_ALIGN(addr);

		vma = find_vma(mm, addr);
		if (TASK_SIZE - len >= addr &&
		    (!vma || addr + len <= vm_start_gap(vma)))
			return addr;
	}

	info.flags = 0;
	info.length = len;
	info.low_limit = mm->mmap_base;
	info.high_limit = TASK_SIZE;
	info.align_mask = do_align ? (PAGE_MASK & (SHMLBA - 1)) : 0;
	info.align_offset = pgoff << PAGE_SHIFT;
	return vm_unmapped_area(&info);
}

static const pgprot_t protection_map[16] = {
	[VM_NONE]					= PAGE_U_NONE,
	[VM_READ]					= PAGE_U_R,
	[VM_WRITE]					= PAGE_U_R,
	[VM_WRITE | VM_READ]				= PAGE_U_R,
	[VM_EXEC]					= PAGE_U_X_R,
	[VM_EXEC | VM_READ]				= PAGE_U_X_R,
	[VM_EXEC | VM_WRITE]				= PAGE_U_X_R,
	[VM_EXEC | VM_WRITE | VM_READ]			= PAGE_U_X_R,
	[VM_SHARED]					= PAGE_U_NONE,
	[VM_SHARED | VM_READ]				= PAGE_U_R,
	[VM_SHARED | VM_WRITE]				= PAGE_U_W_R,
	[VM_SHARED | VM_WRITE | VM_READ]		= PAGE_U_W_R,
	[VM_SHARED | VM_EXEC]				= PAGE_U_X_R,
	[VM_SHARED | VM_EXEC | VM_READ]			= PAGE_U_X_R,
	[VM_SHARED | VM_EXEC | VM_WRITE]		= PAGE_U_X_W_R,
	[VM_SHARED | VM_EXEC | VM_WRITE | VM_READ]	= PAGE_U_X_W_R
};
DECLARE_VM_GET_PAGE_PROT
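
/*
 * Worked example of COLOUR_ALIGN (illustrative only, not part of the
 * build). SHMLBA == 0x2000 (two 4K pages) is assumed purely for the
 * arithmetic; the real value is defined per-architecture.
 *
 *	addr   = 0x10123, pgoff = 1, PAGE_SHIFT = 12
 *	base   = (addr + SHMLBA - 1) & ~(SHMLBA - 1)	= 0x12000
 *	off    = (pgoff << PAGE_SHIFT) & (SHMLBA - 1)	= 0x1000
 *	result = base + off				= 0x13000
 *
 * result - (pgoff << PAGE_SHIFT) = 0x12000, which is SHMLBA-aligned,
 * so page 0 of the object lands at a multiple of SHMLBA and every
 * mapping of the same object page gets the same cache colour.
 */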
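
/*
 * User-space sketch of the MAP_FIXED check above (illustrative only;
 * "fd" and the addresses are hypothetical, and SHMLBA == 0x2000 is
 * assumed as in the previous example). On an aliasing VIPT
 * configuration, a MAP_FIXED|MAP_SHARED request whose address is not
 * colour-consistent with pgoff is rejected:
 *
 *	void *p = mmap((void *)0x13000, 4096, PROT_READ | PROT_WRITE,
 *		       MAP_SHARED | MAP_FIXED, fd, 0);
 *
 * Here addr - (pgoff << PAGE_SHIFT) = 0x13000 is not a multiple of
 * SHMLBA, so arch_get_unmapped_area() returns -EINVAL and mmap()
 * fails with MAP_FAILED / errno == EINVAL, since honouring the
 * request could create aliasing copies of the shared page in the
 * D-cache.
 */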
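
/*
 * Note on protection_map[]: DECLARE_VM_GET_PAGE_PROT expands to a
 * vm_get_page_prot() that indexes this table with the low vm_flags
 * bits (VM_READ | VM_WRITE | VM_EXEC | VM_SHARED). The private
 * writable entries ([VM_WRITE] and [VM_WRITE | VM_READ]) deliberately
 * map to the read-only PAGE_U_R: the first store to such a page
 * faults and is resolved by copy-on-write, so only shared writable
 * mappings receive hardware write permission via PAGE_U_W_R.
 */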