// SPDX-License-Identifier: GPL-2.0
/*
 * Copyright (C) 2015 Thomas Meyer (thomas@m3y3r.de)
 * Copyright (C) 2002 - 2007 Jeff Dike (jdike@{addtoit,linux.intel}.com)
 */

#include <linux/mm.h>
#include <linux/sched/signal.h>
#include <linux/slab.h>

#include <asm/pgalloc.h>
#include <asm/pgtable.h>
#include <asm/sections.h>
#include <as-layout.h>
#include <os.h>
#include <skas.h>

/*
 * Walk (allocating levels as needed) the page tables for the stub
 * address @proc and point the resulting PTE, read-only, at the kernel
 * page backing @kernel.
 */
static int init_stub_pte(struct mm_struct *mm, unsigned long proc,
			 unsigned long kernel)
{
	pgd_t *pgd;
	p4d_t *p4d;
	pud_t *pud;
	pmd_t *pmd;
	pte_t *pte;

	pgd = pgd_offset(mm, proc);

	p4d = p4d_alloc(mm, pgd, proc);
	if (!p4d)
		goto out;

	pud = pud_alloc(mm, p4d, proc);
	if (!pud)
		goto out_pud;

	pmd = pmd_alloc(mm, pud, proc);
	if (!pmd)
		goto out_pmd;

	pte = pte_alloc_map(mm, pmd, proc);
	if (!pte)
		goto out_pte;

	*pte = mk_pte(virt_to_page(kernel), __pgprot(_PAGE_PRESENT));
	*pte = pte_mkread(*pte);
	return 0;

out_pte:
	pmd_free(mm, pmd);
out_pmd:
	pud_free(mm, pud);
out_pud:
	p4d_free(mm, p4d);
out:
	return -ENOMEM;
}

int init_new_context(struct task_struct *task, struct mm_struct *mm)
{
	struct mm_context *from_mm = NULL;
	struct mm_context *to_mm = &mm->context;
	unsigned long stack = 0;
	int ret = -ENOMEM;

	stack = get_zeroed_page(GFP_KERNEL);
	if (stack == 0)
		goto out;

	to_mm->id.stack = stack;
	if (current->mm != NULL && current->mm != &init_mm)
		from_mm = &current->mm->context;

	/*
	 * Keep UML's signal-driven interrupts out while the host process
	 * backing this address space is created.
	 */
	block_signals_trace();
	if (from_mm)
		to_mm->id.u.pid = copy_context_skas0(stack,
						     from_mm->id.u.pid);
	else
		to_mm->id.u.pid = start_userspace(stack);
	unblock_signals_trace();

	if (to_mm->id.u.pid < 0) {
		ret = to_mm->id.u.pid;
		goto out_free;
	}

	ret = init_new_ldt(to_mm, from_mm);
	if (ret < 0) {
		printk(KERN_ERR "init_new_context_skas - init_ldt"
		       " failed, errno = %d\n", ret);
		goto out_free;
	}

	return 0;

out_free:
	if (to_mm->id.stack != 0)
		free_page(to_mm->id.stack);
out:
	return ret;
}

void uml_setup_stubs(struct mm_struct *mm)
{
	int err, ret;

	ret = init_stub_pte(mm, STUB_CODE,
			    (unsigned long) __syscall_stub_start);
	if (ret)
		goto out;

	ret = init_stub_pte(mm, STUB_DATA, mm->context.id.stack);
	if (ret)
		goto out;

	mm->context.stub_pages[0] = virt_to_page(__syscall_stub_start);
	mm->context.stub_pages[1] = virt_to_page(mm->context.id.stack);

	/* dup_mmap already holds mmap_sem */
	err = install_special_mapping(mm, STUB_START, STUB_END - STUB_START,
				      VM_READ | VM_MAYREAD | VM_EXEC |
				      VM_MAYEXEC | VM_DONTCOPY | VM_PFNMAP,
				      mm->context.stub_pages);
	if (err) {
		printk(KERN_ERR "install_special_mapping returned %d\n", err);
		goto out;
	}
	return;

out:
	force_sigsegv(SIGSEGV);
}

void arch_exit_mmap(struct mm_struct *mm)
{
	pte_t *pte;

	pte = virt_to_pte(mm, STUB_CODE);
	if (pte != NULL)
		pte_clear(mm, STUB_CODE, pte);

	pte = virt_to_pte(mm, STUB_DATA);
	if (pte == NULL)
		return;

	pte_clear(mm, STUB_DATA, pte);
}
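/*
 * Rough call-site sketch (not documented in this file; inferred from the
 * generic mm code of the same era): init_new_context() is called from
 * mm_init() in kernel/fork.c whenever a new mm is set up,
 * uml_setup_stubs() from UML's arch_dup_mmap() and activate_mm(),
 * arch_exit_mmap() from exit_mmap() in mm/mmap.c, and destroy_context()
 * from __mmdrop() once the last reference to the mm is dropped; hence
 * the defensive pid check below, which must tolerate a context that was
 * never fully initialized.
 */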
void destroy_context(struct mm_struct *mm)
{
	struct mm_context *mmu = &mm->context;

	/*
	 * If init_new_context wasn't called, this will be
	 * zero, resulting in a kill(0), which will result in the
	 * whole UML suddenly dying. Also, cover negative and
	 * 1 cases, since they shouldn't happen either.
	 */
	if (mmu->id.u.pid < 2) {
		printk(KERN_ERR "corrupt mm_context - pid = %d\n",
		       mmu->id.u.pid);
		return;
	}
	os_kill_ptraced_process(mmu->id.u.pid, 1);

	free_page(mmu->id.stack);
	free_ldt(mmu);
}