/*
 * Copyright (C) 2002 - 2007 Jeff Dike (jdike@{addtoit,linux.intel}.com)
 * Licensed under the GPL
 */

#include <linux/mm.h>
#include <linux/sched.h>
#include <linux/slab.h>
#include <asm/pgalloc.h>
#include <asm/pgtable.h>
#include <asm/sections.h>
#include <as-layout.h>
#include <os.h>
#include <skas.h>

/*
 * Map the kernel page at "kernel" into the process address space at
 * "proc" as a present, read-only PTE, allocating the intermediate
 * page table levels as needed.
 */
static int init_stub_pte(struct mm_struct *mm, unsigned long proc,
			 unsigned long kernel)
{
	pgd_t *pgd;
	pud_t *pud;
	pmd_t *pmd;
	pte_t *pte;

	pgd = pgd_offset(mm, proc);
	pud = pud_alloc(mm, pgd, proc);
	if (!pud)
		goto out;

	pmd = pmd_alloc(mm, pud, proc);
	if (!pmd)
		goto out_pmd;

	pte = pte_alloc_map(mm, NULL, pmd, proc);
	if (!pte)
		goto out_pte;

	*pte = mk_pte(virt_to_page(kernel), __pgprot(_PAGE_PRESENT));
	*pte = pte_mkread(*pte);
	return 0;

out_pte:
	pmd_free(mm, pmd);
out_pmd:
	pud_free(mm, pud);
out:
	return -ENOMEM;
}

int init_new_context(struct task_struct *task, struct mm_struct *mm)
{
	struct mm_context *from_mm = NULL;
	struct mm_context *to_mm = &mm->context;
	unsigned long stack = 0;
	int ret = -ENOMEM;

	/* Allocate the stub stack page for the new address space. */
	stack = get_zeroed_page(GFP_KERNEL);
	if (stack == 0)
		goto out;

	to_mm->id.stack = stack;
	if (current->mm != NULL && current->mm != &init_mm)
		from_mm = &current->mm->context;

	/*
	 * Clone the host process backing the current context if there
	 * is one; otherwise start a fresh userspace process.
	 */
	if (from_mm)
		to_mm->id.u.pid = copy_context_skas0(stack,
						     from_mm->id.u.pid);
	else
		to_mm->id.u.pid = start_userspace(stack);

	if (to_mm->id.u.pid < 0) {
		ret = to_mm->id.u.pid;
		goto out_free;
	}

	ret = init_new_ldt(to_mm, from_mm);
	if (ret < 0) {
		printk(KERN_ERR "init_new_context_skas - init_ldt"
		       " failed, errno = %d\n", ret);
		goto out_free;
	}

	return 0;

out_free:
	if (to_mm->id.stack != 0)
		free_page(to_mm->id.stack);
out:
	return ret;
}

/*
 * Map the syscall stub code and data pages into a new address space
 * at STUB_CODE and STUB_DATA.
 */
void uml_setup_stubs(struct mm_struct *mm)
{
	int err, ret;

	ret = init_stub_pte(mm, STUB_CODE,
			    (unsigned long) __syscall_stub_start);
	if (ret)
		goto out;

	ret = init_stub_pte(mm, STUB_DATA, mm->context.id.stack);
	if (ret)
		goto out;

	mm->context.stub_pages[0] = virt_to_page(__syscall_stub_start);
	mm->context.stub_pages[1] = virt_to_page(mm->context.id.stack);

	/* dup_mmap already holds mmap_sem */
	err = install_special_mapping(mm, STUB_START, STUB_END - STUB_START,
				      VM_READ | VM_MAYREAD | VM_EXEC |
				      VM_MAYEXEC | VM_DONTCOPY | VM_PFNMAP,
				      mm->context.stub_pages);
	if (err) {
		printk(KERN_ERR "install_special_mapping returned %d\n", err);
		goto out;
	}
	return;

out:
	force_sigsegv(SIGSEGV, current);
}

/* Clear the stub PTEs when the address space is torn down. */
void arch_exit_mmap(struct mm_struct *mm)
{
	pte_t *pte;

	pte = virt_to_pte(mm, STUB_CODE);
	if (pte != NULL)
		pte_clear(mm, STUB_CODE, pte);

	pte = virt_to_pte(mm, STUB_DATA);
	if (pte == NULL)
		return;

	pte_clear(mm, STUB_DATA, pte);
}

void destroy_context(struct mm_struct *mm)
{
	struct mm_context *mmu = &mm->context;

	/*
	 * If init_new_context wasn't called, this will be zero,
	 * resulting in a kill(0), which would take down the whole UML.
	 * Also cover the negative and 1 cases, since they shouldn't
	 * happen either.
	 */
	if (mmu->id.u.pid < 2) {
		printk(KERN_ERR "corrupt mm_context - pid = %d\n",
		       mmu->id.u.pid);
		return;
	}
	os_kill_ptraced_process(mmu->id.u.pid, 1);

	free_page(mmu->id.stack);
	free_ldt(mmu);
}