/*
 * Copyright (C) 2002 - 2007 Jeff Dike (jdike@{addtoit,linux.intel}.com)
 * Licensed under the GPL
 */

#include "linux/mm.h"
#include "linux/sched.h"
#include "asm/pgalloc.h"
#include "asm/pgtable.h"
#include "as-layout.h"
#include "os.h"
#include "skas.h"

/* Start of the syscall stub code page, mapped at STUB_CODE below. */
extern int __syscall_stub_start;

static int init_stub_pte(struct mm_struct *mm, unsigned long proc,
			 unsigned long kernel)
{
	pgd_t *pgd;
	pud_t *pud;
	pmd_t *pmd;
	pte_t *pte;

	pgd = pgd_offset(mm, proc);
	pud = pud_alloc(mm, pgd, proc);
	if (!pud)
		goto out;

	pmd = pmd_alloc(mm, pud, proc);
	if (!pmd)
		goto out_pmd;

	pte = pte_alloc_map(mm, pmd, proc);
	if (!pte)
		goto out_pte;

	/*
	 * There's an interaction between the skas0 stub pages, stack
	 * randomization, and the BUG at the end of exit_mmap.  exit_mmap
	 * checks that the number of page tables freed is the same as had
	 * been allocated.  If the stack is on the last page table page,
	 * then the stack pte page will be freed, and if not, it won't.  To
	 * avoid having to know where the stack is, or whether the process
	 * mapped something at the top of its address space for some other
	 * reason, we set TASK_SIZE to end at the start of the last page
	 * table.  This keeps exit_mmap off the last page, but introduces a
	 * leak of that page.  So, we hang onto it here and free it in
	 * destroy_context.
	 */
	mm->context.last_page_table = pmd_page_vaddr(*pmd);
#ifdef CONFIG_3_LEVEL_PGTABLES
	mm->context.last_pmd = (unsigned long) __va(pud_val(*pud));
#endif

	*pte = mk_pte(virt_to_page(kernel), __pgprot(_PAGE_PRESENT));
	*pte = pte_mkread(*pte);
	return 0;

	/* Unwind in reverse order of allocation; each label falls through. */
 out_pte:
	pmd_free(pmd);
 out_pmd:
	pud_free(pud);
 out:
	return -ENOMEM;
}

int init_new_context(struct task_struct *task, struct mm_struct *mm)
{
	struct mm_context *from_mm = NULL;
	struct mm_context *to_mm = &mm->context;
	unsigned long stack = 0;
	int ret = -ENOMEM;

	if (skas_needs_stub) {
		stack = get_zeroed_page(GFP_KERNEL);
		if (stack == 0)
			goto out;

		/*
		 * This zeros the entry that pgd_alloc didn't, needed since
		 * we are about to reinitialize it, and want mm.nr_ptes to
		 * be accurate.
		 */
		mm->pgd[USER_PTRS_PER_PGD] = __pgd(0);

		/* Map the stub code and data pages into the new mm. */
		ret = init_stub_pte(mm, STUB_CODE,
				    (unsigned long) &__syscall_stub_start);
		if (ret)
			goto out_free;

		ret = init_stub_pte(mm, STUB_DATA, stack);
		if (ret)
			goto out_free;

		/*
		 * The stub pte page lies above TASK_SIZE, so exit_mmap
		 * won't free it; keep nr_ptes consistent with what
		 * exit_mmap will actually free.
		 */
		mm->nr_ptes--;
	}

	to_mm->id.stack = stack;
	if (current->mm != NULL && current->mm != &init_mm)
		from_mm = &current->mm->context;

	if (proc_mm) {
		ret = new_mm(stack);
		if (ret < 0) {
			printk(KERN_ERR "init_new_context - "
			       "new_mm failed, errno = %d\n", ret);
			goto out_free;
		}
		to_mm->id.u.mm_fd = ret;
	}
	else {
		if (from_mm)
			to_mm->id.u.pid = copy_context_skas0(stack,
							     from_mm->id.u.pid);
		else
			to_mm->id.u.pid = start_userspace(stack);
	}

	ret = init_new_ldt(to_mm, from_mm);
	if (ret < 0) {
		printk(KERN_ERR "init_new_context - init_new_ldt"
		       " failed, errno = %d\n", ret);
		goto out_free;
	}

	return 0;

 out_free:
	if (to_mm->id.stack != 0)
		free_page(to_mm->id.stack);
 out:
	return ret;
}

void destroy_context(struct mm_struct *mm)
{
	struct mm_context *mmu = &mm->context;

	if (proc_mm)
		os_close_file(mmu->id.u.mm_fd);
	else
		os_kill_ptraced_process(mmu->id.u.pid, 1);

	if (!proc_mm || !ptrace_faultinfo) {
		/*
		 * Free the stub stack and the page table pages that
		 * init_stub_pte deliberately leaked (see the comment there).
		 */
		free_page(mmu->id.stack);
		pte_lock_deinit(virt_to_page(mmu->last_page_table));
		pte_free_kernel((pte_t *) mmu->last_page_table);
		dec_zone_page_state(virt_to_page(mmu->last_page_table),
				    NR_PAGETABLE);
#ifdef CONFIG_3_LEVEL_PGTABLES
		pmd_free((pmd_t *) mmu->last_pmd);
#endif
	}

	free_ldt(mmu);
}