/* arch/um/kernel/skas/mmu.c (revision 87c2ce3b) */
/*
 * Copyright (C) 2002 Jeff Dike (jdike@karaya.com)
 * Licensed under the GPL
 */

#include "linux/config.h"
#include "linux/sched.h"
#include "linux/list.h"
#include "linux/spinlock.h"
#include "linux/slab.h"
#include "linux/errno.h"
#include "linux/mm.h"
#include "asm/current.h"
#include "asm/segment.h"
#include "asm/mmu.h"
#include "asm/pgalloc.h"
#include "asm/pgtable.h"
#include "asm/ldt.h"
#include "os.h"
#include "skas.h"

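/* Start of the syscall stub text, mapped read-only into each process's
 * address space at CONFIG_STUB_CODE below.
 */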
extern int __syscall_stub_start;

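/* init_stub_pte - map the kernel page at 'kernel' read-only and executable
 * at the userspace address 'proc' in 'mm', allocating any intermediate
 * page table levels that are missing.  Used to install the syscall stub
 * code and data pages in a new address space.
 */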
static int init_stub_pte(struct mm_struct *mm, unsigned long proc,
			 unsigned long kernel)
{
	pgd_t *pgd;
	pud_t *pud;
	pmd_t *pmd;
	pte_t *pte;

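	/* Walk to the pte covering 'proc', allocating the pud, pmd, and
	 * pte page as needed.
	 */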
	pgd = pgd_offset(mm, proc);
	pud = pud_alloc(mm, pgd, proc);
	if (!pud)
		goto out;

	pmd = pmd_alloc(mm, pud, proc);
	if (!pmd)
		goto out_pmd;

	pte = pte_alloc_map(mm, pmd, proc);
	if (!pte)
		goto out_pte;

	/* There's an interaction between the skas0 stub pages, stack
	 * randomization, and the BUG at the end of exit_mmap.  exit_mmap
	 * checks that the number of page tables freed is the same as had
	 * been allocated.  If the stack is on the last page table page,
	 * then the stack pte page will be freed, and if not, it won't.  To
	 * avoid having to know where the stack is, or if the process mapped
	 * something at the top of its address space for some other reason,
	 * we set TASK_SIZE to end at the start of the last page table.
	 * This keeps exit_mmap off the last page, but introduces a leak
	 * of that page.  So, we hang onto it here and free it in
	 * destroy_context_skas.
	 */

	mm->context.skas.last_page_table = pmd_page_kernel(*pmd);
#ifdef CONFIG_3_LEVEL_PGTABLES
	mm->context.skas.last_pmd = (unsigned long) __va(pud_val(*pud));
#endif

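	/* Point the pte at the kernel page and make it present, executable,
	 * and write-protected.
	 */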
	*pte = mk_pte(virt_to_page(kernel), __pgprot(_PAGE_PRESENT));
	*pte = pte_mkexec(*pte);
	*pte = pte_wrprotect(*pte);
	return(0);

 out_pte:
	pmd_free(pmd);
 out_pmd:
	pud_free(pud);
 out:
	return(-ENOMEM);
}

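/* init_new_context_skas - set up the host side of a new address space.
 * When a stub is needed, a zeroed page is allocated for the stub's
 * data/stack and the stub code and data pages are mapped at fixed
 * addresses.  The new context is backed either by a /proc/mm file
 * descriptor or by a ptraced host process, depending on proc_mm.
 */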
int init_new_context_skas(struct task_struct *task, struct mm_struct *mm)
{
	struct mmu_context_skas *from_mm = NULL;
	struct mmu_context_skas *to_mm = &mm->context.skas;
	unsigned long stack = 0;
	int from_fd, ret = -ENOMEM;

	if(skas_needs_stub){
		stack = get_zeroed_page(GFP_KERNEL);
		if(stack == 0)
			goto out;

		/* This zeros the entry that pgd_alloc didn't, needed since
		 * we are about to reinitialize it, and want mm.nr_ptes to
		 * be accurate.
		 */
		mm->pgd[USER_PTRS_PER_PGD] = __pgd(0);

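		/* Map the stub code and its data/stack page at the fixed
		 * addresses CONFIG_STUB_CODE and CONFIG_STUB_DATA.
		 */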
		ret = init_stub_pte(mm, CONFIG_STUB_CODE,
				    (unsigned long) &__syscall_stub_start);
		if(ret)
			goto out_free;

		ret = init_stub_pte(mm, CONFIG_STUB_DATA, stack);
		if(ret)
			goto out_free;

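		/* The stub pte page is the deliberately leaked page described
		 * in init_stub_pte; exit_mmap will never free it, so drop it
		 * from the count here to keep exit_mmap's accounting happy.
		 */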
		mm->nr_ptes--;
	}

	to_mm->id.stack = stack;
	if(current->mm != NULL && current->mm != &init_mm)
		from_mm = &current->mm->context.skas;

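	/* The new address space is represented either by a /proc/mm file
	 * descriptor (proc_mm) or by a ptraced host process.
	 */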
	if(proc_mm){
		if(from_mm)
			from_fd = from_mm->id.u.mm_fd;
		else from_fd = -1;

		ret = new_mm(from_fd, stack);
		if(ret < 0){
			printk("init_new_context_skas - new_mm failed, "
			       "errno = %d\n", ret);
			goto out_free;
		}
		to_mm->id.u.mm_fd = ret;
	}
	else {
		if(from_mm)
			to_mm->id.u.pid = copy_context_skas0(stack,
							     from_mm->id.u.pid);
		else to_mm->id.u.pid = start_userspace(stack);
	}

	ret = init_new_ldt(to_mm, from_mm);
	if(ret < 0){
		printk("init_new_context_skas - init_new_ldt"
		       " failed, errno = %d\n", ret);
		goto out_free;
	}

	return 0;

 out_free:
	if(to_mm->id.stack != 0)
		free_page(to_mm->id.stack);
 out:
	return ret;
}

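/* destroy_context_skas - tear down the host side of an address space:
 * close the /proc/mm descriptor or kill the ptraced host process, and
 * release the stub stack page and the page table pages that
 * init_stub_pte deliberately left behind.
 */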
void destroy_context_skas(struct mm_struct *mm)
{
	struct mmu_context_skas *mmu = &mm->context.skas;

	if(proc_mm)
		os_close_file(mmu->id.u.mm_fd);
	else
		os_kill_ptraced_process(mmu->id.u.pid, 1);

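	/* Free the stub stack page and the page table pages that were
	 * intentionally leaked by init_stub_pte (see the comment there).
	 */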
	if(!proc_mm || !ptrace_faultinfo){
		free_page(mmu->id.stack);
		pte_lock_deinit(virt_to_page(mmu->last_page_table));
		pte_free_kernel((pte_t *) mmu->last_page_table);
		dec_page_state(nr_page_table_pages);
#ifdef CONFIG_3_LEVEL_PGTABLES
		pmd_free((pmd_t *) mmu->last_pmd);
#endif
	}
}