xref: /openbmc/linux/arch/um/kernel/skas/mmu.c (revision 8c749ce9)
/*
 * Copyright (C) 2015 Thomas Meyer (thomas@m3y3r.de)
 * Copyright (C) 2002 - 2007 Jeff Dike (jdike@{addtoit,linux.intel}.com)
 * Licensed under the GPL
 */

#include <linux/mm.h>
#include <linux/sched.h>
#include <linux/slab.h>
#include <asm/pgalloc.h>
#include <asm/pgtable.h>
#include <asm/sections.h>
#include <as-layout.h>
#include <os.h>
#include <skas.h>

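/*
 * init_stub_pte - map one kernel page into a process address space.
 * @mm:     address space to map into
 * @proc:   process virtual address at which to install the mapping
 * @kernel: kernel virtual address of the page being mapped
 *
 * Walks (allocating where necessary) the page table levels covering
 * @proc and installs a present, readable PTE pointing at the page
 * behind @kernel.  Used below to expose the syscall stub code and the
 * stub data page to the child process.  Returns 0 on success, -ENOMEM
 * if a page table level could not be allocated.
 */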
static int init_stub_pte(struct mm_struct *mm, unsigned long proc,
			 unsigned long kernel)
{
	pgd_t *pgd;
	pud_t *pud;
	pmd_t *pmd;
	pte_t *pte;

	pgd = pgd_offset(mm, proc);
	pud = pud_alloc(mm, pgd, proc);
	if (!pud)
		goto out;

	pmd = pmd_alloc(mm, pud, proc);
	if (!pmd)
		goto out_pmd;

	pte = pte_alloc_map(mm, NULL, pmd, proc);
	if (!pte)
		goto out_pte;

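	/* Map the kernel page at 'proc': created present, then marked readable */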
	*pte = mk_pte(virt_to_page(kernel), __pgprot(_PAGE_PRESENT));
	*pte = pte_mkread(*pte);
	return 0;

 out_pte:
	pmd_free(mm, pmd);
 out_pmd:
	pud_free(mm, pud);
 out:
	return -ENOMEM;
}

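/*
 * init_new_context - set up the host-side context backing a new mm.
 *
 * Allocates a zeroed page that becomes the stub data/stack page for this
 * address space, then creates the host process that will run it: either
 * copied from the current context via copy_context_skas0(), or started
 * from scratch via start_userspace() when there is no usable parent mm.
 * Finally the LDT is initialised from the parent context (if any) via
 * init_new_ldt().
 */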
int init_new_context(struct task_struct *task, struct mm_struct *mm)
{
	struct mm_context *from_mm = NULL;
	struct mm_context *to_mm = &mm->context;
	unsigned long stack = 0;
	int ret = -ENOMEM;

	stack = get_zeroed_page(GFP_KERNEL);
	if (stack == 0)
		goto out;

	to_mm->id.stack = stack;
	if (current->mm != NULL && current->mm != &init_mm)
		from_mm = &current->mm->context;

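	/*
	 * Signals stay blocked while the host process backing this mm is
	 * created, so UML's signal handlers cannot run in the middle of it.
	 */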
	block_signals();
	if (from_mm)
		to_mm->id.u.pid = copy_context_skas0(stack,
						     from_mm->id.u.pid);
	else
		to_mm->id.u.pid = start_userspace(stack);
	unblock_signals();

	if (to_mm->id.u.pid < 0) {
		ret = to_mm->id.u.pid;
		goto out_free;
	}

	ret = init_new_ldt(to_mm, from_mm);
	if (ret < 0) {
		printk(KERN_ERR "init_new_context_skas - init_ldt failed, errno = %d\n",
		       ret);
		goto out_free;
	}

	return 0;

 out_free:
	if (to_mm->id.stack != 0)
		free_page(to_mm->id.stack);
 out:
	return ret;
}

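/*
 * uml_setup_stubs - install the stub mappings in a new address space.
 *
 * Creates PTEs for the syscall stub code page at STUB_CODE and for this
 * mm's stub data page at STUB_DATA, then covers [STUB_START, STUB_END)
 * with a special mapping backed by those pages.  On any failure the
 * process is killed with a forced SIGSEGV, since it cannot run without
 * the stubs.
 */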
void uml_setup_stubs(struct mm_struct *mm)
{
	int err, ret;

	ret = init_stub_pte(mm, STUB_CODE,
			    (unsigned long) __syscall_stub_start);
	if (ret)
		goto out;

	ret = init_stub_pte(mm, STUB_DATA, mm->context.id.stack);
	if (ret)
		goto out;

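	/*
	 * Remember the pages backing the stub code and the stub data so
	 * the special mapping below can be built from them.
	 */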
	mm->context.stub_pages[0] = virt_to_page(__syscall_stub_start);
	mm->context.stub_pages[1] = virt_to_page(mm->context.id.stack);

	/* dup_mmap already holds mmap_sem */
	err = install_special_mapping(mm, STUB_START, STUB_END - STUB_START,
				      VM_READ | VM_MAYREAD | VM_EXEC |
				      VM_MAYEXEC | VM_DONTCOPY | VM_PFNMAP,
				      mm->context.stub_pages);
	if (err) {
		printk(KERN_ERR "install_special_mapping returned %d\n", err);
		goto out;
	}
	return;

out:
	force_sigsegv(SIGSEGV, current);
}

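/*
 * arch_exit_mmap - tear down the stub mappings when the mm goes away.
 * Clears the PTEs installed for the stub code and data pages, if they
 * are still present.
 */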
void arch_exit_mmap(struct mm_struct *mm)
{
	pte_t *pte;

	pte = virt_to_pte(mm, STUB_CODE);
	if (pte != NULL)
		pte_clear(mm, STUB_CODE, pte);

	pte = virt_to_pte(mm, STUB_DATA);
	if (pte == NULL)
		return;

	pte_clear(mm, STUB_DATA, pte);
}

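/*
 * destroy_context - release the host-side resources behind an mm.
 * Kills the ptraced host process backing this address space, then frees
 * the stub data/stack page and the LDT state.
 */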
void destroy_context(struct mm_struct *mm)
{
	struct mm_context *mmu = &mm->context;

	/*
	 * If init_new_context wasn't called, this will be
	 * zero, resulting in a kill(0), which will bring down
	 * the whole UML instance.  Also cover the negative and
	 * 1 cases, since they shouldn't happen either.
	 */
	if (mmu->id.u.pid < 2) {
		printk(KERN_ERR "corrupt mm_context - pid = %d\n",
		       mmu->id.u.pid);
		return;
	}
	os_kill_ptraced_process(mmu->id.u.pid, 1);

	free_page(mmu->id.stack);
	free_ldt(mmu);
}
159