/*
 * Copyright 2007-2008 Paul Mackerras, IBM Corp.
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License
 * as published by the Free Software Foundation; either version
 * 2 of the License, or (at your option) any later version.
 */

#include <linux/errno.h>
#include <linux/kernel.h>
#include <linux/gfp.h>
#include <linux/types.h>
#include <linux/mm.h>
#include <linux/hugetlb.h>
#include <linux/syscalls.h>

#include <asm/pgtable.h>
#include <linux/uaccess.h>

/*
 * Free all pages allocated for subpage protection maps and pointers.
 * Also makes sure that the subpage_prot_table structure is
 * reinitialized for the next user.
 */
void subpage_prot_free(struct mm_struct *mm)
{
	struct subpage_prot_table *spt = mm_ctx_subpage_prot(&mm->context);
	unsigned long i, j, addr;
	u32 **p;

	if (!spt)
		return;

	for (i = 0; i < 4; ++i) {
		if (spt->low_prot[i]) {
			free_page((unsigned long)spt->low_prot[i]);
			spt->low_prot[i] = NULL;
		}
	}
	addr = 0;
	for (i = 0; i < (TASK_SIZE_USER64 >> 43); ++i) {
		p = spt->protptrs[i];
		if (!p)
			continue;
		spt->protptrs[i] = NULL;
		for (j = 0; j < SBP_L2_COUNT && addr < spt->maxaddr;
		     ++j, addr += PAGE_SIZE)
			if (p[j])
				free_page((unsigned long)p[j]);
		free_page((unsigned long)p);
	}
	spt->maxaddr = 0;
	kfree(spt);
}

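/*
 * Flush out any hash page table entries (HPTEs) covering the npages
 * pages starting at addr, so that subsequent accesses refault and pick
 * up the updated subpage protection bits.
 */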
static void hpte_flush_range(struct mm_struct *mm, unsigned long addr,
			     int npages)
{
	pgd_t *pgd;
	pud_t *pud;
	pmd_t *pmd;
	pte_t *pte;
	spinlock_t *ptl;

	pgd = pgd_offset(mm, addr);
	if (pgd_none(*pgd))
		return;
	pud = pud_offset(pgd, addr);
	if (pud_none(*pud))
		return;
	pmd = pmd_offset(pud, addr);
	if (pmd_none(*pmd))
		return;
	pte = pte_offset_map_lock(mm, pmd, addr, &ptl);
	arch_enter_lazy_mmu_mode();
	for (; npages > 0; --npages) {
		pte_update(mm, addr, pte, 0, 0, 0);
		addr += PAGE_SIZE;
		++pte;
	}
	arch_leave_lazy_mmu_mode();
	pte_unmap_unlock(pte - 1, ptl);
}

/*
 * Clear the subpage protection map for an address range, allowing
 * all accesses that are allowed by the pte permissions.
 */
static void subpage_prot_clear(unsigned long addr, unsigned long len)
{
	struct mm_struct *mm = current->mm;
	struct subpage_prot_table *spt;
	u32 **spm, *spp;
	unsigned long i;
	size_t nw;
	unsigned long next, limit;

	down_write(&mm->mmap_sem);

	spt = mm_ctx_subpage_prot(&mm->context);
	if (!spt)
		goto err_out;

	limit = addr + len;
	if (limit > spt->maxaddr)
		limit = spt->maxaddr;
	for (; addr < limit; addr = next) {
		next = pmd_addr_end(addr, limit);
		if (addr < 0x100000000UL) {
			spm = spt->low_prot;
		} else {
			spm = spt->protptrs[addr >> SBP_L3_SHIFT];
			if (!spm)
				continue;
		}
		spp = spm[(addr >> SBP_L2_SHIFT) & (SBP_L2_COUNT - 1)];
		if (!spp)
			continue;
		spp += (addr >> PAGE_SHIFT) & (SBP_L1_COUNT - 1);

		i = (addr >> PAGE_SHIFT) & (PTRS_PER_PTE - 1);
		nw = PTRS_PER_PTE - i;
		if (addr + (nw << PAGE_SHIFT) > next)
			nw = (next - addr) >> PAGE_SHIFT;

		memset(spp, 0, nw * sizeof(u32));

		/* now flush any existing HPTEs for the range */
		hpte_flush_range(mm, addr, nw);
	}

err_out:
	up_write(&mm->mmap_sem);
}

#ifdef CONFIG_TRANSPARENT_HUGEPAGE
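/*
 * Page-walk callback: split any transparent huge page mapping this pmd
 * so that protection can later be applied at 4k subpage granularity.
 */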
static int subpage_walk_pmd_entry(pmd_t *pmd, unsigned long addr,
				  unsigned long end, struct mm_walk *walk)
{
	struct vm_area_struct *vma = walk->vma;
	split_huge_pmd(vma, pmd, addr);
	return 0;
}

static void subpage_mark_vma_nohuge(struct mm_struct *mm, unsigned long addr,
				    unsigned long len)
{
	struct vm_area_struct *vma;
	struct mm_walk subpage_proto_walk = {
		.mm = mm,
		.pmd_entry = subpage_walk_pmd_entry,
	};

	/*
	 * We don't try too hard, we just mark all the VMAs in that range
	 * VM_NOHUGEPAGE and split them.
	 */
	vma = find_vma(mm, addr);
	/*
	 * If the range lies entirely in an unmapped region, just return.
	 */
	if (vma && ((addr + len) <= vma->vm_start))
		return;

	while (vma) {
		if (vma->vm_start >= (addr + len))
			break;
		vma->vm_flags |= VM_NOHUGEPAGE;
		walk_page_vma(vma, &subpage_proto_walk);
		vma = vma->vm_next;
	}
}
#else
static void subpage_mark_vma_nohuge(struct mm_struct *mm, unsigned long addr,
				    unsigned long len)
{
	return;
}
#endif

/*
 * Copy in a subpage protection map for an address range.
 * The map has 2 bits per 4k subpage, so 32 bits per 64k page.
 * Each 2-bit field is 0 to allow any access, 1 to prevent writes,
 * 2 or 3 to prevent all accesses.
 * Note that the normal page protections also apply; the subpage
 * protection mechanism is an additional constraint, so putting 0
 * in a 2-bit field won't allow writes to a page that is otherwise
 * write-protected.
 */
SYSCALL_DEFINE3(subpage_prot, unsigned long, addr,
		unsigned long, len, u32 __user *, map)
{
	struct mm_struct *mm = current->mm;
	struct subpage_prot_table *spt;
	u32 **spm, *spp;
	unsigned long i;
	size_t nw;
	unsigned long next, limit;
	int err;

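	/*
	 * Subpage protection is a hash-MMU feature; it is not implemented
	 * for the radix MMU.
	 */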
	if (radix_enabled())
		return -ENOENT;

	/* Check parameters */
	if ((addr & ~PAGE_MASK) || (len & ~PAGE_MASK) ||
	    addr >= mm->task_size || len >= mm->task_size ||
	    addr + len > mm->task_size)
		return -EINVAL;

	if (is_hugepage_only_range(mm, addr, len))
		return -EINVAL;

	if (!map) {
		/* Clear out the protection map for the address range */
		subpage_prot_clear(addr, len);
		return 0;
	}

	if (!access_ok(map, (len >> PAGE_SHIFT) * sizeof(u32)))
		return -EFAULT;

	down_write(&mm->mmap_sem);

	spt = mm_ctx_subpage_prot(&mm->context);
	if (!spt) {
		/*
		 * Allocate subpage prot table if not already done.
		 * Do this with mmap_sem held
		 */
		spt = kzalloc(sizeof(struct subpage_prot_table), GFP_KERNEL);
		if (!spt) {
			err = -ENOMEM;
			goto out;
		}
		mm->context.hash_context->spt = spt;
	}

	subpage_mark_vma_nohuge(mm, addr, len);
	for (limit = addr + len; addr < limit; addr = next) {
		next = pmd_addr_end(addr, limit);
		err = -ENOMEM;
		if (addr < 0x100000000UL) {
			spm = spt->low_prot;
		} else {
			spm = spt->protptrs[addr >> SBP_L3_SHIFT];
			if (!spm) {
				spm = (u32 **)get_zeroed_page(GFP_KERNEL);
				if (!spm)
					goto out;
				spt->protptrs[addr >> SBP_L3_SHIFT] = spm;
			}
		}
		spm += (addr >> SBP_L2_SHIFT) & (SBP_L2_COUNT - 1);
		spp = *spm;
		if (!spp) {
			spp = (u32 *)get_zeroed_page(GFP_KERNEL);
			if (!spp)
				goto out;
			*spm = spp;
		}
		spp += (addr >> PAGE_SHIFT) & (SBP_L1_COUNT - 1);

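		/*
		 * Demote the segment containing addr to 4k base page size
		 * (with interrupts off) so that the hash MMU can enforce
		 * permissions at 4k subpage granularity.
		 */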
		local_irq_disable();
		demote_segment_4k(mm, addr);
		local_irq_enable();

		i = (addr >> PAGE_SHIFT) & (PTRS_PER_PTE - 1);
		nw = PTRS_PER_PTE - i;
		if (addr + (nw << PAGE_SHIFT) > next)
			nw = (next - addr) >> PAGE_SHIFT;

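		/*
		 * Drop mmap_sem across the copy from userspace: the access
		 * may fault, and the fault handler needs to take mmap_sem
		 * itself.
		 */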
		up_write(&mm->mmap_sem);
		if (__copy_from_user(spp, map, nw * sizeof(u32)))
			return -EFAULT;
		map += nw;
		down_write(&mm->mmap_sem);

		/* now flush any existing HPTEs for the range */
		hpte_flush_range(mm, addr, nw);
	}
	if (limit > spt->maxaddr)
		spt->maxaddr = limit;
	err = 0;
 out:
	up_write(&mm->mmap_sem);
	return err;
}
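
/*
 * Illustrative userspace usage (a sketch, not part of this file's build):
 * it assumes a 64k-page hash-MMU kernel and a C library that exposes
 * __NR_subpage_prot, and it omits error handling.
 *
 *	#include <stdint.h>
 *	#include <sys/mman.h>
 *	#include <sys/syscall.h>
 *	#include <unistd.h>
 *
 *	int main(void)
 *	{
 *		unsigned long page = 0x10000;	// one 64k page
 *		void *p = mmap(NULL, page, PROT_READ | PROT_WRITE,
 *			       MAP_PRIVATE | MAP_ANONYMOUS, -1, 0);
 *
 *		// One u32 covers one 64k page: 16 two-bit fields, one per
 *		// 4k subpage.  0x55555555 puts 1 (no write) in every
 *		// field, write-protecting all 16 subpages of the page.
 *		uint32_t map = 0x55555555;
 *		syscall(__NR_subpage_prot, (unsigned long)p, page, &map);
 *
 *		// A NULL map clears the subpage protections again.
 *		return syscall(__NR_subpage_prot, (unsigned long)p, page,
 *			       (uint32_t *)0);
 *	}
 */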