// SPDX-License-Identifier: GPL-2.0-or-later
/*
 * Copyright 2007-2008 Paul Mackerras, IBM Corp.
 */

#include <linux/errno.h>
#include <linux/kernel.h>
#include <linux/gfp.h>
#include <linux/types.h>
#include <linux/mm.h>
#include <linux/hugetlb.h>
#include <linux/syscalls.h>

#include <asm/pgtable.h>
#include <linux/uaccess.h>

/*
 * Free all pages allocated for subpage protection maps and pointers,
 * then free the subpage_prot_table structure itself.
 */
void subpage_prot_free(struct mm_struct *mm)
{
	struct subpage_prot_table *spt = mm_ctx_subpage_prot(&mm->context);
	unsigned long i, j, addr;
	u32 **p;

	if (!spt)
		return;

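	/* The four low_prot[] pages cover addresses below 4GB. */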
	for (i = 0; i < 4; ++i) {
		if (spt->low_prot[i]) {
			free_page((unsigned long)spt->low_prot[i]);
			spt->low_prot[i] = NULL;
		}
	}
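	/*
	 * The rest of the address space is reached via protptrs[], one
	 * entry per 1UL << SBP_L3_SHIFT bytes of address space.
	 */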
	addr = 0;
	for (i = 0; i < (TASK_SIZE_USER64 >> 43); ++i) {
		p = spt->protptrs[i];
		if (!p)
			continue;
		spt->protptrs[i] = NULL;
		for (j = 0; j < SBP_L2_COUNT && addr < spt->maxaddr;
		     ++j, addr += PAGE_SIZE)
			if (p[j])
				free_page((unsigned long)p[j]);
		free_page((unsigned long)p);
	}
	spt->maxaddr = 0;
	kfree(spt);
}

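/*
 * Invalidate any existing hashed page table entries (HPTEs) for @npages
 * pages starting at @addr, so that later accesses refault and pick up
 * the updated subpage protection map.
 */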
static void hpte_flush_range(struct mm_struct *mm, unsigned long addr,
			     int npages)
{
	pgd_t *pgd;
	pud_t *pud;
	pmd_t *pmd;
	pte_t *pte;
	spinlock_t *ptl;

	pgd = pgd_offset(mm, addr);
	if (pgd_none(*pgd))
		return;
	pud = pud_offset(pgd, addr);
	if (pud_none(*pud))
		return;
	pmd = pmd_offset(pud, addr);
	if (pmd_none(*pmd))
		return;
	pte = pte_offset_map_lock(mm, pmd, addr, &ptl);
	arch_enter_lazy_mmu_mode();
	for (; npages > 0; --npages) {
		pte_update(mm, addr, pte, 0, 0, 0);
		addr += PAGE_SIZE;
		++pte;
	}
	arch_leave_lazy_mmu_mode();
	pte_unmap_unlock(pte - 1, ptl);
}

/*
 * Clear the subpage protection map for an address range, allowing
 * all accesses that are allowed by the pte permissions.
 */
static void subpage_prot_clear(unsigned long addr, unsigned long len)
{
	struct mm_struct *mm = current->mm;
	struct subpage_prot_table *spt;
	u32 **spm, *spp;
	unsigned long i;
	size_t nw;
	unsigned long next, limit;

	down_write(&mm->mmap_sem);

	spt = mm_ctx_subpage_prot(&mm->context);
	if (!spt)
		goto err_out;

	limit = addr + len;
	if (limit > spt->maxaddr)
		limit = spt->maxaddr;
	for (; addr < limit; addr = next) {
		next = pmd_addr_end(addr, limit);
		if (addr < 0x100000000UL) {
			spm = spt->low_prot;
		} else {
			spm = spt->protptrs[addr >> SBP_L3_SHIFT];
			if (!spm)
				continue;
		}
		spp = spm[(addr >> SBP_L2_SHIFT) & (SBP_L2_COUNT - 1)];
		if (!spp)
			continue;
		spp += (addr >> PAGE_SHIFT) & (SBP_L1_COUNT - 1);

		i = (addr >> PAGE_SHIFT) & (PTRS_PER_PTE - 1);
		nw = PTRS_PER_PTE - i;
		if (addr + (nw << PAGE_SHIFT) > next)
			nw = (next - addr) >> PAGE_SHIFT;

		memset(spp, 0, nw * sizeof(u32));

		/* now flush any existing HPTEs for the range */
		hpte_flush_range(mm, addr, nw);
	}

err_out:
	up_write(&mm->mmap_sem);
}

#ifdef CONFIG_TRANSPARENT_HUGEPAGE
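/*
 * pmd_entry callback for walk_page_vma(): split any transparent huge
 * page mapping the range, since the subpage protection map is applied
 * at the base page level.
 */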
static int subpage_walk_pmd_entry(pmd_t *pmd, unsigned long addr,
				  unsigned long end, struct mm_walk *walk)
{
	struct vm_area_struct *vma = walk->vma;
	split_huge_pmd(vma, pmd, addr);
	return 0;
}

static void subpage_mark_vma_nohuge(struct mm_struct *mm, unsigned long addr,
				    unsigned long len)
{
	struct vm_area_struct *vma;
	struct mm_walk subpage_proto_walk = {
		.mm = mm,
		.pmd_entry = subpage_walk_pmd_entry,
	};

	/*
	 * We don't try too hard; we just mark all the VMAs in that
	 * range VM_NOHUGEPAGE and split them.
	 */
	vma = find_vma(mm, addr);
	/*
	 * If the range lies entirely in an unmapped gap, there is
	 * nothing to do.
	 */
	if (vma && ((addr + len) <= vma->vm_start))
		return;

	while (vma) {
		if (vma->vm_start >= (addr + len))
			break;
		vma->vm_flags |= VM_NOHUGEPAGE;
		walk_page_vma(vma, &subpage_proto_walk);
		vma = vma->vm_next;
	}
}
#else
static void subpage_mark_vma_nohuge(struct mm_struct *mm, unsigned long addr,
				    unsigned long len)
{
	return;
}
#endif

/*
 * Copy in a subpage protection map for an address range.
 * The map has 2 bits per 4k subpage, so 32 bits per 64k page.
 * Each 2-bit field is 0 to allow any access, 1 to prevent writes,
 * 2 or 3 to prevent all accesses.
 * Note that the normal page protections also apply; the subpage
 * protection mechanism is an additional constraint, so putting 0
 * in a 2-bit field won't allow writes to a page that is otherwise
 * write-protected.
 */
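/*
 * Illustrative userspace sketch (not part of the kernel; the values
 * are examples only and assume a 64k base page size, so one u32 of
 * map data covers one page).  To make every 4k subpage of the 64k
 * page at 'addr' read-only, set all sixteen 2-bit fields to 1:
 *
 *	u32 map = 0x55555555;
 *	syscall(__NR_subpage_prot, addr, 0x10000UL, &map);
 *
 * Passing a NULL map instead clears any subpage protection on the
 * range (see subpage_prot_clear() above).
 */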
SYSCALL_DEFINE3(subpage_prot, unsigned long, addr,
		unsigned long, len, u32 __user *, map)
{
	struct mm_struct *mm = current->mm;
	struct subpage_prot_table *spt;
	u32 **spm, *spp;
	unsigned long i;
	size_t nw;
	unsigned long next, limit;
	int err;

	if (radix_enabled())
		return -ENOENT;

	/* Check parameters */
	if ((addr & ~PAGE_MASK) || (len & ~PAGE_MASK) ||
	    addr >= mm->task_size || len >= mm->task_size ||
	    addr + len > mm->task_size)
		return -EINVAL;

	if (is_hugepage_only_range(mm, addr, len))
		return -EINVAL;

	if (!map) {
		/* Clear out the protection map for the address range */
		subpage_prot_clear(addr, len);
		return 0;
	}

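	/* One u32 of map data is consumed for each page in the range. */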
	if (!access_ok(map, (len >> PAGE_SHIFT) * sizeof(u32)))
		return -EFAULT;

	down_write(&mm->mmap_sem);

	spt = mm_ctx_subpage_prot(&mm->context);
	if (!spt) {
		/*
		 * Allocate subpage prot table if not already done.
		 * Do this with mmap_sem held
		 */
		spt = kzalloc(sizeof(struct subpage_prot_table), GFP_KERNEL);
		if (!spt) {
			err = -ENOMEM;
			goto out;
		}
		mm->context.hash_context->spt = spt;
	}

	subpage_mark_vma_nohuge(mm, addr, len);
	for (limit = addr + len; addr < limit; addr = next) {
		next = pmd_addr_end(addr, limit);
		err = -ENOMEM;
		if (addr < 0x100000000UL) {
			spm = spt->low_prot;
		} else {
			spm = spt->protptrs[addr >> SBP_L3_SHIFT];
			if (!spm) {
				spm = (u32 **)get_zeroed_page(GFP_KERNEL);
				if (!spm)
					goto out;
				spt->protptrs[addr >> SBP_L3_SHIFT] = spm;
			}
		}
		spm += (addr >> SBP_L2_SHIFT) & (SBP_L2_COUNT - 1);
		spp = *spm;
		if (!spp) {
			spp = (u32 *)get_zeroed_page(GFP_KERNEL);
			if (!spp)
				goto out;
			*spm = spp;
		}
		spp += (addr >> PAGE_SHIFT) & (SBP_L1_COUNT - 1);

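		/*
		 * Make sure this segment is mapped with 4k hash pages;
		 * the hash fault path only applies the subpage
		 * protection bits on the 4k paths.
		 */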
		local_irq_disable();
		demote_segment_4k(mm, addr);
		local_irq_enable();

		i = (addr >> PAGE_SHIFT) & (PTRS_PER_PTE - 1);
		nw = PTRS_PER_PTE - i;
		if (addr + (nw << PAGE_SHIFT) > next)
			nw = (next - addr) >> PAGE_SHIFT;

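		/*
		 * The copy below may fault and the fault handler needs
		 * mmap_sem, so drop the lock across the user access.
		 */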
		up_write(&mm->mmap_sem);
		if (__copy_from_user(spp, map, nw * sizeof(u32)))
			return -EFAULT;
		map += nw;
		down_write(&mm->mmap_sem);

		/* now flush any existing HPTEs for the range */
		hpte_flush_range(mm, addr, nw);
	}
	if (limit > spt->maxaddr)
		spt->maxaddr = limit;
	err = 0;
 out:
	up_write(&mm->mmap_sem);
	return err;
}