xref: /openbmc/linux/arch/sparc/mm/hugetlbpage.c (revision 95e9fd10)
/*
 * SPARC64 Huge TLB page support.
 *
 * Copyright (C) 2002, 2003, 2006 David S. Miller (davem@davemloft.net)
 */

#include <linux/init.h>
#include <linux/fs.h>
#include <linux/mm.h>
#include <linux/hugetlb.h>
#include <linux/pagemap.h>
#include <linux/sysctl.h>

#include <asm/mman.h>
#include <asm/pgalloc.h>
#include <asm/tlb.h>
#include <asm/tlbflush.h>
#include <asm/cacheflush.h>
#include <asm/mmu_context.h>

/* Slightly simplified from the non-hugepage variant because by
 * definition we don't have to worry about any page coloring.
 */
#define VA_EXCLUDE_START (0x0000080000000000UL - (1UL << 32UL))
#define VA_EXCLUDE_END   (0xfffff80000000000UL + (1UL << 32UL))
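
/* The hardware VA hole on sparc64 lies between 0x0000080000000000
 * and 0xfffff80000000000; the macros above widen it by a 4GB guard
 * band on each side, and the search loops below skip the whole
 * excluded range.
 */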

static unsigned long hugetlb_get_unmapped_area_bottomup(struct file *filp,
							unsigned long addr,
							unsigned long len,
							unsigned long pgoff,
							unsigned long flags)
{
	struct mm_struct *mm = current->mm;
	struct vm_area_struct *vma;
	unsigned long task_size = TASK_SIZE;
	unsigned long start_addr;

	if (test_thread_flag(TIF_32BIT))
		task_size = STACK_TOP32;
	if (unlikely(len >= VA_EXCLUDE_START))
		return -ENOMEM;

	if (len > mm->cached_hole_size) {
		start_addr = addr = mm->free_area_cache;
	} else {
		start_addr = addr = TASK_UNMAPPED_BASE;
		mm->cached_hole_size = 0;
	}

	task_size -= len;

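	/* Walk the VMA list upward from the starting hint.  Each pass
	 * either finds a gap large enough for the mapping, hops over
	 * the excluded VA range, or advances past the colliding VMA.
	 * If the search began from a cached hint and ran off the end
	 * of the address space, retry once from TASK_UNMAPPED_BASE.
	 */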
full_search:
	addr = ALIGN(addr, HPAGE_SIZE);

	for (vma = find_vma(mm, addr); ; vma = vma->vm_next) {
		/* At this point:  (!vma || addr < vma->vm_end). */
		if (addr < VA_EXCLUDE_START &&
		    (addr + len) >= VA_EXCLUDE_START) {
			addr = VA_EXCLUDE_END;
			vma = find_vma(mm, VA_EXCLUDE_END);
		}
		if (unlikely(task_size < addr)) {
			if (start_addr != TASK_UNMAPPED_BASE) {
				start_addr = addr = TASK_UNMAPPED_BASE;
				mm->cached_hole_size = 0;
				goto full_search;
			}
			return -ENOMEM;
		}
		if (likely(!vma || addr + len <= vma->vm_start)) {
			/*
			 * Remember the place where we stopped the search:
			 */
			mm->free_area_cache = addr + len;
			return addr;
		}
		if (addr + mm->cached_hole_size < vma->vm_start)
			mm->cached_hole_size = vma->vm_start - addr;

		addr = ALIGN(vma->vm_end, HPAGE_SIZE);
	}
}

static unsigned long
hugetlb_get_unmapped_area_topdown(struct file *filp, const unsigned long addr0,
				  const unsigned long len,
				  const unsigned long pgoff,
				  const unsigned long flags)
{
	struct vm_area_struct *vma;
	struct mm_struct *mm = current->mm;
	unsigned long addr = addr0;

	/* This should only ever run for 32-bit processes.  */
	BUG_ON(!test_thread_flag(TIF_32BIT));

	/* check if free_area_cache is useful for us */
	if (len <= mm->cached_hole_size) {
		mm->cached_hole_size = 0;
		mm->free_area_cache = mm->mmap_base;
	}

	/* The caller has already vetted any address hint, so begin at
	 * the cached free-area hint, rounded down to a huge page
	 * boundary.
	 */
	addr = mm->free_area_cache & HPAGE_MASK;

	/* make sure it can fit in the remaining address space */
	if (likely(addr > len)) {
		vma = find_vma(mm, addr - len);
		if (!vma || addr <= vma->vm_start) {
			/* remember the address as a hint for next time */
			return (mm->free_area_cache = addr - len);
		}
	}

	if (unlikely(mm->mmap_base < len))
		goto bottomup;

	addr = (mm->mmap_base - len) & HPAGE_MASK;

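	/* Walk downward from mmap_base, testing the gap below each
	 * VMA we collide with.  The loop ends when a gap fits, or
	 * when vma->vm_start <= len, i.e. the next candidate below
	 * would wrap past address zero.
	 */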
	do {
		/*
		 * Lookup failure means no vma is above this address,
		 * else if new region fits below vma->vm_start,
		 * return with success:
		 */
		vma = find_vma(mm, addr);
		if (likely(!vma || addr + len <= vma->vm_start)) {
			/* remember the address as a hint for next time */
			return (mm->free_area_cache = addr);
		}

		/* remember the largest hole we saw so far */
		if (addr + mm->cached_hole_size < vma->vm_start)
			mm->cached_hole_size = vma->vm_start - addr;

		/* try just below the current vma->vm_start */
		addr = (vma->vm_start - len) & HPAGE_MASK;
	} while (likely(len < vma->vm_start));

bottomup:
	/*
	 * A failed mmap() very likely causes application failure,
	 * so fall back to the bottom-up function here. This scenario
	 * can happen with large stack limits and large mmap()
	 * allocations.
	 */
	mm->cached_hole_size = ~0UL;
	mm->free_area_cache = TASK_UNMAPPED_BASE;
	addr = arch_get_unmapped_area(filp, addr0, len, pgoff, flags);
	/*
	 * Restore the topdown base:
	 */
	mm->free_area_cache = mm->mmap_base;
	mm->cached_hole_size = ~0UL;

	return addr;
}

unsigned long
hugetlb_get_unmapped_area(struct file *file, unsigned long addr,
		unsigned long len, unsigned long pgoff, unsigned long flags)
{
	struct mm_struct *mm = current->mm;
	struct vm_area_struct *vma;
	unsigned long task_size = TASK_SIZE;

	if (test_thread_flag(TIF_32BIT))
		task_size = STACK_TOP32;

	if (len & ~HPAGE_MASK)
		return -EINVAL;
	if (len > task_size)
		return -ENOMEM;

	if (flags & MAP_FIXED) {
		if (prepare_hugepage_range(file, addr, len))
			return -EINVAL;
		return addr;
	}

	if (addr) {
		addr = ALIGN(addr, HPAGE_SIZE);
		vma = find_vma(mm, addr);
		if (task_size - len >= addr &&
		    (!vma || addr + len <= vma->vm_start))
			return addr;
	}
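	/* No usable hint: do a full search with the same policy as
	 * the mm's normal mmap layout.  The legacy layout searches
	 * bottom-up; otherwise we search top-down.
	 */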
	if (mm->get_unmapped_area == arch_get_unmapped_area)
		return hugetlb_get_unmapped_area_bottomup(file, addr, len,
				pgoff, flags);
	else
		return hugetlb_get_unmapped_area_topdown(file, addr, len,
				pgoff, flags);
}

pte_t *huge_pte_alloc(struct mm_struct *mm,
			unsigned long addr, unsigned long sz)
{
	pgd_t *pgd;
	pud_t *pud;
	pmd_t *pmd;
	pte_t *pte = NULL;

	/* We must align the address, because our caller will run
	 * set_huge_pte_at() on whatever we return, which writes out
	 * all of the sub-ptes for the hugepage range.  So we have
	 * to give it the first such sub-pte.
	 */
	addr &= HPAGE_MASK;

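	/* A huge page is represented here by a run of ordinary
	 * last-level PTEs (on the usual sparc64 configuration of this
	 * era, 4MB huge pages built from 8KB base pages, i.e. 512
	 * sub-PTEs), so a plain pgd/pud/pmd/pte walk with allocation
	 * is all that is needed.
	 */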
	pgd = pgd_offset(mm, addr);
	pud = pud_alloc(mm, pgd, addr);
	if (pud) {
		pmd = pmd_alloc(mm, pud, addr);
		if (pmd)
			pte = pte_alloc_map(mm, NULL, pmd, addr);
	}
	return pte;
}

pte_t *huge_pte_offset(struct mm_struct *mm, unsigned long addr)
{
	pgd_t *pgd;
	pud_t *pud;
	pmd_t *pmd;
	pte_t *pte = NULL;

	addr &= HPAGE_MASK;

	pgd = pgd_offset(mm, addr);
	if (!pgd_none(*pgd)) {
		pud = pud_offset(pgd, addr);
		if (!pud_none(*pud)) {
			pmd = pmd_offset(pud, addr);
			if (!pmd_none(*pmd))
				pte = pte_offset_map(pmd, addr);
		}
	}
	return pte;
}

int huge_pmd_unshare(struct mm_struct *mm, unsigned long *addr, pte_t *ptep)
{
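	/* Huge-page PMD sharing is not implemented on sparc64, so
	 * report that nothing was unshared.
	 */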
	return 0;
}

void set_huge_pte_at(struct mm_struct *mm, unsigned long addr,
		     pte_t *ptep, pte_t entry)
{
	int i;

	if (!pte_present(*ptep) && pte_present(entry))
		mm->context.huge_pte_count++;

	addr &= HPAGE_MASK;
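	/* Write one PTE per base page of the huge page.  Each step
	 * advances the target virtual address, and the physical
	 * address encoded in the PTE value, by PAGE_SIZE, so the
	 * sub-PTEs map consecutive base pages.
	 */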
	for (i = 0; i < (1 << HUGETLB_PAGE_ORDER); i++) {
		set_pte_at(mm, addr, ptep, entry);
		ptep++;
		addr += PAGE_SIZE;
		pte_val(entry) += PAGE_SIZE;
	}
}

pte_t huge_ptep_get_and_clear(struct mm_struct *mm, unsigned long addr,
			      pte_t *ptep)
{
	pte_t entry;
	int i;

	entry = *ptep;
	if (pte_present(entry))
		mm->context.huge_pte_count--;

	addr &= HPAGE_MASK;

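	/* Tear down every sub-PTE.  The value handed back to the
	 * caller is the first one, which carries the mapping's
	 * protection bits and base physical address.
	 */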
	for (i = 0; i < (1 << HUGETLB_PAGE_ORDER); i++) {
		pte_clear(mm, addr, ptep);
		addr += PAGE_SIZE;
		ptep++;
	}

	return entry;
}

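/* Huge pages are mapped with real leaf-level PTEs on sparc64, so the
 * generic follow_page() path handles them.  The stubs below tell the
 * core VM that no huge mappings exist at the PMD or PUD level.
 */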
struct page *follow_huge_addr(struct mm_struct *mm,
			      unsigned long address, int write)
{
	return ERR_PTR(-EINVAL);
}

int pmd_huge(pmd_t pmd)
{
	return 0;
}

int pud_huge(pud_t pud)
{
	return 0;
}

struct page *follow_huge_pmd(struct mm_struct *mm, unsigned long address,
			     pmd_t *pmd, int write)
{
	return NULL;
}

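/* Cross-call handler: a CPU currently running this address space must
 * reload its secondary context register so that it picks up the new
 * page-size fields.
 */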
static void context_reload(void *__data)
{
	struct mm_struct *mm = __data;

	if (mm == current->mm)
		load_secondary_context(mm);
}

void hugetlb_prefault_arch_hook(struct mm_struct *mm)
{
	struct tsb_config *tp = &mm->context.tsb_block[MM_TSB_HUGE];

	if (likely(tp->tsb != NULL))
		return;

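	/* First huge-page mapping in this address space: allocate the
	 * huge-page TSB, switch this CPU onto it, and make it visible
	 * to the other CPUs.
	 */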
	tsb_grow(mm, MM_TSB_HUGE, 0);
	tsb_context_switch(mm);
	smp_tsb_sync(mm);

	/* On UltraSPARC-III+ and later, configure the second half of
	 * the Data-TLB for huge pages.
	 */
	if (tlb_type == cheetah_plus) {
		unsigned long ctx;

		spin_lock(&ctx_alloc_lock);
		ctx = mm->context.sparc64_ctx_val;
		ctx &= ~CTX_PGSZ_MASK;
		ctx |= CTX_PGSZ_BASE << CTX_PGSZ0_SHIFT;
		ctx |= CTX_PGSZ_HUGE << CTX_PGSZ1_SHIFT;

		if (ctx != mm->context.sparc64_ctx_val) {
			/* When changing the page size fields, we
			 * must perform a context flush so that no
			 * stale entries match.  This flush must
			 * occur with the original context register
			 * settings.
			 */
			do_flush_tlb_mm(mm);

			/* Reload the context register of all processors
			 * also executing in this address space.
			 */
			mm->context.sparc64_ctx_val = ctx;
			on_each_cpu(context_reload, mm, 0);
		}
		spin_unlock(&ctx_alloc_lock);
	}
}