1b2441318SGreg Kroah-Hartman // SPDX-License-Identifier: GPL-2.0
227137e52SSam Ravnborg /*
327137e52SSam Ravnborg * SPARC64 Huge TLB page support.
427137e52SSam Ravnborg *
527137e52SSam Ravnborg * Copyright (C) 2002, 2003, 2006 David S. Miller (davem@davemloft.net)
627137e52SSam Ravnborg */
727137e52SSam Ravnborg
827137e52SSam Ravnborg #include <linux/fs.h>
927137e52SSam Ravnborg #include <linux/mm.h>
1001042607SIngo Molnar #include <linux/sched/mm.h>
1127137e52SSam Ravnborg #include <linux/hugetlb.h>
1227137e52SSam Ravnborg #include <linux/pagemap.h>
1327137e52SSam Ravnborg #include <linux/sysctl.h>
1427137e52SSam Ravnborg
1527137e52SSam Ravnborg #include <asm/mman.h>
1627137e52SSam Ravnborg #include <asm/pgalloc.h>
1727137e52SSam Ravnborg #include <asm/tlb.h>
1827137e52SSam Ravnborg #include <asm/tlbflush.h>
1927137e52SSam Ravnborg #include <asm/cacheflush.h>
2027137e52SSam Ravnborg #include <asm/mmu_context.h>
2127137e52SSam Ravnborg
2227137e52SSam Ravnborg /* Slightly simplified from the non-hugepage variant because by
2327137e52SSam Ravnborg * definition we don't have to worry about any page coloring stuff
2427137e52SSam Ravnborg */
2527137e52SSam Ravnborg
hugetlb_get_unmapped_area_bottomup(struct file * filp,unsigned long addr,unsigned long len,unsigned long pgoff,unsigned long flags)2627137e52SSam Ravnborg static unsigned long hugetlb_get_unmapped_area_bottomup(struct file *filp,
2727137e52SSam Ravnborg unsigned long addr,
2827137e52SSam Ravnborg unsigned long len,
2927137e52SSam Ravnborg unsigned long pgoff,
3027137e52SSam Ravnborg unsigned long flags)
3127137e52SSam Ravnborg {
32c7d9f77dSNitin Gupta struct hstate *h = hstate_file(filp);
3327137e52SSam Ravnborg unsigned long task_size = TASK_SIZE;
342aea28b9SMichel Lespinasse struct vm_unmapped_area_info info;
3527137e52SSam Ravnborg
3627137e52SSam Ravnborg if (test_thread_flag(TIF_32BIT))
3727137e52SSam Ravnborg task_size = STACK_TOP32;
3827137e52SSam Ravnborg
392aea28b9SMichel Lespinasse info.flags = 0;
402aea28b9SMichel Lespinasse info.length = len;
412aea28b9SMichel Lespinasse info.low_limit = TASK_UNMAPPED_BASE;
422aea28b9SMichel Lespinasse info.high_limit = min(task_size, VA_EXCLUDE_START);
43c7d9f77dSNitin Gupta info.align_mask = PAGE_MASK & ~huge_page_mask(h);
442aea28b9SMichel Lespinasse info.align_offset = 0;
452aea28b9SMichel Lespinasse addr = vm_unmapped_area(&info);
462aea28b9SMichel Lespinasse
472aea28b9SMichel Lespinasse if ((addr & ~PAGE_MASK) && task_size > VA_EXCLUDE_END) {
482aea28b9SMichel Lespinasse VM_BUG_ON(addr != -ENOMEM);
492aea28b9SMichel Lespinasse info.low_limit = VA_EXCLUDE_END;
502aea28b9SMichel Lespinasse info.high_limit = task_size;
512aea28b9SMichel Lespinasse addr = vm_unmapped_area(&info);
5227137e52SSam Ravnborg }
5327137e52SSam Ravnborg
5427137e52SSam Ravnborg return addr;
5527137e52SSam Ravnborg }
5627137e52SSam Ravnborg
5727137e52SSam Ravnborg static unsigned long
hugetlb_get_unmapped_area_topdown(struct file * filp,const unsigned long addr0,const unsigned long len,const unsigned long pgoff,const unsigned long flags)5827137e52SSam Ravnborg hugetlb_get_unmapped_area_topdown(struct file *filp, const unsigned long addr0,
5927137e52SSam Ravnborg const unsigned long len,
6027137e52SSam Ravnborg const unsigned long pgoff,
6127137e52SSam Ravnborg const unsigned long flags)
6227137e52SSam Ravnborg {
63c7d9f77dSNitin Gupta struct hstate *h = hstate_file(filp);
6427137e52SSam Ravnborg struct mm_struct *mm = current->mm;
6527137e52SSam Ravnborg unsigned long addr = addr0;
662aea28b9SMichel Lespinasse struct vm_unmapped_area_info info;
6727137e52SSam Ravnborg
6827137e52SSam Ravnborg /* This should only ever run for 32-bit processes. */
6927137e52SSam Ravnborg BUG_ON(!test_thread_flag(TIF_32BIT));
7027137e52SSam Ravnborg
712aea28b9SMichel Lespinasse info.flags = VM_UNMAPPED_AREA_TOPDOWN;
722aea28b9SMichel Lespinasse info.length = len;
732aea28b9SMichel Lespinasse info.low_limit = PAGE_SIZE;
742aea28b9SMichel Lespinasse info.high_limit = mm->mmap_base;
75c7d9f77dSNitin Gupta info.align_mask = PAGE_MASK & ~huge_page_mask(h);
762aea28b9SMichel Lespinasse info.align_offset = 0;
772aea28b9SMichel Lespinasse addr = vm_unmapped_area(&info);
7827137e52SSam Ravnborg
7927137e52SSam Ravnborg /*
8027137e52SSam Ravnborg * A failed mmap() very likely causes application failure,
8127137e52SSam Ravnborg * so fall back to the bottom-up function here. This scenario
8227137e52SSam Ravnborg * can happen with large stack limits and large mmap()
8327137e52SSam Ravnborg * allocations.
8427137e52SSam Ravnborg */
852aea28b9SMichel Lespinasse if (addr & ~PAGE_MASK) {
862aea28b9SMichel Lespinasse VM_BUG_ON(addr != -ENOMEM);
872aea28b9SMichel Lespinasse info.flags = 0;
882aea28b9SMichel Lespinasse info.low_limit = TASK_UNMAPPED_BASE;
892aea28b9SMichel Lespinasse info.high_limit = STACK_TOP32;
902aea28b9SMichel Lespinasse addr = vm_unmapped_area(&info);
912aea28b9SMichel Lespinasse }
9227137e52SSam Ravnborg
9327137e52SSam Ravnborg return addr;
9427137e52SSam Ravnborg }
9527137e52SSam Ravnborg
/* Top-level arch hook for picking an unmapped area for a hugetlb
 * mapping.  Validates length/alignment, honors MAP_FIXED and an
 * address hint, then delegates to the bottom-up or top-down search
 * depending on the mm's layout policy.
 */
unsigned long
hugetlb_get_unmapped_area(struct file *file, unsigned long addr,
		unsigned long len, unsigned long pgoff, unsigned long flags)
{
	struct hstate *h = hstate_file(file);
	struct mm_struct *mm = current->mm;
	struct vm_area_struct *vma;
	unsigned long task_size = TASK_SIZE;

	if (test_thread_flag(TIF_32BIT))
		task_size = STACK_TOP32;

	/* Length must be a multiple of the hugepage size. */
	if (len & ~huge_page_mask(h))
		return -EINVAL;
	if (len > task_size)
		return -ENOMEM;

	if (flags & MAP_FIXED) {
		if (prepare_hugepage_range(file, addr, len))
			return -EINVAL;
		return addr;
	}

	/* Honor the hint if the aligned range is free and in bounds. */
	if (addr) {
		addr = ALIGN(addr, huge_page_size(h));
		vma = find_vma(mm, addr);
		if (task_size - len >= addr &&
		    (!vma || addr + len <= vm_start_gap(vma)))
			return addr;
	}
	if (mm->get_unmapped_area == arch_get_unmapped_area)
		return hugetlb_get_unmapped_area_bottomup(file, addr, len,
				pgoff, flags);
	else
		return hugetlb_get_unmapped_area_topdown(file, addr, len,
				pgoff, flags);
}
13327137e52SSam Ravnborg
/* sun4u: no TTE size-field rewriting is done here; the entry is
 * returned unmodified.
 */
static pte_t sun4u_hugepage_shift_to_tte(pte_t entry, unsigned int shift)
{
	return entry;
}
138c7d9f77dSNitin Gupta
/* Encode the hugepage size implied by @shift into the sun4v TTE's
 * size field, and set the PMD/PUD huge bit where the mapping level
 * requires it.  Falls back to the 4MB encoding (with a one-time
 * warning) for unsupported shifts.
 */
static pte_t sun4v_hugepage_shift_to_tte(pte_t entry, unsigned int shift)
{
	unsigned long hugepage_size = _PAGE_SZ4MB_4V;

	/* Clear any existing page-size bits before encoding the new size. */
	pte_val(entry) = pte_val(entry) & ~_PAGE_SZALL_4V;

	switch (shift) {
	case HPAGE_16GB_SHIFT:
		hugepage_size = _PAGE_SZ16GB_4V;
		pte_val(entry) |= _PAGE_PUD_HUGE;
		break;
	case HPAGE_2GB_SHIFT:
		hugepage_size = _PAGE_SZ2GB_4V;
		pte_val(entry) |= _PAGE_PMD_HUGE;
		break;
	case HPAGE_256MB_SHIFT:
		hugepage_size = _PAGE_SZ256MB_4V;
		pte_val(entry) |= _PAGE_PMD_HUGE;
		break;
	case HPAGE_SHIFT:
		/* Default hugepage size: keep the 4MB size bits. */
		pte_val(entry) |= _PAGE_PMD_HUGE;
		break;
	case HPAGE_64K_SHIFT:
		hugepage_size = _PAGE_SZ64K_4V;
		break;
	default:
		WARN_ONCE(1, "unsupported hugepage shift=%u\n", shift);
	}

	pte_val(entry) = pte_val(entry) | hugepage_size;
	return entry;
}
171c7d9f77dSNitin Gupta
/* Dispatch the hugepage-size TTE encoding to the sun4v or sun4u
 * variant depending on the system's TLB type.
 */
static pte_t hugepage_shift_to_tte(pte_t entry, unsigned int shift)
{
	return (tlb_type == hypervisor) ?
		sun4v_hugepage_shift_to_tte(entry, shift) :
		sun4u_hugepage_shift_to_tte(entry, shift);
}
179c7d9f77dSNitin Gupta
/* Build the final huge PTE for a mapping: mark the entry huge,
 * encode the page size for this cpu type, and (on SPARC64) set or
 * clear the TTE.mcd bit according to the vma's ADI flag.
 */
pte_t arch_make_huge_pte(pte_t entry, unsigned int shift, vm_flags_t flags)
{
	pte_t pte;

	entry = pte_mkhuge(entry);
	pte = hugepage_shift_to_tte(entry, shift);

#ifdef CONFIG_SPARC64
	/* If this vma has ADI enabled on it, turn on TTE.mcd */
	if (flags & VM_SPARC_ADI)
		return pte_mkmcd(pte);
	else
		return pte_mknotmcd(pte);
#else
	return pte;
#endif
}
198c7d9f77dSNitin Gupta
/* Decode a sun4v TTE's size field back into a page shift.
 * Returns PAGE_SHIFT when the bits do not name a hugepage size.
 */
static unsigned int sun4v_huge_tte_to_shift(pte_t entry)
{
	switch (pte_val(entry) & _PAGE_SZALL_4V) {
	case _PAGE_SZ16GB_4V:
		return HPAGE_16GB_SHIFT;
	case _PAGE_SZ2GB_4V:
		return HPAGE_2GB_SHIFT;
	case _PAGE_SZ256MB_4V:
		return HPAGE_256MB_SHIFT;
	case _PAGE_SZ4MB_4V:
		return REAL_HPAGE_SHIFT;
	case _PAGE_SZ64K_4V:
		return HPAGE_64K_SHIFT;
	default:
		return PAGE_SHIFT;
	}
}
226c7d9f77dSNitin Gupta
/* Decode a sun4u TTE's size field back into a page shift.
 * Returns PAGE_SHIFT when the bits do not name a hugepage size.
 */
static unsigned int sun4u_huge_tte_to_shift(pte_t entry)
{
	switch (pte_val(entry) & _PAGE_SZALL_4U) {
	case _PAGE_SZ256MB_4U:
		return HPAGE_256MB_SHIFT;
	case _PAGE_SZ4MB_4U:
		return REAL_HPAGE_SHIFT;
	case _PAGE_SZ64K_4U:
		return HPAGE_64K_SHIFT;
	default:
		return PAGE_SHIFT;
	}
}
248c7d9f77dSNitin Gupta
/* Decode a TTE's size field via the variant matching this TLB type. */
static unsigned long tte_to_shift(pte_t entry)
{
	return (tlb_type == hypervisor) ? sun4v_huge_tte_to_shift(entry)
					: sun4u_huge_tte_to_shift(entry);
}
256e6e4f42eSPeter Zijlstra
/* Like tte_to_shift(), but warn once if the TTE decodes to the base
 * PAGE_SHIFT, i.e. does not encode a valid hugepage size.
 */
static unsigned int huge_tte_to_shift(pte_t entry)
{
	unsigned long shift = tte_to_shift(entry);

	if (shift == PAGE_SHIFT)
		/* Fixed misspelled function name ("tto_to_shift") in the
		 * diagnostic so the warning points at the right place.
		 */
		WARN_ONCE(1, "huge_tte_to_shift: invalid hugepage tte=0x%lx\n",
			  pte_val(entry));

	return shift;
}
267c7d9f77dSNitin Gupta
/* Convert a huge TTE into the mapping size in bytes.  A REAL_HPAGE
 * sized TTE is reported as the full HPAGE size it is half of.
 */
static unsigned long huge_tte_to_size(pte_t pte)
{
	unsigned long size = 1UL << huge_tte_to_shift(pte);

	return (size == REAL_HPAGE_SIZE) ? HPAGE_SIZE : size;
}
276c7d9f77dSNitin Gupta
/* Leaf-size helpers: decode the mapping size directly from the TTE
 * stored at each page-table level.
 */
unsigned long pud_leaf_size(pud_t pud) { return 1UL << tte_to_shift(*(pte_t *)&pud); }
unsigned long pmd_leaf_size(pmd_t pmd) { return 1UL << tte_to_shift(*(pte_t *)&pmd); }
unsigned long pte_leaf_size(pte_t pte) { return 1UL << tte_to_shift(pte); }
280e6e4f42eSPeter Zijlstra
/* Allocate (or find) the page-table slot that will hold a huge
 * mapping of size @sz at @addr.  Returns the slot cast to pte_t *:
 * the PUD for sizes >= PUD_SIZE, the PMD for sizes >= PMD_SIZE,
 * otherwise a real PTE slot.  Returns NULL on allocation failure.
 */
pte_t *huge_pte_alloc(struct mm_struct *mm, struct vm_area_struct *vma,
			unsigned long addr, unsigned long sz)
{
	pgd_t *pgd;
	p4d_t *p4d;
	pud_t *pud;
	pmd_t *pmd;

	pgd = pgd_offset(mm, addr);
	p4d = p4d_offset(pgd, addr);
	pud = pud_alloc(mm, p4d, addr);
	if (!pud)
		return NULL;
	if (sz >= PUD_SIZE)
		return (pte_t *)pud;
	pmd = pmd_alloc(mm, pud, addr);
	if (!pmd)
		return NULL;
	if (sz >= PMD_SIZE)
		return (pte_t *)pmd;
	return pte_alloc_huge(mm, pmd, addr);
}
30327137e52SSam Ravnborg
/* Look up an existing huge mapping at @addr.  Walks down only as far
 * as the level actually holding the huge TTE (PUD or PMD), returning
 * that slot cast to pte_t *; otherwise descends to the PTE level.
 * Returns NULL if nothing is mapped at any level.
 */
pte_t *huge_pte_offset(struct mm_struct *mm,
		       unsigned long addr, unsigned long sz)
{
	pgd_t *pgd;
	p4d_t *p4d;
	pud_t *pud;
	pmd_t *pmd;

	pgd = pgd_offset(mm, addr);
	if (pgd_none(*pgd))
		return NULL;
	p4d = p4d_offset(pgd, addr);
	if (p4d_none(*p4d))
		return NULL;
	pud = pud_offset(p4d, addr);
	if (pud_none(*pud))
		return NULL;
	if (is_hugetlb_pud(*pud))
		return (pte_t *)pud;
	pmd = pmd_offset(pud, addr);
	if (pmd_none(*pmd))
		return NULL;
	if (is_hugetlb_pmd(*pmd))
		return (pte_t *)pmd;
	return pte_offset_huge(pmd, addr);
}
33027137e52SSam Ravnborg
/* Install a huge mapping at @addr: replicate @entry across every
 * slot the hugepage spans at its page-table level, update the mm's
 * hugepage PTE count, and queue TLB batch work for the replaced
 * translations.
 */
void __set_huge_pte_at(struct mm_struct *mm, unsigned long addr,
		     pte_t *ptep, pte_t entry)
{
	unsigned int nptes, orig_shift, shift;
	unsigned long i, size;
	pte_t orig;

	size = huge_tte_to_size(entry);

	/* Pick the page-table level the mapping lives at.  (The old
	 * code pre-assigned PAGE_SHIFT here as well, which was a dead
	 * store — the chain below always assigns shift.)
	 */
	if (size >= PUD_SIZE)
		shift = PUD_SHIFT;
	else if (size >= PMD_SIZE)
		shift = PMD_SHIFT;
	else
		shift = PAGE_SHIFT;

	nptes = size >> shift;

	if (!pte_present(*ptep) && pte_present(entry))
		mm->context.hugetlb_pte_count += nptes;

	/* Round down to the start of the hugepage. */
	addr &= ~(size - 1);
	orig = *ptep;
	orig_shift = pte_none(orig) ? PAGE_SHIFT : huge_tte_to_shift(orig);

	for (i = 0; i < nptes; i++)
		ptep[i] = __pte(pte_val(entry) + (i << shift));

	maybe_tlb_batch_add(mm, addr, ptep, orig, 0, orig_shift);
	/* An HPAGE_SIZE'ed page is composed of two REAL_HPAGE_SIZE'ed pages */
	if (size == HPAGE_SIZE)
		maybe_tlb_batch_add(mm, addr + REAL_HPAGE_SIZE, ptep, orig, 0,
				    orig_shift);
}
36627137e52SSam Ravnborg
/* Hugetlb core entry point.  The size @sz is implied by the TTE
 * encoding carried in @entry and is therefore unused here.
 */
void set_huge_pte_at(struct mm_struct *mm, unsigned long addr,
		     pte_t *ptep, pte_t entry, unsigned long sz)
{
	__set_huge_pte_at(mm, addr, ptep, entry);
}
372*935d4f0cSRyan Roberts
/* Tear down a huge mapping at @addr: zero every slot the hugepage
 * spans, update the mm's hugepage PTE count, queue TLB batch work,
 * and return the old (first-slot) entry.
 */
pte_t huge_ptep_get_and_clear(struct mm_struct *mm, unsigned long addr,
			      pte_t *ptep)
{
	unsigned int i, nptes, orig_shift, shift;
	unsigned long size;
	pte_t entry;

	entry = *ptep;
	size = huge_tte_to_size(entry);

	/* Pick the page-table level the mapping lives at.  (The old
	 * code pre-assigned PAGE_SHIFT here as well, which was a dead
	 * store — the chain below always assigns shift.)
	 */
	if (size >= PUD_SIZE)
		shift = PUD_SHIFT;
	else if (size >= PMD_SIZE)
		shift = PMD_SHIFT;
	else
		shift = PAGE_SHIFT;

	nptes = size >> shift;
	orig_shift = pte_none(entry) ? PAGE_SHIFT : huge_tte_to_shift(entry);

	if (pte_present(entry))
		mm->context.hugetlb_pte_count -= nptes;

	/* Round down to the start of the hugepage and clear all slots. */
	addr &= ~(size - 1);
	for (i = 0; i < nptes; i++)
		ptep[i] = __pte(0UL);

	maybe_tlb_batch_add(mm, addr, ptep, entry, 0, orig_shift);
	/* An HPAGE_SIZE'ed page is composed of two REAL_HPAGE_SIZE'ed pages */
	if (size == HPAGE_SIZE)
		maybe_tlb_batch_add(mm, addr + REAL_HPAGE_SIZE, ptep, entry, 0,
				    orig_shift);

	return entry;
}
40927137e52SSam Ravnborg
/* A PMD is huge when it is non-empty and is not a plain (non-huge)
 * valid entry, i.e. VALID|PMD_HUGE together differ from bare VALID.
 */
int pmd_huge(pmd_t pmd)
{
	unsigned long leaf_bits = pmd_val(pmd) & (_PAGE_VALID | _PAGE_PMD_HUGE);

	return !pmd_none(pmd) && leaf_bits != _PAGE_VALID;
}
41527137e52SSam Ravnborg
/* A PUD is huge when it is non-empty and is not a plain (non-huge)
 * valid entry, i.e. VALID|PUD_HUGE together differ from bare VALID.
 */
int pud_huge(pud_t pud)
{
	unsigned long leaf_bits = pud_val(pud) & (_PAGE_VALID | _PAGE_PUD_HUGE);

	return !pud_none(pud) && leaf_bits != _PAGE_VALID;
}
4217bc3777cSNitin Gupta
/* Free the PTE page hanging off @pmd and clear the PMD slot,
 * accounting for the removed page table.
 */
static void hugetlb_free_pte_range(struct mmu_gather *tlb, pmd_t *pmd,
				   unsigned long addr)
{
	pgtable_t token = pmd_pgtable(*pmd);

	pmd_clear(pmd);
	pte_free_tlb(tlb, token, addr);
	mm_dec_nr_ptes(tlb->mm);
}
4317bc3777cSNitin Gupta
/* Tear down the PMD entries under @pud for [addr, end).  Huge PMDs
 * are simply cleared; normal PMDs have their PTE pages freed.  The
 * PMD page itself is freed only if the PUD span it covers lies
 * entirely within [floor, ceiling).
 */
static void hugetlb_free_pmd_range(struct mmu_gather *tlb, pud_t *pud,
				   unsigned long addr, unsigned long end,
				   unsigned long floor, unsigned long ceiling)
{
	pmd_t *pmd;
	unsigned long next;
	unsigned long start;

	start = addr;
	pmd = pmd_offset(pud, addr);
	do {
		next = pmd_addr_end(addr, end);
		if (pmd_none(*pmd))
			continue;
		if (is_hugetlb_pmd(*pmd))
			pmd_clear(pmd);
		else
			hugetlb_free_pte_range(tlb, pmd, addr);
	} while (pmd++, addr = next, addr != end);

	/* Only free the PMD page when [floor, ceiling) fully covers
	 * the PUD-sized region it belongs to.
	 */
	start &= PUD_MASK;
	if (start < floor)
		return;
	if (ceiling) {
		ceiling &= PUD_MASK;
		if (!ceiling)
			return;
	}
	if (end - 1 > ceiling - 1)
		return;

	pmd = pmd_offset(pud, start);
	pud_clear(pud);
	pmd_free_tlb(tlb, pmd, start);
	mm_dec_nr_pmds(tlb->mm);
}
4687bc3777cSNitin Gupta
/* Tear down the PUD entries under @p4d for [addr, end).  Huge PUDs
 * are simply cleared; normal PUDs recurse into the PMD level.  The
 * PUD page itself is freed only if the PGDIR span it covers lies
 * entirely within [floor, ceiling).
 */
static void hugetlb_free_pud_range(struct mmu_gather *tlb, p4d_t *p4d,
				   unsigned long addr, unsigned long end,
				   unsigned long floor, unsigned long ceiling)
{
	pud_t *pud;
	unsigned long next;
	unsigned long start;

	start = addr;
	pud = pud_offset(p4d, addr);
	do {
		next = pud_addr_end(addr, end);
		if (pud_none_or_clear_bad(pud))
			continue;
		if (is_hugetlb_pud(*pud))
			pud_clear(pud);
		else
			hugetlb_free_pmd_range(tlb, pud, addr, next, floor,
					       ceiling);
	} while (pud++, addr = next, addr != end);

	/* Only free the PUD page when [floor, ceiling) fully covers
	 * the PGDIR-sized region it belongs to.
	 */
	start &= PGDIR_MASK;
	if (start < floor)
		return;
	if (ceiling) {
		ceiling &= PGDIR_MASK;
		if (!ceiling)
			return;
	}
	if (end - 1 > ceiling - 1)
		return;

	pud = pud_offset(p4d, start);
	p4d_clear(p4d);
	pud_free_tlb(tlb, pud, start);
	mm_dec_nr_puds(tlb->mm);
}
5067bc3777cSNitin Gupta
/* Free all page-table pages backing a hugetlb region [addr, end),
 * clamped to [floor, ceiling).  The PMD_MASK rounding of addr/end
 * below appears to keep partially-covered edge tables alive —
 * NOTE(review): mirrors the floor/ceiling convention of the generic
 * free_pgd_range(); confirm against that implementation.
 */
void hugetlb_free_pgd_range(struct mmu_gather *tlb,
			    unsigned long addr, unsigned long end,
			    unsigned long floor, unsigned long ceiling)
{
	pgd_t *pgd;
	p4d_t *p4d;
	unsigned long next;

	addr &= PMD_MASK;
	if (addr < floor) {
		addr += PMD_SIZE;
		/* Guard against wrap-around to address zero. */
		if (!addr)
			return;
	}
	if (ceiling) {
		ceiling &= PMD_MASK;
		if (!ceiling)
			return;
	}
	if (end - 1 > ceiling - 1)
		end -= PMD_SIZE;
	if (addr > end - 1)
		return;

	pgd = pgd_offset(tlb->mm, addr);
	p4d = p4d_offset(pgd, addr);
	do {
		next = p4d_addr_end(addr, end);
		if (p4d_none_or_clear_bad(p4d))
			continue;
		hugetlb_free_pud_range(tlb, p4d, addr, next, floor, ceiling);
	} while (p4d++, addr = next, addr != end);
}
540