/*
 * SPARC64 Huge TLB page support.
 *
 * Copyright (C) 2002, 2003, 2006 David S. Miller (davem@davemloft.net)
 */

#include <linux/fs.h>
#include <linux/mm.h>
#include <linux/sched/mm.h>
#include <linux/hugetlb.h>
#include <linux/pagemap.h>
#include <linux/sysctl.h>

#include <asm/mman.h>
#include <asm/pgalloc.h>
#include <asm/pgtable.h>
#include <asm/tlb.h>
#include <asm/tlbflush.h>
#include <asm/cacheflush.h>
#include <asm/mmu_context.h>

/* Slightly simplified from the non-hugepage variant because by
 * definition we don't have to worry about any page coloring stuff
 */

static unsigned long hugetlb_get_unmapped_area_bottomup(struct file *filp,
							unsigned long addr,
							unsigned long len,
							unsigned long pgoff,
							unsigned long flags)
{
	struct hstate *h = hstate_file(filp);
	unsigned long task_size = TASK_SIZE;
	struct vm_unmapped_area_info info;

	if (test_thread_flag(TIF_32BIT))
		task_size = STACK_TOP32;

	info.flags = 0;
	info.length = len;
	info.low_limit = TASK_UNMAPPED_BASE;
	info.high_limit = min(task_size, VA_EXCLUDE_START);
	info.align_mask = PAGE_MASK & ~huge_page_mask(h);
	info.align_offset = 0;
	addr = vm_unmapped_area(&info);

	if ((addr & ~PAGE_MASK) && task_size > VA_EXCLUDE_END) {
		VM_BUG_ON(addr != -ENOMEM);
		info.low_limit = VA_EXCLUDE_END;
		info.high_limit = task_size;
		addr = vm_unmapped_area(&info);
	}

	return addr;
}
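/* The top-down variant below is only used for 32-bit tasks (see the
 * BUG_ON). Those live entirely beneath the sparc64 VA hole, so unlike
 * the bottom-up search above there is no VA_EXCLUDE_{START,END} split
 * to retry around.
 */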
static unsigned long
hugetlb_get_unmapped_area_topdown(struct file *filp, const unsigned long addr0,
				  const unsigned long len,
				  const unsigned long pgoff,
				  const unsigned long flags)
{
	struct hstate *h = hstate_file(filp);
	struct mm_struct *mm = current->mm;
	unsigned long addr = addr0;
	struct vm_unmapped_area_info info;

	/* This should only ever run for 32-bit processes.  */
	BUG_ON(!test_thread_flag(TIF_32BIT));

	info.flags = VM_UNMAPPED_AREA_TOPDOWN;
	info.length = len;
	info.low_limit = PAGE_SIZE;
	info.high_limit = mm->mmap_base;
	info.align_mask = PAGE_MASK & ~huge_page_mask(h);
	info.align_offset = 0;
	addr = vm_unmapped_area(&info);

	/*
	 * A failed mmap() very likely causes application failure,
	 * so fall back to the bottom-up function here. This scenario
	 * can happen with large stack limits and large mmap()
	 * allocations.
	 */
	if (addr & ~PAGE_MASK) {
		VM_BUG_ON(addr != -ENOMEM);
		info.flags = 0;
		info.low_limit = TASK_UNMAPPED_BASE;
		info.high_limit = STACK_TOP32;
		addr = vm_unmapped_area(&info);
	}

	return addr;
}

unsigned long
hugetlb_get_unmapped_area(struct file *file, unsigned long addr,
		unsigned long len, unsigned long pgoff, unsigned long flags)
{
	struct hstate *h = hstate_file(file);
	struct mm_struct *mm = current->mm;
	struct vm_area_struct *vma;
	unsigned long task_size = TASK_SIZE;

	if (test_thread_flag(TIF_32BIT))
		task_size = STACK_TOP32;

	if (len & ~huge_page_mask(h))
		return -EINVAL;
	if (len > task_size)
		return -ENOMEM;

	if (flags & MAP_FIXED) {
		if (prepare_hugepage_range(file, addr, len))
			return -EINVAL;
		return addr;
	}

	if (addr) {
		addr = ALIGN(addr, huge_page_size(h));
		vma = find_vma(mm, addr);
		if (task_size - len >= addr &&
		    (!vma || addr + len <= vma->vm_start))
			return addr;
	}
	if (mm->get_unmapped_area == arch_get_unmapped_area)
		return hugetlb_get_unmapped_area_bottomup(file, addr, len,
				pgoff, flags);
	else
		return hugetlb_get_unmapped_area_topdown(file, addr, len,
				pgoff, flags);
}

static pte_t sun4u_hugepage_shift_to_tte(pte_t entry, unsigned int shift)
{
	return entry;
}
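/* Encode the hugepage size in the sun4v TTE's size field. Sizes that
 * map at the PMD level (the HPAGE_SHIFT default and larger) are also
 * tagged _PAGE_PMD_HUGE; 64K entries stay at the PTE level and carry
 * only the size bits.
 */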
static pte_t sun4v_hugepage_shift_to_tte(pte_t entry, unsigned int shift)
{
	unsigned long hugepage_size = _PAGE_SZ4MB_4V;

	pte_val(entry) = pte_val(entry) & ~_PAGE_SZALL_4V;

	switch (shift) {
	case HPAGE_2GB_SHIFT:
		hugepage_size = _PAGE_SZ2GB_4V;
		pte_val(entry) |= _PAGE_PMD_HUGE;
		break;
	case HPAGE_256MB_SHIFT:
		hugepage_size = _PAGE_SZ256MB_4V;
		pte_val(entry) |= _PAGE_PMD_HUGE;
		break;
	case HPAGE_SHIFT:
		pte_val(entry) |= _PAGE_PMD_HUGE;
		break;
	case HPAGE_64K_SHIFT:
		hugepage_size = _PAGE_SZ64K_4V;
		break;
	default:
		WARN_ONCE(1, "unsupported hugepage shift=%u\n", shift);
	}

	pte_val(entry) = pte_val(entry) | hugepage_size;
	return entry;
}

static pte_t hugepage_shift_to_tte(pte_t entry, unsigned int shift)
{
	if (tlb_type == hypervisor)
		return sun4v_hugepage_shift_to_tte(entry, shift);
	else
		return sun4u_hugepage_shift_to_tte(entry, shift);
}

pte_t arch_make_huge_pte(pte_t entry, struct vm_area_struct *vma,
			 struct page *page, int writeable)
{
	unsigned int shift = huge_page_shift(hstate_vma(vma));

	return hugepage_shift_to_tte(entry, shift);
}

static unsigned int sun4v_huge_tte_to_shift(pte_t entry)
{
	unsigned long tte_szbits = pte_val(entry) & _PAGE_SZALL_4V;
	unsigned int shift;

	switch (tte_szbits) {
	case _PAGE_SZ2GB_4V:
		shift = HPAGE_2GB_SHIFT;
		break;
	case _PAGE_SZ256MB_4V:
		shift = HPAGE_256MB_SHIFT;
		break;
	case _PAGE_SZ4MB_4V:
		shift = REAL_HPAGE_SHIFT;
		break;
	case _PAGE_SZ64K_4V:
		shift = HPAGE_64K_SHIFT;
		break;
	default:
		shift = PAGE_SHIFT;
		break;
	}
	return shift;
}
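/* The sun4u decode below mirrors the sun4v one above. In both, a 4MB
 * TTE decodes to REAL_HPAGE_SHIFT rather than HPAGE_SHIFT: an
 * HPAGE_SIZE page is backed by two REAL_HPAGE_SIZE TTEs, and
 * huge_tte_to_size() further down folds REAL_HPAGE_SIZE back into
 * HPAGE_SIZE.
 */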
static unsigned int sun4u_huge_tte_to_shift(pte_t entry)
{
	unsigned long tte_szbits = pte_val(entry) & _PAGE_SZALL_4U;
	unsigned int shift;

	switch (tte_szbits) {
	case _PAGE_SZ256MB_4U:
		shift = HPAGE_256MB_SHIFT;
		break;
	case _PAGE_SZ4MB_4U:
		shift = REAL_HPAGE_SHIFT;
		break;
	case _PAGE_SZ64K_4U:
		shift = HPAGE_64K_SHIFT;
		break;
	default:
		shift = PAGE_SHIFT;
		break;
	}
	return shift;
}

static unsigned int huge_tte_to_shift(pte_t entry)
{
	unsigned long shift;

	if (tlb_type == hypervisor)
		shift = sun4v_huge_tte_to_shift(entry);
	else
		shift = sun4u_huge_tte_to_shift(entry);

	if (shift == PAGE_SHIFT)
		WARN_ONCE(1, "huge_tte_to_shift: invalid hugepage tte=0x%lx\n",
			  pte_val(entry));

	return shift;
}

static unsigned long huge_tte_to_size(pte_t pte)
{
	unsigned long size = 1UL << huge_tte_to_shift(pte);

	if (size == REAL_HPAGE_SIZE)
		size = HPAGE_SIZE;
	return size;
}

pte_t *huge_pte_alloc(struct mm_struct *mm,
			unsigned long addr, unsigned long sz)
{
	pgd_t *pgd;
	pud_t *pud;
	pmd_t *pmd;
	pte_t *pte = NULL;

	pgd = pgd_offset(mm, addr);
	pud = pud_alloc(mm, pgd, addr);
	if (pud) {
		pmd = pmd_alloc(mm, pud, addr);
		if (!pmd)
			return NULL;

		if (sz >= PMD_SIZE)
			pte = (pte_t *)pmd;
		else
			pte = pte_alloc_map(mm, pmd, addr);
	}

	return pte;
}

pte_t *huge_pte_offset(struct mm_struct *mm, unsigned long addr)
{
	pgd_t *pgd;
	pud_t *pud;
	pmd_t *pmd;
	pte_t *pte = NULL;

	pgd = pgd_offset(mm, addr);
	if (!pgd_none(*pgd)) {
		pud = pud_offset(pgd, addr);
		if (!pud_none(*pud)) {
			pmd = pmd_offset(pud, addr);
			if (!pmd_none(*pmd)) {
				if (is_hugetlb_pmd(*pmd))
					pte = (pte_t *)pmd;
				else
					pte = pte_offset_map(pmd, addr);
			}
		}
	}

	return pte;
}
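/* A huge mapping occupies several consecutive page table slots:
 * HPAGE_SIZE and larger sizes span PMD entries, 64K spans PTE
 * entries. Each slot gets a copy of the TTE advanced by its offset
 * into the region, so a TLB miss on any address in the range
 * resolves correctly.
 */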
void set_huge_pte_at(struct mm_struct *mm, unsigned long addr,
		     pte_t *ptep, pte_t entry)
{
	unsigned int i, nptes, orig_shift, shift;
	unsigned long size;
	pte_t orig;

	size = huge_tte_to_size(entry);
	shift = size >= HPAGE_SIZE ? PMD_SHIFT : PAGE_SHIFT;
	nptes = size >> shift;

	if (!pte_present(*ptep) && pte_present(entry))
		mm->context.hugetlb_pte_count += nptes;

	addr &= ~(size - 1);
	orig = *ptep;
	orig_shift = pte_none(orig) ? PAGE_SHIFT : huge_tte_to_shift(orig);

	for (i = 0; i < nptes; i++)
		ptep[i] = __pte(pte_val(entry) + (i << shift));

	maybe_tlb_batch_add(mm, addr, ptep, orig, 0, orig_shift);
	/* An HPAGE_SIZE'ed page is composed of two REAL_HPAGE_SIZE'ed pages */
	if (size == HPAGE_SIZE)
		maybe_tlb_batch_add(mm, addr + REAL_HPAGE_SIZE, ptep, orig, 0,
				    orig_shift);
}
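/* Tear-down counterpart of set_huge_pte_at(): clear every slot the
 * huge TTE occupied and queue the TLB flushes, again treating an
 * HPAGE_SIZE mapping as its two REAL_HPAGE_SIZE halves.
 */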
pte_t huge_ptep_get_and_clear(struct mm_struct *mm, unsigned long addr,
			      pte_t *ptep)
{
	unsigned int i, nptes, hugepage_shift;
	unsigned long size;
	pte_t entry;

	entry = *ptep;
	size = huge_tte_to_size(entry);
	if (size >= HPAGE_SIZE)
		nptes = size >> PMD_SHIFT;
	else
		nptes = size >> PAGE_SHIFT;

	hugepage_shift = pte_none(entry) ? PAGE_SHIFT :
		huge_tte_to_shift(entry);

	if (pte_present(entry))
		mm->context.hugetlb_pte_count -= nptes;

	addr &= ~(size - 1);
	for (i = 0; i < nptes; i++)
		ptep[i] = __pte(0UL);

	maybe_tlb_batch_add(mm, addr, ptep, entry, 0, hugepage_shift);
	/* An HPAGE_SIZE'ed page is composed of two REAL_HPAGE_SIZE'ed pages */
	if (size == HPAGE_SIZE)
		maybe_tlb_batch_add(mm, addr + REAL_HPAGE_SIZE, ptep, entry, 0,
				    hugepage_shift);

	return entry;
}

int pmd_huge(pmd_t pmd)
{
	return !pmd_none(pmd) &&
		(pmd_val(pmd) & (_PAGE_VALID|_PAGE_PMD_HUGE)) != _PAGE_VALID;
}

int pud_huge(pud_t pud)
{
	return 0;
}

static void hugetlb_free_pte_range(struct mmu_gather *tlb, pmd_t *pmd,
				   unsigned long addr)
{
	pgtable_t token = pmd_pgtable(*pmd);

	pmd_clear(pmd);
	pte_free_tlb(tlb, token, addr);
	atomic_long_dec(&tlb->mm->nr_ptes);
}

static void hugetlb_free_pmd_range(struct mmu_gather *tlb, pud_t *pud,
				   unsigned long addr, unsigned long end,
				   unsigned long floor, unsigned long ceiling)
{
	pmd_t *pmd;
	unsigned long next;
	unsigned long start;

	start = addr;
	pmd = pmd_offset(pud, addr);
	do {
		next = pmd_addr_end(addr, end);
		if (pmd_none(*pmd))
			continue;
		if (is_hugetlb_pmd(*pmd))
			pmd_clear(pmd);
		else
			hugetlb_free_pte_range(tlb, pmd, addr);
	} while (pmd++, addr = next, addr != end);

	start &= PUD_MASK;
	if (start < floor)
		return;
	if (ceiling) {
		ceiling &= PUD_MASK;
		if (!ceiling)
			return;
	}
	if (end - 1 > ceiling - 1)
		return;

	pmd = pmd_offset(pud, start);
	pud_clear(pud);
	pmd_free_tlb(tlb, pmd, start);
	mm_dec_nr_pmds(tlb->mm);
}
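/* One level up, same shape: walk the PUDs, recurse into the PMD
 * walker, then free the PUD table itself only once the floor/ceiling
 * checks show no neighbouring mapping still needs it. The pattern
 * mirrors the generic free_pud_range() in mm/memory.c.
 */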
static void hugetlb_free_pud_range(struct mmu_gather *tlb, pgd_t *pgd,
				   unsigned long addr, unsigned long end,
				   unsigned long floor, unsigned long ceiling)
{
	pud_t *pud;
	unsigned long next;
	unsigned long start;

	start = addr;
	pud = pud_offset(pgd, addr);
	do {
		next = pud_addr_end(addr, end);
		if (pud_none_or_clear_bad(pud))
			continue;
		hugetlb_free_pmd_range(tlb, pud, addr, next, floor,
				       ceiling);
	} while (pud++, addr = next, addr != end);

	start &= PGDIR_MASK;
	if (start < floor)
		return;
	if (ceiling) {
		ceiling &= PGDIR_MASK;
		if (!ceiling)
			return;
	}
	if (end - 1 > ceiling - 1)
		return;

	pud = pud_offset(pgd, start);
	pgd_clear(pgd);
	pud_free_tlb(tlb, pud, start);
}

void hugetlb_free_pgd_range(struct mmu_gather *tlb,
			    unsigned long addr, unsigned long end,
			    unsigned long floor, unsigned long ceiling)
{
	pgd_t *pgd;
	unsigned long next;

	pgd = pgd_offset(tlb->mm, addr);
	do {
		next = pgd_addr_end(addr, end);
		if (pgd_none_or_clear_bad(pgd))
			continue;
		hugetlb_free_pud_range(tlb, pgd, addr, next, floor, ceiling);
	} while (pgd++, addr = next, addr != end);
}