/*
 * SPARC64 Huge TLB page support.
 *
 * Copyright (C) 2002, 2003, 2006 David S. Miller (davem@davemloft.net)
 */

#include <linux/fs.h>
#include <linux/mm.h>
#include <linux/hugetlb.h>
#include <linux/pagemap.h>
#include <linux/sysctl.h>

#include <asm/mman.h>
#include <asm/pgalloc.h>
#include <asm/pgtable.h>
#include <asm/tlb.h>
#include <asm/tlbflush.h>
#include <asm/cacheflush.h>
#include <asm/mmu_context.h>

/* Slightly simplified from the non-hugepage variant because by
 * definition we don't have to worry about any page coloring stuff.
 */

static unsigned long hugetlb_get_unmapped_area_bottomup(struct file *filp,
							unsigned long addr,
							unsigned long len,
							unsigned long pgoff,
							unsigned long flags)
{
	struct hstate *h = hstate_file(filp);
	unsigned long task_size = TASK_SIZE;
	struct vm_unmapped_area_info info;

	if (test_thread_flag(TIF_32BIT))
		task_size = STACK_TOP32;

	info.flags = 0;
	info.length = len;
	info.low_limit = TASK_UNMAPPED_BASE;
	info.high_limit = min(task_size, VA_EXCLUDE_START);
	info.align_mask = PAGE_MASK & ~huge_page_mask(h);
	info.align_offset = 0;
	addr = vm_unmapped_area(&info);

	if ((addr & ~PAGE_MASK) && task_size > VA_EXCLUDE_END) {
		VM_BUG_ON(addr != -ENOMEM);
		info.low_limit = VA_EXCLUDE_END;
		info.high_limit = task_size;
		addr = vm_unmapped_area(&info);
	}

	return addr;
}

static unsigned long
hugetlb_get_unmapped_area_topdown(struct file *filp, const unsigned long addr0,
				  const unsigned long len,
				  const unsigned long pgoff,
				  const unsigned long flags)
{
	struct hstate *h = hstate_file(filp);
	struct mm_struct *mm = current->mm;
	unsigned long addr = addr0;
	struct vm_unmapped_area_info info;

	/* This should only ever run for 32-bit processes.  */
	BUG_ON(!test_thread_flag(TIF_32BIT));

	info.flags = VM_UNMAPPED_AREA_TOPDOWN;
	info.length = len;
	info.low_limit = PAGE_SIZE;
	info.high_limit = mm->mmap_base;
	info.align_mask = PAGE_MASK & ~huge_page_mask(h);
	info.align_offset = 0;
	addr = vm_unmapped_area(&info);

	/*
	 * A failed mmap() very likely causes application failure,
	 * so fall back to the bottom-up function here.  This scenario
	 * can happen with large stack limits and large mmap()
	 * allocations.
	 */
	if (addr & ~PAGE_MASK) {
		VM_BUG_ON(addr != -ENOMEM);
		info.flags = 0;
		info.low_limit = TASK_UNMAPPED_BASE;
		info.high_limit = STACK_TOP32;
		addr = vm_unmapped_area(&info);
	}

	return addr;
}

unsigned long
hugetlb_get_unmapped_area(struct file *file, unsigned long addr,
			  unsigned long len, unsigned long pgoff,
			  unsigned long flags)
{
	struct hstate *h = hstate_file(file);
	struct mm_struct *mm = current->mm;
	struct vm_area_struct *vma;
	unsigned long task_size = TASK_SIZE;

	if (test_thread_flag(TIF_32BIT))
		task_size = STACK_TOP32;

	if (len & ~huge_page_mask(h))
		return -EINVAL;
	if (len > task_size)
		return -ENOMEM;

	if (flags & MAP_FIXED) {
		if (prepare_hugepage_range(file, addr, len))
			return -EINVAL;
		return addr;
	}

	if (addr) {
		addr = ALIGN(addr, huge_page_size(h));
		vma = find_vma(mm, addr);
		if (task_size - len >= addr &&
		    (!vma || addr + len <= vma->vm_start))
			return addr;
	}
	if (mm->get_unmapped_area == arch_get_unmapped_area)
		return hugetlb_get_unmapped_area_bottomup(file, addr, len,
							  pgoff, flags);
	else
		return hugetlb_get_unmapped_area_topdown(file, addr, len,
							 pgoff, flags);
}
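/* Encode the hugepage size into the TTE size bits.  The sun4u variant
 * supports only the default hugepage size, which is already encoded in
 * the incoming entry, and so returns it unchanged; the sun4v variant
 * rewrites the size field and sets _PAGE_PMD_HUGE for mappings that
 * are PMD-sized or larger.
 */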
static pte_t sun4u_hugepage_shift_to_tte(pte_t entry, unsigned int shift)
{
	return entry;
}

static pte_t sun4v_hugepage_shift_to_tte(pte_t entry, unsigned int shift)
{
	unsigned long hugepage_size = _PAGE_SZ4MB_4V;

	pte_val(entry) = pte_val(entry) & ~_PAGE_SZALL_4V;

	switch (shift) {
	case HPAGE_256MB_SHIFT:
		hugepage_size = _PAGE_SZ256MB_4V;
		pte_val(entry) |= _PAGE_PMD_HUGE;
		break;
	case HPAGE_SHIFT:
		pte_val(entry) |= _PAGE_PMD_HUGE;
		break;
	case HPAGE_64K_SHIFT:
		hugepage_size = _PAGE_SZ64K_4V;
		break;
	default:
		WARN_ONCE(1, "unsupported hugepage shift=%u\n", shift);
	}

	pte_val(entry) = pte_val(entry) | hugepage_size;
	return entry;
}

static pte_t hugepage_shift_to_tte(pte_t entry, unsigned int shift)
{
	if (tlb_type == hypervisor)
		return sun4v_hugepage_shift_to_tte(entry, shift);
	else
		return sun4u_hugepage_shift_to_tte(entry, shift);
}

pte_t arch_make_huge_pte(pte_t entry, struct vm_area_struct *vma,
			 struct page *page, int writeable)
{
	unsigned int shift = huge_page_shift(hstate_vma(vma));

	return hugepage_shift_to_tte(entry, shift);
}

static unsigned int sun4v_huge_tte_to_shift(pte_t entry)
{
	unsigned long tte_szbits = pte_val(entry) & _PAGE_SZALL_4V;
	unsigned int shift;

	switch (tte_szbits) {
	case _PAGE_SZ256MB_4V:
		shift = HPAGE_256MB_SHIFT;
		break;
	case _PAGE_SZ4MB_4V:
		shift = REAL_HPAGE_SHIFT;
		break;
	case _PAGE_SZ64K_4V:
		shift = HPAGE_64K_SHIFT;
		break;
	default:
		shift = PAGE_SHIFT;
		break;
	}
	return shift;
}

static unsigned int sun4u_huge_tte_to_shift(pte_t entry)
{
	unsigned long tte_szbits = pte_val(entry) & _PAGE_SZALL_4U;
	unsigned int shift;

	switch (tte_szbits) {
	case _PAGE_SZ256MB_4U:
		shift = HPAGE_256MB_SHIFT;
		break;
	case _PAGE_SZ4MB_4U:
		shift = REAL_HPAGE_SHIFT;
		break;
	case _PAGE_SZ64K_4U:
		shift = HPAGE_64K_SHIFT;
		break;
	default:
		shift = PAGE_SHIFT;
		break;
	}
	return shift;
}
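/* The decode direction: recover the page-size shift from a huge TTE.
 * A result of PAGE_SHIFT means the size bits matched no supported
 * hugepage size, which the caller flags with a one-time warning.
 */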
static unsigned int huge_tte_to_shift(pte_t entry)
{
	unsigned long shift;

	if (tlb_type == hypervisor)
		shift = sun4v_huge_tte_to_shift(entry);
	else
		shift = sun4u_huge_tte_to_shift(entry);

	if (shift == PAGE_SHIFT)
		WARN_ONCE(1, "tte_to_shift: invalid hugepage tte=0x%lx\n",
			  pte_val(entry));

	return shift;
}

static unsigned long huge_tte_to_size(pte_t pte)
{
	unsigned long size = 1UL << huge_tte_to_shift(pte);

	if (size == REAL_HPAGE_SIZE)
		size = HPAGE_SIZE;
	return size;
}

pte_t *huge_pte_alloc(struct mm_struct *mm,
		      unsigned long addr, unsigned long sz)
{
	pgd_t *pgd;
	pud_t *pud;
	pmd_t *pmd;
	pte_t *pte = NULL;

	pgd = pgd_offset(mm, addr);
	pud = pud_alloc(mm, pgd, addr);
	if (pud) {
		pmd = pmd_alloc(mm, pud, addr);
		if (!pmd)
			return NULL;

		/* sz is a size in bytes, so compare against PMD_SIZE,
		 * not PMD_SHIFT: PMD-sized and larger hugepages map at
		 * the PMD level, smaller ones at the PTE level.
		 */
		if (sz >= PMD_SIZE)
			pte = (pte_t *)pmd;
		else
			pte = pte_alloc_map(mm, pmd, addr);
	}

	return pte;
}

pte_t *huge_pte_offset(struct mm_struct *mm, unsigned long addr)
{
	pgd_t *pgd;
	pud_t *pud;
	pmd_t *pmd;
	pte_t *pte = NULL;

	pgd = pgd_offset(mm, addr);
	if (!pgd_none(*pgd)) {
		pud = pud_offset(pgd, addr);
		if (!pud_none(*pud)) {
			pmd = pmd_offset(pud, addr);
			if (!pmd_none(*pmd)) {
				if (is_hugetlb_pmd(*pmd))
					pte = (pte_t *)pmd;
				else
					pte = pte_offset_map(pmd, addr);
			}
		}
	}

	return pte;
}
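/* A huge PTE is replicated into every page table slot the mapping
 * spans: one PMD entry per PMD_SIZE region for PMD-level hugepages,
 * or one PTE per base page for 64K hugepages.  Each copy is offset by
 * (i << shift) so that every slot translates its own part of the range.
 */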
void set_huge_pte_at(struct mm_struct *mm, unsigned long addr,
		     pte_t *ptep, pte_t entry)
{
	unsigned int i, nptes, orig_shift, shift;
	unsigned long size;
	pte_t orig;

	size = huge_tte_to_size(entry);
	shift = size >= HPAGE_SIZE ? PMD_SHIFT : PAGE_SHIFT;
	nptes = size >> shift;

	if (!pte_present(*ptep) && pte_present(entry))
		mm->context.hugetlb_pte_count += nptes;

	addr &= ~(size - 1);
	orig = *ptep;
	orig_shift = pte_none(orig) ? PAGE_SHIFT : huge_tte_to_shift(orig);

	for (i = 0; i < nptes; i++)
		ptep[i] = __pte(pte_val(entry) + (i << shift));

	maybe_tlb_batch_add(mm, addr, ptep, orig, 0, orig_shift);
	/* An HPAGE_SIZE'ed page is composed of two REAL_HPAGE_SIZE'ed pages */
	if (size == HPAGE_SIZE)
		maybe_tlb_batch_add(mm, addr + REAL_HPAGE_SIZE, ptep, orig, 0,
				    orig_shift);
}

pte_t huge_ptep_get_and_clear(struct mm_struct *mm, unsigned long addr,
			      pte_t *ptep)
{
	unsigned int i, nptes, hugepage_shift;
	unsigned long size;
	pte_t entry;

	entry = *ptep;
	size = huge_tte_to_size(entry);
	if (size >= HPAGE_SIZE)
		nptes = size >> PMD_SHIFT;
	else
		nptes = size >> PAGE_SHIFT;

	hugepage_shift = pte_none(entry) ? PAGE_SHIFT :
		huge_tte_to_shift(entry);

	if (pte_present(entry))
		mm->context.hugetlb_pte_count -= nptes;

	addr &= ~(size - 1);
	for (i = 0; i < nptes; i++)
		ptep[i] = __pte(0UL);

	maybe_tlb_batch_add(mm, addr, ptep, entry, 0, hugepage_shift);
	/* An HPAGE_SIZE'ed page is composed of two REAL_HPAGE_SIZE'ed pages */
	if (size == HPAGE_SIZE)
		maybe_tlb_batch_add(mm, addr + REAL_HPAGE_SIZE, ptep, entry, 0,
				    hugepage_shift);

	return entry;
}

int pmd_huge(pmd_t pmd)
{
	return !pmd_none(pmd) &&
		(pmd_val(pmd) & (_PAGE_VALID|_PAGE_PMD_HUGE)) != _PAGE_VALID;
}

int pud_huge(pud_t pud)
{
	return 0;
}

static void hugetlb_free_pte_range(struct mmu_gather *tlb, pmd_t *pmd,
				   unsigned long addr)
{
	pgtable_t token = pmd_pgtable(*pmd);

	pmd_clear(pmd);
	pte_free_tlb(tlb, token, addr);
	atomic_long_dec(&tlb->mm->nr_ptes);
}
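/* The freeing routines below mirror free_pmd_range()/free_pud_range()
 * in mm/memory.c: huge PMDs are simply cleared, normal PMDs have their
 * PTE page freed, and the upper-level entry itself is only torn down
 * when the [floor, ceiling) limits show the whole aligned span is dead.
 */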
static void hugetlb_free_pmd_range(struct mmu_gather *tlb, pud_t *pud,
				   unsigned long addr, unsigned long end,
				   unsigned long floor, unsigned long ceiling)
{
	pmd_t *pmd;
	unsigned long next;
	unsigned long start;

	start = addr;
	pmd = pmd_offset(pud, addr);
	do {
		next = pmd_addr_end(addr, end);
		if (pmd_none(*pmd))
			continue;
		if (is_hugetlb_pmd(*pmd))
			pmd_clear(pmd);
		else
			hugetlb_free_pte_range(tlb, pmd, addr);
	} while (pmd++, addr = next, addr != end);

	start &= PUD_MASK;
	if (start < floor)
		return;
	if (ceiling) {
		ceiling &= PUD_MASK;
		if (!ceiling)
			return;
	}
	if (end - 1 > ceiling - 1)
		return;

	pmd = pmd_offset(pud, start);
	pud_clear(pud);
	pmd_free_tlb(tlb, pmd, start);
	mm_dec_nr_pmds(tlb->mm);
}

static void hugetlb_free_pud_range(struct mmu_gather *tlb, pgd_t *pgd,
				   unsigned long addr, unsigned long end,
				   unsigned long floor, unsigned long ceiling)
{
	pud_t *pud;
	unsigned long next;
	unsigned long start;

	start = addr;
	pud = pud_offset(pgd, addr);
	do {
		next = pud_addr_end(addr, end);
		if (pud_none_or_clear_bad(pud))
			continue;
		hugetlb_free_pmd_range(tlb, pud, addr, next, floor,
				       ceiling);
	} while (pud++, addr = next, addr != end);

	start &= PGDIR_MASK;
	if (start < floor)
		return;
	if (ceiling) {
		ceiling &= PGDIR_MASK;
		if (!ceiling)
			return;
	}
	if (end - 1 > ceiling - 1)
		return;

	pud = pud_offset(pgd, start);
	pgd_clear(pgd);
	pud_free_tlb(tlb, pud, start);
}
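/* Entry point used by the generic hugetlb code when tearing down the
 * page tables behind an unmapped hugetlb region: walk the PGD and
 * delegate each populated entry to hugetlb_free_pud_range() above.
 */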
void hugetlb_free_pgd_range(struct mmu_gather *tlb,
			    unsigned long addr, unsigned long end,
			    unsigned long floor, unsigned long ceiling)
{
	pgd_t *pgd;
	unsigned long next;

	pgd = pgd_offset(tlb->mm, addr);
	do {
		next = pgd_addr_end(addr, end);
		if (pgd_none_or_clear_bad(pgd))
			continue;
		hugetlb_free_pud_range(tlb, pgd, addr, next, floor, ceiling);
	} while (pgd++, addr = next, addr != end);
}