Lines Matching +full:i +full:- +full:tlb +full:- +full:size
1 // SPDX-License-Identifier: GPL-2.0
3 * SPARC64 Huge TLB page support.
17 #include <asm/tlb.h>
22 /* Slightly simplified from the non-hugepage variant because by
48 VM_BUG_ON(addr != -ENOMEM); in hugetlb_get_unmapped_area_bottomup()
64 struct mm_struct *mm = current->mm; in hugetlb_get_unmapped_area_topdown()
68 /* This should only ever run for 32-bit processes. */ in hugetlb_get_unmapped_area_topdown()
74 info.high_limit = mm->mmap_base; in hugetlb_get_unmapped_area_topdown()
81 * so fall back to the bottom-up function here. This scenario in hugetlb_get_unmapped_area_topdown()
86 VM_BUG_ON(addr != -ENOMEM); in hugetlb_get_unmapped_area_topdown()
101 struct mm_struct *mm = current->mm; in hugetlb_get_unmapped_area()
109 return -EINVAL; in hugetlb_get_unmapped_area()
111 return -ENOMEM; in hugetlb_get_unmapped_area()
115 return -EINVAL; in hugetlb_get_unmapped_area()
122 if (task_size - len >= addr && in hugetlb_get_unmapped_area()
126 if (mm->get_unmapped_area == arch_get_unmapped_area) in hugetlb_get_unmapped_area()
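The matched lines above come from the address-space search helpers: hugetlb_get_unmapped_area() validates the request, optionally honours a hint address, and then dispatches to a bottom-up or top-down gap search, with the top-down path falling back to bottom-up when its search fails. The range check on line 122 is written as task_size - len >= addr rather than addr + len <= task_size so it cannot wrap; the earlier len > task_size test guarantees the subtraction does not underflow. A minimal user-space sketch of that check follows; the 44-bit task size is an assumption chosen only for illustration.

/* Hypothetical sketch of the overflow-safe range check in
 * hugetlb_get_unmapped_area(); the task size below is assumed.
 */
#include <stdio.h>

static int fits_below(unsigned long task_size, unsigned long addr,
		      unsigned long len)
{
	/* Caller has already rejected len > task_size, so no underflow. */
	return task_size - len >= addr;
}

int main(void)
{
	unsigned long task_size = 1UL << 44;

	printf("%d\n", fits_below(task_size, 0x10000000UL, 1UL << 23)); /* 1 */
	printf("%d\n", fits_below(task_size, task_size - (1UL << 22),
				  1UL << 23));                           /* 0 */
	return 0;
}
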
270 unsigned long size = 1UL << huge_tte_to_shift(pte); in huge_tte_to_size() local
272 if (size == REAL_HPAGE_SIZE) in huge_tte_to_size()
273 size = HPAGE_SIZE; in huge_tte_to_size()
274 return size; in huge_tte_to_size()
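huge_tte_to_size() decodes the page-size field of a TTE into a byte count and then folds the 4MB hardware size back to the 8MB size the hugetlb layer works with, since one nominal hugepage is backed by two smaller hardware TTEs. A stand-alone sketch of that folding follows; the shift constants mirror the SPARC64 values (REAL_HPAGE_SHIFT 22, HPAGE_SHIFT 23) but are treated as assumptions here rather than taken from the kernel headers.

/* Hypothetical user-space sketch of the shift -> size folding done by
 * huge_tte_to_size(); constants are assumed.
 */
#include <stdio.h>

#define REAL_HPAGE_SHIFT 22UL                     /* 4MB hardware TTE     */
#define HPAGE_SHIFT      23UL                     /* 8MB nominal hugepage */
#define REAL_HPAGE_SIZE  (1UL << REAL_HPAGE_SHIFT)
#define HPAGE_SIZE       (1UL << HPAGE_SHIFT)

static unsigned long tte_shift_to_size(unsigned long shift)
{
	unsigned long size = 1UL << shift;

	/* Report the nominal 8MB size for a 4MB hardware TTE. */
	if (size == REAL_HPAGE_SIZE)
		size = HPAGE_SIZE;
	return size;
}

int main(void)
{
	printf("shift 22 -> %lu MB\n", tte_shift_to_size(22) >> 20); /* 8   */
	printf("shift 28 -> %lu MB\n", tte_shift_to_size(28) >> 20); /* 256 */
	return 0;
}
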
335 unsigned long i, size; in __set_huge_pte_at() local
338 size = huge_tte_to_size(entry); in __set_huge_pte_at()
341 if (size >= PUD_SIZE) in __set_huge_pte_at()
343 else if (size >= PMD_SIZE) in __set_huge_pte_at()
348 nptes = size >> shift; in __set_huge_pte_at()
351 mm->context.hugetlb_pte_count += nptes; in __set_huge_pte_at()
353 addr &= ~(size - 1); in __set_huge_pte_at()
357 for (i = 0; i < nptes; i++) in __set_huge_pte_at()
358 ptep[i] = __pte(pte_val(entry) + (i << shift)); in __set_huge_pte_at()
362 if (size == HPAGE_SIZE) in __set_huge_pte_at()
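__set_huge_pte_at() picks a step size (shift) from the mapping size, derives how many page-table slots the huge mapping spans (nptes = size >> shift), aligns the address down to the start of the region, and writes the same entry into every slot with the physical address advanced by one slot's coverage each time. A small sketch of that replication loop follows; the 64KB mapping over 8KB slots is an assumed example chosen only to make the arithmetic visible.

/* Hypothetical sketch of the replication loop in __set_huge_pte_at();
 * sizes and the fake entry value are illustrative assumptions.
 */
#include <stdio.h>

int main(void)
{
	unsigned long entry = 0x80000000UL;   /* pretend TTE, PA bits only */
	unsigned long size  = 1UL << 16;      /* 64KB huge mapping         */
	unsigned long shift = 13;             /* one slot covers 8KB       */
	unsigned long nptes = size >> shift;  /* 8 slots to fill           */
	unsigned long ptep[8];
	unsigned long i;

	for (i = 0; i < nptes; i++)
		ptep[i] = entry + (i << shift);

	for (i = 0; i < nptes; i++)
		printf("slot %lu -> %#lx\n", i, ptep[i]);
	return 0;
}
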
376 unsigned int i, nptes, orig_shift, shift; in huge_ptep_get_and_clear() local
377 unsigned long size; in huge_ptep_get_and_clear() local
381 size = huge_tte_to_size(entry); in huge_ptep_get_and_clear()
384 if (size >= PUD_SIZE) in huge_ptep_get_and_clear()
386 else if (size >= PMD_SIZE) in huge_ptep_get_and_clear()
391 nptes = size >> shift; in huge_ptep_get_and_clear()
395 mm->context.hugetlb_pte_count -= nptes; in huge_ptep_get_and_clear()
397 addr &= ~(size - 1); in huge_ptep_get_and_clear()
398 for (i = 0; i < nptes; i++) in huge_ptep_get_and_clear()
399 ptep[i] = __pte(0UL); in huge_ptep_get_and_clear()
403 if (size == HPAGE_SIZE) in huge_ptep_get_and_clear()
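huge_ptep_get_and_clear() mirrors the setter: it re-derives the slot count from the existing entry, aligns the address down to the start of the huge region with addr &= ~(size - 1), and zeroes each of the nptes slots. The rounding only works because every hugepage size is a power of two; a one-line demonstration follows, with assumed values.

/* Hypothetical sketch of the power-of-two rounding shared by
 * __set_huge_pte_at() and huge_ptep_get_and_clear(); values are assumed.
 */
#include <stdio.h>

int main(void)
{
	unsigned long size = 1UL << 23;           /* 8MB hugepage        */
	unsigned long addr = 0x12345678UL;

	addr &= ~(size - 1);                      /* round down to 8MB   */
	printf("aligned addr = %#lx\n", addr);    /* prints 0x12000000   */
	return 0;
}
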
422 static void hugetlb_free_pte_range(struct mmu_gather *tlb, pmd_t *pmd, in hugetlb_free_pte_range() argument
428 pte_free_tlb(tlb, token, addr); in hugetlb_free_pte_range()
429 mm_dec_nr_ptes(tlb->mm); in hugetlb_free_pte_range()
432 static void hugetlb_free_pmd_range(struct mmu_gather *tlb, pud_t *pud, in hugetlb_free_pmd_range() argument
449 hugetlb_free_pte_range(tlb, pmd, addr); in hugetlb_free_pmd_range()
460 if (end - 1 > ceiling - 1) in hugetlb_free_pmd_range()
465 pmd_free_tlb(tlb, pmd, start); in hugetlb_free_pmd_range()
466 mm_dec_nr_pmds(tlb->mm); in hugetlb_free_pmd_range()
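The free helpers repeatedly test end - 1 > ceiling - 1 before tearing a table down. Written this way, a ceiling of 0 (no upper bound) wraps to ULONG_MAX on the right-hand side, so the comparison treats it as unlimited without a special case, which a plain end > ceiling would not. A small demonstration of the comparison follows, with made-up addresses.

/* Hypothetical sketch of the "end - 1 > ceiling - 1" test seen in the
 * hugetlb_free_*_range() helpers; addresses are illustrative.
 */
#include <stdio.h>

static int within_ceiling(unsigned long end, unsigned long ceiling)
{
	return !(end - 1 > ceiling - 1);
}

int main(void)
{
	printf("%d\n", within_ceiling(0x400000UL, 0x800000UL)); /* 1: below  */
	printf("%d\n", within_ceiling(0x900000UL, 0x800000UL)); /* 0: beyond */
	printf("%d\n", within_ceiling(0x900000UL, 0UL));        /* 1: no cap */
	return 0;
}
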
469 static void hugetlb_free_pud_range(struct mmu_gather *tlb, p4d_t *p4d, in hugetlb_free_pud_range() argument
486 hugetlb_free_pmd_range(tlb, pud, addr, next, floor, in hugetlb_free_pud_range()
498 if (end - 1 > ceiling - 1) in hugetlb_free_pud_range()
503 pud_free_tlb(tlb, pud, start); in hugetlb_free_pud_range()
504 mm_dec_nr_puds(tlb->mm); in hugetlb_free_pud_range()
507 void hugetlb_free_pgd_range(struct mmu_gather *tlb, in hugetlb_free_pgd_range() argument
526 if (end - 1 > ceiling - 1) in hugetlb_free_pgd_range()
527 end -= PMD_SIZE; in hugetlb_free_pgd_range()
528 if (addr > end - 1) in hugetlb_free_pgd_range()
531 pgd = pgd_offset(tlb->mm, addr); in hugetlb_free_pgd_range()
537 hugetlb_free_pud_range(tlb, p4d, addr, next, floor, ceiling); in hugetlb_free_pgd_range()
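The matched lines from hugetlb_free_pte_range() through hugetlb_free_pgd_range() share one walk pattern: step through the current level in fixed-size strides, skip empty slots, recurse into (or free) the level below, and adjust the per-mm table counters with mm_dec_nr_ptes()/mm_dec_nr_pmds()/mm_dec_nr_puds(). A much-reduced user-space model of that stride-and-skip walk follows; the table geometry and addresses are assumptions, only one lower level is modelled, and the floor/ceiling trimming done by the kernel code is deliberately omitted.

/* Hypothetical two-level model of the stride-and-skip walk used by the
 * hugetlb_free_*_range() helpers; geometry is assumed, floor/ceiling
 * handling is left out.
 */
#include <stdio.h>
#include <stdlib.h>

#define TOP_SHIFT	20UL			/* each top slot covers 1MB */
#define TOP_SIZE	(1UL << TOP_SHIFT)
#define TOP_ENTRIES	16

static unsigned long *top[TOP_ENTRIES];		/* NULL == nothing mapped */

static void free_range(unsigned long addr, unsigned long end)
{
	unsigned long next;

	do {
		unsigned long idx = addr >> TOP_SHIFT;

		/* Analogue of pmd_addr_end(): stop at the next slot
		 * boundary or at end, whichever comes first. */
		next = (addr + TOP_SIZE) & ~(TOP_SIZE - 1);
		if (next > end)
			next = end;
		if (top[idx]) {			/* skip "none" slots */
			free(top[idx]);
			top[idx] = NULL;
			printf("freed lower table for slot %lu\n", idx);
		}
		addr = next;
	} while (addr < end);
}

int main(void)
{
	top[2] = calloc(4, sizeof(unsigned long));
	top[3] = calloc(4, sizeof(unsigned long));
	free_range(2 * TOP_SIZE, 4 * TOP_SIZE);
	return 0;
}
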