/*
 * PARISC64 Huge TLB page support.
 *
 * This parisc implementation is heavily based on the SPARC and x86 code.
 *
 * Copyright (C) 2015 Helge Deller <deller@gmx.de>
 */

#include <linux/fs.h>
#include <linux/mm.h>
#include <linux/sched/mm.h>
#include <linux/hugetlb.h>
#include <linux/pagemap.h>
#include <linux/sysctl.h>

#include <asm/mman.h>
#include <asm/pgalloc.h>
#include <asm/tlb.h>
#include <asm/tlbflush.h>
#include <asm/cacheflush.h>
#include <asm/mmu_context.h>


unsigned long
hugetlb_get_unmapped_area(struct file *file, unsigned long addr,
		unsigned long len, unsigned long pgoff, unsigned long flags)
{
	struct hstate *h = hstate_file(file);

	if (len & ~huge_page_mask(h))
		return -EINVAL;
	if (len > TASK_SIZE)
		return -ENOMEM;

	if (flags & MAP_FIXED)
		if (prepare_hugepage_range(file, addr, len))
			return -EINVAL;

	if (addr)
		addr = ALIGN(addr, huge_page_size(h));

	/* we need to make sure the colouring is OK */
	return arch_get_unmapped_area(file, addr, len, pgoff, flags);
}


pte_t *huge_pte_alloc(struct mm_struct *mm,
			unsigned long addr, unsigned long sz)
{
	pgd_t *pgd;
	pud_t *pud;
	pmd_t *pmd;
	pte_t *pte = NULL;

	/* We must align the address, because our caller will run
	 * set_huge_pte_at() on whatever we return, which writes out
	 * all of the sub-ptes for the hugepage range.  So we have
	 * to give it the first such sub-pte.
	 */
	addr &= HPAGE_MASK;

	pgd = pgd_offset(mm, addr);
	pud = pud_alloc(mm, pgd, addr);
	if (pud) {
		pmd = pmd_alloc(mm, pud, addr);
		if (pmd)
			pte = pte_alloc_map(mm, pmd, addr);
	}
	return pte;
}

pte_t *huge_pte_offset(struct mm_struct *mm, unsigned long addr)
{
	pgd_t *pgd;
	pud_t *pud;
	pmd_t *pmd;
	pte_t *pte = NULL;

	addr &= HPAGE_MASK;

	pgd = pgd_offset(mm, addr);
	if (!pgd_none(*pgd)) {
		pud = pud_offset(pgd, addr);
		if (!pud_none(*pud)) {
			pmd = pmd_offset(pud, addr);
			if (!pmd_none(*pmd))
				pte = pte_offset_map(pmd, addr);
		}
	}
	return pte;
}

/* Purge data and instruction TLB entries.  Must be called holding
 * the pa_tlb_lock.  The TLB purge instructions are slow on SMP
 * machines since the purge must be broadcast to all CPUs.
 */
static inline void purge_tlb_entries_huge(struct mm_struct *mm, unsigned long addr)
{
	int i;

	/* We may use multiple physical huge pages (e.g. 2x1 MB) to emulate
	 * Linux standard huge pages (e.g. 2 MB). */
	BUILD_BUG_ON(REAL_HPAGE_SHIFT > HPAGE_SHIFT);

	addr &= HPAGE_MASK;
	addr |= _HUGE_PAGE_SIZE_ENCODING_DEFAULT;

	for (i = 0; i < (1 << (HPAGE_SHIFT-REAL_HPAGE_SHIFT)); i++) {
		purge_tlb_entries(mm, addr);
		addr += (1UL << REAL_HPAGE_SHIFT);
	}
}

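/* Illustrative walk-through of the purge loop above, assuming the 2x1 MB
 * split mentioned in the comment (the actual HPAGE_SHIFT and
 * REAL_HPAGE_SHIFT values come from the architecture headers and depend
 * on the kernel configuration): with HPAGE_SHIFT == 21 (2 MB) and
 * REAL_HPAGE_SHIFT == 20 (1 MB), the loop runs 1 << (21 - 20) == 2
 * iterations, purging one TLB entry for each of the two 1 MB physical
 * huge pages that back a single 2 MB Linux huge page.
 */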

/* __set_huge_pte_at() must be called holding the pa_tlb_lock. */
static void __set_huge_pte_at(struct mm_struct *mm, unsigned long addr,
		     pte_t *ptep, pte_t entry)
{
	unsigned long addr_start;
	int i;

	addr &= HPAGE_MASK;
	addr_start = addr;

	for (i = 0; i < (1 << HUGETLB_PAGE_ORDER); i++) {
		set_pte(ptep, entry);
		ptep++;

		addr += PAGE_SIZE;
		pte_val(entry) += PAGE_SIZE;
	}

	purge_tlb_entries_huge(mm, addr_start);
}

void set_huge_pte_at(struct mm_struct *mm, unsigned long addr,
		     pte_t *ptep, pte_t entry)
{
	unsigned long flags;

	purge_tlb_start(flags);
	__set_huge_pte_at(mm, addr, ptep, entry);
	purge_tlb_end(flags);
}


pte_t huge_ptep_get_and_clear(struct mm_struct *mm, unsigned long addr,
			      pte_t *ptep)
{
	unsigned long flags;
	pte_t entry;

	purge_tlb_start(flags);
	entry = *ptep;
	__set_huge_pte_at(mm, addr, ptep, __pte(0));
	purge_tlb_end(flags);

	return entry;
}


void huge_ptep_set_wrprotect(struct mm_struct *mm,
				unsigned long addr, pte_t *ptep)
{
	unsigned long flags;
	pte_t old_pte;

	purge_tlb_start(flags);
	old_pte = *ptep;
	__set_huge_pte_at(mm, addr, ptep, pte_wrprotect(old_pte));
	purge_tlb_end(flags);
}

int huge_ptep_set_access_flags(struct vm_area_struct *vma,
				unsigned long addr, pte_t *ptep,
				pte_t pte, int dirty)
{
	unsigned long flags;
	int changed;

	purge_tlb_start(flags);
	changed = !pte_same(*ptep, pte);
	if (changed) {
		__set_huge_pte_at(vma->vm_mm, addr, ptep, pte);
	}
	purge_tlb_end(flags);
	return changed;
}


/* parisc emulates huge pages with runs of normal-sized ptes (see
 * __set_huge_pte_at() above), so there are never huge entries at the
 * pmd or pud level.
 */
int pmd_huge(pmd_t pmd)
{
	return 0;
}

int pud_huge(pud_t pud)
{
	return 0;
}
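
/* Usage sketch, not part of this file: these helpers sit behind the
 * generic hugetlb code, so a userspace mapping such as
 *
 *	void *p = mmap(NULL, 2UL << 20, PROT_READ | PROT_WRITE,
 *		       MAP_PRIVATE | MAP_ANONYMOUS | MAP_HUGETLB, -1, 0);
 *
 * (assuming a 2 MB huge page size) ends up here: the address is chosen
 * by hugetlb_get_unmapped_area(), the first fault allocates the pte via
 * huge_pte_alloc() and installs it with set_huge_pte_at(), and munmap()
 * tears the range down through huge_ptep_get_and_clear().
 */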