// SPDX-License-Identifier: GPL-2.0
/*
 * PPC Huge TLB Page Support for Book3E MMU
 *
 * Copyright (C) 2009 David Gibson, IBM Corporation.
 * Copyright (C) 2011 Becky Bruce, Freescale Semiconductor
 *
 */
#include <linux/mm.h>
#include <linux/hugetlb.h>

#include <asm/mmu.h>

#ifdef CONFIG_PPC64
#include <asm/paca.h>

static inline int tlb1_next(void)
{
	struct paca_struct *paca = get_paca();
	struct tlb_core_data *tcd;
	int this, next;

	tcd = paca->tcd_ptr;
	this = tcd->esel_next;

	/* Round-robin through this core's share of the TLB1 entries */
	next = this + 1;
	if (next >= tcd->esel_max)
		next = tcd->esel_first;

	tcd->esel_next = next;
	return this;
}

static inline void book3e_tlb_lock(void)
{
	struct paca_struct *paca = get_paca();
	unsigned long tmp;
	int token = smp_processor_id() + 1;

	/*
	 * Besides being unnecessary in the absence of SMT, this
	 * check prevents trying to do lbarx/stbcx. on e5500 which
	 * doesn't implement either feature.
	 */
	if (!cpu_has_feature(CPU_FTR_SMT))
		return;

	/*
	 * Spin on the per-core lock byte with lbarx/stbcx. until we
	 * manage to store our token into it.  Zero means unlocked,
	 * which is why the token is smp_processor_id() + 1.
	 */
	asm volatile("1: lbarx %0, 0, %1;"
		     "cmpwi %0, 0;"
		     "bne 2f;"
		     "stbcx. %2, 0, %1;"
		     "bne 1b;"
		     "b 3f;"
		     "2: lbzx %0, 0, %1;"
		     "cmpwi %0, 0;"
		     "bne 2b;"
		     "b 1b;"
		     "3:"
		     : "=&r" (tmp)
		     : "r" (&paca->tcd_ptr->lock), "r" (token)
		     : "memory");
}

static inline void book3e_tlb_unlock(void)
{
	struct paca_struct *paca = get_paca();

	if (!cpu_has_feature(CPU_FTR_SMT))
		return;

	isync();
	paca->tcd_ptr->lock = 0;
}
#else
static inline int tlb1_next(void)
{
	int index, ncams;

	ncams = mfspr(SPRN_TLB1CFG) & TLBnCFG_N_ENTRY;

	index = this_cpu_read(next_tlbcam_idx);

	/* Just round-robin the entries and wrap when we hit the end */
	if (unlikely(index == ncams - 1))
		__this_cpu_write(next_tlbcam_idx, tlbcam_index);
	else
		__this_cpu_inc(next_tlbcam_idx);

	return index;
}

/* No SMT on the 32-bit parts, so no threads share a TLB and no lock is needed */
static inline void book3e_tlb_lock(void)
{
}

static inline void book3e_tlb_unlock(void)
{
}
#endif

static inline int book3e_tlb_exists(unsigned long ea, unsigned long pid)
{
	int found = 0;

	mtspr(SPRN_MAS6, pid << 16);	/* MAS6[SPID] = pid */
	asm volatile(
		"tlbsx 0,%1\n"		/* look up ea in the TLB */
		"mfspr %0,0x271\n"	/* 0x271 is SPRN_MAS1 */
		"srwi %0,%0,31\n"	/* extract the MAS1 valid bit */
		: "=&r"(found) : "r"(ea));

	return found;
}

static void
book3e_hugetlb_preload(struct vm_area_struct *vma, unsigned long ea, pte_t pte)
{
	unsigned long mas1, mas2;
	u64 mas7_3;
	unsigned long psize, tsize, shift;
	unsigned long flags;
	struct mm_struct *mm;
	int index;

	if (unlikely(is_kernel_addr(ea)))
		return;

	mm = vma->vm_mm;

	psize = vma_mmu_pagesize(vma);
	shift = __ilog2(psize);
	tsize = shift - 10;	/* TSIZE encodes log2(page size in KB) */
	/*
	 * We can't be interrupted while we're setting up the MAS
	 * registers or after we've confirmed that no TLB entry exists.
	 */
	local_irq_save(flags);

	book3e_tlb_lock();

	if (unlikely(book3e_tlb_exists(ea, mm->context.id))) {
		book3e_tlb_unlock();
		local_irq_restore(flags);
		return;
	}

	/* We have to use the CAM(TLB1) on FSL parts for hugepages */
	index = tlb1_next();
	mtspr(SPRN_MAS0, MAS0_ESEL(index) | MAS0_TLBSEL(1));

	mas1 = MAS1_VALID | MAS1_TID(mm->context.id) | MAS1_TSIZE(tsize);
	mas2 = ea & ~((1UL << shift) - 1);	/* EPN: ea rounded down to the huge page */
	mas2 |= (pte_val(pte) >> PTE_WIMGE_SHIFT) & MAS2_WIMGE_MASK;
	mas7_3 = (u64)pte_pfn(pte) << PAGE_SHIFT;	/* RPN from the PTE */
	mas7_3 |= (pte_val(pte) >> PTE_BAP_SHIFT) & MAS3_BAP_MASK;
	/* Drop write permissions until the first write dirties the page */
	if (!pte_dirty(pte))
		mas7_3 &= ~(MAS3_SW|MAS3_UW);

	mtspr(SPRN_MAS1, mas1);
	mtspr(SPRN_MAS2, mas2);

	if (mmu_has_feature(MMU_FTR_BIG_PHYS))
		mtspr(SPRN_MAS7, upper_32_bits(mas7_3));
	mtspr(SPRN_MAS3, lower_32_bits(mas7_3));

	asm volatile ("tlbwe");

	book3e_tlb_unlock();
	local_irq_restore(flags);
}

/*
 * This is called at the end of handling a user page fault, when the
 * fault has been handled by updating a PTE in the linux page tables.
 *
 * This must always be called with the pte lock held.
 */
void update_mmu_cache(struct vm_area_struct *vma, unsigned long address, pte_t *ptep)
{
	if (is_vm_hugetlb_page(vma))
		book3e_hugetlb_preload(vma, address, *ptep);
}

void flush_hugetlb_page(struct vm_area_struct *vma, unsigned long vmaddr)
{
	struct hstate *hstate = hstate_file(vma->vm_file);
	unsigned long tsize = huge_page_shift(hstate) - 10;

	__flush_tlb_page(vma->vm_mm, vmaddr, tsize, 0);
}