/*
 * This file is subject to the terms and conditions of the GNU General Public
 * License. See the file "COPYING" in the main directory of this archive
 * for more details.
 *
 * Copyright (C) 1996 David S. Miller (davem@davemloft.net)
 * Copyright (C) 1997, 1998, 1999, 2000 Ralf Baechle ralf@gnu.org
 * Carsten Langgaard, carstenl@mips.com
 * Copyright (C) 2002 MIPS Technologies, Inc.  All rights reserved.
 */
#include <linux/cpu_pm.h>
#include <linux/init.h>
#include <linux/sched.h>
#include <linux/smp.h>
#include <linux/mm.h>
#include <linux/hugetlb.h>
#include <linux/export.h>

#include <asm/cpu.h>
#include <asm/cpu-type.h>
#include <asm/bootinfo.h>
#include <asm/hazards.h>
#include <asm/mmu_context.h>
#include <asm/tlb.h>
#include <asm/tlbmisc.h>

extern void build_tlb_refill_handler(void);

/*
 * LOONGSON-2 has a 4 entry itlb which is a subset of the jtlb; LOONGSON-3
 * has a 4 entry itlb and a 4 entry dtlb which are subsets of the jtlb.
 * Unfortunately, the itlb/dtlb are not totally transparent to software.
 */
static inline void flush_micro_tlb(void)
{
	switch (current_cpu_type()) {
	case CPU_LOONGSON2EF:
		write_c0_diag(LOONGSON_DIAG_ITLB);
		break;
	case CPU_LOONGSON64:
		write_c0_diag(LOONGSON_DIAG_ITLB | LOONGSON_DIAG_DTLB);
		break;
	default:
		break;
	}
}

static inline void flush_micro_tlb_vm(struct vm_area_struct *vma)
{
	if (vma->vm_flags & VM_EXEC)
		flush_micro_tlb();
}

void local_flush_tlb_all(void)
{
	unsigned long flags;
	unsigned long old_ctx;
	int entry, ftlbhighset;

	local_irq_save(flags);
	/* Save old context and create impossible VPN2 value */
	old_ctx = read_c0_entryhi();
	htw_stop();
	write_c0_entrylo0(0);
	write_c0_entrylo1(0);

	entry = num_wired_entries();

	/*
	 * Blast 'em all away.
	 * If there are any wired entries, fall back to iterating
	 */
	if (cpu_has_tlbinv && !entry) {
		if (current_cpu_data.tlbsizevtlb) {
			write_c0_index(0);
			mtc0_tlbw_hazard();
			tlbinvf();  /* invalidate VTLB */
		}
		ftlbhighset = current_cpu_data.tlbsizevtlb +
			current_cpu_data.tlbsizeftlbsets;
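		/*
		 * FTLB entries are indexed after the VTLB; a tlbinvf on an
		 * FTLB index only invalidates that one set, so step through
		 * every set.
		 */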
		for (entry = current_cpu_data.tlbsizevtlb;
		     entry < ftlbhighset;
		     entry++) {
			write_c0_index(entry);
			mtc0_tlbw_hazard();
			tlbinvf();  /* invalidate one FTLB set */
		}
	} else {
		while (entry < current_cpu_data.tlbsize) {
			/* Make sure all entries differ. */
			write_c0_entryhi(UNIQUE_ENTRYHI(entry));
			write_c0_index(entry);
			mtc0_tlbw_hazard();
			tlb_write_indexed();
			entry++;
		}
	}
	tlbw_use_hazard();
	write_c0_entryhi(old_ctx);
	htw_start();
	flush_micro_tlb();
	local_irq_restore(flags);
}
EXPORT_SYMBOL(local_flush_tlb_all);

void local_flush_tlb_range(struct vm_area_struct *vma, unsigned long start,
	unsigned long end)
{
	struct mm_struct *mm = vma->vm_mm;
	int cpu = smp_processor_id();

	if (cpu_context(cpu, mm) != 0) {
		unsigned long size, flags;

		local_irq_save(flags);
		start = round_down(start, PAGE_SIZE << 1);
		end = round_up(end, PAGE_SIZE << 1);
		size = (end - start) >> (PAGE_SHIFT + 1);
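		/*
		 * Only worth flushing page by page if the range covers a
		 * small fraction of the TLB (an eighth when an FTLB is
		 * present, half otherwise); for anything bigger just drop
		 * the whole context.
		 */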
		if (size <= (current_cpu_data.tlbsizeftlbsets ?
			     current_cpu_data.tlbsize / 8 :
			     current_cpu_data.tlbsize / 2)) {
			unsigned long old_entryhi, old_mmid;
			int newpid = cpu_asid(cpu, mm);

			old_entryhi = read_c0_entryhi();
			if (cpu_has_mmid) {
				old_mmid = read_c0_memorymapid();
				write_c0_memorymapid(newpid);
			}

			htw_stop();
			while (start < end) {
				int idx;

				if (cpu_has_mmid)
					write_c0_entryhi(start);
				else
					write_c0_entryhi(start | newpid);
				start += (PAGE_SIZE << 1);
				mtc0_tlbw_hazard();
				tlb_probe();
				tlb_probe_hazard();
				idx = read_c0_index();
				write_c0_entrylo0(0);
				write_c0_entrylo1(0);
				if (idx < 0)
					continue;
				/* Make sure all entries differ. */
				write_c0_entryhi(UNIQUE_ENTRYHI(idx));
				mtc0_tlbw_hazard();
				tlb_write_indexed();
			}
			tlbw_use_hazard();
			write_c0_entryhi(old_entryhi);
			if (cpu_has_mmid)
				write_c0_memorymapid(old_mmid);
			htw_start();
		} else {
			drop_mmu_context(mm);
		}
		flush_micro_tlb();
		local_irq_restore(flags);
	}
}

void local_flush_tlb_kernel_range(unsigned long start, unsigned long end)
{
	unsigned long size, flags;

	local_irq_save(flags);
	size = (end - start + (PAGE_SIZE - 1)) >> PAGE_SHIFT;
	size = (size + 1) >> 1;
	if (size <= (current_cpu_data.tlbsizeftlbsets ?
		     current_cpu_data.tlbsize / 8 :
		     current_cpu_data.tlbsize / 2)) {
		int pid = read_c0_entryhi();

		start &= (PAGE_MASK << 1);
		end += ((PAGE_SIZE << 1) - 1);
		end &= (PAGE_MASK << 1);
		htw_stop();

		while (start < end) {
			int idx;

			write_c0_entryhi(start);
			start += (PAGE_SIZE << 1);
			mtc0_tlbw_hazard();
			tlb_probe();
			tlb_probe_hazard();
			idx = read_c0_index();
			write_c0_entrylo0(0);
			write_c0_entrylo1(0);
			if (idx < 0)
				continue;
			/* Make sure all entries differ. */
			write_c0_entryhi(UNIQUE_ENTRYHI(idx));
			mtc0_tlbw_hazard();
			tlb_write_indexed();
		}
		tlbw_use_hazard();
		write_c0_entryhi(pid);
		htw_start();
	} else {
		local_flush_tlb_all();
	}
	flush_micro_tlb();
	local_irq_restore(flags);
}

void local_flush_tlb_page(struct vm_area_struct *vma, unsigned long page)
{
	int cpu = smp_processor_id();

	if (cpu_context(cpu, vma->vm_mm) != 0) {
		unsigned long old_mmid;
		unsigned long flags, old_entryhi;
		int idx;

		page &= (PAGE_MASK << 1);
		local_irq_save(flags);
		old_entryhi = read_c0_entryhi();
		htw_stop();
		if (cpu_has_mmid) {
			old_mmid = read_c0_memorymapid();
			write_c0_entryhi(page);
			write_c0_memorymapid(cpu_asid(cpu, vma->vm_mm));
		} else {
			write_c0_entryhi(page | cpu_asid(cpu, vma->vm_mm));
		}
		mtc0_tlbw_hazard();
		tlb_probe();
		tlb_probe_hazard();
		idx = read_c0_index();
		write_c0_entrylo0(0);
		write_c0_entrylo1(0);
		if (idx < 0)
			goto finish;
		/* Make sure all entries differ. */
		write_c0_entryhi(UNIQUE_ENTRYHI(idx));
		mtc0_tlbw_hazard();
		tlb_write_indexed();
		tlbw_use_hazard();

	finish:
		write_c0_entryhi(old_entryhi);
		if (cpu_has_mmid)
			write_c0_memorymapid(old_mmid);
		htw_start();
		flush_micro_tlb_vm(vma);
		local_irq_restore(flags);
	}
}

/*
 * This one is only used for pages with the global bit set so we don't care
 * much about the ASID.
 */
void local_flush_tlb_one(unsigned long page)
{
	unsigned long flags;
	int oldpid, idx;

	local_irq_save(flags);
	oldpid = read_c0_entryhi();
	htw_stop();
	page &= (PAGE_MASK << 1);
	write_c0_entryhi(page);
	mtc0_tlbw_hazard();
	tlb_probe();
	tlb_probe_hazard();
	idx = read_c0_index();
	write_c0_entrylo0(0);
	write_c0_entrylo1(0);
	if (idx >= 0) {
		/* Make sure all entries differ. */
		write_c0_entryhi(UNIQUE_ENTRYHI(idx));
		mtc0_tlbw_hazard();
		tlb_write_indexed();
		tlbw_use_hazard();
	}
	write_c0_entryhi(oldpid);
	htw_start();
	flush_micro_tlb();
	local_irq_restore(flags);
}

/*
 * We will need multiple versions of update_mmu_cache(), one that just
 * updates the TLB with the new pte(s), and another which also checks
 * for the R4k "end of page" hardware bug and works around it.
 */
void __update_tlb(struct vm_area_struct *vma, unsigned long address, pte_t pte)
{
	unsigned long flags;
	pgd_t *pgdp;
	p4d_t *p4dp;
	pud_t *pudp;
	pmd_t *pmdp;
	pte_t *ptep, *ptemap = NULL;
	int idx, pid;

	/*
	 * Handle the debugger faulting in pages for the debuggee.
	 */
	if (current->active_mm != vma->vm_mm)
		return;

	local_irq_save(flags);

	htw_stop();
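	/*
	 * A single TLB entry maps an even/odd pair of pages via EntryLo0
	 * and EntryLo1, so align the address down to the double-page
	 * boundary that forms the VPN2 field of EntryHi.
	 */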
	address &= (PAGE_MASK << 1);
	if (cpu_has_mmid) {
		write_c0_entryhi(address);
	} else {
		pid = read_c0_entryhi() & cpu_asid_mask(&current_cpu_data);
		write_c0_entryhi(address | pid);
	}
	pgdp = pgd_offset(vma->vm_mm, address);
	mtc0_tlbw_hazard();
	tlb_probe();
	tlb_probe_hazard();
	p4dp = p4d_offset(pgdp, address);
	pudp = pud_offset(p4dp, address);
	pmdp = pmd_offset(pudp, address);
	idx = read_c0_index();
#ifdef CONFIG_MIPS_HUGE_TLB_SUPPORT
	/* this could be a huge page  */
	if (pmd_huge(*pmdp)) {
		unsigned long lo;
		write_c0_pagemask(PM_HUGE_MASK);
		ptep = (pte_t *)pmdp;
		lo = pte_to_entrylo(pte_val(*ptep));
		write_c0_entrylo0(lo);
		write_c0_entrylo1(lo + (HPAGE_SIZE >> 7));

		mtc0_tlbw_hazard();
		if (idx < 0)
			tlb_write_random();
		else
			tlb_write_indexed();
		tlbw_use_hazard();
		write_c0_pagemask(PM_DEFAULT_MASK);
	} else
#endif
	{
		ptemap = ptep = pte_offset_map(pmdp, address);
		/*
		 * update_mmu_cache() is called between pte_offset_map_lock()
		 * and pte_unmap_unlock(), so we can assume that ptep is not
		 * NULL here: and what should be done below if it were NULL?
		 */

#if defined(CONFIG_PHYS_ADDR_T_64BIT) && defined(CONFIG_CPU_MIPS32)
#ifdef CONFIG_XPA
		write_c0_entrylo0(pte_to_entrylo(ptep->pte_high));
		if (cpu_has_xpa)
			writex_c0_entrylo0(ptep->pte_low & _PFNX_MASK);
		ptep++;
		write_c0_entrylo1(pte_to_entrylo(ptep->pte_high));
		if (cpu_has_xpa)
			writex_c0_entrylo1(ptep->pte_low & _PFNX_MASK);
#else
		write_c0_entrylo0(ptep->pte_high);
		ptep++;
		write_c0_entrylo1(ptep->pte_high);
#endif
#else
		write_c0_entrylo0(pte_to_entrylo(pte_val(*ptep++)));
		write_c0_entrylo1(pte_to_entrylo(pte_val(*ptep)));
#endif
		mtc0_tlbw_hazard();
		if (idx < 0)
			tlb_write_random();
		else
			tlb_write_indexed();
	}
	tlbw_use_hazard();
	htw_start();
	flush_micro_tlb_vm(vma);

	if (ptemap)
		pte_unmap(ptemap);
	local_irq_restore(flags);
}

void add_wired_entry(unsigned long entrylo0, unsigned long entrylo1,
		     unsigned long entryhi, unsigned long pagemask)
{
#ifdef CONFIG_XPA
	panic("Broken for XPA kernels");
#else
	unsigned int old_mmid;
	unsigned long flags;
	unsigned long wired;
	unsigned long old_pagemask;
	unsigned long old_ctx;

	local_irq_save(flags);
	if (cpu_has_mmid) {
		old_mmid = read_c0_memorymapid();
		write_c0_memorymapid(MMID_KERNEL_WIRED);
	}
	/* Save old context and create impossible VPN2 value */
	old_ctx = read_c0_entryhi();
	htw_stop();
	old_pagemask = read_c0_pagemask();
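	/*
	 * Entries below the c0_wired index are never selected by
	 * tlb_write_random(), so claiming the next wired slot keeps this
	 * mapping in place until it is torn down explicitly.
	 */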
	wired = num_wired_entries();
	write_c0_wired(wired + 1);
	write_c0_index(wired);
	tlbw_use_hazard();	/* What is the hazard here? */
	write_c0_pagemask(pagemask);
	write_c0_entryhi(entryhi);
	write_c0_entrylo0(entrylo0);
	write_c0_entrylo1(entrylo1);
	mtc0_tlbw_hazard();
	tlb_write_indexed();
	tlbw_use_hazard();

	write_c0_entryhi(old_ctx);
	if (cpu_has_mmid)
		write_c0_memorymapid(old_mmid);
	tlbw_use_hazard();	/* What is the hazard here? */
	htw_start();
	write_c0_pagemask(old_pagemask);
	local_flush_tlb_all();
	local_irq_restore(flags);
#endif
}

#ifdef CONFIG_TRANSPARENT_HUGEPAGE

int has_transparent_hugepage(void)
{
	static unsigned int mask = -1;

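	/*
	 * Probe for huge page support by writing the huge page mask to
	 * c0_pagemask and reading it back; an MMU without support for that
	 * page size will not return the value unchanged.
	 */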
	if (mask == -1) {	/* first call comes during __init */
		unsigned long flags;

		local_irq_save(flags);
		write_c0_pagemask(PM_HUGE_MASK);
		back_to_back_c0_hazard();
		mask = read_c0_pagemask();
		write_c0_pagemask(PM_DEFAULT_MASK);
		local_irq_restore(flags);
	}
	return mask == PM_HUGE_MASK;
}
EXPORT_SYMBOL(has_transparent_hugepage);

#endif /* CONFIG_TRANSPARENT_HUGEPAGE */

/*
 * Used for loading TLB entries before trap_init() has started, when we
 * don't actually want to add a wired entry which remains throughout the
 * lifetime of the system
 */

int temp_tlb_entry;

__init int add_temporary_entry(unsigned long entrylo0, unsigned long entrylo1,
			       unsigned long entryhi, unsigned long pagemask)
{
	int ret = 0;
	unsigned long flags;
	unsigned long wired;
	unsigned long old_pagemask;
	unsigned long old_ctx;

	local_irq_save(flags);
	/* Save old context and create impossible VPN2 value */
	htw_stop();
	old_ctx = read_c0_entryhi();
	old_pagemask = read_c0_pagemask();
	wired = num_wired_entries();
	if (--temp_tlb_entry < wired) {
		printk(KERN_WARNING
		       "No TLB space left for add_temporary_entry\n");
		ret = -ENOSPC;
		goto out;
	}

	write_c0_index(temp_tlb_entry);
	write_c0_pagemask(pagemask);
	write_c0_entryhi(entryhi);
	write_c0_entrylo0(entrylo0);
	write_c0_entrylo1(entrylo1);
	mtc0_tlbw_hazard();
	tlb_write_indexed();
	tlbw_use_hazard();

	write_c0_entryhi(old_ctx);
	write_c0_pagemask(old_pagemask);
	htw_start();
out:
	local_irq_restore(flags);
	return ret;
}

static int ntlb;
static int __init set_ntlb(char *str)
{
	get_option(&str, &ntlb);
	return 1;
}

__setup("ntlb=", set_ntlb);

/*
 * Configure TLB (for init or after a CPU has been powered off).
 */
static void r4k_tlb_configure(void)
{
	/*
	 * You should never change this register:
	 *   - On R4600 1.7 the tlbp never hits for pages smaller than
	 *     the value in the c0_pagemask register.
	 *   - The entire mm handling assumes the c0_pagemask register to
	 *     be set to fixed-size pages.
	 */
	write_c0_pagemask(PM_DEFAULT_MASK);
	back_to_back_c0_hazard();
	if (read_c0_pagemask() != PM_DEFAULT_MASK)
		panic("MMU doesn't support PAGE_SIZE=0x%lx", PAGE_SIZE);

	write_c0_wired(0);
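	/*
	 * R10000-family CPUs have a FrameMask register that can mask bits
	 * out of the physical frame number written to TLB entries; clear
	 * it so the full physical address is used.
	 */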
	if (current_cpu_type() == CPU_R10000 ||
	    current_cpu_type() == CPU_R12000 ||
	    current_cpu_type() == CPU_R14000 ||
	    current_cpu_type() == CPU_R16000)
		write_c0_framemask(0);

	if (cpu_has_rixi) {
		/*
		 * Enable the no read, no exec bits, and enable large physical
		 * address.
		 */
#ifdef CONFIG_64BIT
		set_c0_pagegrain(PG_RIE | PG_XIE | PG_ELPA);
#else
		set_c0_pagegrain(PG_RIE | PG_XIE);
#endif
	}

	temp_tlb_entry = current_cpu_data.tlbsize - 1;

	/* From this point on the ARC firmware is dead.	 */
	local_flush_tlb_all();

	/* Did I tell you that ARC SUCKS?  */
}

void tlb_init(void)
{
	r4k_tlb_configure();

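	/*
	 * The "ntlb=" command line parameter restricts the TLB to ntlb
	 * entries by marking the remainder wired, so they are never
	 * selected for replacement.
	 */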
	if (ntlb) {
		if (ntlb > 1 && ntlb <= current_cpu_data.tlbsize) {
			int wired = current_cpu_data.tlbsize - ntlb;
			write_c0_wired(wired);
			write_c0_index(wired-1);
			printk("Restricting TLB to %d entries\n", ntlb);
		} else
			printk("Ignoring invalid argument ntlb=%d\n", ntlb);
	}

	build_tlb_refill_handler();
}

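/*
 * The TLB configuration may be lost while a CPU is powered down, so
 * reconfigure it when coming back out of (or failing to enter) a CPU
 * power management state.
 */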
static int r4k_tlb_pm_notifier(struct notifier_block *self, unsigned long cmd,
			       void *v)
{
	switch (cmd) {
	case CPU_PM_ENTER_FAILED:
	case CPU_PM_EXIT:
		r4k_tlb_configure();
		break;
	}

	return NOTIFY_OK;
}

static struct notifier_block r4k_tlb_pm_notifier_block = {
	.notifier_call = r4k_tlb_pm_notifier,
};

static int __init r4k_tlb_init_pm(void)
{
	return cpu_pm_register_notifier(&r4k_tlb_pm_notifier_block);
}
arch_initcall(r4k_tlb_init_pm);