/*
 * OpenRISC tlb.c
 *
 * Linux architectural port borrowing liberally from similar works of
 * others.  All original copyrights apply as per the original source
 * declaration.
 *
 * Modifications for the OpenRISC architecture:
 * Copyright (C) 2003 Matjaz Breskvar <phoenix@bsemi.com>
 * Copyright (C) 2010-2011 Julius Baxter <julius.baxter@orsoc.se>
 * Copyright (C) 2010-2011 Jonas Bonn <jonas@southpole.se>
 *
 *      This program is free software; you can redistribute it and/or
 *      modify it under the terms of the GNU General Public License
 *      as published by the Free Software Foundation; either version
 *      2 of the License, or (at your option) any later version.
 */

#include <linux/sched.h>
#include <linux/kernel.h>
#include <linux/errno.h>
#include <linux/string.h>
#include <linux/types.h>
#include <linux/ptrace.h>
#include <linux/mman.h>
#include <linux/mm.h>
#include <linux/init.h>

#include <asm/tlbflush.h>
#include <asm/pgtable.h>
#include <asm/mmu_context.h>
#include <asm/spr_defs.h>

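/*
 * This port has no hardware contexts (ASIDs): mm->context is set to
 * NO_CONTEXT in init_new_context() below and never used to tag TLB
 * entries, which is why switch_mm() must flush the previous map.
 */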
#define NO_CONTEXT -1

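/*
 * The NTS field of the xMMUCFGR registers holds log2 of the number of
 * TLB sets (per the OpenRISC architecture manual), hence the '1 <<'
 * below.  xTLB_OFFSET() maps a virtual address to its direct-mapped
 * set index, i.e. the low bits of the virtual page number: with 8 KiB
 * pages (PAGE_SHIFT == 13) and 64 sets, address A lands in set
 * (A >> 13) & 63.
 */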
#define NUM_DTLB_SETS (1 << ((mfspr(SPR_DMMUCFGR) & SPR_DMMUCFGR_NTS) >> \
			    SPR_DMMUCFGR_NTS_OFF))
#define NUM_ITLB_SETS (1 << ((mfspr(SPR_IMMUCFGR) & SPR_IMMUCFGR_NTS) >> \
			    SPR_IMMUCFGR_NTS_OFF))
#define DTLB_OFFSET(addr) (((addr) >> PAGE_SHIFT) & (NUM_DTLB_SETS-1))
#define ITLB_OFFSET(addr) (((addr) >> PAGE_SHIFT) & (NUM_ITLB_SETS-1))

/*
 * Invalidate all TLB entries.
 *
 * This comes down to setting the 'valid' bit of all xTLBMR registers to 0.
 * The easiest way to accomplish this is to just zero out the xTLBMR
 * registers completely.
 */

void local_flush_tlb_all(void)
{
	int i;
	unsigned long num_tlb_sets;

	/* Determine number of sets for IMMU. */
	/* FIXME: Assumption is I & D nsets equal. */
	num_tlb_sets = NUM_ITLB_SETS;

	for (i = 0; i < num_tlb_sets; i++) {
		mtspr_off(SPR_DTLBMR_BASE(0), i, 0);
		mtspr_off(SPR_ITLBMR_BASE(0), i, 0);
	}
}
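
/*
 * The TEIRI bit in xMMUCFGR indicates whether the optional xTLBEIR
 * (TLB Entry Invalidate Register) is implemented on this core.
 */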

#define have_dtlbeir (mfspr(SPR_DMMUCFGR) & SPR_DMMUCFGR_TEIRI)
#define have_itlbeir (mfspr(SPR_IMMUCFGR) & SPR_IMMUCFGR_TEIRI)

/*
 * Invalidate a single page.  This is what the xTLBEIR register is for.
 *
 * There's no point in checking the vma for PAGE_EXEC to determine whether it's
 * the data or instruction TLB that should be flushed... that would take more
 * than the few instructions that the following compiles down to!
 *
 * The case where we don't have the xTLBEIR register really only works for
 * MMUs with a single way and is hard-coded that way.
 */

#define flush_dtlb_page_eir(addr) mtspr(SPR_DTLBEIR, addr)
#define flush_dtlb_page_no_eir(addr) \
	mtspr_off(SPR_DTLBMR_BASE(0), DTLB_OFFSET(addr), 0)

#define flush_itlb_page_eir(addr) mtspr(SPR_ITLBEIR, addr)
#define flush_itlb_page_no_eir(addr) \
	mtspr_off(SPR_ITLBMR_BASE(0), ITLB_OFFSET(addr), 0)

void local_flush_tlb_page(struct vm_area_struct *vma, unsigned long addr)
{
	if (have_dtlbeir)
		flush_dtlb_page_eir(addr);
	else
		flush_dtlb_page_no_eir(addr);

	if (have_itlbeir)
		flush_itlb_page_eir(addr);
	else
		flush_itlb_page_no_eir(addr);
}

void local_flush_tlb_range(struct vm_area_struct *vma,
			   unsigned long start, unsigned long end)
{
	unsigned long addr;
	bool dtlbeir;
	bool itlbeir;
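
	/*
	 * Read the MMU configuration registers only once; whether the
	 * xTLBEIR registers exist can't change while we walk the range.
	 */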
	dtlbeir = have_dtlbeir;
	itlbeir = have_itlbeir;

	for (addr = start; addr < end; addr += PAGE_SIZE) {
		if (dtlbeir)
			flush_dtlb_page_eir(addr);
		else
			flush_dtlb_page_no_eir(addr);

		if (itlbeir)
			flush_itlb_page_eir(addr);
		else
			flush_itlb_page_no_eir(addr);
	}
}

/*
 * Invalidate the selected mm context only.
 *
 * FIXME: Due to some bug here, we're flushing everything for now.
 * This should be changed to loop over the mm's vmas and call
 * flush_tlb_range for each.
 */

void local_flush_tlb_mm(struct mm_struct *mm)
{
	/*
	 * We were seeing bugs with the mm struct passed to us, so most
	 * of this function has been scrapped; several architectures
	 * simply flush everything instead, as we do here.
	 */
	local_flush_tlb_all();
}

/* called in schedule() just before actually doing the switch_to */

void switch_mm(struct mm_struct *prev, struct mm_struct *next,
	       struct task_struct *next_tsk)
{
	/*
	 * Remember the pgd for the fault handlers; this is similar to
	 * the pgd register in some other CPUs.  We need our own copy of
	 * it because current and active_mm might be invalid at points
	 * where we still need to dereference the pgd.
	 */
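	/*
	 * (The TLB miss handlers in head.S start their page-table walk
	 * from this per-CPU entry.)
	 */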
	current_pgd[smp_processor_id()] = next->pgd;

	/*
	 * We don't have context support implemented, so flush all
	 * entries belonging to the previous map.
	 */
	if (prev != next)
		local_flush_tlb_mm(prev);
}

/*
 * Initialize the context-related info for a new mm_struct
 * instance.
 */

int init_new_context(struct task_struct *tsk, struct mm_struct *mm)
{
	mm->context = NO_CONTEXT;
	return 0;
}

/*
 * Called by __exit_mm to destroy the used MMU context, if any, before
 * destroying the mm itself.  This is only called when the last user of
 * the mm drops it.
 */

void destroy_context(struct mm_struct *mm)
{
	flush_tlb_mm(mm);
}

/* Called once during VM initialization, from init.c. */

void __init tlb_init(void)
{
	/*
	 * Do nothing for now; if it ever proves necessary, the entire
	 * TLB could be invalidated here:
	 *
	 *	flush_tlb_all();
	 */
}