xref: /openbmc/linux/arch/mips/mm/c-octeon.c (revision 160b8e75)
/*
 * This file is subject to the terms and conditions of the GNU General Public
 * License.  See the file "COPYING" in the main directory of this archive
 * for more details.
 *
 * Copyright (C) 2005-2007 Cavium Networks
 */
#include <linux/export.h>
#include <linux/kernel.h>
#include <linux/sched.h>
#include <linux/smp.h>
#include <linux/mm.h>
#include <linux/bitops.h>
#include <linux/cpu.h>
#include <linux/io.h>

#include <asm/bcache.h>
#include <asm/bootinfo.h>
#include <asm/cacheops.h>
#include <asm/cpu-features.h>
#include <asm/cpu-type.h>
#include <asm/page.h>
#include <asm/pgtable.h>
#include <asm/r4kcache.h>
#include <asm/traps.h>
#include <asm/mmu_context.h>
#include <asm/war.h>

#include <asm/octeon/octeon.h>

unsigned long long cache_err_dcache[NR_CPUS];
EXPORT_SYMBOL_GPL(cache_err_dcache);

/**
 * octeon_flush_data_cache_page - Flush a page from the data cache.
 * @addr: Virtual address of the page to flush
 *
 * Octeon automatically flushes the dcache on tlb changes, so
 * from Linux's viewpoint it acts much like a physically
 * tagged cache. No flushing is needed.
 */
static void octeon_flush_data_cache_page(unsigned long addr)
{
	/* Nothing to do */
}

static inline void octeon_local_flush_icache(void)
{
	asm volatile ("synci 0($0)");
}

/*
 * Flush local I-cache for the specified range. On Octeon a single
 * synci is sufficient to flush the whole local icache, so the
 * start/end arguments are unused.
 */
static void local_octeon_flush_icache_range(unsigned long start,
					    unsigned long end)
{
	octeon_local_flush_icache();
}

/**
 * octeon_flush_icache_all_cores - Flush caches as necessary for all cores
 * affected by a vma. If no vma is supplied, all cores are flushed.
 * @vma:    VMA to flush or NULL to flush all icaches.
 */
static void octeon_flush_icache_all_cores(struct vm_area_struct *vma)
{
	extern void octeon_send_ipi_single(int cpu, unsigned int action);
#ifdef CONFIG_SMP
	int cpu;
	cpumask_t mask;
#endif

	mb();
	octeon_local_flush_icache();
#ifdef CONFIG_SMP
	preempt_disable();
	cpu = smp_processor_id();

	/*
	 * If we have a vma structure, we only need to worry about
	 * cores it has been used on
	 */
	if (vma)
		mask = *mm_cpumask(vma->vm_mm);
	else
		mask = *cpu_online_mask;
	cpumask_clear_cpu(cpu, &mask);
	for_each_cpu(cpu, &mask)
		octeon_send_ipi_single(cpu, SMP_ICACHE_FLUSH);

	preempt_enable();
#endif
}

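/*
 * A minimal sketch of the receiving side, assuming the Octeon mailbox
 * IPI code in arch/mips/cavium-octeon/smp.c dispatches SMP_ICACHE_FLUSH
 * roughly as follows (paraphrased, not a verbatim copy):
 *
 *	if (action & SMP_ICACHE_FLUSH)
 *		asm volatile ("synci 0($0)");
 *
 * so each targeted core ends up executing the same local synci that the
 * sending core performed above.
 */
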
/**
 * octeon_flush_icache_all - Called to flush the icache on all cores.
 */
static void octeon_flush_icache_all(void)
{
	octeon_flush_icache_all_cores(NULL);
}

/**
 * octeon_flush_cache_mm - Called to flush all memory associated with a
 * memory context.
 * @mm:	    Memory context to flush
 */
static void octeon_flush_cache_mm(struct mm_struct *mm)
{
	/*
	 * According to the R4K version of this file, CPUs without
	 * dcache aliases don't need to do anything here
	 */
}

/**
 * octeon_flush_icache_range - Flush a range of kernel addresses out of the
 * icache.
 * @start:  Start of the range to flush
 * @end:    End of the range to flush
 */
static void octeon_flush_icache_range(unsigned long start, unsigned long end)
{
	octeon_flush_icache_all_cores(NULL);
}

/**
 * octeon_flush_cache_sigtramp - Flush the icache for a signal trampoline,
 * so that newly written trampoline code is visible to instruction fetch.
 * @addr:   Address of the trampoline to flush
 */
static void octeon_flush_cache_sigtramp(unsigned long addr)
{
	struct vm_area_struct *vma;

	down_read(&current->mm->mmap_sem);
	vma = find_vma(current->mm, addr);
	octeon_flush_icache_all_cores(vma);
	up_read(&current->mm->mmap_sem);
}

/**
 * octeon_flush_cache_range - Flush a range out of a vma.
 * @vma:    VMA to flush
 * @start:  Start of the range to flush
 * @end:    End of the range to flush
 */
static void octeon_flush_cache_range(struct vm_area_struct *vma,
				     unsigned long start, unsigned long end)
{
	if (vma->vm_flags & VM_EXEC)
		octeon_flush_icache_all_cores(vma);
}

/**
 * octeon_flush_cache_page - Flush a specific page of a vma.
 * @vma:    VMA to flush page for
 * @page:   Page to flush
 * @pfn:    Page frame number of the page
 */
static void octeon_flush_cache_page(struct vm_area_struct *vma,
				    unsigned long page, unsigned long pfn)
{
	if (vma->vm_flags & VM_EXEC)
		octeon_flush_icache_all_cores(vma);
}

static void octeon_flush_kernel_vmap_range(unsigned long vaddr, int size)
{
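	/*
	 * Octeon's dcache behaves as physically tagged (see the comment
	 * above octeon_flush_data_cache_page()), so this hook should be
	 * unreachable; BUG() catches any unexpected caller.
	 */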
	BUG();
}

/**
 * probe_octeon - Probe Octeon's caches.
 */
static void probe_octeon(void)
{
	unsigned long icache_size;
	unsigned long dcache_size;
	unsigned int config1;
	struct cpuinfo_mips *c = &current_cpu_data;
	int cputype = current_cpu_type();

	config1 = read_c0_config1();
	switch (cputype) {
	case CPU_CAVIUM_OCTEON:
	case CPU_CAVIUM_OCTEON_PLUS:
		c->icache.linesz = 2 << ((config1 >> 19) & 7);
		c->icache.sets = 64 << ((config1 >> 22) & 7);
		c->icache.ways = 1 + ((config1 >> 16) & 7);
		c->icache.flags |= MIPS_CACHE_VTAG;
		icache_size =
			c->icache.sets * c->icache.ways * c->icache.linesz;
		c->icache.waybit = ffs(icache_size / c->icache.ways) - 1;
		c->dcache.linesz = 128;
		if (cputype == CPU_CAVIUM_OCTEON_PLUS)
			c->dcache.sets = 2; /* CN5XXX has two Dcache sets */
		else
			c->dcache.sets = 1; /* CN3XXX has one Dcache set */
		c->dcache.ways = 64;
		dcache_size =
			c->dcache.sets * c->dcache.ways * c->dcache.linesz;
		c->dcache.waybit = ffs(dcache_size / c->dcache.ways) - 1;
		c->options |= MIPS_CPU_PREFETCH;
		break;

	case CPU_CAVIUM_OCTEON2:
		c->icache.linesz = 2 << ((config1 >> 19) & 7);
		c->icache.sets = 8;
		c->icache.ways = 37;
		c->icache.flags |= MIPS_CACHE_VTAG;
		icache_size = c->icache.sets * c->icache.ways * c->icache.linesz;

		c->dcache.linesz = 128;
		c->dcache.ways = 32;
		c->dcache.sets = 8;
		dcache_size = c->dcache.sets * c->dcache.ways * c->dcache.linesz;
		c->options |= MIPS_CPU_PREFETCH;
		break;

	case CPU_CAVIUM_OCTEON3:
		c->icache.linesz = 128;
		c->icache.sets = 16;
		c->icache.ways = 39;
		c->icache.flags |= MIPS_CACHE_VTAG;
		icache_size = c->icache.sets * c->icache.ways * c->icache.linesz;

		c->dcache.linesz = 128;
		c->dcache.ways = 32;
		c->dcache.sets = 8;
		dcache_size = c->dcache.sets * c->dcache.ways * c->dcache.linesz;
		c->options |= MIPS_CPU_PREFETCH;
		break;

	default:
		panic("Unsupported Cavium Networks CPU type");
		break;
	}

	/* compute a couple of other cache variables */
	c->icache.waysize = icache_size / c->icache.ways;
	c->dcache.waysize = dcache_size / c->dcache.ways;

	c->icache.sets = icache_size / (c->icache.linesz * c->icache.ways);
	c->dcache.sets = dcache_size / (c->dcache.linesz * c->dcache.ways);

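	/*
	 * Worked example (OCTEON III): icache_size = 16 sets * 39 ways *
	 * 128 bytes = 79872 bytes, giving waysize = 79872 / 39 = 2048 and
	 * sets = 79872 / (128 * 39) = 16, matching the values probed above.
	 */
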
	if (smp_processor_id() == 0) {
		pr_notice("Primary instruction cache %ldkB, %s, %d way, %d sets, linesize %d bytes.\n",
			  icache_size >> 10,
			  cpu_has_vtag_icache ?
				"virtually tagged" : "physically tagged",
			  c->icache.ways, c->icache.sets, c->icache.linesz);

		pr_notice("Primary data cache %ldkB, %d-way, %d sets, linesize %d bytes.\n",
			  dcache_size >> 10, c->dcache.ways,
			  c->dcache.sets, c->dcache.linesz);
	}
}

static void octeon_cache_error_setup(void)
{
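	/*
	 * The cache error exception vectors to EBASE + 0x100; install the
	 * Octeon-specific handler there (set_handler() copies 0x80 bytes).
	 */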
	extern char except_vec2_octeon;

	set_handler(0x100, &except_vec2_octeon, 0x80);
}

/**
 * octeon_cache_init - Set up the Octeon cache flush routines.
 *
 * Installs the Octeon-specific implementations into the generic MIPS
 * cache hooks (flush_cache_all, flush_icache_range, etc.).
 */
void octeon_cache_init(void)
{
	probe_octeon();

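	/*
	 * There are no dcache aliases, so shared mappings need only page
	 * alignment; hence the alignment mask is just PAGE_SIZE - 1.
	 */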
	shm_align_mask = PAGE_SIZE - 1;

	flush_cache_all			= octeon_flush_icache_all;
	__flush_cache_all		= octeon_flush_icache_all;
	flush_cache_mm			= octeon_flush_cache_mm;
	flush_cache_page		= octeon_flush_cache_page;
	flush_cache_range		= octeon_flush_cache_range;
	flush_cache_sigtramp		= octeon_flush_cache_sigtramp;
	flush_icache_all		= octeon_flush_icache_all;
	flush_data_cache_page		= octeon_flush_data_cache_page;
	flush_icache_range		= octeon_flush_icache_range;
	local_flush_icache_range	= local_octeon_flush_icache_range;
	__flush_icache_user_range	= octeon_flush_icache_range;
	__local_flush_icache_user_range	= local_octeon_flush_icache_range;

	__flush_kernel_vmap_range	= octeon_flush_kernel_vmap_range;

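	/* Generate the optimized clear_page()/copy_page() routines */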
	build_clear_page();
	build_copy_page();

	board_cache_error_setup = octeon_cache_error_setup;
}

/*
 * Handle a cache error exception
 */
static RAW_NOTIFIER_HEAD(co_cache_error_chain);

int register_co_cache_error_notifier(struct notifier_block *nb)
{
	return raw_notifier_chain_register(&co_cache_error_chain, nb);
}
EXPORT_SYMBOL_GPL(register_co_cache_error_notifier);

int unregister_co_cache_error_notifier(struct notifier_block *nb)
{
	return raw_notifier_chain_unregister(&co_cache_error_chain, nb);
}
EXPORT_SYMBOL_GPL(unregister_co_cache_error_notifier);
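
/*
 * A minimal, hypothetical usage sketch (not part of this file): a client
 * interested in cache error reports could hook the chain like this:
 *
 *	static int my_cache_err_cb(struct notifier_block *nb,
 *				   unsigned long val, void *data)
 *	{
 *		return NOTIFY_OK;
 *	}
 *	static struct notifier_block my_cache_err_nb = {
 *		.notifier_call = my_cache_err_cb,
 *	};
 *	register_co_cache_error_notifier(&my_cache_err_nb);
 *
 * Returning NOTIFY_OK makes co_cache_error_call_notifiers() below skip
 * its default pr_err() dump.
 */
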
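/*
 * @val is nonzero when called from the non-recoverable path, in which
 * case the dcache error status is expected to have been saved into
 * cache_err_dcache[] by the low-level exception handler; when @val is
 * zero the dcache error register is read directly.
 */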
static void co_cache_error_call_notifiers(unsigned long val)
{
	int rv = raw_notifier_call_chain(&co_cache_error_chain, val, NULL);

	if ((rv & ~NOTIFY_STOP_MASK) != NOTIFY_OK) {
		u64 dcache_err;
		unsigned long coreid = cvmx_get_core_num();
		u64 icache_err = read_octeon_c0_icacheerr();

		if (val) {
			dcache_err = cache_err_dcache[coreid];
			cache_err_dcache[coreid] = 0;
		} else {
			dcache_err = read_octeon_c0_dcacheerr();
		}

		pr_err("Core%lu: Cache error exception:\n", coreid);
		pr_err("cp0_errorepc == %lx\n", read_c0_errorepc());
		if (icache_err & 1) {
			pr_err("CacheErr (Icache) == %llx\n",
			       (unsigned long long)icache_err);
			write_octeon_c0_icacheerr(0);
		}
		if (dcache_err & 1) {
			pr_err("CacheErr (Dcache) == %llx\n",
			       (unsigned long long)dcache_err);
		}
	}
}

/*
 * Called when the exception is recoverable
 */
asmlinkage void cache_parity_error_octeon_recoverable(void)
{
	co_cache_error_call_notifiers(0);
}

/*
 * Called when the exception is not recoverable
 */
asmlinkage void cache_parity_error_octeon_non_recoverable(void)
{
	co_cache_error_call_notifiers(1);
	panic("Can't handle cache error: nested exception");
}
372