/*
 * Cache control for MicroBlaze cache memories
 *
 * Copyright (C) 2007-2009 Michal Simek <monstr@monstr.eu>
 * Copyright (C) 2007-2009 PetaLogix
 * Copyright (C) 2007-2009 John Williams <john.williams@petalogix.com>
 *
 * This file is subject to the terms and conditions of the GNU General
 * Public License. See the file COPYING in the main directory of this
 * archive for more details.
 */

#include <asm/cacheflush.h>
#include <linux/cache.h>
#include <asm/cpuinfo.h>
#include <asm/pvr.h>

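/*
 * Two families of cache enable/disable helpers follow.  The *_msr
 * variants use the atomic msrset/msrclr instructions, available only
 * when the CPU is configured with them (PVR2_USE_MSR_INSTR); the
 * *_nomsr variants fall back to a read-modify-write of rmsr via r12,
 * which is why r12 shows up in their clobber lists.
 */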
static inline void __enable_icache_msr(void)
{
	__asm__ __volatile__ (" msrset r0, %0;" \
			"nop;" \
			: : "i" (MSR_ICE) : "memory");
}

static inline void __disable_icache_msr(void)
{
	__asm__ __volatile__ (" msrclr r0, %0;" \
			"nop;" \
			: : "i" (MSR_ICE) : "memory");
}

static inline void __enable_dcache_msr(void)
{
	__asm__ __volatile__ (" msrset r0, %0;" \
			"nop;" \
			: : "i" (MSR_DCE) : "memory");
}

static inline void __disable_dcache_msr(void)
{
	__asm__ __volatile__ (" msrclr r0, %0;" \
			"nop;" \
			: : "i" (MSR_DCE) : "memory");
}

static inline void __enable_icache_nomsr(void)
{
	__asm__ __volatile__ (" mfs r12, rmsr;" \
			"nop;" \
			"ori r12, r12, %0;" \
			"mts rmsr, r12;" \
			"nop;" \
			: : "i" (MSR_ICE) : "memory", "r12");
}

static inline void __disable_icache_nomsr(void)
{
	__asm__ __volatile__ (" mfs r12, rmsr;" \
			"nop;" \
			"andi r12, r12, ~%0;" \
			"mts rmsr, r12;" \
			"nop;" \
			: : "i" (MSR_ICE) : "memory", "r12");
}

static inline void __enable_dcache_nomsr(void)
{
	__asm__ __volatile__ (" mfs r12, rmsr;" \
			"nop;" \
			"ori r12, r12, %0;" \
			"mts rmsr, r12;" \
			"nop;" \
			: : "i" (MSR_DCE) : "memory", "r12");
}

static inline void __disable_dcache_nomsr(void)
{
	__asm__ __volatile__ (" mfs r12, rmsr;" \
			"nop;" \
			"andi r12, r12, ~%0;" \
			"mts rmsr, r12;" \
			"nop;" \
			: : "i" (MSR_DCE) : "memory", "r12");
}

/* Helper macro for computing the limits of cache range loops
 *
 * The end address may be unaligned, which is fine for the C
 * implementation; the ASM implementation aligns it in the ASM macros.
 */
#define CACHE_LOOP_LIMITS(start, end, cache_line_length, cache_size) \
do { \
	int align = ~(cache_line_length - 1); \
	if (start < UINT_MAX - cache_size) \
		end = min(start + cache_size, end); \
	start &= align; \
} while (0)
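
/*
 * Worked example for CACHE_LOOP_LIMITS (hypothetical numbers: a 16 KB
 * cache with 32-byte lines, range 0x1005..0x9000):
 *
 *	end   = min(0x1005 + 0x4000, 0x9000) = 0x5005
 *	start = 0x1005 & ~0x1f               = 0x1000
 *
 * i.e. the range is clamped so no loop walks more than one full cache
 * worth of lines, and start is rounded down to a line boundary.
 */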

/*
 * Helper macro to loop over the specified cache_size/line_length and
 * execute 'op' on each cache line
 */
#define CACHE_ALL_LOOP(cache_size, line_length, op) \
do { \
	unsigned int len = cache_size - line_length; \
	int step = -line_length; \
	WARN_ON(step >= 0); \
 \
	__asm__ __volatile__ (" 1: " #op " %0, r0;" \
				"bgtid %0, 1b;" \
				"addk %0, %0, %1;" \
			: : "r" (len), "r" (step) \
			: "memory"); \
} while (0)
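
/*
 * CACHE_ALL_LOOP issues one "op rA, r0" per line while counting the
 * index down from (cache_size - line_length); the addk sits in the
 * bgtid delay slot, so the last iteration runs with the index already
 * at zero.  A rough C sketch of the same walk:
 *
 *	for (i = cache_size - line_length; i >= 0; i -= line_length)
 *		op(i);
 */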

/* Used for wdc.flush/clear, which can take rB as an offset; that is not
 * possible for plain wdc or wic.
 *
 * The start address is cache-line aligned. The end address may not be:
 * if end is aligned, subtract one cache-line length, because the next
 * cache line must not be flushed/invalidated; if it is not, align it
 * down, since the whole line is flushed/invalidated anyway.
 */
#define CACHE_RANGE_LOOP_2(start, end, line_length, op) \
do { \
	int step = -line_length; \
	int align = ~(line_length - 1); \
	int count; \
	end = ((end & align) == end) ? end - line_length : end & align; \
	count = end - start; \
	WARN_ON(count < 0); \
 \
	__asm__ __volatile__ (" 1: " #op " %0, %1;" \
				"bgtid %1, 1b;" \
				"addk %1, %1, %2;" \
			: : "r" (start), "r" (count), \
			"r" (step) : "memory"); \
} while (0)
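
/*
 * CACHE_RANGE_LOOP_2 exploits the "op rA, rB" form: rA holds the
 * aligned start and rB an offset that counts down to zero, so the
 * last line touched is start itself.  Rough C sketch:
 *
 *	for (count = end - start; count >= 0; count -= line_length)
 *		op(start + count);
 */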

/* Only the first operand of OP is used - for wic, wdc (rB stays r0) */
#define CACHE_RANGE_LOOP_1(start, end, line_length, op) \
do { \
	unsigned int volatile temp = 0; \
	unsigned int align = ~(line_length - 1); \
	end = ((end & align) == end) ? end - line_length : end & align; \
	WARN_ON(end < start); \
 \
	__asm__ __volatile__ (" 1: " #op " %1, r0;" \
				"cmpu %0, %1, %2;" \
				"bgtid %0, 1b;" \
				"addk %1, %1, %3;" \
			: : "r" (temp), "r" (start), "r" (end), \
			"r" (line_length) : "memory"); \
} while (0)
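
/*
 * CACHE_RANGE_LOOP_1 walks upward instead: cmpu compares the current
 * address with the adjusted end and bgtid loops while end is still
 * greater, incrementing the address in the delay slot.  Rough C
 * sketch:
 *
 *	for (addr = start; addr <= end; addr += line_length)
 *		op(addr);
 */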
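/*
 * With ASM_LOOP defined, the hand-written loop macros above are used;
 * commenting it out switches every function below to the plain C
 * loops kept under #ifndef ASM_LOOP (handy for debugging).
 */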
#define ASM_LOOP
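/*
 * The flush/invalidate implementations below all follow one pattern:
 * clamp the range via CACHE_LOOP_LIMITS (range variants only),
 * optionally disable interrupts and the cache being operated on, run
 * the per-line loop, then re-enable everything in reverse order.
 * They differ only in the opcode (wic, wdc, wdc.flush, wdc.clear)
 * and in which enable/disable helpers they use.
 */
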
static void __flush_icache_range_msr_irq(unsigned long start, unsigned long end)
{
	unsigned long flags;
#ifndef ASM_LOOP
	int i;
#endif
	pr_debug("%s: start 0x%x, end 0x%x\n", __func__,
			(unsigned int)start, (unsigned int)end);

	CACHE_LOOP_LIMITS(start, end,
			cpuinfo.icache_line_length, cpuinfo.icache_size);

	local_irq_save(flags);
	__disable_icache_msr();

#ifdef ASM_LOOP
	CACHE_RANGE_LOOP_1(start, end, cpuinfo.icache_line_length, wic);
#else
	for (i = start; i < end; i += cpuinfo.icache_line_length)
		__asm__ __volatile__ ("wic %0, r0;" \
				: : "r" (i));
#endif
	__enable_icache_msr();
	local_irq_restore(flags);
}

static void __flush_icache_range_nomsr_irq(unsigned long start,
				unsigned long end)
{
	unsigned long flags;
#ifndef ASM_LOOP
	int i;
#endif
	pr_debug("%s: start 0x%x, end 0x%x\n", __func__,
			(unsigned int)start, (unsigned int)end);

	CACHE_LOOP_LIMITS(start, end,
			cpuinfo.icache_line_length, cpuinfo.icache_size);

	local_irq_save(flags);
	__disable_icache_nomsr();

#ifdef ASM_LOOP
	CACHE_RANGE_LOOP_1(start, end, cpuinfo.icache_line_length, wic);
#else
	for (i = start; i < end; i += cpuinfo.icache_line_length)
		__asm__ __volatile__ ("wic %0, r0;" \
				: : "r" (i));
#endif

	__enable_icache_nomsr();
	local_irq_restore(flags);
}

static void __flush_icache_range_noirq(unsigned long start,
				unsigned long end)
{
#ifndef ASM_LOOP
	int i;
#endif
	pr_debug("%s: start 0x%x, end 0x%x\n", __func__,
			(unsigned int)start, (unsigned int)end);

	CACHE_LOOP_LIMITS(start, end,
			cpuinfo.icache_line_length, cpuinfo.icache_size);
#ifdef ASM_LOOP
	CACHE_RANGE_LOOP_1(start, end, cpuinfo.icache_line_length, wic);
#else
	for (i = start; i < end; i += cpuinfo.icache_line_length)
		__asm__ __volatile__ ("wic %0, r0;" \
				: : "r" (i));
#endif
}

static void __flush_icache_all_msr_irq(void)
{
	unsigned long flags;
#ifndef ASM_LOOP
	int i;
#endif
	pr_debug("%s\n", __func__);

	local_irq_save(flags);
	__disable_icache_msr();
#ifdef ASM_LOOP
	CACHE_ALL_LOOP(cpuinfo.icache_size, cpuinfo.icache_line_length, wic);
#else
	for (i = 0; i < cpuinfo.icache_size;
		 i += cpuinfo.icache_line_length)
		__asm__ __volatile__ ("wic %0, r0;" \
				: : "r" (i));
#endif
	__enable_icache_msr();
	local_irq_restore(flags);
}

static void __flush_icache_all_nomsr_irq(void)
{
	unsigned long flags;
#ifndef ASM_LOOP
	int i;
#endif
	pr_debug("%s\n", __func__);

	local_irq_save(flags);
	__disable_icache_nomsr();
#ifdef ASM_LOOP
	CACHE_ALL_LOOP(cpuinfo.icache_size, cpuinfo.icache_line_length, wic);
#else
	for (i = 0; i < cpuinfo.icache_size;
		 i += cpuinfo.icache_line_length)
		__asm__ __volatile__ ("wic %0, r0;" \
				: : "r" (i));
#endif
	__enable_icache_nomsr();
	local_irq_restore(flags);
}

static void __flush_icache_all_noirq(void)
{
#ifndef ASM_LOOP
	int i;
#endif
	pr_debug("%s\n", __func__);
#ifdef ASM_LOOP
	CACHE_ALL_LOOP(cpuinfo.icache_size, cpuinfo.icache_line_length, wic);
#else
	for (i = 0; i < cpuinfo.icache_size;
		 i += cpuinfo.icache_line_length)
		__asm__ __volatile__ ("wic %0, r0;" \
				: : "r" (i));
#endif
}

static void __invalidate_dcache_all_msr_irq(void)
{
	unsigned long flags;
#ifndef ASM_LOOP
	int i;
#endif
	pr_debug("%s\n", __func__);

	local_irq_save(flags);
	__disable_dcache_msr();
#ifdef ASM_LOOP
	CACHE_ALL_LOOP(cpuinfo.dcache_size, cpuinfo.dcache_line_length, wdc);
#else
	for (i = 0; i < cpuinfo.dcache_size;
		 i += cpuinfo.dcache_line_length)
		__asm__ __volatile__ ("wdc %0, r0;" \
				: : "r" (i));
#endif
	__enable_dcache_msr();
	local_irq_restore(flags);
}

static void __invalidate_dcache_all_nomsr_irq(void)
{
	unsigned long flags;
#ifndef ASM_LOOP
	int i;
#endif
	pr_debug("%s\n", __func__);

	local_irq_save(flags);
	__disable_dcache_nomsr();
#ifdef ASM_LOOP
	CACHE_ALL_LOOP(cpuinfo.dcache_size, cpuinfo.dcache_line_length, wdc);
#else
	for (i = 0; i < cpuinfo.dcache_size;
		 i += cpuinfo.dcache_line_length)
		__asm__ __volatile__ ("wdc %0, r0;" \
				: : "r" (i));
#endif
	__enable_dcache_nomsr();
	local_irq_restore(flags);
}

static void __invalidate_dcache_all_noirq_wt(void)
{
#ifndef ASM_LOOP
	int i;
#endif
	pr_debug("%s\n", __func__);
#ifdef ASM_LOOP
	CACHE_ALL_LOOP(cpuinfo.dcache_size, cpuinfo.dcache_line_length, wdc);
#else
	for (i = 0; i < cpuinfo.dcache_size;
		 i += cpuinfo.dcache_line_length)
		__asm__ __volatile__ ("wdc %0, r0;" \
				: : "r" (i));
#endif
}

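/*
 * On a write-through cache a line is never dirty, so plain wdc both
 * "flushes" and invalidates; the wt_* models below therefore point
 * dfl/dflr at the invalidate functions.
 */
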
/*
 * FIXME This is blind invalidation, as expected, but it cannot be
 * called on noMMU in microblaze_cache_init below.
 *
 * MS: a noMMU kernel won't boot if plain wdc is used.
 * The likely reason is that data the kernel still needs gets discarded.
 */
static void __invalidate_dcache_all_wb(void)
{
#ifndef ASM_LOOP
	int i;
#endif
	pr_debug("%s\n", __func__);
#ifdef ASM_LOOP
	CACHE_ALL_LOOP(cpuinfo.dcache_size, cpuinfo.dcache_line_length,
			wdc);
#else
	for (i = 0; i < cpuinfo.dcache_size;
		 i += cpuinfo.dcache_line_length)
		__asm__ __volatile__ ("wdc %0, r0;" \
				: : "r" (i));
#endif
}

static void __invalidate_dcache_range_wb(unsigned long start,
				unsigned long end)
{
#ifndef ASM_LOOP
	int i;
#endif
	pr_debug("%s: start 0x%x, end 0x%x\n", __func__,
			(unsigned int)start, (unsigned int)end);

	CACHE_LOOP_LIMITS(start, end,
			cpuinfo.dcache_line_length, cpuinfo.dcache_size);
#ifdef ASM_LOOP
	CACHE_RANGE_LOOP_2(start, end, cpuinfo.dcache_line_length, wdc.clear);
#else
	for (i = start; i < end; i += cpuinfo.dcache_line_length)
		__asm__ __volatile__ ("wdc.clear %0, r0;" \
				: : "r" (i));
#endif
}

static void __invalidate_dcache_range_nomsr_wt(unsigned long start,
				unsigned long end)
{
#ifndef ASM_LOOP
	int i;
#endif
	pr_debug("%s: start 0x%x, end 0x%x\n", __func__,
			(unsigned int)start, (unsigned int)end);
	CACHE_LOOP_LIMITS(start, end,
			cpuinfo.dcache_line_length, cpuinfo.dcache_size);

#ifdef ASM_LOOP
	CACHE_RANGE_LOOP_1(start, end, cpuinfo.dcache_line_length, wdc);
#else
	for (i = start; i < end; i += cpuinfo.dcache_line_length)
		__asm__ __volatile__ ("wdc %0, r0;" \
				: : "r" (i));
#endif
}

static void __invalidate_dcache_range_msr_irq_wt(unsigned long start,
				unsigned long end)
{
	unsigned long flags;
#ifndef ASM_LOOP
	int i;
#endif
	pr_debug("%s: start 0x%x, end 0x%x\n", __func__,
			(unsigned int)start, (unsigned int)end);
	CACHE_LOOP_LIMITS(start, end,
			cpuinfo.dcache_line_length, cpuinfo.dcache_size);

	local_irq_save(flags);
	__disable_dcache_msr();

#ifdef ASM_LOOP
	CACHE_RANGE_LOOP_1(start, end, cpuinfo.dcache_line_length, wdc);
#else
	for (i = start; i < end; i += cpuinfo.dcache_line_length)
		__asm__ __volatile__ ("wdc %0, r0;" \
				: : "r" (i));
#endif

	__enable_dcache_msr();
	local_irq_restore(flags);
}

static void __invalidate_dcache_range_nomsr_irq(unsigned long start,
				unsigned long end)
{
	unsigned long flags;
#ifndef ASM_LOOP
	int i;
#endif
	pr_debug("%s: start 0x%x, end 0x%x\n", __func__,
			(unsigned int)start, (unsigned int)end);

	CACHE_LOOP_LIMITS(start, end,
			cpuinfo.dcache_line_length, cpuinfo.dcache_size);

	local_irq_save(flags);
	__disable_dcache_nomsr();

#ifdef ASM_LOOP
	CACHE_RANGE_LOOP_1(start, end, cpuinfo.dcache_line_length, wdc);
#else
	for (i = start; i < end; i += cpuinfo.dcache_line_length)
		__asm__ __volatile__ ("wdc %0, r0;" \
				: : "r" (i));
#endif

	__enable_dcache_nomsr();
	local_irq_restore(flags);
}

static void __flush_dcache_all_wb(void)
{
#ifndef ASM_LOOP
	int i;
#endif
	pr_debug("%s\n", __func__);
#ifdef ASM_LOOP
	CACHE_ALL_LOOP(cpuinfo.dcache_size, cpuinfo.dcache_line_length,
			wdc.flush);
#else
	for (i = 0; i < cpuinfo.dcache_size;
		 i += cpuinfo.dcache_line_length)
		__asm__ __volatile__ ("wdc.flush %0, r0;" \
				: : "r" (i));
#endif
}

static void __flush_dcache_range_wb(unsigned long start, unsigned long end)
{
#ifndef ASM_LOOP
	int i;
#endif
	pr_debug("%s: start 0x%x, end 0x%x\n", __func__,
			(unsigned int)start, (unsigned int)end);

	CACHE_LOOP_LIMITS(start, end,
			cpuinfo.dcache_line_length, cpuinfo.dcache_size);
#ifdef ASM_LOOP
	CACHE_RANGE_LOOP_2(start, end, cpuinfo.dcache_line_length, wdc.flush);
#else
	for (i = start; i < end; i += cpuinfo.dcache_line_length)
		__asm__ __volatile__ ("wdc.flush %0, r0;" \
				: : "r" (i));
#endif
}

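/*
 * The rest of the kernel does not call the helpers above directly:
 * the wrappers in asm/cacheflush.h dispatch through the mbc pointer
 * (e.g. enable_icache() ends up in mbc->ie()), so selecting a cache
 * model is just a matter of pointing mbc at the right ops table.
 */
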
/* Cache operations table; a single struct serves both WB and WT caches */
struct scache *mbc;

/* New WB cache model */
static const struct scache wb_msr = {
	.ie = __enable_icache_msr,
	.id = __disable_icache_msr,
	.ifl = __flush_icache_all_noirq,
	.iflr = __flush_icache_range_noirq,
	.iin = __flush_icache_all_noirq,
	.iinr = __flush_icache_range_noirq,
	.de = __enable_dcache_msr,
	.dd = __disable_dcache_msr,
	.dfl = __flush_dcache_all_wb,
	.dflr = __flush_dcache_range_wb,
	.din = __invalidate_dcache_all_wb,
	.dinr = __invalidate_dcache_range_wb,
};

/* The only difference from wb_msr is in the ie, id, de, dd functions */
static const struct scache wb_nomsr = {
	.ie = __enable_icache_nomsr,
	.id = __disable_icache_nomsr,
	.ifl = __flush_icache_all_noirq,
	.iflr = __flush_icache_range_noirq,
	.iin = __flush_icache_all_noirq,
	.iinr = __flush_icache_range_noirq,
	.de = __enable_dcache_nomsr,
	.dd = __disable_dcache_nomsr,
	.dfl = __flush_dcache_all_wb,
	.dflr = __flush_dcache_range_wb,
	.din = __invalidate_dcache_all_wb,
	.dinr = __invalidate_dcache_range_wb,
};

/* Old WT cache model: disable IRQs and turn the cache off around each op */
static const struct scache wt_msr = {
	.ie = __enable_icache_msr,
	.id = __disable_icache_msr,
	.ifl = __flush_icache_all_msr_irq,
	.iflr = __flush_icache_range_msr_irq,
	.iin = __flush_icache_all_msr_irq,
	.iinr = __flush_icache_range_msr_irq,
	.de = __enable_dcache_msr,
	.dd = __disable_dcache_msr,
	.dfl = __invalidate_dcache_all_msr_irq,
	.dflr = __invalidate_dcache_range_msr_irq_wt,
	.din = __invalidate_dcache_all_msr_irq,
	.dinr = __invalidate_dcache_range_msr_irq_wt,
};

static const struct scache wt_nomsr = {
	.ie = __enable_icache_nomsr,
	.id = __disable_icache_nomsr,
	.ifl = __flush_icache_all_nomsr_irq,
	.iflr = __flush_icache_range_nomsr_irq,
	.iin = __flush_icache_all_nomsr_irq,
	.iinr = __flush_icache_range_nomsr_irq,
	.de = __enable_dcache_nomsr,
	.dd = __disable_dcache_nomsr,
	.dfl = __invalidate_dcache_all_nomsr_irq,
	.dflr = __invalidate_dcache_range_nomsr_irq,
	.din = __invalidate_dcache_all_nomsr_irq,
	.dinr = __invalidate_dcache_range_nomsr_irq,
};

/* New WT cache model for newer MicroBlaze versions */
static const struct scache wt_msr_noirq = {
	.ie = __enable_icache_msr,
	.id = __disable_icache_msr,
	.ifl = __flush_icache_all_noirq,
	.iflr = __flush_icache_range_noirq,
	.iin = __flush_icache_all_noirq,
	.iinr = __flush_icache_range_noirq,
	.de = __enable_dcache_msr,
	.dd = __disable_dcache_msr,
	.dfl = __invalidate_dcache_all_noirq_wt,
	.dflr = __invalidate_dcache_range_nomsr_wt,
	.din = __invalidate_dcache_all_noirq_wt,
	.dinr = __invalidate_dcache_range_nomsr_wt,
};

static const struct scache wt_nomsr_noirq = {
	.ie = __enable_icache_nomsr,
	.id = __disable_icache_nomsr,
	.ifl = __flush_icache_all_noirq,
	.iflr = __flush_icache_range_noirq,
	.iin = __flush_icache_all_noirq,
	.iinr = __flush_icache_range_noirq,
	.de = __enable_dcache_nomsr,
	.dd = __disable_dcache_nomsr,
	.dfl = __invalidate_dcache_all_noirq_wt,
	.dflr = __invalidate_dcache_range_nomsr_wt,
	.din = __invalidate_dcache_all_noirq_wt,
	.dinr = __invalidate_dcache_range_nomsr_wt,
};

/*
 * CPU version codes for 7.20.a and 7.20.d
 * - see arch/microblaze/kernel/cpu/cpuinfo.c
 */
#define CPUVER_7_20_A	0x0c
#define CPUVER_7_20_D	0x0f

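/*
 * Model selection summary:
 *
 *	MSR insns	dcache_wb	ver_code	model
 *	yes		yes		any		wb_msr
 *	yes		no		>= 7.20.a	wt_msr_noirq
 *	yes		no		older		wt_msr
 *	no		yes		any		wb_nomsr
 *	no		no		>= 7.20.a	wt_nomsr_noirq
 *	no		no		older		wt_nomsr
 *
 * The WB models additionally warn on ver_code <= 7.20.d (hw bug).
 */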
void microblaze_cache_init(void)
{
	if (cpuinfo.use_instr & PVR2_USE_MSR_INSTR) {
		if (cpuinfo.dcache_wb) {
			pr_info("wb_msr\n");
			mbc = (struct scache *)&wb_msr;
			if (cpuinfo.ver_code <= CPUVER_7_20_D) {
				/* MS: problem with signal handling - hw bug */
				pr_info("WB won't work properly\n");
			}
		} else {
			if (cpuinfo.ver_code >= CPUVER_7_20_A) {
				pr_info("wt_msr_noirq\n");
				mbc = (struct scache *)&wt_msr_noirq;
			} else {
				pr_info("wt_msr\n");
				mbc = (struct scache *)&wt_msr;
			}
		}
	} else {
		if (cpuinfo.dcache_wb) {
			pr_info("wb_nomsr\n");
			mbc = (struct scache *)&wb_nomsr;
			if (cpuinfo.ver_code <= CPUVER_7_20_D) {
				/* MS: problem with signal handling - hw bug */
				pr_info("WB won't work properly\n");
			}
		} else {
			if (cpuinfo.ver_code >= CPUVER_7_20_A) {
				pr_info("wt_nomsr_noirq\n");
				mbc = (struct scache *)&wt_nomsr_noirq;
			} else {
				pr_info("wt_nomsr\n");
				mbc = (struct scache *)&wt_nomsr;
			}
		}
	}
	/*
	 * FIXME Invalidation is done in U-Boot
	 * WT cache: data has already been written to main memory
	 * WB cache: discarding data on noMMU caused the kernel not to boot
	 */
	/* invalidate_dcache(); */
	enable_dcache();

	invalidate_icache();
	enable_icache();
}