/*
 * arch/sh/mm/cache-sh4.c
 *
 * Copyright (C) 1999, 2000, 2002  Niibe Yutaka
 * Copyright (C) 2001 - 2006  Paul Mundt
 * Copyright (C) 2003  Richard Curnow
 *
 * This file is subject to the terms and conditions of the GNU General Public
 * License.  See the file "COPYING" in the main directory of this archive
 * for more details.
 */
#include <linux/init.h>
#include <linux/mm.h>
#include <linux/io.h>
#include <linux/mutex.h>
#include <asm/mmu_context.h>
#include <asm/cacheflush.h>

/*
 * The maximum number of pages we support when doing ranged dcache
 * flushing. Anything exceeding this will simply flush the dcache in its
 * entirety.
 */
#define MAX_DCACHE_PAGES	64	/* XXX: Tune for ways */

static void __flush_dcache_segment_1way(unsigned long start,
					unsigned long extent);
static void __flush_dcache_segment_2way(unsigned long start,
					unsigned long extent);
static void __flush_dcache_segment_4way(unsigned long start,
					unsigned long extent);

static void __flush_cache_4096(unsigned long addr, unsigned long phys,
			       unsigned long exec_offset);

/*
 * This is initialised here to ensure that it is not placed in the BSS.  If
 * that were to happen, note that cache_init gets called before the BSS is
 * cleared, so this would get nulled out, which would be hopeless.
 */
static void (*__flush_dcache_segment_fn)(unsigned long, unsigned long) =
	(void (*)(unsigned long, unsigned long))0xdeadbeef;

static void compute_alias(struct cache_info *c)
{
	c->alias_mask = ((c->sets - 1) << c->entry_shift) & ~(PAGE_SIZE - 1);
	c->n_aliases = (c->alias_mask >> PAGE_SHIFT) + 1;
}

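/*
 * Worked example (hypothetical geometry, not read from any particular
 * CPU): with 512 sets and 32-byte lines (entry_shift == 5) a way spans
 * 16KB, so with 4KB pages:
 *
 *	alias_mask = ((512 - 1) << 5) & ~(4096 - 1) = 0x3000
 *	n_aliases  = (0x3000 >> 12) + 1             = 4
 *
 * i.e. a physical page may end up cached under four distinct virtual
 * colours.
 */
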
static void __init emit_cache_params(void)
{
	printk("PVR=%08x CVR=%08x PRR=%08x\n",
		ctrl_inl(CCN_PVR),
		ctrl_inl(CCN_CVR),
		ctrl_inl(CCN_PRR));
	printk("I-cache : n_ways=%d n_sets=%d way_incr=%d\n",
		current_cpu_data.icache.ways,
		current_cpu_data.icache.sets,
		current_cpu_data.icache.way_incr);
	printk("I-cache : entry_mask=0x%08x alias_mask=0x%08x n_aliases=%d\n",
		current_cpu_data.icache.entry_mask,
		current_cpu_data.icache.alias_mask,
		current_cpu_data.icache.n_aliases);
	printk("D-cache : n_ways=%d n_sets=%d way_incr=%d\n",
		current_cpu_data.dcache.ways,
		current_cpu_data.dcache.sets,
		current_cpu_data.dcache.way_incr);
	printk("D-cache : entry_mask=0x%08x alias_mask=0x%08x n_aliases=%d\n",
		current_cpu_data.dcache.entry_mask,
		current_cpu_data.dcache.alias_mask,
		current_cpu_data.dcache.n_aliases);

	if (!__flush_dcache_segment_fn)
		panic("unknown number of cache ways\n");
}

/*
 * SH-4 has a virtually indexed and physically tagged cache.
 */
void __init p3_cache_init(void)
{
	compute_alias(&current_cpu_data.icache);
	compute_alias(&current_cpu_data.dcache);

	switch (current_cpu_data.dcache.ways) {
	case 1:
		__flush_dcache_segment_fn = __flush_dcache_segment_1way;
		break;
	case 2:
		__flush_dcache_segment_fn = __flush_dcache_segment_2way;
		break;
	case 4:
		__flush_dcache_segment_fn = __flush_dcache_segment_4way;
		break;
	default:
		__flush_dcache_segment_fn = NULL;
		break;
	}

	emit_cache_params();
}

/*
 * Write back the dirty D-caches, but do not invalidate them.
 *
 * START: Virtual Address (U0, P1, or P3)
 * SIZE: Size of the region.
 */
void __flush_wback_region(void *start, int size)
{
	unsigned long v;
	unsigned long begin, end;

	begin = (unsigned long)start & ~(L1_CACHE_BYTES-1);
	end = ((unsigned long)start + size + L1_CACHE_BYTES-1)
		& ~(L1_CACHE_BYTES-1);
	for (v = begin; v < end; v+=L1_CACHE_BYTES) {
		asm volatile("ocbwb	%0"
			     : /* no output */
			     : "m" (__m(v)));
	}
}

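/*
 * A minimal usage sketch (dma_buf, data, len and the DMA helper are
 * hypothetical, not from this file): write back a CPU-filled buffer
 * before a device fetches it, keeping the cached copy valid:
 *
 *	memcpy(dma_buf, data, len);
 *	__flush_wback_region(dma_buf, len);
 *	start_dma_to_device(dma_buf, len);
 */
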
/*
 * Write back the dirty D-caches and invalidate them.
 *
 * START: Virtual Address (U0, P1, or P3)
 * SIZE: Size of the region.
 */
void __flush_purge_region(void *start, int size)
{
	unsigned long v;
	unsigned long begin, end;

	begin = (unsigned long)start & ~(L1_CACHE_BYTES-1);
	end = ((unsigned long)start + size + L1_CACHE_BYTES-1)
		& ~(L1_CACHE_BYTES-1);
	for (v = begin; v < end; v+=L1_CACHE_BYTES) {
		asm volatile("ocbp	%0"
			     : /* no output */
			     : "m" (__m(v)));
	}
}

/*
 * Invalidate the D-cache lines; dirty data is discarded, not written
 * back.
 */
void __flush_invalidate_region(void *start, int size)
{
	unsigned long v;
	unsigned long begin, end;

	begin = (unsigned long)start & ~(L1_CACHE_BYTES-1);
	end = ((unsigned long)start + size + L1_CACHE_BYTES-1)
		& ~(L1_CACHE_BYTES-1);
	for (v = begin; v < end; v+=L1_CACHE_BYTES) {
		asm volatile("ocbi	%0"
			     : /* no output */
			     : "m" (__m(v)));
	}
}

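/*
 * Rule of thumb for picking between the three region operations above
 * (a summary, not from the original source): ocbwb writes dirty lines
 * back and keeps them valid, ocbp writes back and invalidates, ocbi
 * discards without writing back.  E.g. before a device DMAs into a
 * buffer the CPU will read afterwards (names hypothetical):
 *
 *	__flush_invalidate_region(rx_buf, len);
 *	start_dma_from_device(rx_buf, len);
 */
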
/*
 * Write back the range of D-cache, and purge the I-cache.
 *
 * Called from kernel/module.c:sys_init_module and the a.out loader.
 */
void flush_icache_range(unsigned long start, unsigned long end)
{
	flush_cache_all();
}

/*
 * Write back the D-cache and purge the I-cache for the signal
 * trampoline, which happens to be the same behavior as
 * flush_icache_range().  So, we simply flush out a line.
 */
void flush_cache_sigtramp(unsigned long addr)
{
	unsigned long v, index;
	unsigned long flags;
	int i;

	v = addr & ~(L1_CACHE_BYTES-1);
	asm volatile("ocbwb	%0"
		     : /* no output */
		     : "m" (__m(v)));

	index = CACHE_IC_ADDRESS_ARRAY |
			(v & current_cpu_data.icache.entry_mask);

	local_irq_save(flags);
	jump_to_P2();

	for (i = 0; i < current_cpu_data.icache.ways;
	     i++, index += current_cpu_data.icache.way_incr)
		ctrl_outl(0, index);	/* Clear out Valid-bit */

	back_to_P1();
	wmb();
	local_irq_restore(flags);
}

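/*
 * Sketch of the index computation above, with made-up numbers: if
 * CACHE_IC_ADDRESS_ARRAY is 0xf0000000 and entry_mask is 0x1fe0, a
 * trampoline line at v == 0x7fff4e60 selects array entry
 *
 *	0xf0000000 | (0x7fff4e60 & 0x1fe0) == 0xf0000e60
 *
 * and the loop stores 0 to that entry in each way, clearing the valid
 * bit of every line that could hold the address.
 */
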
static inline void flush_cache_4096(unsigned long start,
				    unsigned long phys)
{
	unsigned long flags, exec_offset = 0;

	/*
	 * All types of SH-4 require PC to be in P2 to operate on the I-cache.
	 * Some types of SH-4 require PC to be in P2 to operate on the D-cache.
	 * (0x20000000 is the offset from the cached P1 window to the
	 * uncached P2 window.)
	 */
	if ((current_cpu_data.flags & CPU_HAS_P2_FLUSH_BUG) ||
	    (start < CACHE_OC_ADDRESS_ARRAY))
		exec_offset = 0x20000000;

	local_irq_save(flags);
	__flush_cache_4096(start | SH_CACHE_ASSOC,
			   P1SEGADDR(phys), exec_offset);
	local_irq_restore(flags);
}

/*
 * Write back & invalidate the D-cache of the page.
 * (To avoid "alias" issues)
 */
void flush_dcache_page(struct page *page)
{
	if (test_bit(PG_mapped, &page->flags)) {
		unsigned long phys = PHYSADDR(page_address(page));
		unsigned long addr = CACHE_OC_ADDRESS_ARRAY;
		int i, n;

		/* Flush every alias colour of the D-cache */
		n = current_cpu_data.dcache.n_aliases;
		for (i = 0; i < n; i++, addr += 4096)
			flush_cache_4096(addr, phys);
	}

	wmb();
}

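/*
 * With the example geometry from compute_alias() (n_aliases == 4) the
 * loop above touches the OC address array windows at offsets 0, 4K, 8K
 * and 12K, so every virtual colour the page could have been cached
 * under is written back and invalidated.
 */
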
/* TODO: Selective icache invalidation through IC address array.. */
static inline void flush_icache_all(void)
{
	unsigned long flags, ccr;

	local_irq_save(flags);
	jump_to_P2();

	/* Flush I-cache */
	ccr = ctrl_inl(CCR);
	ccr |= CCR_CACHE_ICI;
	ctrl_outl(ccr, CCR);

	/*
	 * back_to_P1() will take care of the barrier for us, don't add
	 * another one!
	 */

	back_to_P1();
	local_irq_restore(flags);
}

void flush_dcache_all(void)
{
	(*__flush_dcache_segment_fn)(0UL, current_cpu_data.dcache.way_size);
	wmb();
}

void flush_cache_all(void)
{
	flush_dcache_all();
	flush_icache_all();
}

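/*
 * Note on ordering (reasoning added here, not from the original
 * source): the D-cache is written back first so that freshly written
 * instructions reach memory before the I-cache invalidate; instruction
 * fetches issued afterwards then see the new code rather than stale
 * lines.
 */
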
static void __flush_cache_mm(struct mm_struct *mm, unsigned long start,
			     unsigned long end)
{
	unsigned long d = 0, p = start & PAGE_MASK;
	unsigned long alias_mask = current_cpu_data.dcache.alias_mask;
	unsigned long n_aliases = current_cpu_data.dcache.n_aliases;
	unsigned long select_bit;
	unsigned long all_aliases_mask;
	unsigned long addr_offset;
	pgd_t *dir;
	pmd_t *pmd;
	pud_t *pud;
	pte_t *pte;
	int i;

	dir = pgd_offset(mm, p);
	pud = pud_offset(dir, p);
	pmd = pmd_offset(pud, p);
	end = PAGE_ALIGN(end);

	all_aliases_mask = (1 << n_aliases) - 1;

	do {
		if (pmd_none(*pmd) || unlikely(pmd_bad(*pmd))) {
			p &= PMD_MASK;
			p += PMD_SIZE;
			pmd++;

			continue;
		}

		pte = pte_offset_kernel(pmd, p);

		do {
			unsigned long phys;
			pte_t entry = *pte;

			if (!(pte_val(entry) & _PAGE_PRESENT)) {
				pte++;
				p += PAGE_SIZE;
				continue;
			}

			phys = pte_val(entry) & PTE_PHYS_MASK;

			if ((p ^ phys) & alias_mask) {
				d |= 1 << ((p & alias_mask) >> PAGE_SHIFT);
				d |= 1 << ((phys & alias_mask) >> PAGE_SHIFT);

				if (d == all_aliases_mask)
					goto loop_exit;
			}

			pte++;
			p += PAGE_SIZE;
		} while (p < end && ((unsigned long)pte & ~PAGE_MASK));
		pmd++;
	} while (p < end);

loop_exit:
	addr_offset = 0;
	select_bit = 1;

	for (i = 0; i < n_aliases; i++) {
		if (d & select_bit) {
			(*__flush_dcache_segment_fn)(addr_offset, PAGE_SIZE);
			wmb();
		}

		select_bit <<= 1;
		addr_offset += PAGE_SIZE;
	}
}

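/*
 * Worked example for the dirty-colour bitmap 'd' above (hypothetical
 * numbers): with alias_mask == 0x3000, a page mapped at p == 0x2000
 * whose frame sits at phys == 0x7000 satisfies
 * (p ^ phys) & alias_mask != 0, so bit 2 (from p) and bit 3 (from
 * phys) are set in 'd' and both colours have their segment flushed by
 * the loop at loop_exit.
 */
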
/*
 * Note : (RPC) since the caches are physically tagged, the only point
 * of flush_cache_mm for SH-4 is to get rid of aliases from the
 * D-cache.  The assumption elsewhere, e.g. flush_cache_range, is that
 * lines can stay resident so long as the virtual address they were
 * accessed with (hence cache set) is in accord with the physical
 * address (i.e. tag).  It's no different here.  So I reckon we don't
 * need to flush the I-cache, since aliases don't matter for that.  We
 * should try that.
 *
 * Caller takes mm->mmap_sem.
 */
void flush_cache_mm(struct mm_struct *mm)
{
	/*
	 * If cache is only 4k-per-way, there are never any 'aliases'.  Since
	 * the cache is physically tagged, the data can just be left in there.
	 */
	if (current_cpu_data.dcache.n_aliases == 0)
		return;

	/*
	 * Don't bother groveling around the dcache for the VMA ranges
	 * if there are too many PTEs to make it worthwhile.
	 */
	if (mm->nr_ptes >= MAX_DCACHE_PAGES)
		flush_dcache_all();
	else {
		struct vm_area_struct *vma;

		/*
		 * In this case there are reasonably sized ranges to flush;
		 * iterate through the VMA list and take care of any aliases.
		 */
		for (vma = mm->mmap; vma; vma = vma->vm_next)
			__flush_cache_mm(mm, vma->vm_start, vma->vm_end);
	}

	/* Only touch the icache if one of the VMAs has VM_EXEC set. */
	if (mm->exec_vm)
		flush_icache_all();
}

/*
 * Write back and invalidate I/D-caches for the page.
 *
 * ADDR: Virtual Address (U0 address)
 * PFN: Physical page number
 */
void flush_cache_page(struct vm_area_struct *vma, unsigned long address,
		      unsigned long pfn)
{
	unsigned long phys = pfn << PAGE_SHIFT;
	unsigned int alias_mask;

	alias_mask = current_cpu_data.dcache.alias_mask;

	/* We only need to flush the D-cache when we have an alias */
	if ((address ^ phys) & alias_mask) {
		/* Flush 4K of the D-cache */
		flush_cache_4096(
			CACHE_OC_ADDRESS_ARRAY | (address & alias_mask),
			phys);
		/* Flush another 4K of the D-cache */
		flush_cache_4096(
			CACHE_OC_ADDRESS_ARRAY | (phys & alias_mask),
			phys);
	}

	alias_mask = current_cpu_data.icache.alias_mask;
	if (vma->vm_flags & VM_EXEC) {
		/*
		 * Evict entries from the portion of the cache from which code
		 * may have been executed at this address (virtual).  There's
		 * no need to evict from the portion corresponding to the
		 * physical address as for the D-cache, because we know the
		 * kernel has never executed the code through its identity
		 * translation.
		 */
		flush_cache_4096(
			CACHE_IC_ADDRESS_ARRAY | (address & alias_mask),
			phys);
	}
}

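/*
 * Example of the double D-cache flush above (numbers hypothetical):
 * with alias_mask == 0x3000, address == 0x04002000 and
 * phys == 0x0c003000 give colours 2 and 3, so both OC-array windows
 * are flushed.  When the colours match there is no alias and the
 * D-cache flush is skipped entirely.
 */
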
/*
 * Write back and invalidate D-caches.
 *
 * START, END: Virtual Address (U0 address)
 *
 * NOTE: We need to flush the _physical_ page entry.
 * Flushing the cache lines for U0 only isn't enough.
 * We need to flush for P1 too, which may contain aliases.
 */
void flush_cache_range(struct vm_area_struct *vma, unsigned long start,
		       unsigned long end)
{
	/*
	 * If cache is only 4k-per-way, there are never any 'aliases'.  Since
	 * the cache is physically tagged, the data can just be left in there.
	 */
	if (current_cpu_data.dcache.n_aliases == 0)
		return;

	/*
	 * Don't bother with the lookup and alias check if we have a
	 * wide range to cover, just blow away the dcache in its
	 * entirety instead. -- PFM.
	 */
	if (((end - start) >> PAGE_SHIFT) >= MAX_DCACHE_PAGES)
		flush_dcache_all();
	else
		__flush_cache_mm(vma->vm_mm, start, end);

	if (vma->vm_flags & VM_EXEC) {
		/*
		 * TODO: Is this required???  Need to look at how I-cache
		 * coherency is assured when new programs are loaded to see if
		 * this matters.
		 */
		flush_icache_all();
	}
}

/*
 * flush_icache_user_range
 * @vma: VMA of the process
 * @page: page
 * @addr: U0 address
 * @len: length of the range (< page size)
 */
void flush_icache_user_range(struct vm_area_struct *vma,
			     struct page *page, unsigned long addr, int len)
{
	flush_cache_page(vma, addr, page_to_pfn(page));
	mb();
}

/**
 * __flush_cache_4096
 *
 * @addr:  address in memory mapped cache array
 * @phys:  P1 address to flush (has to match tags if addr has 'A' bit
 *         set i.e. associative write)
 * @exec_offset: set to 0x20000000 if flush has to be executed from P2
 *               region else 0x0
 *
 * The offset into the cache array implied by 'addr' selects the
 * 'colour' of the virtual address range that will be flushed.  The
 * operation (purge/write-back) is selected by the lower 2 bits of
 * 'phys'.
 */
static void __flush_cache_4096(unsigned long addr, unsigned long phys,
			       unsigned long exec_offset)
{
	int way_count;
	unsigned long base_addr = addr;
	struct cache_info *dcache;
	unsigned long way_incr;
	unsigned long a, ea, p;
	unsigned long temp_pc;

	dcache = &current_cpu_data.dcache;
	/* Write this way for better assembly. */
	way_count = dcache->ways;
	way_incr = dcache->way_incr;

	/*
	 * Apply exec_offset (i.e. branch to P2 if required).
	 *
	 * FIXME:
	 *
	 *	If I write "=r" for the (temp_pc), it puts this in r6 hence
	 *	trashing exec_offset before it's been added on - why?  Hence
	 *	"=&r" as a 'workaround'
	 */
	asm volatile("mov.l 1f, %0\n\t"
		     "add   %1, %0\n\t"
		     "jmp   @%0\n\t"
		     "nop\n\t"
		     ".balign 4\n\t"
		     "1:  .long 2f\n\t"
		     "2:\n" : "=&r" (temp_pc) : "r" (exec_offset));

	/*
	 * We know there will be >=1 iteration, so write as do-while to avoid
	 * pointless head-of-loop check for 0 iterations.
	 */
	do {
		ea = base_addr + PAGE_SIZE;
		a = base_addr;
		p = phys;

		do {
			*(volatile unsigned long *)a = p;
			/*
			 * Next line: intentionally not p+32, saves an add, p
			 * will do since only the cache tag bits need to
			 * match.
			 */
			*(volatile unsigned long *)(a+32) = p;
			a += 64;
			p += 64;
		} while (a < ea);

		base_addr += way_incr;
	} while (--way_count != 0);
}

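/*
 * Sketch of what the inner-loop stores do (an illustration, following
 * the description of 'phys' above): with the associative bit set in
 * 'addr', a store such as
 *
 *	*(volatile unsigned long *)(CACHE_OC_ADDRESS_ARRAY | offs) = p;
 *
 * performs an associative lookup; a line whose tag matches p is
 * written back and/or invalidated according to the low bits of the
 * stored value, which is how the lower 2 bits of 'phys' select purge
 * versus write-back.
 */
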
/*
 * Break the 1, 2 and 4 way variants of this out into separate functions to
 * avoid nearly all the overhead of having the conditional stuff in the function
 * bodies (+ the 1 and 2 way cases avoid saving any registers too).
 */
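
/*
 * How the loops below work (a summary, assuming the usual SH-4
 * movca.l semantics): movca.l r0,@Rn allocates a cache line for Rn
 * without fetching from memory, evicting (and writing back, if dirty)
 * whatever line previously occupied that slot; the following ocbi then
 * invalidates the junk line before it can be written out.  The BL bit
 * (1<<28 in SR) blocks interrupts so nothing can observe or evict the
 * bogus line between the two instructions.
 */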
static void __flush_dcache_segment_1way(unsigned long start,
					unsigned long extent_per_way)
{
	unsigned long orig_sr, sr_with_bl;
	unsigned long base_addr;
	unsigned long way_incr, linesz, way_size;
	struct cache_info *dcache;
	register unsigned long a0, a0e;

	asm volatile("stc sr, %0" : "=r" (orig_sr));
	sr_with_bl = orig_sr | (1<<28);
	base_addr = ((unsigned long)&empty_zero_page[0]);

	/*
	 * The previous code aligned base_addr to 16k, i.e. the way_size of all
	 * existing SH-4 D-caches.  Whilst I don't see a need to have this
	 * aligned to any better than the cache line size (which it will be
	 * anyway by construction), let's align it to at least the way_size of
	 * any existing or conceivable SH-4 D-cache.  -- RPC
	 */
	base_addr = ((base_addr >> 16) << 16);
	base_addr |= start;

	dcache = &current_cpu_data.dcache;
	linesz = dcache->linesz;
	way_incr = dcache->way_incr;
	way_size = dcache->way_size;

	a0 = base_addr;
	a0e = base_addr + extent_per_way;
	do {
		asm volatile("ldc %0, sr" : : "r" (sr_with_bl));
		asm volatile("movca.l r0, @%0\n\t"
			     "ocbi @%0" : : "r" (a0));
		a0 += linesz;
		asm volatile("movca.l r0, @%0\n\t"
			     "ocbi @%0" : : "r" (a0));
		a0 += linesz;
		asm volatile("movca.l r0, @%0\n\t"
			     "ocbi @%0" : : "r" (a0));
		a0 += linesz;
		asm volatile("movca.l r0, @%0\n\t"
			     "ocbi @%0" : : "r" (a0));
		asm volatile("ldc %0, sr" : : "r" (orig_sr));
		a0 += linesz;
	} while (a0 < a0e);
}

static void __flush_dcache_segment_2way(unsigned long start,
					unsigned long extent_per_way)
{
	unsigned long orig_sr, sr_with_bl;
	unsigned long base_addr;
	unsigned long way_incr, linesz, way_size;
	struct cache_info *dcache;
	register unsigned long a0, a1, a0e;

	asm volatile("stc sr, %0" : "=r" (orig_sr));
	sr_with_bl = orig_sr | (1<<28);
	base_addr = ((unsigned long)&empty_zero_page[0]);

	/* See comment under 1-way above */
	base_addr = ((base_addr >> 16) << 16);
	base_addr |= start;

	dcache = &current_cpu_data.dcache;
	linesz = dcache->linesz;
	way_incr = dcache->way_incr;
	way_size = dcache->way_size;

	a0 = base_addr;
	a1 = a0 + way_incr;
	a0e = base_addr + extent_per_way;
	do {
		asm volatile("ldc %0, sr" : : "r" (sr_with_bl));
		asm volatile("movca.l r0, @%0\n\t"
			     "movca.l r0, @%1\n\t"
			     "ocbi @%0\n\t"
			     "ocbi @%1" : :
			     "r" (a0), "r" (a1));
		a0 += linesz;
		a1 += linesz;
		asm volatile("movca.l r0, @%0\n\t"
			     "movca.l r0, @%1\n\t"
			     "ocbi @%0\n\t"
			     "ocbi @%1" : :
			     "r" (a0), "r" (a1));
		a0 += linesz;
		a1 += linesz;
		asm volatile("movca.l r0, @%0\n\t"
			     "movca.l r0, @%1\n\t"
			     "ocbi @%0\n\t"
			     "ocbi @%1" : :
			     "r" (a0), "r" (a1));
		a0 += linesz;
		a1 += linesz;
		asm volatile("movca.l r0, @%0\n\t"
			     "movca.l r0, @%1\n\t"
			     "ocbi @%0\n\t"
			     "ocbi @%1" : :
			     "r" (a0), "r" (a1));
		asm volatile("ldc %0, sr" : : "r" (orig_sr));
		a0 += linesz;
		a1 += linesz;
	} while (a0 < a0e);
}

static void __flush_dcache_segment_4way(unsigned long start,
					unsigned long extent_per_way)
{
	unsigned long orig_sr, sr_with_bl;
	unsigned long base_addr;
	unsigned long way_incr, linesz, way_size;
	struct cache_info *dcache;
	register unsigned long a0, a1, a2, a3, a0e;

	asm volatile("stc sr, %0" : "=r" (orig_sr));
	sr_with_bl = orig_sr | (1<<28);
	base_addr = ((unsigned long)&empty_zero_page[0]);

	/* See comment under 1-way above */
	base_addr = ((base_addr >> 16) << 16);
	base_addr |= start;

	dcache = &current_cpu_data.dcache;
	linesz = dcache->linesz;
	way_incr = dcache->way_incr;
	way_size = dcache->way_size;

	a0 = base_addr;
	a1 = a0 + way_incr;
	a2 = a1 + way_incr;
	a3 = a2 + way_incr;
	a0e = base_addr + extent_per_way;
	do {
		asm volatile("ldc %0, sr" : : "r" (sr_with_bl));
		asm volatile("movca.l r0, @%0\n\t"
			     "movca.l r0, @%1\n\t"
			     "movca.l r0, @%2\n\t"
			     "movca.l r0, @%3\n\t"
			     "ocbi @%0\n\t"
			     "ocbi @%1\n\t"
			     "ocbi @%2\n\t"
			     "ocbi @%3\n\t" : :
			     "r" (a0), "r" (a1), "r" (a2), "r" (a3));
		a0 += linesz;
		a1 += linesz;
		a2 += linesz;
		a3 += linesz;
		asm volatile("movca.l r0, @%0\n\t"
			     "movca.l r0, @%1\n\t"
			     "movca.l r0, @%2\n\t"
			     "movca.l r0, @%3\n\t"
			     "ocbi @%0\n\t"
			     "ocbi @%1\n\t"
			     "ocbi @%2\n\t"
			     "ocbi @%3\n\t" : :
			     "r" (a0), "r" (a1), "r" (a2), "r" (a3));
		a0 += linesz;
		a1 += linesz;
		a2 += linesz;
		a3 += linesz;
		asm volatile("movca.l r0, @%0\n\t"
			     "movca.l r0, @%1\n\t"
			     "movca.l r0, @%2\n\t"
			     "movca.l r0, @%3\n\t"
			     "ocbi @%0\n\t"
			     "ocbi @%1\n\t"
			     "ocbi @%2\n\t"
			     "ocbi @%3\n\t" : :
			     "r" (a0), "r" (a1), "r" (a2), "r" (a3));
		a0 += linesz;
		a1 += linesz;
		a2 += linesz;
		a3 += linesz;
		asm volatile("movca.l r0, @%0\n\t"
			     "movca.l r0, @%1\n\t"
			     "movca.l r0, @%2\n\t"
			     "movca.l r0, @%3\n\t"
			     "ocbi @%0\n\t"
			     "ocbi @%1\n\t"
			     "ocbi @%2\n\t"
			     "ocbi @%3\n\t" : :
			     "r" (a0), "r" (a1), "r" (a2), "r" (a3));
		asm volatile("ldc %0, sr" : : "r" (orig_sr));
		a0 += linesz;
		a1 += linesz;
		a2 += linesz;
		a3 += linesz;
	} while (a0 < a0e);
}