xref: /openbmc/linux/arch/sh/mm/cache-sh4.c (revision 64c70b1c)
/*
 * arch/sh/mm/cache-sh4.c
 *
 * Copyright (C) 1999, 2000, 2002  Niibe Yutaka
 * Copyright (C) 2001 - 2006  Paul Mundt
 * Copyright (C) 2003  Richard Curnow
 *
 * This file is subject to the terms and conditions of the GNU General Public
 * License.  See the file "COPYING" in the main directory of this archive
 * for more details.
 */
#include <linux/init.h>
#include <linux/mm.h>
#include <linux/io.h>
#include <linux/mutex.h>
#include <asm/mmu_context.h>
#include <asm/cacheflush.h>

/*
 * The maximum number of pages we support when doing ranged dcache
 * flushing. Anything exceeding this will simply flush the dcache in its
 * entirety.
 */
#define MAX_DCACHE_PAGES	64	/* XXX: Tune for ways */
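
/*
 * For scale (assuming 4KiB pages): 64 pages is 256KiB of virtual range,
 * beyond which a full writeback of the D-cache is assumed to be cheaper
 * than walking the range line by line.
 */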

static void __flush_dcache_segment_1way(unsigned long start,
					unsigned long extent);
static void __flush_dcache_segment_2way(unsigned long start,
					unsigned long extent);
static void __flush_dcache_segment_4way(unsigned long start,
					unsigned long extent);

static void __flush_cache_4096(unsigned long addr, unsigned long phys,
			       unsigned long exec_offset);

/*
 * This is initialised here rather than left in the BSS.  cache_init()
 * gets called before the BSS is cleared, so a BSS-resident pointer
 * would be nulled out after being set up, which would be hopeless.
 */
static void (*__flush_dcache_segment_fn)(unsigned long, unsigned long) =
	(void (*)(unsigned long, unsigned long))0xdeadbeef;

static void compute_alias(struct cache_info *c)
{
	c->alias_mask = ((c->sets - 1) << c->entry_shift) & ~(PAGE_SIZE - 1);
	c->n_aliases = (c->alias_mask >> PAGE_SHIFT) + 1;
}
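
/*
 * Worked example, assuming an SH7750-style D-cache (16KiB direct-mapped,
 * 32-byte lines: sets = 512, entry_shift = 5) and 4KiB pages:
 *
 *	alias_mask = ((512 - 1) << 5) & ~(4096 - 1)
 *	           = 0x3fe0 & ~0xfff = 0x3000
 *	n_aliases  = (0x3000 >> 12) + 1 = 4
 *
 * i.e. four page colours, as expected for 16KiB of cache / 4KiB pages.
 */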

static void __init emit_cache_params(void)
{
	printk("PVR=%08x CVR=%08x PRR=%08x\n",
		ctrl_inl(CCN_PVR),
		ctrl_inl(CCN_CVR),
		ctrl_inl(CCN_PRR));
	printk("I-cache : n_ways=%d n_sets=%d way_incr=%d\n",
		current_cpu_data.icache.ways,
		current_cpu_data.icache.sets,
		current_cpu_data.icache.way_incr);
	printk("I-cache : entry_mask=0x%08x alias_mask=0x%08x n_aliases=%d\n",
		current_cpu_data.icache.entry_mask,
		current_cpu_data.icache.alias_mask,
		current_cpu_data.icache.n_aliases);
	printk("D-cache : n_ways=%d n_sets=%d way_incr=%d\n",
		current_cpu_data.dcache.ways,
		current_cpu_data.dcache.sets,
		current_cpu_data.dcache.way_incr);
	printk("D-cache : entry_mask=0x%08x alias_mask=0x%08x n_aliases=%d\n",
		current_cpu_data.dcache.entry_mask,
		current_cpu_data.dcache.alias_mask,
		current_cpu_data.dcache.n_aliases);

	if (!__flush_dcache_segment_fn)
		panic("unknown number of cache ways\n");
}

/*
 * SH-4 has a virtually indexed, physically tagged cache.
 */

/* Worst case assumed to be a 64k cache, direct-mapped i.e. 4 synonym bits. */
#define MAX_P3_MUTEXES 16

struct mutex p3map_mutex[MAX_P3_MUTEXES];
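
/*
 * Sixteen mutexes is exactly the worst case above: a 64k direct-mapped
 * cache holds 64k / 4k = 16 page colours (hence the 4 synonym bits), and
 * we take one mutex per colour.
 */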

void __init p3_cache_init(void)
{
	int i;

	compute_alias(&current_cpu_data.icache);
	compute_alias(&current_cpu_data.dcache);

	switch (current_cpu_data.dcache.ways) {
	case 1:
		__flush_dcache_segment_fn = __flush_dcache_segment_1way;
		break;
	case 2:
		__flush_dcache_segment_fn = __flush_dcache_segment_2way;
		break;
	case 4:
		__flush_dcache_segment_fn = __flush_dcache_segment_4way;
		break;
	default:
		__flush_dcache_segment_fn = NULL;
		break;
	}

	emit_cache_params();

	if (ioremap_page_range(P3SEG, P3SEG + (PAGE_SIZE * 4), 0, PAGE_KERNEL))
		panic("%s failed.", __func__);

	for (i = 0; i < current_cpu_data.dcache.n_aliases; i++)
		mutex_init(&p3map_mutex[i]);
}

/*
 * Write back the dirty D-cache lines, but do not invalidate them.
 *
 * START: Virtual Address (U0, P1, or P3)
 * SIZE: Size of the region.
 */
void __flush_wback_region(void *start, int size)
{
	unsigned long v;
	unsigned long begin, end;

	begin = (unsigned long)start & ~(L1_CACHE_BYTES-1);
	end = ((unsigned long)start + size + L1_CACHE_BYTES-1)
		& ~(L1_CACHE_BYTES-1);
	for (v = begin; v < end; v+=L1_CACHE_BYTES) {
		asm volatile("ocbwb	%0"
			     : /* no output */
			     : "m" (__m(v)));
	}
}
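
/*
 * Example of the rounding above, assuming 32-byte cache lines: for
 * start = 0x10000005 and size = 10, begin rounds down to 0x10000000 and
 * end rounds up to 0x10000020, so a single ocbwb covers the whole
 * requested region.
 */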

/*
 * Write back the dirty D-caches and invalidate them.
 *
 * START: Virtual Address (U0, P1, or P3)
 * SIZE: Size of the region.
 */
void __flush_purge_region(void *start, int size)
{
	unsigned long v;
	unsigned long begin, end;

	begin = (unsigned long)start & ~(L1_CACHE_BYTES-1);
	end = ((unsigned long)start + size + L1_CACHE_BYTES-1)
		& ~(L1_CACHE_BYTES-1);
	for (v = begin; v < end; v+=L1_CACHE_BYTES) {
		asm volatile("ocbp	%0"
			     : /* no output */
			     : "m" (__m(v)));
	}
}

/*
 * No write back please
 */
void __flush_invalidate_region(void *start, int size)
{
	unsigned long v;
	unsigned long begin, end;

	begin = (unsigned long)start & ~(L1_CACHE_BYTES-1);
	end = ((unsigned long)start + size + L1_CACHE_BYTES-1)
		& ~(L1_CACHE_BYTES-1);
	for (v = begin; v < end; v+=L1_CACHE_BYTES) {
		asm volatile("ocbi	%0"
			     : /* no output */
			     : "m" (__m(v)));
	}
}

/*
 * Write back the range of D-cache, and purge the I-cache.
 *
 * Called from kernel/module.c:sys_init_module and from the a.out loader.
 */
void flush_icache_range(unsigned long start, unsigned long end)
{
	flush_cache_all();
}

/*
 * Write back the D-cache and purge the I-cache for the signal trampoline.
 * This is much the same job as flush_icache_range(); since the trampoline
 * is known to fit within a single cache line, we simply flush that line.
 */
void flush_cache_sigtramp(unsigned long addr)
{
	unsigned long v, index;
	unsigned long flags;
	int i;

	v = addr & ~(L1_CACHE_BYTES-1);
	asm volatile("ocbwb	%0"
		     : /* no output */
		     : "m" (__m(v)));

	index = CACHE_IC_ADDRESS_ARRAY |
			(v & current_cpu_data.icache.entry_mask);

	local_irq_save(flags);
	jump_to_P2();

	for (i = 0; i < current_cpu_data.icache.ways;
	     i++, index += current_cpu_data.icache.way_incr)
		ctrl_outl(0, index);	/* Clear out Valid-bit */

	back_to_P1();
	wmb();
	local_irq_restore(flags);
}
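
/*
 * A note on the loop above: storing 0 into the IC address array entry
 * clears that line's valid bit, and advancing by way_incr repeats the
 * store once per way, so the line is dropped regardless of which way it
 * was resident in.  These array writes are only legal with the PC in P2,
 * hence the jump_to_P2()/back_to_P1() bracket.
 */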

static inline void flush_cache_4096(unsigned long start,
				    unsigned long phys)
{
	unsigned long flags, exec_offset = 0;

	/*
	 * All types of SH-4 require PC to be in P2 to operate on the I-cache.
	 * Some types of SH-4 require PC to be in P2 to operate on the D-cache.
	 */
	if ((current_cpu_data.flags & CPU_HAS_P2_FLUSH_BUG) ||
	    (start < CACHE_OC_ADDRESS_ARRAY))
		exec_offset = 0x20000000;

	local_irq_save(flags);
	__flush_cache_4096(start | SH_CACHE_ASSOC,
			   P1SEGADDR(phys), exec_offset);
	local_irq_restore(flags);
}
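
/*
 * 0x20000000 is the distance from P1 (0x80000000, cached) to P2
 * (0xa0000000, uncached).  Adding it to the flush routine's address makes
 * it execute through the uncached mirror, e.g. 0x8c001000 -> 0xac001000.
 */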

/*
 * Write back & invalidate the D-cache of the page.
 * (To avoid "alias" issues)
 */
void flush_dcache_page(struct page *page)
{
	if (test_bit(PG_mapped, &page->flags)) {
		unsigned long phys = PHYSADDR(page_address(page));
		unsigned long addr = CACHE_OC_ADDRESS_ARRAY;
		int i, n;

		/* Loop all the D-cache */
		n = current_cpu_data.dcache.n_aliases;
		for (i = 0; i < n; i++, addr += 4096)
			flush_cache_4096(addr, phys);
	}

	wmb();
}
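
/*
 * The loop above visits one 4KiB window of the OC address array per page
 * colour: with n_aliases == 4 that is offsets 0, 4K, 8K and 12K, so the
 * page's lines are found and purged whichever colour they were brought
 * in under.
 */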

/* TODO: Selective icache invalidation through IC address array.. */
static inline void flush_icache_all(void)
{
	unsigned long flags, ccr;

	local_irq_save(flags);
	jump_to_P2();

	/* Flush I-cache */
	ccr = ctrl_inl(CCR);
	ccr |= CCR_CACHE_ICI;
	ctrl_outl(ccr, CCR);

	/*
	 * back_to_P1() will take care of the barrier for us, don't add
	 * another one!
	 */

	back_to_P1();
	local_irq_restore(flags);
}

void flush_dcache_all(void)
{
	(*__flush_dcache_segment_fn)(0UL, current_cpu_data.dcache.way_size);
	wmb();
}

void flush_cache_all(void)
{
	flush_dcache_all();
	flush_icache_all();
}

static void __flush_cache_mm(struct mm_struct *mm, unsigned long start,
			     unsigned long end)
{
	unsigned long d = 0, p = start & PAGE_MASK;
	unsigned long alias_mask = current_cpu_data.dcache.alias_mask;
	unsigned long n_aliases = current_cpu_data.dcache.n_aliases;
	unsigned long select_bit;
	unsigned long all_aliases_mask;
	unsigned long addr_offset;
	pgd_t *dir;
	pmd_t *pmd;
	pud_t *pud;
	pte_t *pte;
	int i;

	dir = pgd_offset(mm, p);
	pud = pud_offset(dir, p);
	pmd = pmd_offset(pud, p);
	end = PAGE_ALIGN(end);

	all_aliases_mask = (1 << n_aliases) - 1;

	do {
		if (pmd_none(*pmd) || unlikely(pmd_bad(*pmd))) {
			p &= PMD_MASK;
			p += PMD_SIZE;
			pmd++;

			continue;
		}

		pte = pte_offset_kernel(pmd, p);

		do {
			unsigned long phys;
			pte_t entry = *pte;

			if (!(pte_val(entry) & _PAGE_PRESENT)) {
				pte++;
				p += PAGE_SIZE;
				continue;
			}

			phys = pte_val(entry) & PTE_PHYS_MASK;

			if ((p ^ phys) & alias_mask) {
				d |= 1 << ((p & alias_mask) >> PAGE_SHIFT);
				d |= 1 << ((phys & alias_mask) >> PAGE_SHIFT);

				if (d == all_aliases_mask)
					goto loop_exit;
			}

			pte++;
			p += PAGE_SIZE;
		} while (p < end && ((unsigned long)pte & ~PAGE_MASK));
		pmd++;
	} while (p < end);

loop_exit:
	addr_offset = 0;
	select_bit = 1;

	for (i = 0; i < n_aliases; i++) {
		if (d & select_bit) {
			(*__flush_dcache_segment_fn)(addr_offset, PAGE_SIZE);
			wmb();
		}

		select_bit <<= 1;
		addr_offset += PAGE_SIZE;
	}
}
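
/*
 * 'd' above is a bitmap of page colours that need flushing: each mapping
 * whose virtual and physical colours disagree marks both colours.  With
 * n_aliases == 4, all_aliases_mask == 0xf, and once d reaches it the PTE
 * walk stops early, since every colour must be flushed anyway.
 */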

/*
 * Note : (RPC) since the caches are physically tagged, the only point
 * of flush_cache_mm for SH-4 is to get rid of aliases from the
 * D-cache.  The assumption elsewhere, e.g. flush_cache_range, is that
 * lines can stay resident so long as the virtual address they were
 * accessed with (hence cache set) is in accord with the physical
 * address (i.e. tag).  It's no different here.  So I reckon we don't
 * need to flush the I-cache, since aliases don't matter for that.  We
 * should try that.
 *
 * Caller takes mm->mmap_sem.
 */
void flush_cache_mm(struct mm_struct *mm)
{
	/*
	 * If cache is only 4k-per-way, there are never any 'aliases'.  Since
	 * the cache is physically tagged, the data can just be left in there.
	 */
	if (current_cpu_data.dcache.n_aliases == 0)
		return;

	/*
	 * Don't bother groveling around the dcache for the VMA ranges
	 * if there are too many PTEs to make it worthwhile.
	 */
	if (mm->nr_ptes >= MAX_DCACHE_PAGES)
		flush_dcache_all();
	else {
		struct vm_area_struct *vma;

		/*
		 * In this case there are reasonably sized ranges to flush,
		 * iterate through the VMA list and take care of any aliases.
		 */
		for (vma = mm->mmap; vma; vma = vma->vm_next)
			__flush_cache_mm(mm, vma->vm_start, vma->vm_end);
	}

	/* Only touch the icache if one of the VMAs has VM_EXEC set. */
	if (mm->exec_vm)
		flush_icache_all();
}

/*
 * Write back and invalidate I/D-caches for the page.
 *
 * ADDR: Virtual Address (U0 address)
 * PFN: Physical page number
 */
void flush_cache_page(struct vm_area_struct *vma, unsigned long address,
		      unsigned long pfn)
{
	unsigned long phys = pfn << PAGE_SHIFT;
	unsigned int alias_mask;

	alias_mask = current_cpu_data.dcache.alias_mask;

	/* We only need to flush the D-cache when we have aliases */
	if ((address^phys) & alias_mask) {
		/* Loop 4K of the D-cache */
		flush_cache_4096(
			CACHE_OC_ADDRESS_ARRAY | (address & alias_mask),
			phys);
		/* Loop another 4K of the D-cache */
		flush_cache_4096(
			CACHE_OC_ADDRESS_ARRAY | (phys & alias_mask),
			phys);
	}

	alias_mask = current_cpu_data.icache.alias_mask;
	if (vma->vm_flags & VM_EXEC) {
		/*
		 * Evict entries from the portion of the cache from which code
		 * may have been executed at this address (virtual).  There's
		 * no need to evict from the portion corresponding to the
		 * physical address as for the D-cache, because we know the
		 * kernel has never executed the code through its identity
		 * translation.
		 */
		flush_cache_4096(
			CACHE_IC_ADDRESS_ARRAY | (address & alias_mask),
			phys);
	}
}
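
/*
 * Illustration of the D-cache case above, assuming alias_mask == 0x3000:
 * address == 0x00402000 and phys == 0x0c001000 differ in colour (0x2000
 * vs 0x1000), so both the 0x2000 and the 0x1000 windows of the OC address
 * array are swept against the same physical page.
 */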

/*
 * Write back and invalidate D-caches.
 *
 * START, END: Virtual Address (U0 address)
 *
 * NOTE: We need to flush the _physical_ page entry.
 * Flushing the cache lines for U0 only isn't enough.
 * We need to flush for P1 too, which may contain aliases.
 */
void flush_cache_range(struct vm_area_struct *vma, unsigned long start,
		       unsigned long end)
{
	/*
	 * If cache is only 4k-per-way, there are never any 'aliases'.  Since
	 * the cache is physically tagged, the data can just be left in there.
	 */
	if (current_cpu_data.dcache.n_aliases == 0)
		return;

	/*
	 * Don't bother with the lookup and alias check if we have a
	 * wide range to cover, just blow away the dcache in its
	 * entirety instead. -- PFM.
	 */
	if (((end - start) >> PAGE_SHIFT) >= MAX_DCACHE_PAGES)
		flush_dcache_all();
	else
		__flush_cache_mm(vma->vm_mm, start, end);

	if (vma->vm_flags & VM_EXEC) {
		/*
		 * TODO: Is this required???  Need to look at how I-cache
		 * coherency is assured when new programs are loaded to see if
		 * this matters.
		 */
		flush_icache_all();
	}
}

/*
 * flush_icache_user_range
 * @vma: VMA of the process
 * @page: page
 * @addr: U0 address
 * @len: length of the range (< page size)
 */
void flush_icache_user_range(struct vm_area_struct *vma,
			     struct page *page, unsigned long addr, int len)
{
	flush_cache_page(vma, addr, page_to_pfn(page));
	mb();
}

/**
 * __flush_cache_4096
 *
 * @addr:  address in memory mapped cache array
 * @phys:  P1 address to flush (has to match tags if addr has 'A' bit
 *         set i.e. associative write)
 * @exec_offset: set to 0x20000000 if flush has to be executed from P2
 *               region else 0x0
 *
 * The offset into the cache array implied by 'addr' selects the
 * 'colour' of the virtual address range that will be flushed.  The
 * operation (purge/write-back) is selected by the lower 2 bits of
 * 'phys'.
 */
static void __flush_cache_4096(unsigned long addr, unsigned long phys,
			       unsigned long exec_offset)
{
	int way_count;
	unsigned long base_addr = addr;
	struct cache_info *dcache;
	unsigned long way_incr;
	unsigned long a, ea, p;
	unsigned long temp_pc;

	dcache = &current_cpu_data.dcache;
	/* Written this way for better assembly. */
	way_count = dcache->ways;
	way_incr = dcache->way_incr;

	/*
	 * Apply exec_offset (i.e. branch to P2 if required.).
	 *
	 * The "=&r" (early-clobber) on temp_pc is required, not a
	 * workaround: the output is written by the mov.l before the
	 * exec_offset input is consumed by the add, so with a plain "=r"
	 * the compiler is entitled to assign both operands to the same
	 * register, trashing exec_offset before it has been added on.
	 */
	asm volatile("mov.l 1f, %0\n\t"
		     "add   %1, %0\n\t"
		     "jmp   @%0\n\t"
		     "nop\n\t"
		     ".balign 4\n\t"
		     "1:  .long 2f\n\t"
		     "2:\n" : "=&r" (temp_pc) : "r" (exec_offset));

	/*
	 * We know there will be >=1 iteration, so write as do-while to avoid
	 * pointless head-of-loop check for 0 iterations.
	 */
	do {
		ea = base_addr + PAGE_SIZE;
		a = base_addr;
		p = phys;

		do {
			*(volatile unsigned long *)a = p;
			/*
			 * Next line: intentionally not p+32, saves an add, p
			 * will do since only the cache tag bits need to
			 * match.
			 */
			*(volatile unsigned long *)(a+32) = p;
			a += 64;
			p += 64;
		} while (a < ea);

		base_addr += way_incr;
	} while (--way_count != 0);
}

/*
 * Break the 1, 2 and 4 way variants of this out into separate functions to
 * avoid nearly all the overhead of having the conditional stuff in the function
 * bodies (+ the 1 and 2 way cases avoid saving any registers too).
 */
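
/*
 * All three variants below rely on the same trick: movca.l r0, @Rn
 * allocates a cache line for Rn's address without fetching from memory,
 * which evicts (writing back, if dirty) whatever line previously occupied
 * that slot; the ocbi that follows invalidates the freshly allocated line
 * so the junk in it never reaches memory.  SR.BL (bit 28) is held set
 * across each movca.l/ocbi group so nothing can touch the line in
 * between.
 */
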
static void __flush_dcache_segment_1way(unsigned long start,
					unsigned long extent_per_way)
{
	unsigned long orig_sr, sr_with_bl;
	unsigned long base_addr;
	unsigned long way_incr, linesz, way_size;
	struct cache_info *dcache;
	register unsigned long a0, a0e;

	asm volatile("stc sr, %0" : "=r" (orig_sr));
	sr_with_bl = orig_sr | (1<<28);
	base_addr = ((unsigned long)&empty_zero_page[0]);

	/*
	 * The previous code aligned base_addr to 16k, i.e. the way_size of all
	 * existing SH-4 D-caches.  Whilst I don't see a need to have this
	 * aligned to any better than the cache line size (which it will be
	 * anyway by construction), let's align it to at least the way_size of
	 * any existing or conceivable SH-4 D-cache.  -- RPC
	 */
	base_addr = ((base_addr >> 16) << 16);
	base_addr |= start;

	dcache = &current_cpu_data.dcache;
	linesz = dcache->linesz;
	way_incr = dcache->way_incr;
	way_size = dcache->way_size;

	a0 = base_addr;
	a0e = base_addr + extent_per_way;
	do {
		asm volatile("ldc %0, sr" : : "r" (sr_with_bl));
		asm volatile("movca.l r0, @%0\n\t"
			     "ocbi @%0" : : "r" (a0));
		a0 += linesz;
		asm volatile("movca.l r0, @%0\n\t"
			     "ocbi @%0" : : "r" (a0));
		a0 += linesz;
		asm volatile("movca.l r0, @%0\n\t"
			     "ocbi @%0" : : "r" (a0));
		a0 += linesz;
		asm volatile("movca.l r0, @%0\n\t"
			     "ocbi @%0" : : "r" (a0));
		asm volatile("ldc %0, sr" : : "r" (orig_sr));
		a0 += linesz;
	} while (a0 < a0e);
}

static void __flush_dcache_segment_2way(unsigned long start,
					unsigned long extent_per_way)
{
	unsigned long orig_sr, sr_with_bl;
	unsigned long base_addr;
	unsigned long way_incr, linesz, way_size;
	struct cache_info *dcache;
	register unsigned long a0, a1, a0e;

	asm volatile("stc sr, %0" : "=r" (orig_sr));
	sr_with_bl = orig_sr | (1<<28);
	base_addr = ((unsigned long)&empty_zero_page[0]);

	/* See comment under 1-way above */
	base_addr = ((base_addr >> 16) << 16);
	base_addr |= start;

	dcache = &current_cpu_data.dcache;
	linesz = dcache->linesz;
	way_incr = dcache->way_incr;
	way_size = dcache->way_size;

	a0 = base_addr;
	a1 = a0 + way_incr;
	a0e = base_addr + extent_per_way;
	do {
		asm volatile("ldc %0, sr" : : "r" (sr_with_bl));
		asm volatile("movca.l r0, @%0\n\t"
			     "movca.l r0, @%1\n\t"
			     "ocbi @%0\n\t"
			     "ocbi @%1" : :
			     "r" (a0), "r" (a1));
		a0 += linesz;
		a1 += linesz;
		asm volatile("movca.l r0, @%0\n\t"
			     "movca.l r0, @%1\n\t"
			     "ocbi @%0\n\t"
			     "ocbi @%1" : :
			     "r" (a0), "r" (a1));
		a0 += linesz;
		a1 += linesz;
		asm volatile("movca.l r0, @%0\n\t"
			     "movca.l r0, @%1\n\t"
			     "ocbi @%0\n\t"
			     "ocbi @%1" : :
			     "r" (a0), "r" (a1));
		a0 += linesz;
		a1 += linesz;
		asm volatile("movca.l r0, @%0\n\t"
			     "movca.l r0, @%1\n\t"
			     "ocbi @%0\n\t"
			     "ocbi @%1" : :
			     "r" (a0), "r" (a1));
		asm volatile("ldc %0, sr" : : "r" (orig_sr));
		a0 += linesz;
		a1 += linesz;
	} while (a0 < a0e);
}

static void __flush_dcache_segment_4way(unsigned long start,
					unsigned long extent_per_way)
{
	unsigned long orig_sr, sr_with_bl;
	unsigned long base_addr;
	unsigned long way_incr, linesz, way_size;
	struct cache_info *dcache;
	register unsigned long a0, a1, a2, a3, a0e;

	asm volatile("stc sr, %0" : "=r" (orig_sr));
	sr_with_bl = orig_sr | (1<<28);
	base_addr = ((unsigned long)&empty_zero_page[0]);

	/* See comment under 1-way above */
	base_addr = ((base_addr >> 16) << 16);
	base_addr |= start;

	dcache = &current_cpu_data.dcache;
	linesz = dcache->linesz;
	way_incr = dcache->way_incr;
	way_size = dcache->way_size;

	a0 = base_addr;
	a1 = a0 + way_incr;
	a2 = a1 + way_incr;
	a3 = a2 + way_incr;
	a0e = base_addr + extent_per_way;
	do {
		asm volatile("ldc %0, sr" : : "r" (sr_with_bl));
		asm volatile("movca.l r0, @%0\n\t"
			     "movca.l r0, @%1\n\t"
			     "movca.l r0, @%2\n\t"
			     "movca.l r0, @%3\n\t"
			     "ocbi @%0\n\t"
			     "ocbi @%1\n\t"
			     "ocbi @%2\n\t"
			     "ocbi @%3\n\t" : :
			     "r" (a0), "r" (a1), "r" (a2), "r" (a3));
		a0 += linesz;
		a1 += linesz;
		a2 += linesz;
		a3 += linesz;
		asm volatile("movca.l r0, @%0\n\t"
			     "movca.l r0, @%1\n\t"
			     "movca.l r0, @%2\n\t"
			     "movca.l r0, @%3\n\t"
			     "ocbi @%0\n\t"
			     "ocbi @%1\n\t"
			     "ocbi @%2\n\t"
			     "ocbi @%3\n\t" : :
			     "r" (a0), "r" (a1), "r" (a2), "r" (a3));
		a0 += linesz;
		a1 += linesz;
		a2 += linesz;
		a3 += linesz;
		asm volatile("movca.l r0, @%0\n\t"
			     "movca.l r0, @%1\n\t"
			     "movca.l r0, @%2\n\t"
			     "movca.l r0, @%3\n\t"
			     "ocbi @%0\n\t"
			     "ocbi @%1\n\t"
			     "ocbi @%2\n\t"
			     "ocbi @%3\n\t" : :
			     "r" (a0), "r" (a1), "r" (a2), "r" (a3));
		a0 += linesz;
		a1 += linesz;
		a2 += linesz;
		a3 += linesz;
		asm volatile("movca.l r0, @%0\n\t"
			     "movca.l r0, @%1\n\t"
			     "movca.l r0, @%2\n\t"
			     "movca.l r0, @%3\n\t"
			     "ocbi @%0\n\t"
			     "ocbi @%1\n\t"
			     "ocbi @%2\n\t"
			     "ocbi @%3\n\t" : :
			     "r" (a0), "r" (a1), "r" (a2), "r" (a3));
		asm volatile("ldc %0, sr" : : "r" (orig_sr));
		a0 += linesz;
		a1 += linesz;
		a2 += linesz;
		a3 += linesz;
	} while (a0 < a0e);
}