/*
 * (C) Copyright 2007
 * Nobuhiro Iwamatsu <iwamatsu@nigauri.org>
 *
 * See file CREDITS for list of people who contributed to this
 * project.
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License as
 * published by the Free Software Foundation; either version 2 of
 * the License, or (at your option) any later version.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, Inc., 59 Temple Place, Suite 330, Boston,
 * MA 02111-1307 USA
 */

#include <common.h>
#include <command.h>
#include <asm/processor.h>
#include <asm/io.h>

/*
 * Jump to the P2 (uncached) area.
 * The cache and TLB control registers may only be manipulated from
 * uncached code, so these operations run from P2 (the P1 address
 * ORed with 0x20000000).
 */
#define jump_to_P2()			\
  do {					\
    unsigned long __dummy;		\
    __asm__ __volatile__(		\
		"mov.l	1f, %0\n\t"	\
		"or	%1, %0\n\t"	\
		"jmp	@%0\n\t"	\
		" nop\n\t"		\
		".balign 4\n"		\
		"1:	.long 2f\n"	\
		"2:"			\
		: "=&r" (__dummy)	\
		: "r" (0x20000000));	\
  } while (0)
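
/*
 * Illustrative sketch (not part of the original file): the same
 * P1 -> P2 translation in plain C.  A P1 address such as 0x8c000000
 * maps to 0xac000000 once bit 29 (0x20000000) is set.  The helper
 * name is hypothetical.
 */
#if 0
static inline unsigned long p1_to_p2(unsigned long p1_addr)
{
	return p1_addr | 0x20000000;	/* set the uncached-segment bit */
}
#endif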

/*
 * Return to the P1 (cached) area.
 * The leading nops give the preceding P2 accesses time to complete
 * before execution branches back into cached space.
 */
#define back_to_P1()					\
  do {							\
    unsigned long __dummy;				\
    __asm__ __volatile__(				\
		"nop;nop;nop;nop;nop;nop;nop\n\t"	\
		"mov.l	1f, %0\n\t"			\
		"jmp	@%0\n\t"			\
		" nop\n\t"				\
		".balign 4\n"				\
		"1:	.long 2f\n"			\
		"2:"					\
		: "=&r" (__dummy));			\
  } while (0)
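
/*
 * Typical pattern (sketch): every CCR or cache address-array access
 * in this file is bracketed by the two macros above.
 *
 *	jump_to_P2();
 *	... touch CCR or the cache address arrays ...
 *	back_to_P1();
 */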

#define CACHE_VALID       1	/* V bit: line holds valid data */
#define CACHE_UPDATED     2	/* U bit: line is dirty (modified) */

/*
 * Write back every dirty line of the operand cache by walking the
 * memory-mapped OC address array and clearing the U bit of each
 * updated entry.
 */
static inline void cache_wback_all(void)
{
	unsigned long addr, data, i, j;

	jump_to_P2();
	for (i = 0; i < CACHE_OC_NUM_ENTRIES; i++) {
		for (j = 0; j < CACHE_OC_NUM_WAYS; j++) {
			addr = CACHE_OC_ADDRESS_ARRAY | (j << CACHE_OC_WAY_SHIFT)
				| (i << CACHE_OC_ENTRY_SHIFT);
			data = inl(addr);
			if (data & CACHE_UPDATED) {
				data &= ~CACHE_UPDATED;
				outl(data, addr);
			}
		}
	}
	back_to_P1();
}
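
/*
 * Illustrative sketch (not part of the original file): the address-array
 * address for one (way, entry) pair, factored out of the loop above.
 * The CACHE_OC_* constants are provided by the headers included above;
 * the helper name is hypothetical.
 */
#if 0
static unsigned long oc_array_addr(unsigned long way, unsigned long entry)
{
	return CACHE_OC_ADDRESS_ARRAY
		| (way << CACHE_OC_WAY_SHIFT)
		| (entry << CACHE_OC_ENTRY_SHIFT);
}
#endif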

/* Commands accepted by cache_control() */
#define CACHE_ENABLE      0
#define CACHE_DISABLE     1

int cache_control(unsigned int cmd)
{
	unsigned long ccr;

	jump_to_P2();
	ccr = inl(CCR);

	/* If the cache is currently enabled, flush it before touching CCR. */
	if (ccr & CCR_CACHE_ENABLE)
		cache_wback_all();

	if (cmd == CACHE_DISABLE)
		outl(CCR_CACHE_STOP, CCR);
	else
		outl(CCR_CACHE_INIT, CCR);
	back_to_P1();

	return 0;
}
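
/*
 * Example usage (illustrative only; the wrapper names are hypothetical):
 */
#if 0
void example_caches_on(void)
{
	cache_control(CACHE_ENABLE);	/* flush if needed, then re-init */
}

void example_caches_off(void)
{
	cache_control(CACHE_DISABLE);	/* write back dirty lines, then stop */
}
#endif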

/*
 * Write back (without invalidating) every cache line that overlaps
 * [start, end).  The start address is rounded down to a cache-line
 * boundary; "ocbwb" is the SH-4 operand cache block write-back insn.
 */
void dcache_wback_range(u32 start, u32 end)
{
	u32 v;

	start &= ~(L1_CACHE_BYTES - 1);
	for (v = start; v < end; v += L1_CACHE_BYTES) {
		asm volatile ("ocbwb     %0" :	/* no output */
			      : "m" (__m(v)));
	}
}
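
/*
 * Example (illustrative, hypothetical buffer/length): before a device
 * DMA-reads a buffer the CPU has just filled, push it out to memory:
 *
 *	dcache_wback_range((u32)buf, (u32)buf + len);
 */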

/*
 * Invalidate (without writing back) every cache line that overlaps
 * [start, end).  "ocbi" discards lines outright, so dirty data in a
 * line only partially covered by the range is lost.
 */
void dcache_invalid_range(u32 start, u32 end)
{
	u32 v;

	start &= ~(L1_CACHE_BYTES - 1);
	for (v = start; v < end; v += L1_CACHE_BYTES) {
		asm volatile ("ocbi     %0" :	/* no output */
			      : "m" (__m(v)));
	}
}
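
/*
 * Example (illustrative, hypothetical buffer/length): after a device
 * DMA-writes into a buffer, drop stale cached copies before reading:
 *
 *	dcache_invalid_range((u32)buf, (u32)buf + len);
 */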