/* arch/powerpc/include/asm/cache.h */
#ifndef _ASM_POWERPC_CACHE_H
#define _ASM_POWERPC_CACHE_H

#ifdef __KERNEL__

/* bytes per L1 cache line */
#if defined(CONFIG_8xx) || defined(CONFIG_403GCX)
#define L1_CACHE_SHIFT		4
#define MAX_COPY_PREFETCH	1
#elif defined(CONFIG_PPC_E500MC)
#define L1_CACHE_SHIFT		6
#define MAX_COPY_PREFETCH	4
#elif defined(CONFIG_PPC32)
#define MAX_COPY_PREFETCH	4
#if defined(CONFIG_PPC_47x)
#define L1_CACHE_SHIFT		7
#else
#define L1_CACHE_SHIFT		5
#endif
#else /* CONFIG_PPC64 */
#define L1_CACHE_SHIFT		7
#define IFETCH_ALIGN_SHIFT	4 /* POWER8,9 */
#endif

#define	L1_CACHE_BYTES		(1 << L1_CACHE_SHIFT)

#define	SMP_CACHE_BYTES		L1_CACHE_BYTES

#define IFETCH_ALIGN_BYTES	(1 << IFETCH_ALIGN_SHIFT)

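/*
 * Illustrative note, not part of the original header: SMP_CACHE_BYTES is
 * the alignment that the generic ____cacheline_aligned helper in
 * <linux/cache.h> expands to, so hot, concurrently written data can be
 * kept off shared cache lines. A minimal sketch (example_stats is a
 * hypothetical name):
 *
 *	struct example_stats {
 *		u64	rx_packets;	// updated on every packet
 *	} ____cacheline_aligned;	// pad/align to SMP_CACHE_BYTES
 */
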
#if defined(__powerpc64__) && !defined(__ASSEMBLY__)
struct ppc64_caches {
	u32	dsize;			/* L1 d-cache size */
	u32	dline_size;		/* L1 d-cache line size	*/
	u32	log_dline_size;
	u32	dlines_per_page;
	u32	isize;			/* L1 i-cache size */
	u32	iline_size;		/* L1 i-cache line size	*/
	u32	log_iline_size;
	u32	ilines_per_page;
};

extern struct ppc64_caches ppc64_caches;
#endif /* __powerpc64__ && ! __ASSEMBLY__ */

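/*
 * Illustrative sketch, not part of the original header: 64-bit code that
 * walks cache lines at run time uses the geometry probed at boot (and
 * stored in ppc64_caches) rather than the compile-time L1_CACHE_BYTES:
 *
 *	unsigned long lsize = ppc64_caches.dline_size;
 *	unsigned long addr;
 *
 *	for (addr = start & ~(lsize - 1); addr < stop; addr += lsize)
 *		...;		// operate on one d-cache line
 */
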
#if defined(__ASSEMBLY__)
/*
 * For a snooping icache, we still need a dummy icbi to purge all the
 * prefetched instructions from the ifetch buffers. We also need a sync
 * before the icbi to order the actual stores to memory that might
 * have modified instructions with the icbi.
 */
#define PURGE_PREFETCHED_INS	\
	sync;			\
	icbi	0,r3;		\
	sync;			\
	isync
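/*
 * Illustrative use, not part of the original header: an assembly routine
 * that has just stored a modified instruction can expand the macro with
 * the target address in r3, which the icbi above assumes:
 *
 *	stw	r4,0(r3)	# patch in the new instruction
 *	PURGE_PREFETCHED_INS	# sync; icbi 0,r3; sync; isync
 *	blr
 */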

#else
#define __read_mostly __attribute__((__section__(".data..read_mostly")))
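/*
 * Illustrative usage, not part of the original header: rarely written,
 * frequently read variables can be placed in .data..read_mostly so they
 * do not share cache lines with hot writable data, e.g.:
 *
 *	static int example_debug_level __read_mostly;	// hypothetical variable
 */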

#ifdef CONFIG_6xx
extern long _get_L2CR(void);
extern long _get_L3CR(void);
extern void _set_L2CR(unsigned long);
extern void _set_L3CR(unsigned long);
#else
#define _get_L2CR()	0L
#define _get_L3CR()	0L
#define _set_L2CR(val)	do { } while(0)
#define _set_L3CR(val)	do { } while(0)
#endif
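/*
 * Illustrative sketch, not part of the original header: 6xx-specific code
 * can read-modify-write L2CR through these accessors, which collapse to
 * no-ops on other CPUs. L2CR_L2E (the L2 enable bit, from <asm/reg.h>)
 * is an assumed example:
 *
 *	unsigned long l2cr = _get_L2CR();
 *	_set_L2CR(l2cr | L2CR_L2E);	// enable the L2 cache
 */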

/* Zero the cache line containing addr (data cache block zero) */
static inline void dcbz(void *addr)
{
	__asm__ __volatile__ ("dcbz 0, %0" : : "r"(addr) : "memory");
}

/* Invalidate the cache line containing addr without writeback (data cache block invalidate) */
static inline void dcbi(void *addr)
{
	__asm__ __volatile__ ("dcbi 0, %0" : : "r"(addr) : "memory");
}

/* Write back and invalidate the cache line containing addr (data cache block flush) */
static inline void dcbf(void *addr)
{
	__asm__ __volatile__ ("dcbf 0, %0" : : "r"(addr) : "memory");
}

/* Write back the cache line containing addr, leaving it valid (data cache block store) */
static inline void dcbst(void *addr)
{
	__asm__ __volatile__ ("dcbst 0, %0" : : "r"(addr) : "memory");
}
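
/*
 * Illustrative sketch, not part of the original header: flush a byte
 * range to memory one line at a time with dcbf above, followed by a sync
 * to order the flushes. The function name is hypothetical; real kernel
 * code would use flush_dcache_range() from <asm/cacheflush.h>.
 */
static inline void example_flush_range(void *start, unsigned long size)
{
	unsigned long addr = (unsigned long)start & ~(L1_CACHE_BYTES - 1);
	unsigned long stop = (unsigned long)start + size;

	for (; addr < stop; addr += L1_CACHE_BYTES)
		dcbf((void *)addr);		/* write line back to memory */
	__asm__ __volatile__ ("sync" : : : "memory");
}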
#endif /* !__ASSEMBLY__ */
#endif /* __KERNEL__ */
#endif /* _ASM_POWERPC_CACHE_H */