/* SPDX-License-Identifier: GPL-2.0 */
#ifndef _ASM_POWERPC_CACHE_H
#define _ASM_POWERPC_CACHE_H

#ifdef __KERNEL__

/* bytes per L1 cache line */
#if defined(CONFIG_PPC_8xx) || defined(CONFIG_403GCX)
#define L1_CACHE_SHIFT		4
#define MAX_COPY_PREFETCH	1
#define IFETCH_ALIGN_SHIFT	2
#elif defined(CONFIG_PPC_E500MC)
#define L1_CACHE_SHIFT		6
#define MAX_COPY_PREFETCH	4
#define IFETCH_ALIGN_SHIFT	3
#elif defined(CONFIG_PPC32)
#define MAX_COPY_PREFETCH	4
#define IFETCH_ALIGN_SHIFT	3	/* 603 fetches 2 insn at a time */
#if defined(CONFIG_PPC_47x)
#define L1_CACHE_SHIFT		7
#else
#define L1_CACHE_SHIFT		5
#endif
#else /* CONFIG_PPC64 */
#define L1_CACHE_SHIFT		7
#define IFETCH_ALIGN_SHIFT	4	/* POWER8,9 */
#endif

#define	L1_CACHE_BYTES		(1 << L1_CACHE_SHIFT)

#define	SMP_CACHE_BYTES		L1_CACHE_BYTES

#define IFETCH_ALIGN_BYTES	(1 << IFETCH_ALIGN_SHIFT)

#if !defined(__ASSEMBLY__)
#ifdef CONFIG_PPC64

struct ppc_cache_info {
	u32 size;
	u32 line_size;
	u32 block_size;		/* L1 only */
	u32 log_block_size;
	u32 blocks_per_page;
	u32 sets;
	u32 assoc;
};

struct ppc64_caches {
	struct ppc_cache_info l1d;
	struct ppc_cache_info l1i;
	struct ppc_cache_info l2;
	struct ppc_cache_info l3;
};

extern struct ppc64_caches ppc64_caches;

static inline u32 l1_dcache_shift(void)
{
	return ppc64_caches.l1d.log_block_size;
}

static inline u32 l1_dcache_bytes(void)
{
	return ppc64_caches.l1d.block_size;
}

static inline u32 l1_icache_shift(void)
{
	return ppc64_caches.l1i.log_block_size;
}

static inline u32 l1_icache_bytes(void)
{
	return ppc64_caches.l1i.block_size;
}
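/*
 * On 64-bit, the accessors above report the cache geometry discovered at
 * boot and recorded in ppc64_caches (on typical systems this is filled in
 * from the device-tree cache properties during early setup).  The 32-bit
 * fallbacks below simply report the compile-time L1_CACHE_SHIFT selected
 * above.
 */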
#else
static inline u32 l1_dcache_shift(void)
{
	return L1_CACHE_SHIFT;
}

static inline u32 l1_dcache_bytes(void)
{
	return L1_CACHE_BYTES;
}

static inline u32 l1_icache_shift(void)
{
	return L1_CACHE_SHIFT;
}

static inline u32 l1_icache_bytes(void)
{
	return L1_CACHE_BYTES;
}

#endif
#endif /* ! __ASSEMBLY__ */

#if defined(__ASSEMBLY__)
/*
 * For a snooping icache, we still need a dummy icbi to purge all the
 * prefetched instructions from the ifetch buffers. We also need a sync
 * before the icbi to order the actual stores to memory that might
 * have modified instructions with the icbi.
 */
#define PURGE_PREFETCHED_INS	\
	sync;			\
	icbi	0,r3;		\
	sync;			\
	isync

#else
#define __read_mostly __attribute__((__section__(".data..read_mostly")))

#ifdef CONFIG_PPC_BOOK3S_32
extern long _get_L2CR(void);
extern long _get_L3CR(void);
extern void _set_L2CR(unsigned long);
extern void _set_L3CR(unsigned long);
#else
#define _get_L2CR()	0L
#define _get_L3CR()	0L
#define _set_L2CR(val)	do { } while(0)
#define _set_L3CR(val)	do { } while(0)
#endif

/* Zero a data cache block */
static inline void dcbz(void *addr)
{
	__asm__ __volatile__ ("dcbz 0, %0" : : "r"(addr) : "memory");
}

/* Invalidate a data cache block without writing it back */
static inline void dcbi(void *addr)
{
	__asm__ __volatile__ ("dcbi 0, %0" : : "r"(addr) : "memory");
}

/* Write a modified data cache block back to memory and invalidate it */
static inline void dcbf(void *addr)
{
	__asm__ __volatile__ ("dcbf 0, %0" : : "r"(addr) : "memory");
}

/* Write a modified data cache block back to memory, leaving it valid */
static inline void dcbst(void *addr)
{
	__asm__ __volatile__ ("dcbst 0, %0" : : "r"(addr) : "memory");
}
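/*
 * Illustrative sketch only, not part of this header's API: the block
 * helpers above are normally combined with l1_dcache_bytes() to walk a
 * buffer one cache block at a time, in the style of helpers such as
 * clean_dcache_range() in asm/cacheflush.h.  The function name below is
 * hypothetical.
 */
static inline void __example_clean_dcache_range(unsigned long start,
						unsigned long stop)
{
	unsigned long bytes = l1_dcache_bytes();
	/* round down to the start of the first cache block */
	unsigned long addr = start & ~(bytes - 1);

	for (; addr < stop; addr += bytes)
		dcbst((void *)addr);	/* write the block back to memory */

	/* order the dcbst's before any following accesses */
	__asm__ __volatile__ ("sync" : : : "memory");
}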
#endif /* !__ASSEMBLY__ */
#endif /* __KERNEL__ */
#endif /* _ASM_POWERPC_CACHE_H */