/* SPDX-License-Identifier: GPL-2.0 */
#ifndef _ASM_POWERPC_CACHE_H
#define _ASM_POWERPC_CACHE_H

#ifdef __KERNEL__


/* bytes per L1 cache line */
#if defined(CONFIG_PPC_8xx) || defined(CONFIG_403GCX)
#define L1_CACHE_SHIFT		4
#define MAX_COPY_PREFETCH	1
#elif defined(CONFIG_PPC_E500MC)
#define L1_CACHE_SHIFT		6
#define MAX_COPY_PREFETCH	4
#elif defined(CONFIG_PPC32)
#define MAX_COPY_PREFETCH	4
#if defined(CONFIG_PPC_47x)
#define L1_CACHE_SHIFT		7
#else
#define L1_CACHE_SHIFT		5
#endif
#else /* CONFIG_PPC64 */
#define L1_CACHE_SHIFT		7
#define IFETCH_ALIGN_SHIFT	4	/* POWER8,9 */
#endif

#define	L1_CACHE_BYTES		(1 << L1_CACHE_SHIFT)

#define	SMP_CACHE_BYTES		L1_CACHE_BYTES

#define IFETCH_ALIGN_BYTES	(1 << IFETCH_ALIGN_SHIFT)

#if defined(__powerpc64__) && !defined(__ASSEMBLY__)

struct ppc_cache_info {
	u32 size;
	u32 line_size;
	u32 block_size;		/* L1 only */
	u32 log_block_size;
	u32 blocks_per_page;
	u32 sets;
	u32 assoc;
};

struct ppc64_caches {
	struct ppc_cache_info l1d;
	struct ppc_cache_info l1i;
	struct ppc_cache_info l2;
	struct ppc_cache_info l3;
};

extern struct ppc64_caches ppc64_caches;
#endif /* __powerpc64__ && ! __ASSEMBLY__ */

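/*
 * Illustrative sketch only, not part of the original header: one way the
 * probed cache geometry above could be consumed.  The helper name below is
 * made up for this example.
 */
#if defined(__powerpc64__) && !defined(__ASSEMBLY__)
static inline u32 l1d_blocks_for_range(unsigned long len)
{
	/* Round a byte length up to whole L1 data-cache blocks. */
	return (len + ppc64_caches.l1d.block_size - 1) >>
			ppc64_caches.l1d.log_block_size;
}
#endif
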
#if defined(__ASSEMBLY__)
/*
 * For a snooping icache, we still need a dummy icbi to purge all the
 * prefetched instructions from the ifetch buffers. We also need a sync
 * before the icbi to order the actual stores to memory that might
 * have modified instructions with the icbi.
 */
#define PURGE_PREFETCHED_INS	\
	sync;			\
	icbi	0,r3;		\
	sync;			\
	isync

#else
#define __read_mostly __attribute__((__section__(".data..read_mostly")))

#ifdef CONFIG_6xx
extern long _get_L2CR(void);
extern long _get_L3CR(void);
extern void _set_L2CR(unsigned long);
extern void _set_L3CR(unsigned long);
#else
#define _get_L2CR()	0L
#define _get_L3CR()	0L
#define _set_L2CR(val)	do { } while(0)
#define _set_L3CR(val)	do { } while(0)
#endif

/* dcbz: zero the data-cache block containing addr. */
static inline void dcbz(void *addr)
{
	__asm__ __volatile__ ("dcbz 0, %0" : : "r"(addr) : "memory");
}

/* dcbi: invalidate the block containing addr without writing it back. */
static inline void dcbi(void *addr)
{
	__asm__ __volatile__ ("dcbi 0, %0" : : "r"(addr) : "memory");
}

/* dcbf: write the block containing addr back to memory and invalidate it. */
static inline void dcbf(void *addr)
{
	__asm__ __volatile__ ("dcbf 0, %0" : : "r"(addr) : "memory");
}

/* dcbst: write the block containing addr back to memory, keeping it valid. */
static inline void dcbst(void *addr)
{
	__asm__ __volatile__ ("dcbst 0, %0" : : "r"(addr) : "memory");
}
#endif /* !__ASSEMBLY__ */
#endif /* __KERNEL__ */
#endif /* _ASM_POWERPC_CACHE_H */
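
/*
 * Usage sketch, illustrative only and not part of this header: cleaning a
 * buffer's data-cache blocks out to memory with the dcbst() helper above.
 * The caller name is made up, and the example assumes L1_CACHE_BYTES is the
 * data-cache block size of the running CPU.
 *
 *	void clean_range(void *start, unsigned long len)
 *	{
 *		unsigned long p = (unsigned long)start & ~(L1_CACHE_BYTES - 1UL);
 *		unsigned long end = (unsigned long)start + len;
 *
 *		for (; p < end; p += L1_CACHE_BYTES)
 *			dcbst((void *)p);
 *		mb();	// order the dcbst's before any following accesses
 *	}
 */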