#ifndef _ASM_POWERPC_CACHE_H
#define _ASM_POWERPC_CACHE_H

#ifdef __KERNEL__


/* bytes per L1 cache line */
#if defined(CONFIG_8xx) || defined(CONFIG_403GCX)
#define L1_CACHE_SHIFT		4
#define MAX_COPY_PREFETCH	1
#elif defined(CONFIG_PPC_E500MC)
#define L1_CACHE_SHIFT		6
#define MAX_COPY_PREFETCH	4
#elif defined(CONFIG_PPC32)
#define L1_CACHE_SHIFT		5
#define MAX_COPY_PREFETCH	4
#else /* CONFIG_PPC64 */
#define L1_CACHE_SHIFT		7
#endif

#define L1_CACHE_BYTES		(1 << L1_CACHE_SHIFT)

#define SMP_CACHE_BYTES		L1_CACHE_BYTES

#if defined(__powerpc64__) && !defined(__ASSEMBLY__)
struct ppc64_caches {
	u32	dsize;			/* L1 d-cache size */
	u32	dline_size;		/* L1 d-cache line size */
	u32	log_dline_size;
	u32	dlines_per_page;
	u32	isize;			/* L1 i-cache size */
	u32	iline_size;		/* L1 i-cache line size */
	u32	log_iline_size;
	u32	ilines_per_page;
};

extern struct ppc64_caches ppc64_caches;
#endif /* __powerpc64__ && ! __ASSEMBLY__ */

#if !defined(__ASSEMBLY__)
#define __read_mostly __attribute__((__section__(".data.read_mostly")))
#endif

#endif /* __KERNEL__ */
#endif /* _ASM_POWERPC_CACHE_H */