/*
 * Data cache maintenance helpers for the SH (SuperH) architecture.
 * On SH-4/SH-4A the cache is managed with the OCBWB (write-back) and
 * OCBI (invalidate) operand-cache-block instructions, issued here via
 * inline assembly over a range of cache lines.
 */
#ifndef __ASM_SH_CACHE_H
#define __ASM_SH_CACHE_H

#if defined(CONFIG_SH4) || defined(CONFIG_SH4A)

/*
 * Global cache control (e.g. enable/disable/flush, selected by cmd).
 * Declaration only; the definition lives in the arch cache code.
 * NOTE(review): the meaning of cmd is not visible here — see the
 * implementation for the accepted values.
 */
int cache_control(unsigned int cmd);

/* L1 data cache line size on SH-4/SH-4A, in bytes. */
#define L1_CACHE_BYTES 32

/*
 * Oversized dummy type used purely to build a memory operand for the
 * asm below: "m" (__m(v)) tells the compiler that the instruction
 * touches memory at address v (so it will not cache values across the
 * asm), without emitting any actual load or store. The large size makes
 * the compiler assume a wide span of memory may be affected.
 */
struct __large_struct { unsigned long buf[100]; };
#define __m(x) (*(struct __large_struct *)(x))

/*
 * Write back (flush) any dirty data cache lines overlapping the
 * half-open address range [start, end).
 *
 * start is rounded down to a cache-line boundary; the loop condition
 * v < end then also covers a line that contains end - 1, so a partial
 * final line is still written back.
 *
 * NOTE(review): this function is *defined* (not merely declared) in a
 * header without `static`, so including this header from more than one
 * translation unit would produce duplicate-symbol link errors — confirm
 * it is only included once per image, or consider `static inline`.
 */
void dcache_wback_range(u32 start, u32 end)
{
	u32 v;

	/* Align down so every cache line overlapping the range is hit. */
	start &= ~(L1_CACHE_BYTES - 1);
	for (v = start; v < end; v += L1_CACHE_BYTES) {
		/* OCBWB: write back the operand cache block at address v. */
		asm volatile ("ocbwb %0" : /* no output */
			      : "m" (__m(v)));
	}
}

/*
 * Invalidate (discard without writing back) any data cache lines
 * overlapping the half-open address range [start, end).
 *
 * NOTE(review): because start is aligned down and the final line may
 * extend past end, data outside [start, end) that shares a cache line
 * with the range is discarded too — callers must use line-aligned
 * buffers (hence ARCH_DMA_MINALIGN below). Same header-definition
 * linkage caveat as dcache_wback_range applies.
 */
void dcache_invalid_range(u32 start, u32 end)
{
	u32 v;

	/* Align down so every cache line overlapping the range is hit. */
	start &= ~(L1_CACHE_BYTES - 1);
	for (v = start; v < end; v += L1_CACHE_BYTES) {
		/* OCBI: invalidate the operand cache block at address v. */
		asm volatile ("ocbi %0" : /* no output */
			      : "m" (__m(v)));
	}
}
#else

/*
 * 32 bytes is the largest L1 data cache line size for the SH
 * architecture. So it is a safe default for DMA alignment.
 */
#define ARCH_DMA_MINALIGN 32

#endif /* CONFIG_SH4 || CONFIG_SH4A */

/*
 * Use the L1 data cache line size value for the minimum DMA buffer
 * alignment on SH.
 */
#ifndef ARCH_DMA_MINALIGN
#define ARCH_DMA_MINALIGN L1_CACHE_BYTES
#endif

#endif /* __ASM_SH_CACHE_H */