#ifndef _ASM_X86_CACHEFLUSH_H
#define _ASM_X86_CACHEFLUSH_H

/* Keep includes the same across arches. */
#include <linux/mm.h>

/* Caches aren't brain-dead on the intel. */
static inline void flush_cache_all(void) { }
static inline void flush_cache_mm(struct mm_struct *mm) { }
static inline void flush_cache_dup_mm(struct mm_struct *mm) { }
static inline void flush_cache_range(struct vm_area_struct *vma,
				     unsigned long start, unsigned long end) { }
static inline void flush_cache_page(struct vm_area_struct *vma,
				    unsigned long vmaddr, unsigned long pfn) { }
#define ARCH_IMPLEMENTS_FLUSH_DCACHE_PAGE 0
static inline void flush_dcache_page(struct page *page) { }
static inline void flush_dcache_mmap_lock(struct address_space *mapping) { }
static inline void flush_dcache_mmap_unlock(struct address_space *mapping) { }
static inline void flush_icache_range(unsigned long start,
				      unsigned long end) { }
static inline void flush_icache_page(struct vm_area_struct *vma,
				     struct page *page) { }
static inline void flush_icache_user_range(struct vm_area_struct *vma,
					   struct page *page,
					   unsigned long addr,
					   unsigned long len) { }
static inline void flush_cache_vmap(unsigned long start, unsigned long end) { }
static inline void flush_cache_vunmap(unsigned long start,
				      unsigned long end) { }

static inline void copy_to_user_page(struct vm_area_struct *vma,
				     struct page *page, unsigned long vaddr,
				     void *dst, const void *src,
				     unsigned long len)
{
	memcpy(dst, src, len);
}

static inline void copy_from_user_page(struct vm_area_struct *vma,
				       struct page *page, unsigned long vaddr,
				       void *dst, const void *src,
				       unsigned long len)
{
	memcpy(dst, src, len);
}

#ifdef CONFIG_X86_PAT
/*
 * X86 PAT uses the page flags PG_arch_1 and PG_uncached together to keep
 * track of the memory type of pages that have a backing struct page.
 * X86 PAT supports three different memory types, _PAGE_CACHE_WB,
 * _PAGE_CACHE_WC and _PAGE_CACHE_UC_MINUS, and a fourth state where the
 * page's memory type has not been changed from its default (a value of
 * -1 is used to denote this).
 * Note we do not support _PAGE_CACHE_UC here.
 */
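/*
 * The resulting two-bit encoding, spelled out (this table is implied by
 * the _PGMT_* definitions below; PG_arch_1 and PG_uncached are
 * independent page-flag bits):
 *
 *	PG_uncached	PG_arch_1	memory type
 *	     0		     0		default (-1, not yet set)
 *	     0		     1		_PAGE_CACHE_WC
 *	     1		     0		_PAGE_CACHE_UC_MINUS
 *	     1		     1		_PAGE_CACHE_WB
 */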
#define _PGMT_DEFAULT		0
#define _PGMT_WC		(1UL << PG_arch_1)
#define _PGMT_UC_MINUS		(1UL << PG_uncached)
#define _PGMT_WB		(1UL << PG_uncached | 1UL << PG_arch_1)
#define _PGMT_MASK		(1UL << PG_uncached | 1UL << PG_arch_1)
#define _PGMT_CLEAR_MASK	(~_PGMT_MASK)

static inline unsigned long get_page_memtype(struct page *pg)
{
	unsigned long pg_flags = pg->flags & _PGMT_MASK;

	if (pg_flags == _PGMT_DEFAULT)
		return -1;
	else if (pg_flags == _PGMT_WC)
		return _PAGE_CACHE_WC;
	else if (pg_flags == _PGMT_UC_MINUS)
		return _PAGE_CACHE_UC_MINUS;
	else
		return _PAGE_CACHE_WB;
}

static inline void set_page_memtype(struct page *pg, unsigned long memtype)
{
	unsigned long memtype_flags = _PGMT_DEFAULT;
	unsigned long old_flags;
	unsigned long new_flags;

	switch (memtype) {
	case _PAGE_CACHE_WC:
		memtype_flags = _PGMT_WC;
		break;
	case _PAGE_CACHE_UC_MINUS:
		memtype_flags = _PGMT_UC_MINUS;
		break;
	case _PAGE_CACHE_WB:
		memtype_flags = _PGMT_WB;
		break;
	}

	/* Atomically replace the memtype bits, preserving all other flags. */
	do {
		old_flags = pg->flags;
		new_flags = (old_flags & _PGMT_CLEAR_MASK) | memtype_flags;
	} while (cmpxchg(&pg->flags, old_flags, new_flags) != old_flags);
}
#else
static inline unsigned long get_page_memtype(struct page *pg) { return -1; }
static inline void set_page_memtype(struct page *pg, unsigned long memtype) { }
#endif

/*
 * The set_memory_* API can be used to change various attributes of a virtual
 * address range. The attributes include:
 * Cacheability  : UnCached, WriteCombining, WriteBack
 * Executability : eXecutable, NoteXecutable
 * Read/Write    : ReadOnly, ReadWrite
 * Presence      : NotPresent
 *
 * Within a category, the attributes are mutually exclusive.
 *
 * The implementation of this API will take care of various aspects that
 * are associated with changing such attributes, such as:
 * - Flushing TLBs
 * - Flushing CPU caches
 * - Making sure aliases of the memory behind the mapping don't violate
 *   coherency rules as defined by the CPU in the system.
 *
 * What this API does not do:
 * - Provide exclusion between various callers - including callers that
 *   operate on other mappings of the same physical page
 * - Restore default attributes when a page is freed
 * - Guarantee that mappings other than the requested one are left in any
 *   particular state, beyond that they do not violate the rules of the
 *   CPU you have. Do not depend on any effects on other mappings; CPUs
 *   other than the one you have may have more relaxed rules.
 * The caller is required to take care of these.
 */
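/*
 * Usage sketch (a hypothetical caller, not part of this header): switch
 * one directly-mapped page to uncached for device access, then restore
 * write-back caching before handing the page back to the allocator.
 *
 *	unsigned long addr = __get_free_page(GFP_KERNEL);
 *
 *	if (addr && !set_memory_uc(addr, 1)) {
 *		... use the now-uncached page ...
 *		set_memory_wb(addr, 1);		(restore before freeing)
 *	}
 *	free_page(addr);
 */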
int _set_memory_uc(unsigned long addr, int numpages);
int _set_memory_wc(unsigned long addr, int numpages);
int _set_memory_wb(unsigned long addr, int numpages);
int set_memory_uc(unsigned long addr, int numpages);
int set_memory_wc(unsigned long addr, int numpages);
int set_memory_wb(unsigned long addr, int numpages);
int set_memory_x(unsigned long addr, int numpages);
int set_memory_nx(unsigned long addr, int numpages);
int set_memory_ro(unsigned long addr, int numpages);
int set_memory_rw(unsigned long addr, int numpages);
int set_memory_np(unsigned long addr, int numpages);
int set_memory_4k(unsigned long addr, int numpages);

int set_memory_array_uc(unsigned long *addr, int addrinarray);
int set_memory_array_wc(unsigned long *addr, int addrinarray);
int set_memory_array_wb(unsigned long *addr, int addrinarray);

int set_pages_array_uc(struct page **pages, int addrinarray);
int set_pages_array_wc(struct page **pages, int addrinarray);
int set_pages_array_wb(struct page **pages, int addrinarray);

/*
 * For legacy compatibility with the old APIs, a few functions
 * are provided that work on a "struct page".
 * These functions operate ONLY on the 1:1 kernel mapping of the
 * memory that the struct page represents, and internally just
 * call the set_memory_* functions. See the description of the
 * set_memory_* functions for more details on conventions.
 *
 * These APIs should be considered *deprecated* and are likely going to
 * be removed in the future.
 * The reason for this is the implicit operation on the 1:1 mapping only,
 * which makes them not generally useful.
 *
 * Specifically, many users of the old APIs had a virtual address and
 * called virt_to_page() or vmalloc_to_page() on that address to get the
 * struct page * that the old API required.
 * To convert such cases, use set_memory_*() on the original virtual
 * address; do not use these functions.
 */

int set_pages_uc(struct page *page, int numpages);
int set_pages_wb(struct page *page, int numpages);
int set_pages_x(struct page *page, int numpages);
int set_pages_nx(struct page *page, int numpages);
int set_pages_ro(struct page *page, int numpages);
int set_pages_rw(struct page *page, int numpages);

void clflush_cache_range(void *addr, unsigned int size);

#ifdef CONFIG_DEBUG_RODATA
void mark_rodata_ro(void);
extern const int rodata_test_data;
extern int kernel_set_to_readonly;
void set_kernel_text_rw(void);
void set_kernel_text_ro(void);
#else
static inline void set_kernel_text_rw(void) { }
static inline void set_kernel_text_ro(void) { }
#endif

#ifdef CONFIG_DEBUG_RODATA_TEST
int rodata_test(void);
#else
static inline int rodata_test(void)
{
	return 0;
}
#endif

#endif /* _ASM_X86_CACHEFLUSH_H */