#ifndef _ASM_X86_CACHEFLUSH_H
#define _ASM_X86_CACHEFLUSH_H

/* Keep includes the same across arches. */
#include <linux/mm.h>

/* Caches aren't brain-dead on the Intel. */
static inline void flush_cache_all(void) { }
static inline void flush_cache_mm(struct mm_struct *mm) { }
static inline void flush_cache_dup_mm(struct mm_struct *mm) { }
static inline void flush_cache_range(struct vm_area_struct *vma,
				     unsigned long start, unsigned long end) { }
static inline void flush_cache_page(struct vm_area_struct *vma,
				    unsigned long vmaddr, unsigned long pfn) { }
static inline void flush_dcache_page(struct page *page) { }
static inline void flush_dcache_mmap_lock(struct address_space *mapping) { }
static inline void flush_dcache_mmap_unlock(struct address_space *mapping) { }
static inline void flush_icache_range(unsigned long start,
				      unsigned long end) { }
static inline void flush_icache_page(struct vm_area_struct *vma,
				     struct page *page) { }
static inline void flush_icache_user_range(struct vm_area_struct *vma,
					   struct page *page,
					   unsigned long addr,
					   unsigned long len) { }
static inline void flush_cache_vmap(unsigned long start, unsigned long end) { }
static inline void flush_cache_vunmap(unsigned long start,
				      unsigned long end) { }

static inline void copy_to_user_page(struct vm_area_struct *vma,
				     struct page *page, unsigned long vaddr,
				     void *dst, const void *src,
				     unsigned long len)
{
	memcpy(dst, src, len);
}

static inline void copy_from_user_page(struct vm_area_struct *vma,
				       struct page *page, unsigned long vaddr,
				       void *dst, const void *src,
				       unsigned long len)
{
	memcpy(dst, src, len);
}

#define PG_non_WB	PG_arch_1
PAGEFLAG(NonWB, non_WB)

/*
 * The set_memory_* API can be used to change various attributes of a virtual
 * address range. The attributes include:
 * Cacheability  : UnCached, WriteCombining, WriteBack
 * Executability : eXecutable, NoteXecutable
 * Read/Write    : ReadOnly, ReadWrite
 * Presence      : NotPresent
 *
 * Within a category, the attributes are mutually exclusive.
 *
 * The implementation of this API will take care of various aspects that
 * are associated with changing such attributes, such as:
 * - Flushing TLBs
 * - Flushing CPU caches
 * - Making sure aliases of the memory behind the mapping don't violate
 *   coherency rules as defined by the CPU in the system.
 *
 * What this API does not do:
 * - Provide exclusion between various callers - including callers that
 *   operate on other mappings of the same physical page
 * - Restore default attributes when a page is freed
 * - Guarantee that mappings other than the requested one are
 *   in any particular state, other than that they do not violate the
 *   rules of the CPU you have. Do not depend on any effects on other
 *   mappings; CPUs other than the one you have may have more relaxed rules.
 * The caller is required to take care of these.
 */
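
/*
 * A minimal usage sketch (illustrative only, not part of the original
 * header): a caller that needs a range mapped UnCached can pair
 * set_memory_uc() with set_memory_wb() to restore the default attribute
 * when done. 'vaddr' and 'nr_pages' are hypothetical names for a
 * page-aligned kernel virtual address and the number of pages it covers;
 * the calls return 0 on success and a negative value on failure.
 *
 *	int ret = set_memory_uc(vaddr, nr_pages);
 *	if (ret)
 *		return ret;
 *	... access the range with UnCached semantics ...
 *	set_memory_wb(vaddr, nr_pages);
 *
 * Per the rules above, serializing against other users of the same
 * physical pages remains the caller's responsibility.
 */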

int _set_memory_uc(unsigned long addr, int numpages);
int _set_memory_wc(unsigned long addr, int numpages);
int _set_memory_wb(unsigned long addr, int numpages);
int set_memory_uc(unsigned long addr, int numpages);
int set_memory_wc(unsigned long addr, int numpages);
int set_memory_wb(unsigned long addr, int numpages);
int set_memory_x(unsigned long addr, int numpages);
int set_memory_nx(unsigned long addr, int numpages);
int set_memory_ro(unsigned long addr, int numpages);
int set_memory_rw(unsigned long addr, int numpages);
int set_memory_np(unsigned long addr, int numpages);
int set_memory_4k(unsigned long addr, int numpages);

int set_memory_array_uc(unsigned long *addr, int addrinarray);
int set_memory_array_wb(unsigned long *addr, int addrinarray);

int set_pages_array_uc(struct page **pages, int addrinarray);
int set_pages_array_wb(struct page **pages, int addrinarray);

/*
 * For legacy compatibility with the old APIs, a few functions
 * are provided that work on a "struct page".
 * These functions operate ONLY on the 1:1 kernel mapping of the
 * memory that the struct page represents, and internally just
 * call the set_memory_* functions. See the description of the
 * set_memory_* API above for more details on conventions.
 *
 * These APIs should be considered *deprecated* and are likely to
 * be removed in the future.
 * The reason for this is their implicit operation on the 1:1 mapping
 * only, which makes them not generally useful.
 *
 * Specifically, many users of the old APIs had a virtual address and
 * called virt_to_page() or vmalloc_to_page() on that address to get
 * the struct page * that the old API required.
 * To convert these cases, use set_memory_*() on the original virtual
 * address; do not use these functions.
 */

int set_pages_uc(struct page *page, int numpages);
int set_pages_wb(struct page *page, int numpages);
int set_pages_x(struct page *page, int numpages);
int set_pages_nx(struct page *page, int numpages);
int set_pages_ro(struct page *page, int numpages);
int set_pages_rw(struct page *page, int numpages);

void clflush_cache_range(void *addr, unsigned int size);

#ifdef CONFIG_DEBUG_RODATA
void mark_rodata_ro(void);
extern const int rodata_test_data;
void set_kernel_text_rw(void);
void set_kernel_text_ro(void);
#else
static inline void set_kernel_text_rw(void) { }
static inline void set_kernel_text_ro(void) { }
#endif

#ifdef CONFIG_DEBUG_RODATA_TEST
int rodata_test(void);
#else
static inline int rodata_test(void)
{
	return 0;
}
#endif

#endif /* _ASM_X86_CACHEFLUSH_H */