/* SPDX-License-Identifier: GPL-2.0 */
/*
 * Copyright (C) 2020 ARM Ltd.
 */
#ifndef __ASM_MTE_KASAN_H
#define __ASM_MTE_KASAN_H

#include <asm/mte-def.h>

#ifndef __ASSEMBLY__

#include <linux/types.h>

#ifdef CONFIG_ARM64_MTE

/*
 * These functions are meant to be only used from KASAN runtime through
 * the arch_*() interface defined in asm/memory.h.
 * These functions don't include system_supports_mte() checks,
 * as KASAN only calls them when MTE is supported and enabled.
 */

static inline u8 mte_get_ptr_tag(void *ptr)
{
	/* Note: The format of KASAN tags is 0xF<x> */
	u8 tag = 0xF0 | (u8)(((u64)(ptr)) >> MTE_TAG_SHIFT);

	return tag;
}

/* Get allocation tag for the address. */
static inline u8 mte_get_mem_tag(void *addr)
{
	asm(__MTE_PREAMBLE "ldg %0, [%0]"
		: "+r" (addr));

	return mte_get_ptr_tag(addr);
}

/* Generate a random tag. */
static inline u8 mte_get_random_tag(void)
{
	void *addr;

	asm(__MTE_PREAMBLE "irg %0, %0"
		: "=r" (addr));

	return mte_get_ptr_tag(addr);
}

static inline u64 __stg_post(u64 p)
{
	asm volatile(__MTE_PREAMBLE "stg %0, [%0], #16"
		     : "+r"(p)
		     :
		     : "memory");
	return p;
}

static inline u64 __stzg_post(u64 p)
{
	asm volatile(__MTE_PREAMBLE "stzg %0, [%0], #16"
		     : "+r"(p)
		     :
		     : "memory");
	return p;
}

static inline void __dc_gva(u64 p)
{
	asm volatile(__MTE_PREAMBLE "dc gva, %0" : : "r"(p) : "memory");
}

static inline void __dc_gzva(u64 p)
{
	asm volatile(__MTE_PREAMBLE "dc gzva, %0" : : "r"(p) : "memory");
}

/*
 * Assign allocation tags for a region of memory based on the pointer tag.
 * Note: The address must be non-NULL and MTE_GRANULE_SIZE aligned and
 * size must be MTE_GRANULE_SIZE aligned.
 */
static inline void mte_set_mem_tag_range(void *addr, size_t size, u8 tag,
					 bool init)
{
	u64 curr, mask, dczid_bs, end1, end2, end3;

	/* Read DC G(Z)VA block size from the system register. */
	dczid_bs = 4ul << (read_cpuid(DCZID_EL0) & 0xf);

	curr = (u64)__tag_set(addr, tag);
	mask = dczid_bs - 1;
	/* STG/STZG up to the end of the first block. */
	end1 = curr | mask;
	end3 = curr + size;
	/* DC GVA / GZVA in [end1, end2) */
	end2 = end3 & ~mask;

	/*
	 * The following code uses STG on the first DC GVA block even if the
	 * start address is aligned - it appears to be faster than an alignment
	 * check + conditional branch. Also, if the range size is at least 2 DC
	 * GVA blocks, the first two loops can use post-condition to save one
	 * branch each.
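	 *
	 * In outline: the first loop uses STG/STZG to tag granules up to the
	 * first DC block boundary (end1), the second loop tags whole blocks
	 * with DC GVA/GZVA up to the last aligned boundary (end2), and the
	 * final loop tags the remaining granules up to end3. Ranges shorter
	 * than two blocks are handled entirely by the final STG/STZG loop.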
	 */
#define SET_MEMTAG_RANGE(stg_post, dc_gva)		\
	do {						\
		if (size >= 2 * dczid_bs) {		\
			do {				\
				curr = stg_post(curr);	\
			} while (curr < end1);		\
							\
			do {				\
				dc_gva(curr);		\
				curr += dczid_bs;	\
			} while (curr < end2);		\
		}					\
							\
		while (curr < end3)			\
			curr = stg_post(curr);		\
	} while (0)

	if (init)
		SET_MEMTAG_RANGE(__stzg_post, __dc_gzva);
	else
		SET_MEMTAG_RANGE(__stg_post, __dc_gva);
#undef SET_MEMTAG_RANGE
}

void mte_enable_kernel_sync(void);
void mte_enable_kernel_async(void);

#else /* CONFIG_ARM64_MTE */

static inline u8 mte_get_ptr_tag(void *ptr)
{
	return 0xFF;
}

static inline u8 mte_get_mem_tag(void *addr)
{
	return 0xFF;
}

static inline u8 mte_get_random_tag(void)
{
	return 0xFF;
}

static inline void mte_set_mem_tag_range(void *addr, size_t size,
					 u8 tag, bool init)
{
}

static inline void mte_enable_kernel_sync(void)
{
}

static inline void mte_enable_kernel_async(void)
{
}

#endif /* CONFIG_ARM64_MTE */

#endif /* __ASSEMBLY__ */

#endif /* __ASM_MTE_KASAN_H */