/*
 * Copyright (C) 2013 ARM Ltd.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program.  If not, see <http://www.gnu.org/licenses/>.
 */
#ifndef __ASM_PERCPU_H
#define __ASM_PERCPU_H

static inline void set_my_cpu_offset(unsigned long off)
{
	asm volatile("msr tpidr_el1, %0" :: "r" (off) : "memory");
}

static inline unsigned long __my_cpu_offset(void)
{
	unsigned long off;

	/*
	 * We want to allow caching the value, so avoid using volatile and
	 * instead use a fake stack read to hazard against barrier().
	 */
	asm("mrs %0, tpidr_el1" : "=r" (off) :
		"Q" (*(const unsigned long *)current_stack_pointer));

	return off;
}
#define __my_cpu_offset __my_cpu_offset()

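/*
 * PERCPU_OP() below expands to a size-switched read-modify-write helper
 * (__percpu_add, __percpu_and, __percpu_or).  Each case uses an
 * LDXR/STXR exclusive pair and retries until the store-exclusive
 * succeeds.  No memory barriers are implied; callers are expected to
 * provide preemption protection around the access.
 */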
#define PERCPU_OP(op, asm_op)						\
static inline unsigned long __percpu_##op(void *ptr,			\
			unsigned long val, int size)			\
{									\
	unsigned long loop, ret;					\
									\
	switch (size) {							\
	case 1:								\
		do {							\
			asm ("//__per_cpu_" #op "_1\n"			\
			"ldxrb %w[ret], %[ptr]\n"			\
			#asm_op " %w[ret], %w[ret], %w[val]\n"		\
			"stxrb %w[loop], %w[ret], %[ptr]\n"		\
			: [loop] "=&r" (loop), [ret] "=&r" (ret),	\
			  [ptr] "+Q"(*(u8 *)ptr)			\
			: [val] "Ir" (val));				\
		} while (loop);						\
		break;							\
	case 2:								\
		do {							\
			asm ("//__per_cpu_" #op "_2\n"			\
			"ldxrh %w[ret], %[ptr]\n"			\
			#asm_op " %w[ret], %w[ret], %w[val]\n"		\
			"stxrh %w[loop], %w[ret], %[ptr]\n"		\
			: [loop] "=&r" (loop), [ret] "=&r" (ret),	\
			  [ptr] "+Q"(*(u16 *)ptr)			\
			: [val] "Ir" (val));				\
		} while (loop);						\
		break;							\
	case 4:								\
		do {							\
			asm ("//__per_cpu_" #op "_4\n"			\
			"ldxr %w[ret], %[ptr]\n"			\
			#asm_op " %w[ret], %w[ret], %w[val]\n"		\
			"stxr %w[loop], %w[ret], %[ptr]\n"		\
			: [loop] "=&r" (loop), [ret] "=&r" (ret),	\
			  [ptr] "+Q"(*(u32 *)ptr)			\
			: [val] "Ir" (val));				\
		} while (loop);						\
		break;							\
	case 8:								\
		do {							\
			asm ("//__per_cpu_" #op "_8\n"			\
			"ldxr %[ret], %[ptr]\n"				\
			#asm_op " %[ret], %[ret], %[val]\n"		\
			"stxr %w[loop], %[ret], %[ptr]\n"		\
			: [loop] "=&r" (loop), [ret] "=&r" (ret),	\
			  [ptr] "+Q"(*(u64 *)ptr)			\
			: [val] "Ir" (val));				\
		} while (loop);						\
		break;							\
	default:							\
		BUILD_BUG();						\
	}								\
									\
	return ret;							\
}

PERCPU_OP(add, add)
PERCPU_OP(and, and)
PERCPU_OP(or, orr)
#undef PERCPU_OP

static inline unsigned long __percpu_read(void *ptr, int size)
{
	unsigned long ret;

	switch (size) {
	case 1:
		ret = ACCESS_ONCE(*(u8 *)ptr);
		break;
	case 2:
		ret = ACCESS_ONCE(*(u16 *)ptr);
		break;
	case 4:
		ret = ACCESS_ONCE(*(u32 *)ptr);
		break;
	case 8:
		ret = ACCESS_ONCE(*(u64 *)ptr);
		break;
	default:
		BUILD_BUG();
	}

	return ret;
}

static inline void __percpu_write(void *ptr, unsigned long val, int size)
{
	switch (size) {
	case 1:
		ACCESS_ONCE(*(u8 *)ptr) = (u8)val;
		break;
	case 2:
		ACCESS_ONCE(*(u16 *)ptr) = (u16)val;
		break;
	case 4:
		ACCESS_ONCE(*(u32 *)ptr) = (u32)val;
		break;
	case 8:
		ACCESS_ONCE(*(u64 *)ptr) = (u64)val;
		break;
	default:
		BUILD_BUG();
	}
}

static inline unsigned long __percpu_xchg(void *ptr, unsigned long val,
						int size)
{
	unsigned long ret, loop;

	switch (size) {
	case 1:
		do {
			asm ("//__percpu_xchg_1\n"
			"ldxrb %w[ret], %[ptr]\n"
			"stxrb %w[loop], %w[val], %[ptr]\n"
			: [loop] "=&r"(loop), [ret] "=&r"(ret),
			  [ptr] "+Q"(*(u8 *)ptr)
			: [val] "r" (val));
		} while (loop);
		break;
	case 2:
		do {
			asm ("//__percpu_xchg_2\n"
			"ldxrh %w[ret], %[ptr]\n"
			"stxrh %w[loop], %w[val], %[ptr]\n"
			: [loop] "=&r"(loop), [ret] "=&r"(ret),
			  [ptr] "+Q"(*(u16 *)ptr)
			: [val] "r" (val));
		} while (loop);
		break;
	case 4:
		do {
			asm ("//__percpu_xchg_4\n"
			"ldxr %w[ret], %[ptr]\n"
			"stxr %w[loop], %w[val], %[ptr]\n"
			: [loop] "=&r"(loop), [ret] "=&r"(ret),
			  [ptr] "+Q"(*(u32 *)ptr)
			: [val] "r" (val));
		} while (loop);
		break;
	case 8:
		do {
			asm ("//__percpu_xchg_8\n"
			"ldxr %[ret], %[ptr]\n"
			"stxr %w[loop], %[val], %[ptr]\n"
			: [loop] "=&r"(loop), [ret] "=&r"(ret),
			  [ptr] "+Q"(*(u64 *)ptr)
			: [val] "r" (val));
		} while (loop);
		break;
	default:
		BUILD_BUG();
	}

	return ret;
}

#define _percpu_read(pcp)						\
({									\
	typeof(pcp) __retval;						\
	preempt_disable();						\
	__retval = (typeof(pcp))__percpu_read(raw_cpu_ptr(&(pcp)),	\
					      sizeof(pcp));		\
	preempt_enable();						\
	__retval;							\
})

#define _percpu_write(pcp, val)						\
do {									\
	preempt_disable();						\
	__percpu_write(raw_cpu_ptr(&(pcp)), (unsigned long)(val),	\
				sizeof(pcp));				\
	preempt_enable();						\
} while(0)								\

#define _pcp_protect(operation, pcp, val)				\
({									\
	typeof(pcp) __retval;						\
	preempt_disable();						\
	__retval = (typeof(pcp))operation(raw_cpu_ptr(&(pcp)),		\
					  (val), sizeof(pcp));		\
	preempt_enable();						\
	__retval;							\
})

#define _percpu_add(pcp, val) \
	_pcp_protect(__percpu_add, pcp, val)

#define _percpu_add_return(pcp, val) _percpu_add(pcp, val)

#define _percpu_and(pcp, val) \
	_pcp_protect(__percpu_and, pcp, val)

#define _percpu_or(pcp, val) \
	_pcp_protect(__percpu_or, pcp, val)

#define _percpu_xchg(pcp, val) (typeof(pcp)) \
	_pcp_protect(__percpu_xchg, pcp, (unsigned long)(val))

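/*
 * The size-specific this_cpu_* operations below all funnel into the
 * preempt-protected wrappers above: preemption is disabled around each
 * raw helper so the task cannot migrate between computing the per-cpu
 * address (via the tpidr_el1 offset) and completing the access.
 */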
#define this_cpu_add_1(pcp, val) _percpu_add(pcp, val)
#define this_cpu_add_2(pcp, val) _percpu_add(pcp, val)
#define this_cpu_add_4(pcp, val) _percpu_add(pcp, val)
#define this_cpu_add_8(pcp, val) _percpu_add(pcp, val)

#define this_cpu_add_return_1(pcp, val) _percpu_add_return(pcp, val)
#define this_cpu_add_return_2(pcp, val) _percpu_add_return(pcp, val)
#define this_cpu_add_return_4(pcp, val) _percpu_add_return(pcp, val)
#define this_cpu_add_return_8(pcp, val) _percpu_add_return(pcp, val)

#define this_cpu_and_1(pcp, val) _percpu_and(pcp, val)
#define this_cpu_and_2(pcp, val) _percpu_and(pcp, val)
#define this_cpu_and_4(pcp, val) _percpu_and(pcp, val)
#define this_cpu_and_8(pcp, val) _percpu_and(pcp, val)

#define this_cpu_or_1(pcp, val) _percpu_or(pcp, val)
#define this_cpu_or_2(pcp, val) _percpu_or(pcp, val)
#define this_cpu_or_4(pcp, val) _percpu_or(pcp, val)
#define this_cpu_or_8(pcp, val) _percpu_or(pcp, val)

#define this_cpu_read_1(pcp) _percpu_read(pcp)
#define this_cpu_read_2(pcp) _percpu_read(pcp)
#define this_cpu_read_4(pcp) _percpu_read(pcp)
#define this_cpu_read_8(pcp) _percpu_read(pcp)

#define this_cpu_write_1(pcp, val) _percpu_write(pcp, val)
#define this_cpu_write_2(pcp, val) _percpu_write(pcp, val)
#define this_cpu_write_4(pcp, val) _percpu_write(pcp, val)
#define this_cpu_write_8(pcp, val) _percpu_write(pcp, val)

#define this_cpu_xchg_1(pcp, val) _percpu_xchg(pcp, val)
#define this_cpu_xchg_2(pcp, val) _percpu_xchg(pcp, val)
#define this_cpu_xchg_4(pcp, val) _percpu_xchg(pcp, val)
#define this_cpu_xchg_8(pcp, val) _percpu_xchg(pcp, val)

#include <asm-generic/percpu.h>

#endif /* __ASM_PERCPU_H */
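/*
 * Illustrative usage sketch (not part of this header): the size-suffixed
 * this_cpu_* operations above back the generic this_cpu_*() accessors
 * pulled in via <asm-generic/percpu.h>.  The variable name below is
 * hypothetical.
 *
 *	#include <linux/percpu.h>
 *
 *	static DEFINE_PER_CPU(unsigned long, pkt_count);
 *
 *	static void count_packet(void)
 *	{
 *		// sizeof(unsigned long) == 8, so this dispatches to
 *		// this_cpu_add_8() -> _percpu_add() defined above
 *		this_cpu_add(pkt_count, 1);
 *	}
 *
 *	static unsigned long read_packet_count(int cpu)
 *	{
 *		return per_cpu(pkt_count, cpu);
 *	}
 */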