/* SPDX-License-Identifier: GPL-2.0-only */
/*
 * Atomic operations for the Hexagon architecture
 *
 * Copyright (c) 2010-2013, The Linux Foundation. All rights reserved.
 */

#ifndef _ASM_ATOMIC_H
#define _ASM_ATOMIC_H

#include <linux/types.h>
#include <asm/cmpxchg.h>
#include <asm/barrier.h>

#define ATOMIC_INIT(i)		{ (i) }

/* Normal writes in our arch don't clear lock reservations */

static inline void atomic_set(atomic_t *v, int new)
{
	asm volatile(
		"1:	r6 = memw_locked(%0);\n"
		"	memw_locked(%0,p0) = %1;\n"
		"	if (!P0) jump 1b;\n"
		:
		: "r" (&v->counter), "r" (new)
		: "memory", "p0", "r6"
	);
}

#define atomic_set_release(v, i)	atomic_set((v), (i))

/**
 * atomic_read - reads a word, atomically
 * @v: pointer to atomic value
 *
 * Assumes all word reads on our architecture are atomic.
 */
#define atomic_read(v)		READ_ONCE((v)->counter)

/**
 * atomic_xchg - atomic exchange of values
 * @v: pointer to memory to change
 * @new: new value (technically passed in a register -- see xchg)
 */
#define atomic_xchg(v, new)	(xchg(&((v)->counter), (new)))


/**
 * atomic_cmpxchg - atomic compare-and-exchange values
 * @v: pointer to value to change
 * @old: desired old value to match
 * @new: new value to put in
 *
 * Parameters are then pointer, value-in-register, value-in-register,
 * and the output is the old value.
 *
 * Apparently this is complicated for archs that don't support
 * the memw_locked like we do (or it's broken or whatever).
 *
 * Kind of the lynchpin of the rest of the generically defined routines.
 * Remember V2 had that bug with dotnew predicate set by memw_locked.
 *
 * "old" is "expected" old val, __oldval is actual old value
 */
static inline int atomic_cmpxchg(atomic_t *v, int old, int new)
{
	int __oldval;

	asm volatile(
		"1:	%0 = memw_locked(%1);\n"
		"	{ P0 = cmp.eq(%0,%2);\n"
		"	  if (!P0.new) jump:nt 2f; }\n"
		"	memw_locked(%1,P0) = %3;\n"
		"	if (!P0) jump 1b;\n"
		"2:\n"
		: "=&r" (__oldval)
		: "r" (&v->counter), "r" (old), "r" (new)
		: "memory", "p0"
	);

	return __oldval;
}

#define ATOMIC_OP(op)							\
static inline void atomic_##op(int i, atomic_t *v)			\
{									\
	int output;							\
									\
	__asm__ __volatile__ (						\
		"1:	%0 = memw_locked(%1);\n"			\
		"	%0 = "#op "(%0,%2);\n"				\
		"	memw_locked(%1,P3)=%0;\n"			\
		"	if (!P3) jump 1b;\n"				\
		: "=&r" (output)					\
		: "r" (&v->counter), "r" (i)				\
		: "memory", "p3"					\
	);								\
}

#define ATOMIC_OP_RETURN(op)						\
static inline int atomic_##op##_return(int i, atomic_t *v)		\
{									\
	int output;							\
									\
	__asm__ __volatile__ (						\
		"1:	%0 = memw_locked(%1);\n"			\
		"	%0 = "#op "(%0,%2);\n"				\
		"	memw_locked(%1,P3)=%0;\n"			\
		"	if (!P3) jump 1b;\n"				\
		: "=&r" (output)					\
		: "r" (&v->counter), "r" (i)				\
		: "memory", "p3"					\
	);								\
	return output;							\
}

#define ATOMIC_FETCH_OP(op)						\
static inline int atomic_fetch_##op(int i, atomic_t *v)		\
{									\
	int output, val;						\
									\
	__asm__ __volatile__ (						\
		"1:	%0 = memw_locked(%2);\n"			\
		"	%1 = "#op "(%0,%3);\n"				\
		"	memw_locked(%2,P3)=%1;\n"			\
		"	if (!P3) jump 1b;\n"				\
		: "=&r" (output), "=&r" (val)				\
		: "r" (&v->counter), "r" (i)				\
		: "memory", "p3"					\
	);								\
	return output;							\
}

#define ATOMIC_OPS(op) ATOMIC_OP(op) ATOMIC_OP_RETURN(op) ATOMIC_FETCH_OP(op)

ATOMIC_OPS(add)
ATOMIC_OPS(sub)

#undef ATOMIC_OPS
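/*
 * For reference, ATOMIC_OPS(add) above instantiates atomic_add(),
 * atomic_add_return() and atomic_fetch_add().  ATOMIC_OP(add), for
 * example, expands to roughly the following (a readability sketch of
 * the same load-locked/store-conditional retry loop, not a separate
 * definition):
 *
 *	static inline void atomic_add(int i, atomic_t *v)
 *	{
 *		int output;
 *
 *		__asm__ __volatile__ (
 *			"1:	%0 = memw_locked(%1);\n"
 *			"	%0 = add(%0,%2);\n"
 *			"	memw_locked(%1,P3)=%0;\n"
 *			"	if (!P3) jump 1b;\n"
 *			: "=&r" (output)
 *			: "r" (&v->counter), "r" (i)
 *			: "memory", "p3"
 *		);
 *	}
 */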
#define ATOMIC_OPS(op) ATOMIC_OP(op) ATOMIC_FETCH_OP(op)

ATOMIC_OPS(and)
ATOMIC_OPS(or)
ATOMIC_OPS(xor)

#undef ATOMIC_OPS
#undef ATOMIC_FETCH_OP
#undef ATOMIC_OP_RETURN
#undef ATOMIC_OP

/**
 * atomic_fetch_add_unless - add unless the value equals a given value
 * @v: pointer to value
 * @a: amount to add
 * @u: don't add if the current value equals this
 *
 * Returns the old value; the add is skipped if that old value equals @u.
 */
static inline int atomic_fetch_add_unless(atomic_t *v, int a, int u)
{
	int __oldval;
	register int tmp;

	asm volatile(
		"1:	%0 = memw_locked(%2);"
		"	{"
		"		p3 = cmp.eq(%0, %4);"
		"		if (p3.new) jump:nt 2f;"
		"		%1 = add(%0, %3);"
		"	}"
		"	memw_locked(%2, p3) = %1;"
		"	{"
		"		if (!p3) jump 1b;"
		"	}"
		"2:"
		: "=&r" (__oldval), "=&r" (tmp)
		: "r" (v), "r" (a), "r" (u)
		: "memory", "p3"
	);
	return __oldval;
}
#define atomic_fetch_add_unless atomic_fetch_add_unless

#endif
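/*
 * Usage sketch (hypothetical caller, not part of this header):
 * atomic_fetch_add_unless() can implement a "take a reference only if
 * it hasn't already dropped to zero" pattern.  The function name below
 * is illustrative only.
 *
 *	static inline bool obj_get_unless_zero(atomic_t *refcount)
 *	{
 *		return atomic_fetch_add_unless(refcount, 1, 0) != 0;
 *	}
 */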