/* SPDX-License-Identifier: GPL-2.0-only */
/*
 * Atomic operations for the Hexagon architecture
 *
 * Copyright (c) 2010-2013, The Linux Foundation. All rights reserved.
 */

#ifndef _ASM_ATOMIC_H
#define _ASM_ATOMIC_H

#include <linux/types.h>
#include <asm/cmpxchg.h>
#include <asm/barrier.h>

/*  Normal writes in our arch don't clear lock reservations  */

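/*
 * arch_atomic_set() cannot be a plain store: since normal writes do not
 * clear lock reservations, an in-flight load-locked/store-conditional
 * sequence on another thread could still succeed afterwards and silently
 * overwrite the value being set.  Looping on a locked store forces any
 * such competitor to retry and observe the new value.
 */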
static inline void arch_atomic_set(atomic_t *v, int new)
{
	asm volatile(
		"1:	r6 = memw_locked(%0);\n"
		"	memw_locked(%0,p0) = %1;\n"
		"	if (!P0) jump 1b;\n"
		:
		: "r" (&v->counter), "r" (new)
		: "memory", "p0", "r6"
	);
}

#define arch_atomic_set_release(v, i)	arch_atomic_set((v), (i))

#define arch_atomic_read(v)		READ_ONCE((v)->counter)

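/*
 * The three macro families below generate the same load-locked/
 * store-conditional retry loop and differ only in what they return:
 * ATOMIC_OP() discards the result, ATOMIC_OP_RETURN() returns the new
 * value, and ATOMIC_FETCH_OP() returns the value that was observed
 * before the operation.  The store-conditional leaves P3 false when
 * another thread broke the reservation, restarting the loop at 1:.
 */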
#define ATOMIC_OP(op)							\
static inline void arch_atomic_##op(int i, atomic_t *v)			\
{									\
	int output;							\
									\
	__asm__ __volatile__ (						\
		"1:	%0 = memw_locked(%1);\n"			\
		"	%0 = "#op "(%0,%2);\n"				\
		"	memw_locked(%1,P3)=%0;\n"			\
		"	if (!P3) jump 1b;\n"				\
		: "=&r" (output)					\
		: "r" (&v->counter), "r" (i)				\
		: "memory", "p3"					\
	);								\
}

#define ATOMIC_OP_RETURN(op)						\
static inline int arch_atomic_##op##_return(int i, atomic_t *v)		\
{									\
	int output;							\
									\
	__asm__ __volatile__ (						\
		"1:	%0 = memw_locked(%1);\n"			\
		"	%0 = "#op "(%0,%2);\n"				\
		"	memw_locked(%1,P3)=%0;\n"			\
		"	if (!P3) jump 1b;\n"				\
		: "=&r" (output)					\
		: "r" (&v->counter), "r" (i)				\
		: "memory", "p3"					\
	);								\
	return output;							\
}

#define ATOMIC_FETCH_OP(op)						\
static inline int arch_atomic_fetch_##op(int i, atomic_t *v)		\
{									\
	int output, val;						\
									\
	__asm__ __volatile__ (						\
		"1:	%0 = memw_locked(%2);\n"			\
		"	%1 = "#op "(%0,%3);\n"				\
		"	memw_locked(%2,P3)=%1;\n"			\
		"	if (!P3) jump 1b;\n"				\
		: "=&r" (output), "=&r" (val)				\
		: "r" (&v->counter), "r" (i)				\
		: "memory", "p3"					\
	);								\
	return output;							\
}

#define ATOMIC_OPS(op) ATOMIC_OP(op) ATOMIC_OP_RETURN(op) ATOMIC_FETCH_OP(op)

ATOMIC_OPS(add)
ATOMIC_OPS(sub)
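/*
 * For illustration, ATOMIC_OPS(add) above expands into three inline
 * definitions with these signatures (a sketch, not additional code):
 *
 *	static inline void arch_atomic_add(int i, atomic_t *v);
 *	static inline int arch_atomic_add_return(int i, atomic_t *v);
 *	static inline int arch_atomic_fetch_add(int i, atomic_t *v);
 */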

#define arch_atomic_add_return		arch_atomic_add_return
#define arch_atomic_sub_return		arch_atomic_sub_return
#define arch_atomic_fetch_add		arch_atomic_fetch_add
#define arch_atomic_fetch_sub		arch_atomic_fetch_sub

#undef ATOMIC_OPS
#define ATOMIC_OPS(op) ATOMIC_OP(op) ATOMIC_FETCH_OP(op)
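/*
 * Only the void and fetch forms are needed for the bitwise operations;
 * the kernel's atomic API defines no and/or/xor *_return variants, so
 * ATOMIC_OPS is narrowed before instantiating them.
 */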

ATOMIC_OPS(and)
ATOMIC_OPS(or)
ATOMIC_OPS(xor)

#define arch_atomic_fetch_and		arch_atomic_fetch_and
#define arch_atomic_fetch_or		arch_atomic_fetch_or
#define arch_atomic_fetch_xor		arch_atomic_fetch_xor

#undef ATOMIC_OPS
#undef ATOMIC_FETCH_OP
#undef ATOMIC_OP_RETURN
#undef ATOMIC_OP

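/**
 * arch_atomic_fetch_add_unless - add unless the number is a given value
 * @v: pointer to value
 * @a: amount to add
 * @u: unless value is equal to u
 *
 * Returns the old value.
 */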
static inline int arch_atomic_fetch_add_unless(atomic_t *v, int a, int u)
{
	int __oldval;
	register int tmp;

	asm volatile(
		"1:	%0 = memw_locked(%2);"
		"	{"
		"		p3 = cmp.eq(%0, %4);"
		"		if (p3.new) jump:nt 2f;"
		"		%1 = add(%0, %3);"
		"	}"
		"	memw_locked(%2, p3) = %1;"
		"	{"
		"		if (!p3) jump 1b;"
		"	}"
		"2:"
		: "=&r" (__oldval), "=&r" (tmp)
		: "r" (v), "r" (a), "r" (u)
		: "memory", "p3"
	);
	return __oldval;
}
#define arch_atomic_fetch_add_unless arch_atomic_fetch_add_unless
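/*
 * Usage sketch (hypothetical caller, not part of this header):
 * increment a reference count only if it has not already dropped to
 * zero:
 *
 *	if (arch_atomic_fetch_add_unless(&obj->refcnt, 1, 0) == 0)
 *		return NULL;	// object already being torn down
 */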

#endif /* _ASM_ATOMIC_H */