xref: /openbmc/linux/arch/hexagon/include/asm/cmpxchg.h (revision 06855063)
/* SPDX-License-Identifier: GPL-2.0-only */
/*
 * xchg/cmpxchg operations for the Hexagon architecture
 *
 * Copyright (c) 2010-2011, The Linux Foundation. All rights reserved.
 */

#ifndef _ASM_CMPXCHG_H
#define _ASM_CMPXCHG_H

/*
 * __arch_xchg - atomically exchange a register and a memory location
 * @x: value to swap
 * @ptr: pointer to memory
 * @size:  size of the value
 *
 * Only 4 bytes supported currently.
 *
 * Note:  there was an erratum for V2 about .new's and memw_locked.
 *
 */
static inline unsigned long
__arch_xchg(unsigned long x, volatile void *ptr, int size)
{
	unsigned long retval;

	/*  Can't seem to use printk or panic here, so just stop  */
	if (size != 4) do { asm volatile("brkpt;\n"); } while (1);

	__asm__ __volatile__ (
	"1:	%0 = memw_locked(%1);\n"    /*  load into retval */
	"	memw_locked(%1,P0) = %2;\n" /*  store into memory */
	"	if (!P0) jump 1b;\n"        /*  retry if the store failed */
	: "=&r" (retval)
	: "r" (ptr), "r" (x)
	: "memory", "p0"
	);
	return retval;
}

/*
 * Atomically swap the contents of a register with memory.  Should be atomic
 * between multiple CPUs and within interrupts on the same CPU.
 */
#define arch_xchg(ptr, v) ((__typeof__(*(ptr)))__arch_xchg((unsigned long)(v), (ptr), \
							   sizeof(*(ptr))))
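
/*
 * Illustrative sketch, not part of the original header: a minimal
 * test-and-set style lock built on arch_xchg().  The names
 * example_trylock()/example_unlock() and the use of a plain int as the
 * lock word are assumptions made for this example only.
 */
static inline int example_trylock(volatile int *lock)
{
	/* Swap in 1; if the old value was 0, the caller now owns the lock. */
	return arch_xchg(lock, 1) == 0;
}

static inline void example_unlock(volatile int *lock)
{
	/* Swap 0 back in to release (memory-ordering concerns not shown). */
	(void)arch_xchg(lock, 0);
}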

/*
 *  see rt-mutex-design.txt; cmpxchg supposedly checks if *ptr == A and swaps.
 *  looks just like atomic_cmpxchg on our arch currently with a bunch of
 *  variable casting.
 */

#define arch_cmpxchg(ptr, old, new)				\
({								\
	__typeof__(ptr) __ptr = (ptr);				\
	__typeof__(*(ptr)) __old = (old);			\
	__typeof__(*(ptr)) __new = (new);			\
	__typeof__(*(ptr)) __oldval = 0;			\
								\
	asm volatile(						\
		"1:	%0 = memw_locked(%1);\n"		\
		"	{ P0 = cmp.eq(%0,%2);\n"		\
		"	  if (!P0.new) jump:nt 2f; }\n"		\
		"	memw_locked(%1,p0) = %3;\n"		\
		"	if (!P0) jump 1b;\n"			\
		"2:\n"						\
		: "=&r" (__oldval)				\
		: "r" (__ptr), "r" (__old), "r" (__new)		\
		: "memory", "p0"				\
	);							\
	__oldval;						\
})

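/*
 * Illustrative sketch, not part of the original header: the classic
 * compare-and-swap retry loop, here adding a value to a 32-bit counter.
 * example_atomic_add_return() is a hypothetical name used only to show
 * the read / compute / arch_cmpxchg() / retry pattern.
 */
static inline int example_atomic_add_return(int i, volatile int *counter)
{
	int old, new;

	do {
		old = *counter;		/* snapshot the current value */
		new = old + i;		/* compute the desired new value */
		/* Retry if another CPU changed *counter since the snapshot. */
	} while (arch_cmpxchg(counter, old, new) != old);

	return new;
}
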
#endif /* _ASM_CMPXCHG_H */