// SPDX-License-Identifier: GPL-2.0-or-later
/*
 * Copyright (C) 2017 Imagination Technologies
 * Author: Paul Burton <paul.burton@mips.com>
 */

#include <linux/bitops.h>
#include <asm/cmpxchg.h>

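/*
 * __xchg_small() - atomically exchange a sub-word (1 or 2 byte) value.
 *
 * MIPS LL/SC operate on whole words, so a sub-word exchange is emulated here
 * with a cmpxchg() on the naturally aligned 32-bit word that contains the
 * target bytes. Returns the previous value, masked to @size. Callers are
 * expected to reach this via xchg() in asm/cmpxchg.h when the operand is
 * narrower than 4 bytes.
 */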
unsigned long __xchg_small(volatile void *ptr, unsigned long val, unsigned int size)
{
	u32 old32, new32, load32, mask;
	volatile u32 *ptr32;
	unsigned int shift;

	/* Check that ptr is naturally aligned */
	WARN_ON((unsigned long)ptr & (size - 1));

	/* Mask value to the correct size. */
	mask = GENMASK((size * BITS_PER_BYTE) - 1, 0);
	val &= mask;

	/*
	 * Calculate a shift & mask that correspond to the value we wish to
	 * exchange within the naturally aligned 4 byte integer that includes
	 * it.
	 */
	shift = (unsigned long)ptr & 0x3;
	if (IS_ENABLED(CONFIG_CPU_BIG_ENDIAN))
		shift ^= sizeof(u32) - size;
	shift *= BITS_PER_BYTE;
	mask <<= shift;
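	/*
	 * Example (little-endian): exchanging a single byte at an address
	 * with low bits 0b01 yields shift = 8 and mask = 0x0000ff00, i.e.
	 * bits 15:8 of the containing 32-bit word. On big-endian the shift
	 * is mirrored (shift = 16, mask = 0x00ff0000) since byte 0 is the
	 * most significant byte of the word.
	 */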

	/*
	 * Calculate a pointer to the naturally aligned 4 byte integer that
	 * includes our byte of interest, and load its value.
	 */
	ptr32 = (volatile u32 *)((unsigned long)ptr & ~0x3);
	load32 = *ptr32;

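	/*
	 * Build a copy of the 32-bit word with our bytes replaced by val and
	 * try to store it back with cmpxchg. arch_cmpxchg() returns the value
	 * it observed, so if another CPU modified the word concurrently the
	 * loop retries with that fresh value.
	 */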
	do {
		old32 = load32;
		new32 = (load32 & ~mask) | (val << shift);
		load32 = arch_cmpxchg(ptr32, old32, new32);
	} while (load32 != old32);

	return (load32 & mask) >> shift;
}

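/*
 * __cmpxchg_small() - emulate a sub-word (1 or 2 byte) cmpxchg using a
 * 32-bit cmpxchg on the naturally aligned word that contains the value.
 *
 * Returns the value observed at ptr, masked to @size: old on success, the
 * differing current value on failure, matching full-width cmpxchg()
 * semantics. Expected to be reached via cmpxchg() in asm/cmpxchg.h when the
 * operand is narrower than 4 bytes.
 */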
unsigned long __cmpxchg_small(volatile void *ptr, unsigned long old,
			      unsigned long new, unsigned int size)
{
	u32 mask, old32, new32, load32, load;
	volatile u32 *ptr32;
	unsigned int shift;

	/* Check that ptr is naturally aligned */
	WARN_ON((unsigned long)ptr & (size - 1));

	/* Mask inputs to the correct size. */
	mask = GENMASK((size * BITS_PER_BYTE) - 1, 0);
	old &= mask;
	new &= mask;

	/*
	 * Calculate a shift & mask that correspond to the value we wish to
	 * compare & exchange within the naturally aligned 4 byte integer
	 * that includes it.
	 */
	shift = (unsigned long)ptr & 0x3;
	if (IS_ENABLED(CONFIG_CPU_BIG_ENDIAN))
		shift ^= sizeof(u32) - size;
	shift *= BITS_PER_BYTE;
	mask <<= shift;

	/*
	 * Calculate a pointer to the naturally aligned 4 byte integer that
	 * includes our byte of interest, and load its value.
	 */
	ptr32 = (volatile u32 *)((unsigned long)ptr & ~0x3);
	load32 = *ptr32;

	while (true) {
		/*
		 * Ensure the byte we want to exchange matches the expected
		 * old value, and if not then bail.
		 */
		load = (load32 & mask) >> shift;
		if (load != old)
			return load;

		/*
		 * Calculate the old & new values of the naturally aligned
		 * 4 byte integer that include the byte we want to exchange.
		 * Attempt to exchange the old value for the new value, and
		 * return if we succeed.
		 */
		old32 = (load32 & ~mask) | (old << shift);
		new32 = (load32 & ~mask) | (new << shift);
		load32 = arch_cmpxchg(ptr32, old32, new32);
		if (load32 == old32)
			return old;
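		/*
		 * The 32-bit cmpxchg failed, so another CPU changed the word.
		 * If only bytes outside our mask changed, the target value
		 * may still equal old, so loop again using the fresh value
		 * arch_cmpxchg() returned in load32.
		 */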
	}
}