/* SPDX-License-Identifier: GPL-2.0-only */

#ifndef __ASM_GENERIC_BITOPS_GENERIC_NON_ATOMIC_H
#define __ASM_GENERIC_BITOPS_GENERIC_NON_ATOMIC_H

#include <linux/bits.h>

#ifndef _LINUX_BITOPS_H
#error only <linux/bitops.h> can be included directly
#endif

/*
 * Generic definitions for bit operations, should not be used in regular code
 * directly.
 */

/**
 * generic___set_bit - Set a bit in memory
 * @nr: the bit to set
 * @addr: the address to start counting from
 *
 * Unlike set_bit(), this function is non-atomic and may be reordered.
 * If it's called on the same region of memory simultaneously, the effect
 * may be that only one operation succeeds.
 */
static __always_inline void
generic___set_bit(unsigned long nr, volatile unsigned long *addr)
{
	unsigned long mask = BIT_MASK(nr);
	/* Non-atomic op: casting away volatile here is intentional. */
	unsigned long *p = ((unsigned long *)addr) + BIT_WORD(nr);

	*p |= mask;
}

/**
 * generic___clear_bit - Clears a bit in memory
 * @nr: the bit to clear
 * @addr: the address to start counting from
 *
 * Unlike clear_bit(), this function is non-atomic and may be reordered.
 * If it's called on the same region of memory simultaneously, the effect
 * may be that only one operation succeeds.
 */
static __always_inline void
generic___clear_bit(unsigned long nr, volatile unsigned long *addr)
{
	unsigned long mask = BIT_MASK(nr);
	/* Non-atomic op: casting away volatile here is intentional. */
	unsigned long *p = ((unsigned long *)addr) + BIT_WORD(nr);

	*p &= ~mask;
}

/**
 * generic___change_bit - Toggle a bit in memory
 * @nr: the bit to change
 * @addr: the address to start counting from
 *
 * Unlike change_bit(), this function is non-atomic and may be reordered.
 * If it's called on the same region of memory simultaneously, the effect
 * may be that only one operation succeeds.
 */
static __always_inline void
generic___change_bit(unsigned long nr, volatile unsigned long *addr)
{
	unsigned long mask = BIT_MASK(nr);
	/* Non-atomic op: casting away volatile here is intentional. */
	unsigned long *p = ((unsigned long *)addr) + BIT_WORD(nr);

	*p ^= mask;
}

/**
 * generic___test_and_set_bit - Set a bit and return its old value
 * @nr: Bit to set
 * @addr: Address to count from
 *
 * This operation is non-atomic and can be reordered.
 * If two examples of this operation race, one can appear to succeed
 * but actually fail. You must protect multiple accesses with a lock.
 */
static __always_inline bool
generic___test_and_set_bit(unsigned long nr, volatile unsigned long *addr)
{
	unsigned long mask = BIT_MASK(nr);
	unsigned long *p = ((unsigned long *)addr) + BIT_WORD(nr);
	/* Plain (non-atomic) read-modify-write of the target word. */
	unsigned long old = *p;

	*p = old | mask;
	return (old & mask) != 0;
}

/**
 * generic___test_and_clear_bit - Clear a bit and return its old value
 * @nr: Bit to clear
 * @addr: Address to count from
 *
 * This operation is non-atomic and can be reordered.
 * If two examples of this operation race, one can appear to succeed
 * but actually fail. You must protect multiple accesses with a lock.
 */
static __always_inline bool
generic___test_and_clear_bit(unsigned long nr, volatile unsigned long *addr)
{
	unsigned long mask = BIT_MASK(nr);
	unsigned long *p = ((unsigned long *)addr) + BIT_WORD(nr);
	/* Plain (non-atomic) read-modify-write of the target word. */
	unsigned long old = *p;

	*p = old & ~mask;
	return (old & mask) != 0;
}

/**
 * generic___test_and_change_bit - Toggle a bit and return its old value
 * @nr: Bit to toggle
 * @addr: Address to count from
 *
 * WARNING: non atomic and it can be reordered!
 * If two examples of this operation race, one can appear to succeed
 * but actually fail. You must protect multiple accesses with a lock.
 */
static __always_inline bool
generic___test_and_change_bit(unsigned long nr, volatile unsigned long *addr)
{
	unsigned long mask = BIT_MASK(nr);
	unsigned long *p = ((unsigned long *)addr) + BIT_WORD(nr);
	/* Plain (non-atomic) read-modify-write of the target word. */
	unsigned long old = *p;

	*p = old ^ mask;
	return (old & mask) != 0;
}

/**
 * generic_test_bit - Determine whether a bit is set
 * @nr: bit number to test
 * @addr: Address to start counting from
 */
static __always_inline bool
generic_test_bit(unsigned long nr, const volatile unsigned long *addr)
{
	/*
	 * Unlike the bitops with the '__' prefix above, this one *is* atomic,
	 * so `volatile` must always stay here with no cast-aways. See
	 * `Documentation/atomic_bitops.txt` for the details.
	 */
	return 1UL & (addr[BIT_WORD(nr)] >> (nr & (BITS_PER_LONG-1)));
}

/*
 * const_*() definitions provide good compile-time optimizations when
 * the passed arguments can be resolved at compile time.
 */
#define const___set_bit			generic___set_bit
#define const___clear_bit		generic___clear_bit
#define const___change_bit		generic___change_bit
#define const___test_and_set_bit	generic___test_and_set_bit
#define const___test_and_clear_bit	generic___test_and_clear_bit
#define const___test_and_change_bit	generic___test_and_change_bit

/**
 * const_test_bit - Determine whether a bit is set
 * @nr: bit number to test
 * @addr: Address to start counting from
 *
 * A version of generic_test_bit() which discards the `volatile` qualifier to
 * allow a compiler to optimize code harder. Non-atomic and to be called only
 * for testing compile-time constants, e.g. by the corresponding macros, not
 * directly from "regular" code.
 */
static __always_inline bool
const_test_bit(unsigned long nr, const volatile unsigned long *addr)
{
	/* Dropping `volatile` is deliberate: enables constant folding. */
	const unsigned long *p = (const unsigned long *)addr + BIT_WORD(nr);
	unsigned long mask = BIT_MASK(nr);
	unsigned long val = *p;

	return !!(val & mask);
}

#endif /* __ASM_GENERIC_BITOPS_GENERIC_NON_ATOMIC_H */