#include <linux/percpu.h>
#include <linux/sched.h>
#include <linux/osq_lock.h>

/*
 * An MCS-like lock especially tailored for optimistic spinning for sleeping
 * lock implementations (mutex, rwsem, etc).
 *
 * Using a single mcs node per CPU is safe because sleeping locks should not be
 * called from interrupt context and we have preemption disabled while
 * spinning.
 */
static DEFINE_PER_CPU_SHARED_ALIGNED(struct optimistic_spin_node, osq_node);

/*
 * We use the value 0 to represent "no CPU", thus the encoded value
 * will be the CPU number incremented by 1.
 */
static inline int encode_cpu(int cpu_nr)
{
	return cpu_nr + 1;
}

static inline int node_cpu(struct optimistic_spin_node *node)
{
	return node->cpu - 1;
}

static inline struct optimistic_spin_node *decode_cpu(int encoded_cpu_val)
{
	int cpu_nr = encoded_cpu_val - 1;

	return per_cpu_ptr(&osq_node, cpu_nr);
}

/*
 * Get a stable @node->next pointer, either for unlock() or unqueue() purposes.
 * Can return NULL in case we were the last queued and we updated @lock instead.
 */
static inline struct optimistic_spin_node *
osq_wait_next(struct optimistic_spin_queue *lock,
	      struct optimistic_spin_node *node,
	      struct optimistic_spin_node *prev)
{
	struct optimistic_spin_node *next = NULL;
	int curr = encode_cpu(smp_processor_id());
	int old;

	/*
	 * If there is a prev node in queue, then the 'old' value will be
	 * the prev node's CPU #, else it's set to OSQ_UNLOCKED_VAL since,
	 * if we're currently last in queue, the queue will become empty.
	 */
	old = prev ? prev->cpu : OSQ_UNLOCKED_VAL;

	for (;;) {
		if (atomic_read(&lock->tail) == curr &&
		    atomic_cmpxchg_acquire(&lock->tail, curr, old) == curr) {
			/*
			 * We were the last queued, we moved @lock back. @prev
			 * will now observe @lock and will complete its
			 * unlock()/unqueue().
			 */
			break;
		}

		/*
		 * We must xchg() the @node->next value, because if we were to
		 * leave it in, a concurrent unlock()/unqueue() from
		 * @node->next might complete Step-A and think its @prev is
		 * still valid.
		 *
		 * If the concurrent unlock()/unqueue() wins the race, we'll
		 * wait for either @lock to point to us, through its Step-B, or
		 * wait for a new @node->next from its Step-C.
		 */
		if (node->next) {
			next = xchg(&node->next, NULL);
			if (next)
				break;
		}

		cpu_relax();
	}

	return next;
}

bool osq_lock(struct optimistic_spin_queue *lock)
{
	struct optimistic_spin_node *node = this_cpu_ptr(&osq_node);
	struct optimistic_spin_node *prev, *next;
	int curr = encode_cpu(smp_processor_id());
	int old;

	node->locked = 0;
	node->next = NULL;
	node->cpu = curr;

	/*
	 * We need both ACQUIRE (pairs with corresponding RELEASE in
	 * unlock() uncontended, or fastpath) and RELEASE (to publish
	 * the node fields we just initialised) semantics when updating
	 * the lock tail.
	 */
	old = atomic_xchg(&lock->tail, curr);
	if (old == OSQ_UNLOCKED_VAL)
		return true;

	prev = decode_cpu(old);
	node->prev = prev;
	WRITE_ONCE(prev->next, node);

	/*
	 * Normally @prev is untouchable after the above store; because at that
	 * moment unlock can proceed and wipe the node element from stack.
	 *
	 * However, since our nodes are static per-cpu storage, we're
	 * guaranteed their existence -- this allows us to apply
	 * cmpxchg in an attempt to undo our queueing.
	 */

	while (!READ_ONCE(node->locked)) {
		/*
		 * If we need to reschedule, bail... so we can block.
		 * Use vcpu_is_preempted() to avoid waiting for a preempted
		 * lock holder:
		 */
		if (need_resched() || vcpu_is_preempted(node_cpu(node->prev)))
			goto unqueue;

		cpu_relax();
	}
	return true;

unqueue:
	/*
	 * Step - A -- stabilize @prev
	 *
	 * Undo our @prev->next assignment; this will make @prev's
	 * unlock()/unqueue() wait for a next pointer since @lock points to us
	 * (or later).
	 */

	for (;;) {
		if (prev->next == node &&
		    cmpxchg(&prev->next, node, NULL) == node)
			break;

		/*
		 * We can only fail the cmpxchg() racing against an unlock(),
		 * in which case we should observe @node->locked becoming
		 * true.
		 */
		if (smp_load_acquire(&node->locked))
			return true;

		cpu_relax();

		/*
		 * Or we race against a concurrent unqueue()'s step-B, in which
		 * case its step-C will write us a new @node->prev pointer.
		 */
		prev = READ_ONCE(node->prev);
	}

	/*
	 * Step - B -- stabilize @next
	 *
	 * Similar to unlock(), wait for @node->next or move @lock from @node
	 * back to @prev.
	 */

	next = osq_wait_next(lock, node, prev);
	if (!next)
		return false;

	/*
	 * Step - C -- unlink
	 *
	 * @prev is stable because it's still waiting for a new @prev->next
	 * pointer, @next is stable because our @node->next pointer is NULL and
	 * it will wait in Step-A.
	 */

	WRITE_ONCE(next->prev, prev);
	WRITE_ONCE(prev->next, next);

	return false;
}

void osq_unlock(struct optimistic_spin_queue *lock)
{
	struct optimistic_spin_node *node, *next;
	int curr = encode_cpu(smp_processor_id());

	/*
	 * Fast path for the uncontended case.
	 */
	if (likely(atomic_cmpxchg_release(&lock->tail, curr,
					  OSQ_UNLOCKED_VAL) == curr))
		return;

	/*
	 * Second most likely case.
	 */
	node = this_cpu_ptr(&osq_node);
	next = xchg(&node->next, NULL);
	if (next) {
		WRITE_ONCE(next->locked, 1);
		return;
	}

	next = osq_wait_next(lock, node, NULL);
	if (next)
		WRITE_ONCE(next->locked, 1);
}
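/*
 * Illustrative sketch only (not part of the original file): how a sleeping
 * lock's slowpath might drive osq_lock()/osq_unlock() around an optimistic
 * spin. The struct and helpers below (my_sleeping_lock, my_lock_try_acquire(),
 * my_lock_owner_running()) are hypothetical stand-ins, not kernel APIs; the
 * real users are the mutex and rwsem optimistic-spin paths. The embedded
 * optimistic_spin_queue would be set up with osq_lock_init().
 */
struct my_sleeping_lock {
	atomic_t			owner;	/* hypothetical: 0 == unowned */
	struct optimistic_spin_queue	osq;	/* per-lock OSQ tail */
};

static bool my_lock_try_acquire(struct my_sleeping_lock *lock)
{
	/* Hypothetical acquire: claim the owner word if it is free. */
	return atomic_cmpxchg_acquire(&lock->owner, 0, 1) == 0;
}

static bool my_lock_owner_running(struct my_sleeping_lock *lock)
{
	/*
	 * Hypothetical heuristic: keep spinning only while someone owns the
	 * lock; real code checks the owner task's ->on_cpu instead.
	 */
	return atomic_read(&lock->owner) != 0;
}

static bool my_lock_optimistic_spin(struct my_sleeping_lock *lock)
{
	bool acquired = false;

	/*
	 * The single per-CPU osq_node is only safe while preemption is
	 * disabled for the whole osq_lock()/osq_unlock() window.
	 */
	preempt_disable();

	/* Queue on the OSQ; only the queue head spins on the lock itself. */
	if (!osq_lock(&lock->osq))
		goto out;

	for (;;) {
		if (my_lock_try_acquire(lock)) {
			acquired = true;
			break;
		}

		/* Stop spinning and fall back to blocking when appropriate. */
		if (need_resched() || !my_lock_owner_running(lock))
			break;

		cpu_relax();
	}

	osq_unlock(&lock->osq);
out:
	preempt_enable();
	return acquired;
}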