/*
 * Out of line spinlock code.
 *
 * Copyright IBM Corp. 2004, 2006
 * Author(s): Martin Schwidefsky (schwidefsky@de.ibm.com)
 */

#include <linux/types.h>
#include <linux/module.h>
#include <linux/spinlock.h>
#include <linux/init.h>
#include <linux/smp.h>
#include <asm/io.h>

int spin_retry = 1000;

/**
 * spin_retry= parameter
 */
static int __init spin_retry_setup(char *str)
{
	spin_retry = simple_strtoul(str, &str, 0);
	return 1;
}
__setup("spin_retry=", spin_retry_setup);

void arch_spin_lock_wait(arch_spinlock_t *lp)
{
	unsigned int cpu = SPINLOCK_LOCKVAL;
	unsigned int owner;
	int count;

	while (1) {
		owner = ACCESS_ONCE(lp->lock);
		/* Try to get the lock if it is free. */
		if (!owner) {
			if (_raw_compare_and_swap(&lp->lock, 0, cpu))
				return;
			continue;
		}
		/* Check if the lock owner is running. */
		if (!smp_vcpu_scheduled(~owner)) {
			smp_yield_cpu(~owner);
			continue;
		}
		/* Loop for a while on the lock value. */
		count = spin_retry;
		do {
			owner = ACCESS_ONCE(lp->lock);
		} while (owner && count-- > 0);
		if (!owner)
			continue;
		/*
		 * For multiple layers of hypervisors, e.g. z/VM + LPAR
		 * yield the CPU if the lock is still unavailable.
		 */
		if (!MACHINE_IS_LPAR)
			smp_yield_cpu(~owner);
	}
}
EXPORT_SYMBOL(arch_spin_lock_wait);
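
/*
 * Variant of arch_spin_lock_wait(): spin with the caller's saved
 * interrupt state restored and disable interrupts again only around
 * the compare-and-swap that actually takes the lock (presumably the
 * slow path behind the inline arch_spin_lock_flags()). The lock word
 * encoding and the directed yield via ~owner are the same as above.
 */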
void arch_spin_lock_wait_flags(arch_spinlock_t *lp, unsigned long flags)
{
	unsigned int cpu = SPINLOCK_LOCKVAL;
	unsigned int owner;
	int count;

	local_irq_restore(flags);
	while (1) {
		owner = ACCESS_ONCE(lp->lock);
		/* Try to get the lock if it is free. */
		if (!owner) {
			local_irq_disable();
			if (_raw_compare_and_swap(&lp->lock, 0, cpu))
				return;
			local_irq_restore(flags);
		}
		/* Check if the lock owner is running. */
		if (!smp_vcpu_scheduled(~owner)) {
			smp_yield_cpu(~owner);
			continue;
		}
		/* Loop for a while on the lock value. */
		count = spin_retry;
		do {
			owner = ACCESS_ONCE(lp->lock);
		} while (owner && count-- > 0);
		if (!owner)
			continue;
		/*
		 * For multiple layers of hypervisors, e.g. z/VM + LPAR
		 * yield the CPU if the lock is still unavailable.
		 */
		if (!MACHINE_IS_LPAR)
			smp_yield_cpu(~owner);
	}
}
EXPORT_SYMBOL(arch_spin_lock_wait_flags);
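
/*
 * Retry the inline arch_spin_trylock_once() up to spin_retry times.
 * Returns 1 if the lock was taken, 0 otherwise.
 */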
int arch_spin_trylock_retry(arch_spinlock_t *lp)
{
	int count;

	for (count = spin_retry; count > 0; count--)
		if (arch_spin_trylock_once(lp))
			return 1;
	return 0;
}
EXPORT_SYMBOL(arch_spin_trylock_retry);

void _raw_read_lock_wait(arch_rwlock_t *rw)
{
	unsigned int owner, old;
	int count = spin_retry;

	owner = 0;
	while (1) {
		if (count-- <= 0) {
			if (owner && !smp_vcpu_scheduled(~owner))
				smp_yield_cpu(~owner);
			count = spin_retry;
		}
		old = ACCESS_ONCE(rw->lock);
		owner = ACCESS_ONCE(rw->owner);
		if ((int) old < 0)
			continue;
		if (_raw_compare_and_swap(&rw->lock, old, old + 1))
			return;
	}
}
EXPORT_SYMBOL(_raw_read_lock_wait);

int _raw_read_trylock_retry(arch_rwlock_t *rw)
{
	unsigned int old;
	int count = spin_retry;

	while (count-- > 0) {
		old = ACCESS_ONCE(rw->lock);
		if ((int) old < 0)
			continue;
		if (_raw_compare_and_swap(&rw->lock, old, old + 1))
			return 1;
	}
	return 0;
}
EXPORT_SYMBOL(_raw_read_trylock_retry);
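
/*
 * Write-lock slow path for the rwlock. Bit 31 (0x80000000) of rw->lock
 * is the write-lock bit, the lower 31 bits count the readers. The loop
 * keeps trying to set the write bit; prev records the value replaced by
 * the last successful compare-and-swap, so (int) prev >= 0 means the
 * write bit is now owned by this CPU. The lock is held once the last
 * observed value shows no remaining readers. rw->owner (maintained
 * outside this file, presumably by the inline locking code) is read so
 * that ~owner can be used for a directed yield to a preempted writer.
 */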
void _raw_write_lock_wait(arch_rwlock_t *rw)
{
	unsigned int owner, old, prev;
	int count = spin_retry;

	prev = 0x80000000;
	owner = 0;
	while (1) {
		if (count-- <= 0) {
			if (owner && !smp_vcpu_scheduled(~owner))
				smp_yield_cpu(~owner);
			count = spin_retry;
		}
		old = ACCESS_ONCE(rw->lock);
		owner = ACCESS_ONCE(rw->owner);
		if ((int) old >= 0 &&
		    _raw_compare_and_swap(&rw->lock, old, old | 0x80000000))
			prev = old;
		else
			smp_rmb();
		if ((old & 0x7fffffff) == 0 && (int) prev >= 0)
			break;
	}
}
EXPORT_SYMBOL(_raw_write_lock_wait);

int _raw_write_trylock_retry(arch_rwlock_t *rw)
{
	unsigned int old;
	int count = spin_retry;

	while (count-- > 0) {
		old = ACCESS_ONCE(rw->lock);
		if (old)
			continue;
		if (_raw_compare_and_swap(&rw->lock, 0, 0x80000000))
			return 1;
	}
	return 0;
}
EXPORT_SYMBOL(_raw_write_trylock_retry);

void arch_lock_relax(unsigned int cpu)
{
	if (!cpu)
		return;
	if (MACHINE_IS_LPAR && smp_vcpu_scheduled(~cpu))
		return;
	smp_yield_cpu(~cpu);
}
EXPORT_SYMBOL(arch_lock_relax);