/*
 * Copyright (C) 2012 ARM Ltd.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program. If not, see <http://www.gnu.org/licenses/>.
 */
#ifndef __ASM_SPINLOCK_H
#define __ASM_SPINLOCK_H

#include <asm/lse.h>
#include <asm/spinlock_types.h>
#include <asm/processor.h>

/*
 * Spinlock implementation.
 *
 * The memory barriers are implicit with the load-acquire and store-release
 * instructions.
 *
 * This is a ticket lock: the lock word holds two half-word fields, "owner"
 * and "next" (TICKET_SHIFT is presumably 16 -- see the "ror #16"/"lsr #16"
 * comparisons below; confirm against spinlock_types.h).  A locker atomically
 * takes a ticket from "next" and waits until "owner" advances to match it.
 *
 * Each asm body provides two alternative sequences via
 * ARM64_LSE_ATOMIC_INSN(): a load/store-exclusive (LL/SC) sequence and an
 * ARMv8.1 LSE atomics sequence, with __nops() padding so both alternatives
 * occupy the same number of instructions.
 */

/*
 * arch_spin_lock - acquire @lock, spinning until it is ours.
 *
 * Atomically increments the "next" ticket while observing the old lock
 * value.  If "owner" already matched our ticket we hold the lock;
 * otherwise we spin in wfe on half-word load-acquires of lock->owner
 * until our ticket comes up.  The sevl guarantees the first wfe does not
 * sleep through an unlock that happened before the exclusive load.
 */
static inline void arch_spin_lock(arch_spinlock_t *lock)
{
	unsigned int tmp;
	arch_spinlock_t lockval, newval;

	asm volatile(
	/* Atomically increment the next ticket. */
	ARM64_LSE_ATOMIC_INSN(
	/* LL/SC */
"	prfm	pstl1strm, %3\n"
"1:	ldaxr	%w0, %3\n"
"	add	%w1, %w0, %w5\n"
"	stxr	%w2, %w1, %3\n"
"	cbnz	%w2, 1b\n",
	/* LSE atomics */
"	mov	%w2, %w5\n"
"	ldadda	%w2, %w0, %3\n"
	__nops(3)
	)

	/* Did we get the lock? (owner half == next half of our snapshot) */
"	eor	%w1, %w0, %w0, ror #16\n"
"	cbz	%w1, 3f\n"
	/*
	 * No: spin on the owner. Send a local event to avoid missing an
	 * unlock before the exclusive load.
	 */
"	sevl\n"
"2:	wfe\n"
"	ldaxrh	%w2, %4\n"
"	eor	%w1, %w2, %w0, lsr #16\n"
"	cbnz	%w1, 2b\n"
	/* We got the lock. Critical section starts here. */
"3:"
	: "=&r" (lockval), "=&r" (newval), "=&r" (tmp), "+Q" (*lock)
	: "Q" (lock->owner), "I" (1 << TICKET_SHIFT)
	: "memory");
}

/*
 * arch_spin_trylock - attempt to acquire @lock without spinning.
 *
 * Succeeds only if the lock is currently free (owner == next), in which
 * case the "next" ticket is bumped and we own the lock.  Acquire
 * semantics come from ldaxr (LL/SC) or casa (LSE).
 *
 * Returns non-zero on success, 0 if the lock was held.
 */
static inline int arch_spin_trylock(arch_spinlock_t *lock)
{
	unsigned int tmp;
	arch_spinlock_t lockval;

	asm volatile(ARM64_LSE_ATOMIC_INSN(
	/* LL/SC */
"	prfm	pstl1strm, %2\n"
"1:	ldaxr	%w0, %2\n"
"	eor	%w1, %w0, %w0, ror #16\n"
"	cbnz	%w1, 2f\n"
"	add	%w0, %w0, %3\n"
"	stxr	%w1, %w0, %2\n"
"	cbnz	%w1, 1b\n"
"2:",
	/* LSE atomics */
"	ldr	%w0, %2\n"
"	eor	%w1, %w0, %w0, ror #16\n"
"	cbnz	%w1, 1f\n"
"	add	%w1, %w0, %3\n"
"	casa	%w0, %w1, %2\n"
"	and	%w1, %w1, #0xffff\n"
"	eor	%w1, %w1, %w0, lsr #16\n"
"1:")
	: "=&r" (lockval), "=&r" (tmp), "+Q" (*lock)
	: "I" (1 << TICKET_SHIFT)
	: "memory");

	/* tmp is zero iff the acquisition succeeded on either path. */
	return !tmp;
}

/*
 * arch_spin_unlock - release @lock.
 *
 * Hands the lock to the next waiter by incrementing the owner half-word
 * with release semantics (stlrh / staddlh), which also publishes the
 * critical section's stores and generates the event the waiters' wfe
 * loops are sleeping on.
 */
static inline void arch_spin_unlock(arch_spinlock_t *lock)
{
	unsigned long tmp;

	asm volatile(ARM64_LSE_ATOMIC_INSN(
	/* LL/SC */
	"	ldrh	%w1, %0\n"
	"	add	%w1, %w1, #1\n"
	"	stlrh	%w1, %0",
	/* LSE atomics */
	"	mov	%w1, #1\n"
	"	staddlh	%w1, %0\n"
	__nops(1))
	: "=Q" (lock->owner), "=&r" (tmp)
	:
	: "memory");
}

/* A ticket lock is free exactly when there are no outstanding tickets. */
static inline int arch_spin_value_unlocked(arch_spinlock_t lock)
{
	return lock.owner == lock.next;
}

static inline int arch_spin_is_locked(arch_spinlock_t *lock)
{
	/*
	 * Ensure prior spin_lock operations to other locks have completed
	 * on this CPU before we test whether "lock" is locked.
	 */
	smp_mb(); /* ^^^ */
	return !arch_spin_value_unlocked(READ_ONCE(*lock));
}

/*
 * Contended means more than one ticket is outstanding: at least one CPU
 * is queued behind the current owner.
 */
static inline int arch_spin_is_contended(arch_spinlock_t *lock)
{
	arch_spinlock_t lockval = READ_ONCE(*lock);
	return (lockval.next - lockval.owner) > 1;
}
#define arch_spin_is_contended	arch_spin_is_contended

#include <asm/qrwlock.h>

/* See include/linux/spinlock.h */
#define smp_mb__after_spinlock()	smp_mb()

#endif	/* __ASM_SPINLOCK_H */