/*
 * Spinlock support for the Hexagon architecture
 *
 * Copyright (c) 2010-2011, The Linux Foundation. All rights reserved.
 *
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 and
 * only version 2 as published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA
 * 02110-1301, USA.
 */

#ifndef _ASM_SPINLOCK_H
#define _ASM_SPINLOCK_H

#include <asm/irqflags.h>
#include <asm/barrier.h>
#include <asm/processor.h>
#include <asm/spinlock_types.h>	/* arch_spinlock_t, arch_rwlock_t */

/*
 * This file is pulled in for SMP builds.
 * Really need to check all the barrier stuff for "true" SMP.
 */

/*
 * Read locks:
 * - load the lock value with a locked (LL/SC) load
 * - if the value is negative, a writer holds the lock: go back and try again
 * - otherwise increment it and attempt the conditional store
 * - if the conditional store fails (the lock was touched in the meantime),
 *   go back and try again
 * - a successful store of the incremented (positive) value -> lock acquired
 */
static inline void arch_read_lock(arch_rwlock_t *lock)
{
	__asm__ __volatile__(
		"1:	R6 = memw_locked(%0);\n"
		"	{ P3 = cmp.ge(R6,#0); R6 = add(R6,#1);}\n"
		"	{ if !P3 jump 1b; }\n"
		"	memw_locked(%0,P3) = R6;\n"
		"	{ if !P3 jump 1b; }\n"
		:
		: "r" (&lock->lock)
		: "memory", "r6", "p3"
	);
}

static inline void arch_read_unlock(arch_rwlock_t *lock)
{
	__asm__ __volatile__(
		"1:	R6 = memw_locked(%0);\n"
		"	R6 = add(R6,#-1);\n"
		"	memw_locked(%0,P3) = R6;\n"
		"	if !P3 jump 1b;\n"
		:
		: "r" (&lock->lock)
		: "memory", "r6", "p3"
	);
}

/* Returns 0 if the read lock could not be taken, 1 if it was acquired. */
static inline int arch_read_trylock(arch_rwlock_t *lock)
{
	int temp;

	__asm__ __volatile__(
		"	R6 = memw_locked(%1);\n"
		"	{ %0 = #0; P3 = cmp.ge(R6,#0); R6 = add(R6,#1);}\n"
		"	{ if !P3 jump 1f; }\n"
		"	memw_locked(%1,P3) = R6;\n"
		"	{ %0 = P3 }\n"
		"1:\n"
		: "=&r" (temp)
		: "r" (&lock->lock)
		: "memory", "r6", "p3"
	);
	return temp;
}

/*
 * Write lock: wait until the lock value reads as 0 (no readers, no writer),
 * then store -1 to mark the lock write-held.
 */
static inline void arch_write_lock(arch_rwlock_t *lock)
{
	__asm__ __volatile__(
		"1:	R6 = memw_locked(%0);\n"
		"	{ P3 = cmp.eq(R6,#0); R6 = #-1;}\n"
		"	{ if !P3 jump 1b; }\n"
		"	memw_locked(%0,P3) = R6;\n"
		"	{ if !P3 jump 1b; }\n"
		:
		: "r" (&lock->lock)
		: "memory", "r6", "p3"
	);
}

/* Returns 0 if the write lock could not be taken, 1 if it was acquired. */
static inline int arch_write_trylock(arch_rwlock_t *lock)
{
	int temp;

	__asm__ __volatile__(
		"	R6 = memw_locked(%1);\n"
		"	{ %0 = #0; P3 = cmp.eq(R6,#0); R6 = #-1;}\n"
		"	{ if !P3 jump 1f; }\n"
		"	memw_locked(%1,P3) = R6;\n"
		"	%0 = P3;\n"
		"1:\n"
		: "=&r" (temp)
		: "r" (&lock->lock)
		: "memory", "r6", "p3"
	);
	return temp;
}

static inline void arch_write_unlock(arch_rwlock_t *lock)
{
	smp_mb();
	lock->lock = 0;
}
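
/*
 * Illustrative sketch only: kernel code does not call these arch_* hooks
 * directly, it reaches them through the generic rwlock wrappers.  The lock
 * name below is made up for the example.
 *
 *	static DEFINE_RWLOCK(example_lock);
 *
 *	read_lock(&example_lock);	-> ends up in arch_read_lock()
 *	... read-side access ...
 *	read_unlock(&example_lock);	-> ends up in arch_read_unlock()
 *
 *	write_lock(&example_lock);	-> ends up in arch_write_lock()
 *	... exclusive update ...
 *	write_unlock(&example_lock);	-> ends up in arch_write_unlock()
 */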

static inline void arch_spin_lock(arch_spinlock_t *lock)
{
	__asm__ __volatile__(
		"1:	R6 = memw_locked(%0);\n"
		"	P3 = cmp.eq(R6,#0);\n"
		"	{ if !P3 jump 1b; R6 = #1; }\n"
		"	memw_locked(%0,P3) = R6;\n"
		"	{ if !P3 jump 1b; }\n"
		:
		: "r" (&lock->lock)
		: "memory", "r6", "p3"
	);
}

static inline void arch_spin_unlock(arch_spinlock_t *lock)
{
	smp_mb();
	lock->lock = 0;
}

/* Returns 0 if the lock could not be taken, 1 if it was acquired. */
static inline unsigned int arch_spin_trylock(arch_spinlock_t *lock)
{
	int temp;

	__asm__ __volatile__(
		"	R6 = memw_locked(%1);\n"
		"	P3 = cmp.eq(R6,#0);\n"
		"	{ if !P3 jump 1f; R6 = #1; %0 = #0; }\n"
		"	memw_locked(%1,P3) = R6;\n"
		"	%0 = P3;\n"
		"1:\n"
		: "=&r" (temp)
		: "r" (&lock->lock)
		: "memory", "r6", "p3"
	);
	return temp;
}

/*
 * SMP spinlocks are intended to allow only a single CPU at the lock.
 */
#define arch_spin_is_locked(x) ((x)->lock != 0)

#endif	/* _ASM_SPINLOCK_H */