#ifndef _ASM_PARISC_FUTEX_H
#define _ASM_PARISC_FUTEX_H

#ifdef __KERNEL__

#include <linux/futex.h>
#include <linux/uaccess.h>
#include <asm/atomic.h>
#include <asm/errno.h>

/* The following has to match the LWS code in syscall.S.  We have
   sixteen four-word locks. */

static inline void
_futex_spin_lock_irqsave(u32 __user *uaddr, unsigned long int *flags)
{
	extern u32 lws_lock_start[];
	/* Hash the user address (bits 4-7) to pick one of the sixteen locks. */
	long index = ((long)uaddr & 0xf0) >> 2;
	arch_spinlock_t *s = (arch_spinlock_t *)&lws_lock_start[index];
	local_irq_save(*flags);
	arch_spin_lock(s);
}

static inline void
_futex_spin_unlock_irqrestore(u32 __user *uaddr, unsigned long int *flags)
{
	extern u32 lws_lock_start[];
	/* Must hash to the same lock that _futex_spin_lock_irqsave() took. */
	long index = ((long)uaddr & 0xf0) >> 2;
	arch_spinlock_t *s = (arch_spinlock_t *)&lws_lock_start[index];
	arch_spin_unlock(s);
	local_irq_restore(*flags);
}

/* Perform the futex operation @op with @oparg on the user word at @uaddr,
 * serialized by the per-address LWS lock.  On success the previous value
 * is returned in *oval. */
static inline int
arch_futex_atomic_op_inuser(int op, int oparg, int *oval, u32 __user *uaddr)
{
	unsigned long int flags;
	int oldval, ret;
	u32 tmp;

	_futex_spin_lock_irqsave(uaddr, &flags);
	pagefault_disable();

	ret = -EFAULT;
	if (unlikely(get_user(oldval, uaddr) != 0))
		goto out_pagefault_enable;

	ret = 0;
	tmp = oldval;

	switch (op) {
	case FUTEX_OP_SET:
		tmp = oparg;
		break;
	case FUTEX_OP_ADD:
		tmp += oparg;
		break;
	case FUTEX_OP_OR:
		tmp |= oparg;
		break;
	case FUTEX_OP_ANDN:
		tmp &= ~oparg;
		break;
	case FUTEX_OP_XOR:
		tmp ^= oparg;
		break;
	default:
		ret = -ENOSYS;
	}

	if (ret == 0 && unlikely(put_user(tmp, uaddr) != 0))
		ret = -EFAULT;

out_pagefault_enable:
	pagefault_enable();
	_futex_spin_unlock_irqrestore(uaddr, &flags);

	if (!ret)
		*oval = oldval;

	return ret;
}

static inline int
futex_atomic_cmpxchg_inatomic(u32 *uval, u32 __user *uaddr,
			      u32 oldval, u32 newval)
{
	u32 val;
	unsigned long flags;

	/* futex.c wants to do a cmpxchg_inatomic on kernel NULL, which is
	 * our gateway page, and causes no end of trouble...
	 */
	if (uaccess_kernel() && !uaddr)
		return -EFAULT;

	if (!access_ok(VERIFY_WRITE, uaddr, sizeof(u32)))
		return -EFAULT;

	/* HPPA has no cmpxchg in hardware and therefore the
	 * best we can do here is use an array of locks. The
	 * lock selected is based on a hash of the userspace
	 * address. This should scale to a couple of CPUs.
	 */

	_futex_spin_lock_irqsave(uaddr, &flags);
	if (unlikely(get_user(val, uaddr) != 0)) {
		_futex_spin_unlock_irqrestore(uaddr, &flags);
		return -EFAULT;
	}

	/* Only write the new value if the current value matches @oldval;
	 * either way, report the value we found back through *uval. */
	if (val == oldval && unlikely(put_user(newval, uaddr) != 0)) {
		_futex_spin_unlock_irqrestore(uaddr, &flags);
		return -EFAULT;
	}

	*uval = val;
	_futex_spin_unlock_irqrestore(uaddr, &flags);

	return 0;
}

#endif /*__KERNEL__*/
#endif /*_ASM_PARISC_FUTEX_H*/