xref: /openbmc/linux/arch/parisc/include/asm/futex.h (revision 5d0e4d78)
#ifndef _ASM_PARISC_FUTEX_H
#define _ASM_PARISC_FUTEX_H

#ifdef __KERNEL__

#include <linux/futex.h>
#include <linux/uaccess.h>
#include <asm/atomic.h>
#include <asm/errno.h>

/*
 * The following has to match the LWS code in syscall.S.  We have
 * sixteen four-word locks.
 */

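/*
 * The lock index is taken from bits 4-7 of the userspace address:
 * (uaddr & 0xf0) >> 2 yields 0, 4, 8, ..., 60, the first word of each
 * of the sixteen four-word (16-byte) locks in lws_lock_start.
 */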
static inline void
_futex_spin_lock_irqsave(u32 __user *uaddr, unsigned long *flags)
{
	extern u32 lws_lock_start[];
	long index = ((long)uaddr & 0xf0) >> 2;
	arch_spinlock_t *s = (arch_spinlock_t *)&lws_lock_start[index];

	local_irq_save(*flags);
	arch_spin_lock(s);
}

static inline void
_futex_spin_unlock_irqrestore(u32 __user *uaddr, unsigned long *flags)
{
	extern u32 lws_lock_start[];
	long index = ((long)uaddr & 0xf0) >> 2;
	arch_spinlock_t *s = (arch_spinlock_t *)&lws_lock_start[index];

	arch_spin_unlock(s);
	local_irq_restore(*flags);
}

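/*
 * encoded_op is built by FUTEX_OP() in <linux/futex.h>: bits 28-31
 * hold op, bits 24-27 hold cmp, bits 12-23 hold oparg and bits 0-11
 * hold cmparg; the two 12-bit arguments are sign-extended by the
 * shift pairs below.
 */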
static inline int
futex_atomic_op_inuser(int encoded_op, u32 __user *uaddr)
{
	unsigned long flags;
	int op = (encoded_op >> 28) & 7;
	int cmp = (encoded_op >> 24) & 15;
	int oparg = (encoded_op << 8) >> 20;
	int cmparg = (encoded_op << 20) >> 20;
	int oldval, ret;
	u32 tmp;

	if (encoded_op & (FUTEX_OP_OPARG_SHIFT << 28))
		oparg = 1 << oparg;

	if (!access_ok(VERIFY_WRITE, uaddr, sizeof(*uaddr)))
		return -EFAULT;

	_futex_spin_lock_irqsave(uaddr, &flags);
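	/*
	 * IRQs are off and we hold the LWS lock: disabling page faults
	 * makes a faulting get_user()/put_user() return nonzero instead
	 * of sleeping in the fault handler.
	 */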
	pagefault_disable();

	ret = -EFAULT;
	if (unlikely(get_user(oldval, uaddr) != 0))
		goto out_pagefault_enable;

	ret = 0;
	tmp = oldval;

	switch (op) {
	case FUTEX_OP_SET:
		tmp = oparg;
		break;
	case FUTEX_OP_ADD:
		tmp += oparg;
		break;
	case FUTEX_OP_OR:
		tmp |= oparg;
		break;
	case FUTEX_OP_ANDN:
		tmp &= ~oparg;
		break;
	case FUTEX_OP_XOR:
		tmp ^= oparg;
		break;
	default:
		ret = -ENOSYS;
	}

	if (ret == 0 && unlikely(put_user(tmp, uaddr) != 0))
		ret = -EFAULT;

out_pagefault_enable:
	pagefault_enable();
	_futex_spin_unlock_irqrestore(uaddr, &flags);

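	/* The op succeeded; now evaluate cmp against the pre-op value. */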
	if (ret == 0) {
		switch (cmp) {
		case FUTEX_OP_CMP_EQ: ret = (oldval == cmparg); break;
		case FUTEX_OP_CMP_NE: ret = (oldval != cmparg); break;
		case FUTEX_OP_CMP_LT: ret = (oldval < cmparg); break;
		case FUTEX_OP_CMP_GE: ret = (oldval >= cmparg); break;
		case FUTEX_OP_CMP_LE: ret = (oldval <= cmparg); break;
		case FUTEX_OP_CMP_GT: ret = (oldval > cmparg); break;
		default: ret = -ENOSYS;
		}
	}
	return ret;
}
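
/*
 * Illustrative example (not part of this header): a FUTEX_WAKE_OP
 * request built as FUTEX_OP(FUTEX_OP_ADD, 1, FUTEX_OP_CMP_GT, 0)
 * reaches this function, atomically adds 1 to *uaddr and returns
 * whether the old value was greater than zero.
 */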

static inline int
futex_atomic_cmpxchg_inatomic(u32 *uval, u32 __user *uaddr,
			      u32 oldval, u32 newval)
{
	u32 val;
	unsigned long flags;

	/*
	 * futex.c wants to do a cmpxchg_inatomic on kernel NULL, which is
	 * our gateway page, and causes no end of trouble...
	 */
	if (uaccess_kernel() && !uaddr)
		return -EFAULT;

	if (!access_ok(VERIFY_WRITE, uaddr, sizeof(u32)))
		return -EFAULT;

	/*
	 * HPPA has no cmpxchg in hardware and therefore the best we can
	 * do here is use an array of locks.  The lock selected is based
	 * on a hash of the userspace address.  This should scale to a
	 * couple of CPUs.
	 */

	_futex_spin_lock_irqsave(uaddr, &flags);
	if (unlikely(get_user(val, uaddr) != 0)) {
		_futex_spin_unlock_irqrestore(uaddr, &flags);
		return -EFAULT;
	}

	if (val == oldval && unlikely(put_user(newval, uaddr) != 0)) {
		_futex_spin_unlock_irqrestore(uaddr, &flags);
		return -EFAULT;
	}

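	/* Either the store succeeded or val != oldval; report what we read. */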
	*uval = val;
	_futex_spin_unlock_irqrestore(uaddr, &flags);

	return 0;
}
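
/*
 * Illustrative example: the generic futex code calls this as, e.g.,
 * futex_atomic_cmpxchg_inatomic(&curval, uaddr, expected, new) and then
 * compares curval against expected to detect a lost race; on parisc the
 * atomicity comes from the hashed LWS spinlock, not from hardware.
 */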

#endif /*__KERNEL__*/
#endif /*_ASM_PARISC_FUTEX_H*/