xref: /openbmc/linux/fs/orangefs/waitqueue.c (revision 0edbfea5)
/*
 * (C) 2001 Clemson University and The University of Chicago
 * (C) 2011 Omnibond Systems
 *
 * Changes by Acxiom Corporation to implement generic service_operation()
 * function, Copyright Acxiom Corporation, 2005.
 *
 * See COPYING in top-level directory.
 */

/*
 *  In-kernel waitqueue operations.
 */

#include "protocol.h"
#include "orangefs-kernel.h"
#include "orangefs-bufmap.h"

static int wait_for_matching_downcall(struct orangefs_kernel_op_s *, long, bool);
static void orangefs_clean_up_interrupted_operation(struct orangefs_kernel_op_s *);

/*
 * Walk the list of operations present in the request queue and mark
 * them as purged. Waiters notice the purged state in
 * wait_for_matching_downcall() and either retry or give up.
 * NOTE: This is called from the device close after client-core has
 * guaranteed that no new operations could appear on the list, since
 * the client-core is going to exit anyway.
 */
void purge_waiting_ops(void)
{
	struct orangefs_kernel_op_s *op;

	spin_lock(&orangefs_request_list_lock);
	list_for_each_entry(op, &orangefs_request_list, list) {
		gossip_debug(GOSSIP_WAIT_DEBUG,
			     "pvfs2-client-core: purging op tag %llu %s\n",
			     llu(op->tag),
			     get_opname_string(op));
		set_op_state_purged(op);
		gossip_debug(GOSSIP_DEV_DEBUG,
			     "%s: op:%s: op_state:%d: process:%s:\n",
			     __func__,
			     get_opname_string(op),
			     op->op_state,
			     current->comm);
	}
	spin_unlock(&orangefs_request_list_lock);
}

/*
 * Submits an ORANGEFS operation and waits for it to complete.
 *
 * Note op->downcall.status will contain the status of the operation (in
 * errno format), whether provided by pvfs2-client or as a result of a
 * failure to service the operation.  If the caller wishes to distinguish,
 * then op->op_state can be checked to see if it was serviced or not.
 *
 * Returns the contents of op->downcall.status for convenience.
 */
int service_operation(struct orangefs_kernel_op_s *op,
		      const char *op_name,
		      int flags)
{
	long timeout = MAX_SCHEDULE_TIMEOUT;
	int ret = 0;

	DEFINE_WAIT(wait_entry);

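	/*
	 * Record the identity of the submitting process in the upcall;
	 * client-core sees these values when it reads the request.
	 */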
	op->upcall.tgid = current->tgid;
	op->upcall.pid = current->pid;

retry_servicing:
	op->downcall.status = 0;
	gossip_debug(GOSSIP_WAIT_DEBUG,
		     "%s: %s op:%p: process:%s: pid:%d:\n",
		     __func__,
		     op_name,
		     op,
		     current->comm,
		     current->pid);

	/*
	 * If ORANGEFS_OP_NO_MUTEX was set in flags, we need to avoid
	 * acquiring the request_mutex because we're servicing a
	 * high-priority remount operation and the request_mutex is
	 * already taken.
	 */
	if (!(flags & ORANGEFS_OP_NO_MUTEX)) {
		if (flags & ORANGEFS_OP_INTERRUPTIBLE)
			ret = mutex_lock_interruptible(&request_mutex);
		else
			ret = mutex_lock_killable(&request_mutex);
		/*
		 * check to see if we were interrupted while waiting for
		 * the mutex
		 */
		if (ret < 0) {
			op->downcall.status = ret;
			gossip_debug(GOSSIP_WAIT_DEBUG,
				     "%s: service_operation interrupted.\n",
				     __func__);
			return ret;
		}
	}

	/* queue up the operation */
	spin_lock(&orangefs_request_list_lock);
	spin_lock(&op->lock);
	set_op_state_waiting(op);
	gossip_debug(GOSSIP_DEV_DEBUG,
		     "%s: op:%s: op_state:%d: process:%s:\n",
		     __func__,
		     get_opname_string(op),
		     op->op_state,
		     current->comm);
	/* add a high-priority remount op to the front of the line. */
	if (flags & ORANGEFS_OP_PRIORITY)
		list_add(&op->list, &orangefs_request_list);
	else
		list_add_tail(&op->list, &orangefs_request_list);
	spin_unlock(&op->lock);
	wake_up_interruptible(&orangefs_request_list_waitq);
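	/*
	 * If the client-core is not currently running, do not wait
	 * indefinitely below; bound the wait so the op can time out
	 * if no daemon shows up to service it.
	 */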
	if (!__is_daemon_in_service()) {
		gossip_debug(GOSSIP_WAIT_DEBUG,
			     "%s:client core is NOT in service.\n",
			     __func__);
		timeout = op_timeout_secs * HZ;
	}
	spin_unlock(&orangefs_request_list_lock);

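	/*
	 * The op is queued; drop the request mutex (if we took it) so
	 * other submitters can proceed while we sleep.
	 */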
	if (!(flags & ORANGEFS_OP_NO_MUTEX))
		mutex_unlock(&request_mutex);

	ret = wait_for_matching_downcall(op, timeout,
					 flags & ORANGEFS_OP_INTERRUPTIBLE);

	gossip_debug(GOSSIP_WAIT_DEBUG,
		     "%s: wait_for_matching_downcall returned %d for %p\n",
		     __func__,
		     ret,
		     op);

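	/* wait_for_matching_downcall() returned with op->lock held. */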
	/* got matching downcall; make sure status is in errno format */
	if (!ret) {
		spin_unlock(&op->lock);
		op->downcall.status =
		    orangefs_normalize_to_errno(op->downcall.status);
		ret = op->downcall.status;
		goto out;
	}

	/* failed to get matching downcall */
	if (ret == -ETIMEDOUT) {
		gossip_err("%s: %s -- wait timed out; aborting attempt.\n",
			   __func__,
			   op_name);
	}

	/*
	 * remove a waiting op from the request list or
	 * remove an in-progress op from the in-progress hash table.
	 */
	orangefs_clean_up_interrupted_operation(op);
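	/* orangefs_clean_up_interrupted_operation() dropped op->lock. */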

	op->downcall.status = ret;
	/* retry if operation has not been serviced and if requested */
	if (ret == -EAGAIN) {
		op->attempts++;
		timeout = op_timeout_secs * HZ;
		gossip_debug(GOSSIP_WAIT_DEBUG,
			     "orangefs: tag %llu (%s) -- operation to be retried (attempt %d)\n",
			     llu(op->tag),
			     op_name,
			     op->attempts);

		/*
		 * I/O ops (ops that use the shared memory buffer) have
		 * to be returned to their caller for a retry. Other ops
		 * can just be recycled here.
		 */
		if (!op->uses_shared_memory)
			goto retry_servicing;
	}

out:
	gossip_debug(GOSSIP_WAIT_DEBUG,
		     "%s: %s returning: %d for %p.\n",
		     __func__,
		     op_name,
		     ret,
		     op);
	return ret;
}

/* This can get called on an I/O op if it had a bad service_operation. */
bool orangefs_cancel_op_in_progress(struct orangefs_kernel_op_s *op)
{
	u64 tag = op->tag;

	if (!op_state_in_progress(op))
		return false;

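	/*
	 * Recycle this op as a cancel upcall for the original tag, and
	 * remember the shared-memory slot so it can be released once
	 * the cancellation has been dealt with.
	 */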
	op->slot_to_free = op->upcall.req.io.buf_index;
	memset(&op->upcall, 0, sizeof(op->upcall));
	memset(&op->downcall, 0, sizeof(op->downcall));
	op->upcall.type = ORANGEFS_VFS_OP_CANCEL;
	op->upcall.req.cancel.op_tag = tag;
	op->downcall.type = ORANGEFS_VFS_OP_INVALID;
	op->downcall.status = -1;
	orangefs_new_tag(op);

	spin_lock(&orangefs_request_list_lock);
	/* orangefs_request_list_lock is enough of a barrier here */
	if (!__is_daemon_in_service()) {
		spin_unlock(&orangefs_request_list_lock);
		return false;
	}
	spin_lock(&op->lock);
	set_op_state_waiting(op);
	gossip_debug(GOSSIP_DEV_DEBUG,
		     "%s: op:%s: op_state:%d: process:%s:\n",
		     __func__,
		     get_opname_string(op),
		     op->op_state,
		     current->comm);
	list_add(&op->list, &orangefs_request_list);
	spin_unlock(&op->lock);
	spin_unlock(&orangefs_request_list_lock);
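	/*
	 * The cancel op is now queued for client-core; we do not wait
	 * for its downcall here.
	 */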

	gossip_debug(GOSSIP_WAIT_DEBUG,
		     "Attempting ORANGEFS operation cancellation of tag %llu\n",
		     llu(tag));
	return true;
}

/*
 * Change an op to the "given up" state and remove it from its list.
 */
static void
orangefs_clean_up_interrupted_operation(struct orangefs_kernel_op_s *op)
{
	/*
	 * Handle interrupted cases depending on what state we were in when
	 * the interruption was detected.
	 *
	 * Called with op->lock held.
	 */

	/*
	 * List manipulation code elsewhere will ignore ops that
	 * have been given up on.
	 */
	op->op_state |= OP_VFS_STATE_GIVEN_UP;

	if (list_empty(&op->list)) {
		/*
		 * The op is on neither list: the daemon caught it in the
		 * middle of copying to/from user space, so wait for the
		 * daemon to finish with it.
		 */
		BUG_ON(op_state_serviced(op));
		spin_unlock(&op->lock);
		wait_for_completion(&op->waitq);
	} else if (op_state_waiting(op)) {
		/*
		 * upcall hasn't been read; remove op from upcall request
		 * list.
		 */
		spin_unlock(&op->lock);
		spin_lock(&orangefs_request_list_lock);
		list_del_init(&op->list);
		spin_unlock(&orangefs_request_list_lock);
		gossip_debug(GOSSIP_WAIT_DEBUG,
			     "Interrupted: Removed op %p from request_list\n",
			     op);
	} else if (op_state_in_progress(op)) {
		/* op must be removed from the in-progress htable */
		spin_unlock(&op->lock);
		spin_lock(&htable_ops_in_progress_lock);
		list_del_init(&op->list);
		spin_unlock(&htable_ops_in_progress_lock);
		gossip_debug(GOSSIP_WAIT_DEBUG,
			     "Interrupted: Removed op %p from htable_ops_in_progress\n",
			     op);
	} else {
		spin_unlock(&op->lock);
		gossip_err("interrupted operation is in a weird state 0x%x\n",
			   op->op_state);
	}
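	/* Reset the completion so the op can be waited on again if retried. */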
	reinit_completion(&op->waitq);
}

/*
 * Sleeps on the waitqueue waiting for a matching downcall.
 * If the client-core finishes servicing, then we are good to go;
 * else, if the client-core exits, we get woken up here and retry with a
 * timeout.
 *
 * When this call returns to the caller, the specified op will no
 * longer be in either the in_progress hash table or on the request list.
 *
 * Returns 0 on success and -errno on failure.
 * Errors are:
 * EAGAIN in case we want the caller to requeue and try again.
 * EINTR/EIO/ETIMEDOUT indicating we are done trying to service this
 * operation, either because the client-core seems to be exiting too
 * often or because we were interrupted.
 *
 * Returns with op->lock taken.
 */
static int wait_for_matching_downcall(struct orangefs_kernel_op_s *op,
				      long timeout,
				      bool interruptible)
{
	long n;

	/*
	 * There's a "schedule_timeout" inside of these wait
	 * primitives, during which the op is out of the hands of the
	 * user process that needs something done and is being
	 * manipulated by the client-core process.
	 */
	if (interruptible)
		n = wait_for_completion_interruptible_timeout(&op->waitq,
							      timeout);
	else
		n = wait_for_completion_killable_timeout(&op->waitq, timeout);

	spin_lock(&op->lock);

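	/* If the op was serviced while we waited, we are done. */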
	if (op_state_serviced(op))
		return 0;

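	/* A negative return means the wait was interrupted by a signal. */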
	if (unlikely(n < 0)) {
		gossip_debug(GOSSIP_WAIT_DEBUG,
			     "%s: operation interrupted, tag %llu, %p\n",
			     __func__,
			     llu(op->tag),
			     op);
		return -EINTR;
	}
	if (op_state_purged(op)) {
		gossip_debug(GOSSIP_WAIT_DEBUG,
			     "%s: operation purged, tag %llu, %p, %d\n",
			     __func__,
			     llu(op->tag),
			     op,
			     op->attempts);
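		/*
		 * The client-core went away; retry a bounded number of
		 * times before giving up with -EIO.
		 */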
		return (op->attempts < ORANGEFS_PURGE_RETRY_COUNT) ?
			 -EAGAIN :
			 -EIO;
	}
	/* must have timed out, then... */
	gossip_debug(GOSSIP_WAIT_DEBUG,
		     "%s: operation timed out, tag %llu, %p, %d\n",
		     __func__,
		     llu(op->tag),
		     op,
		     op->attempts);
	return -ETIMEDOUT;
}
358