// SPDX-License-Identifier: GPL-2.0-or-later
/* AFS file locking support
 *
 * Copyright (C) 2007 Red Hat, Inc. All Rights Reserved.
 * Written by David Howells (dhowells@redhat.com)
 */

#include "internal.h"

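/* Lock-waiter states kept in fl->fl_u.afs.state.  A negative value indicates
 * an error (an errno); the values below indicate that the lock has been
 * granted, is still pending, or that the waiter has been invited to make its
 * own attempt to get the lock from the server.
 */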
#define AFS_LOCK_GRANTED	0
#define AFS_LOCK_PENDING	1
#define AFS_LOCK_YOUR_TRY	2

struct workqueue_struct *afs_lock_manager;

static void afs_next_locker(struct afs_vnode *vnode, int error);
static void afs_fl_copy_lock(struct file_lock *new, struct file_lock *fl);
static void afs_fl_release_private(struct file_lock *fl);

static const struct file_lock_operations afs_lock_ops = {
	.fl_copy_lock		= afs_fl_copy_lock,
	.fl_release_private	= afs_fl_release_private,
};

static inline void afs_set_lock_state(struct afs_vnode *vnode, enum afs_lock_state state)
{
	_debug("STATE %u -> %u", vnode->lock_state, state);
	vnode->lock_state = state;
}

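/* Debug ID counter used to tag each file_lock for tracing. */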
static atomic_t afs_file_lock_debug_id;

/*
 * if the callback is broken on this vnode, then the lock may now be available
 */
void afs_lock_may_be_available(struct afs_vnode *vnode)
{
	_enter("{%llx:%llu}", vnode->fid.vid, vnode->fid.vnode);

	spin_lock(&vnode->lock);
	if (vnode->lock_state == AFS_VNODE_LOCK_WAITING_FOR_CB)
		afs_next_locker(vnode, 0);
	trace_afs_flock_ev(vnode, NULL, afs_flock_callback_break, 0);
	spin_unlock(&vnode->lock);
}

/*
 * the lock will time out in 5 minutes unless we extend it, so schedule the
 * extension for roughly the halfway point of that period
 */
static void afs_schedule_lock_extension(struct afs_vnode *vnode)
{
	ktime_t expires_at, now, duration;
	u64 duration_j;

	expires_at = ktime_add_ms(vnode->locked_at, AFS_LOCKWAIT * 1000 / 2);
	now = ktime_get_real();
	duration = ktime_sub(expires_at, now);
	if (duration <= 0)
		duration_j = 0;
	else
		duration_j = nsecs_to_jiffies(ktime_to_ns(duration));

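	/* A zero delay means we're already at or past that point, so run the
	 * extension work immediately.
	 */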
	queue_delayed_work(afs_lock_manager, &vnode->lock_work, duration_j);
}

/*
 * In the case of successful completion of a lock operation, record the time
 * the reply appeared and start the lock extension timer.
 */
void afs_lock_op_done(struct afs_call *call)
{
	struct afs_operation *op = call->op;
	struct afs_vnode *vnode = op->file[0].vnode;

	if (call->error == 0) {
		spin_lock(&vnode->lock);
		trace_afs_flock_ev(vnode, NULL, afs_flock_timestamp, 0);
		vnode->locked_at = call->reply_time;
		afs_schedule_lock_extension(vnode);
		spin_unlock(&vnode->lock);
	}
}

/*
 * grant one or more locks (readlocks are allowed to jump the queue if the
 * first lock in the queue is itself a readlock)
 * - the caller must hold the vnode lock
 */
static void afs_grant_locks(struct afs_vnode *vnode)
{
	struct file_lock *p, *_p;
	bool exclusive = (vnode->lock_type == AFS_LOCK_WRITE);

	list_for_each_entry_safe(p, _p, &vnode->pending_locks, fl_u.afs.link) {
		if (!exclusive && p->fl_type == F_WRLCK)
			continue;

		list_move_tail(&p->fl_u.afs.link, &vnode->granted_locks);
		p->fl_u.afs.state = AFS_LOCK_GRANTED;
		trace_afs_flock_op(vnode, p, afs_flock_op_grant);
		wake_up(&p->fl_wait);
	}
}

/*
 * If an error is specified, reject every pending lock that matches the
 * authentication and type of the lock we failed to get.  If there are any
 * remaining lockers, try to wake up one of them to have a go.
 */
static void afs_next_locker(struct afs_vnode *vnode, int error)
{
	struct file_lock *p, *_p, *next = NULL;
	struct key *key = vnode->lock_key;
	unsigned int fl_type = F_RDLCK;

	_enter("");

	if (vnode->lock_type == AFS_LOCK_WRITE)
		fl_type = F_WRLCK;

	list_for_each_entry_safe(p, _p, &vnode->pending_locks, fl_u.afs.link) {
		if (error &&
		    p->fl_type == fl_type &&
		    afs_file_key(p->fl_file) == key) {
			list_del_init(&p->fl_u.afs.link);
			p->fl_u.afs.state = error;
			wake_up(&p->fl_wait);
		}

		/* Select the next locker to hand off to. */
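		/* A write-lock waiter is preferred over any read-lock waiters
		 * queued ahead of it; otherwise the first waiter is chosen.
		 */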
		if (next &&
		    (next->fl_type == F_WRLCK || p->fl_type == F_RDLCK))
			continue;
		next = p;
	}

	vnode->lock_key = NULL;
	key_put(key);

	if (next) {
		afs_set_lock_state(vnode, AFS_VNODE_LOCK_SETTING);
		next->fl_u.afs.state = AFS_LOCK_YOUR_TRY;
		trace_afs_flock_op(vnode, next, afs_flock_op_wake);
		wake_up(&next->fl_wait);
	} else {
		afs_set_lock_state(vnode, AFS_VNODE_LOCK_NONE);
		trace_afs_flock_ev(vnode, NULL, afs_flock_no_lockers, 0);
	}

	_leave("");
}

/*
 * Kill off all waiters in the pending lock queue due to the vnode being
 * deleted.
 */
static void afs_kill_lockers_enoent(struct afs_vnode *vnode)
{
	struct file_lock *p;

	afs_set_lock_state(vnode, AFS_VNODE_LOCK_DELETED);

	while (!list_empty(&vnode->pending_locks)) {
		p = list_entry(vnode->pending_locks.next,
			       struct file_lock, fl_u.afs.link);
		list_del_init(&p->fl_u.afs.link);
		p->fl_u.afs.state = -ENOENT;
		wake_up(&p->fl_wait);
	}

	key_put(vnode->lock_key);
	vnode->lock_key = NULL;
}

static void afs_lock_success(struct afs_operation *op)
{
	_enter("op=%08x", op->debug_id);
	afs_vnode_commit_status(op, &op->file[0]);
}

static const struct afs_operation_ops afs_set_lock_operation = {
	.issue_afs_rpc	= afs_fs_set_lock,
	.issue_yfs_rpc	= yfs_fs_set_lock,
	.success	= afs_lock_success,
	.aborted	= afs_check_for_remote_deletion,
};

/*
 * Get a lock on a file
 */
static int afs_set_lock(struct afs_vnode *vnode, struct key *key,
			afs_lock_type_t type)
{
	struct afs_operation *op;

	_enter("%s{%llx:%llu.%u},%x,%u",
	       vnode->volume->name,
	       vnode->fid.vid,
	       vnode->fid.vnode,
	       vnode->fid.unique,
	       key_serial(key), type);

	op = afs_alloc_operation(key, vnode->volume);
	if (IS_ERR(op))
		return PTR_ERR(op);

	afs_op_set_vnode(op, 0, vnode);

	op->lock.type	= type;
	op->ops		= &afs_set_lock_operation;
	return afs_do_sync_operation(op);
}

static const struct afs_operation_ops afs_extend_lock_operation = {
	.issue_afs_rpc	= afs_fs_extend_lock,
	.issue_yfs_rpc	= yfs_fs_extend_lock,
	.success	= afs_lock_success,
};

/*
 * Extend a lock on a file
 */
static int afs_extend_lock(struct afs_vnode *vnode, struct key *key)
{
	struct afs_operation *op;

	_enter("%s{%llx:%llu.%u},%x",
	       vnode->volume->name,
	       vnode->fid.vid,
	       vnode->fid.vnode,
	       vnode->fid.unique,
	       key_serial(key));

	op = afs_alloc_operation(key, vnode->volume);
	if (IS_ERR(op))
		return PTR_ERR(op);

	afs_op_set_vnode(op, 0, vnode);

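	/* The extension mustn't be interrupted by a signal - we already hold
	 * the lock on the server and need the extension to get through to
	 * keep it.
	 */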
	op->flags	|= AFS_OPERATION_UNINTR;
	op->ops		= &afs_extend_lock_operation;
	return afs_do_sync_operation(op);
}

static const struct afs_operation_ops afs_release_lock_operation = {
	.issue_afs_rpc	= afs_fs_release_lock,
	.issue_yfs_rpc	= yfs_fs_release_lock,
	.success	= afs_lock_success,
};

/*
 * Release a lock on a file
 */
static int afs_release_lock(struct afs_vnode *vnode, struct key *key)
{
	struct afs_operation *op;

	_enter("%s{%llx:%llu.%u},%x",
	       vnode->volume->name,
	       vnode->fid.vid,
	       vnode->fid.vnode,
	       vnode->fid.unique,
	       key_serial(key));

	op = afs_alloc_operation(key, vnode->volume);
	if (IS_ERR(op))
		return PTR_ERR(op);

	afs_op_set_vnode(op, 0, vnode);

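	/* The release mustn't be interrupted by a signal - and if it doesn't
	 * reach the server, the lock will expire there after five minutes
	 * anyway.
	 */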
	op->flags	|= AFS_OPERATION_UNINTR;
	op->ops		= &afs_release_lock_operation;
	return afs_do_sync_operation(op);
}

/*
 * do work for a lock, including:
 * - probing for a lock we're waiting on but didn't get immediately
 * - extending a lock that's close to timing out
 */
void afs_lock_work(struct work_struct *work)
{
	struct afs_vnode *vnode =
		container_of(work, struct afs_vnode, lock_work.work);
	struct key *key;
	int ret;

	_enter("{%llx:%llu}", vnode->fid.vid, vnode->fid.vnode);

	spin_lock(&vnode->lock);

again:
	_debug("wstate %u for %p", vnode->lock_state, vnode);
	switch (vnode->lock_state) {
	case AFS_VNODE_LOCK_NEED_UNLOCK:
		afs_set_lock_state(vnode, AFS_VNODE_LOCK_UNLOCKING);
		trace_afs_flock_ev(vnode, NULL, afs_flock_work_unlocking, 0);
		spin_unlock(&vnode->lock);

		/* attempt to release the server lock; if it fails, we just
		 * wait 5 minutes and it'll expire anyway */
		ret = afs_release_lock(vnode, vnode->lock_key);
		if (ret < 0 && vnode->lock_state != AFS_VNODE_LOCK_DELETED) {
			trace_afs_flock_ev(vnode, NULL, afs_flock_release_fail,
					   ret);
			printk(KERN_WARNING "AFS:"
			       " Failed to release lock on {%llx:%llx} error %d\n",
			       vnode->fid.vid, vnode->fid.vnode, ret);
		}

		spin_lock(&vnode->lock);
		if (ret == -ENOENT)
			afs_kill_lockers_enoent(vnode);
		else
			afs_next_locker(vnode, 0);
		spin_unlock(&vnode->lock);
		return;

	/* If we've already got a lock, then it must be time to extend that
	 * lock as AFS locks time out after 5 minutes.
	 */
	case AFS_VNODE_LOCK_GRANTED:
		_debug("extend");

		ASSERT(!list_empty(&vnode->granted_locks));

		key = key_get(vnode->lock_key);
		afs_set_lock_state(vnode, AFS_VNODE_LOCK_EXTENDING);
		trace_afs_flock_ev(vnode, NULL, afs_flock_work_extending, 0);
		spin_unlock(&vnode->lock);

		ret = afs_extend_lock(vnode, key); /* RPC */
		key_put(key);

		if (ret < 0) {
			trace_afs_flock_ev(vnode, NULL, afs_flock_extend_fail,
					   ret);
			pr_warn("AFS: Failed to extend lock on {%llx:%llx} error %d\n",
				vnode->fid.vid, vnode->fid.vnode, ret);
		}

		spin_lock(&vnode->lock);

		if (ret == -ENOENT) {
			afs_kill_lockers_enoent(vnode);
			spin_unlock(&vnode->lock);
			return;
		}

		if (vnode->lock_state != AFS_VNODE_LOCK_EXTENDING)
			goto again;
		afs_set_lock_state(vnode, AFS_VNODE_LOCK_GRANTED);

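		/* The extension failed, so retry in ten seconds' time. */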
		if (ret != 0)
			queue_delayed_work(afs_lock_manager, &vnode->lock_work,
					   HZ * 10);
		spin_unlock(&vnode->lock);
		_leave(" [ext]");
		return;

	/* If we're waiting for a callback to indicate lock release, we can't
	 * actually rely on this, so need to recheck at regular intervals.  The
	 * problem is that the server might not notify us if the lock just
	 * expires (say because a client died) rather than being explicitly
	 * released.
	 */
	case AFS_VNODE_LOCK_WAITING_FOR_CB:
		_debug("retry");
		afs_next_locker(vnode, 0);
		spin_unlock(&vnode->lock);
		return;

	case AFS_VNODE_LOCK_DELETED:
		afs_kill_lockers_enoent(vnode);
		spin_unlock(&vnode->lock);
		return;

	default:
		/* Looks like a lock request was withdrawn. */
		spin_unlock(&vnode->lock);
		_leave(" [no]");
		return;
	}
}

/*
 * pass responsibility for the unlocking of a vnode on the server to the
 * manager thread, lest a pending signal in the calling thread interrupt
 * AF_RXRPC
 * - the caller must hold the vnode lock
 */
static void afs_defer_unlock(struct afs_vnode *vnode)
{
	_enter("%u", vnode->lock_state);

	if (list_empty(&vnode->granted_locks) &&
	    (vnode->lock_state == AFS_VNODE_LOCK_GRANTED ||
	     vnode->lock_state == AFS_VNODE_LOCK_EXTENDING)) {
		cancel_delayed_work(&vnode->lock_work);

		afs_set_lock_state(vnode, AFS_VNODE_LOCK_NEED_UNLOCK);
		trace_afs_flock_ev(vnode, NULL, afs_flock_defer_unlock, 0);
		queue_delayed_work(afs_lock_manager, &vnode->lock_work, 0);
	}
}

/*
 * Check that our view of the file metadata is up to date and check to see
 * whether we think that we have a locking permit.
 */
static int afs_do_setlk_check(struct afs_vnode *vnode, struct key *key,
			      enum afs_flock_mode mode, afs_lock_type_t type)
{
	afs_access_t access;
	int ret;

	/* Make sure we've got a callback on this file and that our view of the
	 * data version is up to date.
	 */
	ret = afs_validate(vnode, key);
	if (ret < 0)
		return ret;

	/* Check the permission set to see if we're actually going to be
	 * allowed to get a lock on this file.
	 */
	ret = afs_check_permit(vnode, key, &access);
	if (ret < 0)
		return ret;

	/* At a rough estimation, you need LOCK, WRITE or INSERT perm to
	 * read-lock a file and WRITE or INSERT perm to write-lock a file.
	 *
	 * We can't rely on the server to do this for us since if we want to
	 * share a read lock that we already have, we won't go to the server.
	 */
	if (type == AFS_LOCK_READ) {
		if (!(access & (AFS_ACE_INSERT | AFS_ACE_WRITE | AFS_ACE_LOCK)))
			return -EACCES;
	} else {
		if (!(access & (AFS_ACE_INSERT | AFS_ACE_WRITE)))
			return -EACCES;
	}

	return 0;
}

/*
 * request a lock on a file on the server
 */
static int afs_do_setlk(struct file *file, struct file_lock *fl)
{
	struct inode *inode = locks_inode(file);
	struct afs_vnode *vnode = AFS_FS_I(inode);
	enum afs_flock_mode mode = AFS_FS_S(inode->i_sb)->flock_mode;
	afs_lock_type_t type;
	struct key *key = afs_file_key(file);
	bool partial, no_server_lock = false;
	int ret;

	if (mode == afs_flock_mode_unset)
		mode = afs_flock_mode_openafs;

	_enter("{%llx:%llu},%llu-%llu,%u,%u",
	       vnode->fid.vid, vnode->fid.vnode,
	       fl->fl_start, fl->fl_end, fl->fl_type, mode);

	fl->fl_ops = &afs_lock_ops;
	INIT_LIST_HEAD(&fl->fl_u.afs.link);
	fl->fl_u.afs.state = AFS_LOCK_PENDING;

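	/* The server only supports whole-file locks, so in "write" mode a
	 * partial-file lock of either type is backed by an exclusive
	 * whole-file lock on the server.
	 */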
	partial = (fl->fl_start != 0 || fl->fl_end != OFFSET_MAX);
	type = (fl->fl_type == F_RDLCK) ? AFS_LOCK_READ : AFS_LOCK_WRITE;
	if (mode == afs_flock_mode_write && partial)
		type = AFS_LOCK_WRITE;

	ret = afs_do_setlk_check(vnode, key, mode, type);
	if (ret < 0)
		return ret;

	trace_afs_flock_op(vnode, fl, afs_flock_op_set_lock);

	/* AFS3 protocol only supports full-file locks and doesn't provide any
	 * method of upgrade/downgrade, so we need to emulate for partial-file
	 * locks.
	 *
	 * The OpenAFS client only gets a server lock for a full-file lock and
	 * keeps partial-file locks local.  Allow this behaviour to be emulated
	 * (as the default).
	 */
	if (mode == afs_flock_mode_local ||
	    (partial && mode == afs_flock_mode_openafs)) {
		no_server_lock = true;
		goto skip_server_lock;
	}

	spin_lock(&vnode->lock);
	list_add_tail(&fl->fl_u.afs.link, &vnode->pending_locks);

	ret = -ENOENT;
	if (vnode->lock_state == AFS_VNODE_LOCK_DELETED)
		goto error_unlock;

	/* If we've already got a lock on the server then try to move to having
	 * the VFS grant the requested lock.  Note that this means that other
	 * clients may get starved out.
	 */
	_debug("try %u", vnode->lock_state);
	if (vnode->lock_state == AFS_VNODE_LOCK_GRANTED) {
		if (type == AFS_LOCK_READ) {
			_debug("instant readlock");
			list_move_tail(&fl->fl_u.afs.link, &vnode->granted_locks);
			fl->fl_u.afs.state = AFS_LOCK_GRANTED;
			goto vnode_is_locked_u;
		}

		if (vnode->lock_type == AFS_LOCK_WRITE) {
			_debug("instant writelock");
			list_move_tail(&fl->fl_u.afs.link, &vnode->granted_locks);
			fl->fl_u.afs.state = AFS_LOCK_GRANTED;
			goto vnode_is_locked_u;
		}
	}

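	/* For a non-blocking request when no lock is yet held on this client,
	 * check the lock count from the last fetched status first: -1 means
	 * the file is write-locked on the server and a positive value is the
	 * number of read locks held there.
	 */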
	if (vnode->lock_state == AFS_VNODE_LOCK_NONE &&
	    !(fl->fl_flags & FL_SLEEP)) {
		ret = -EAGAIN;
		if (type == AFS_LOCK_READ) {
			if (vnode->status.lock_count == -1)
				goto lock_is_contended; /* Write locked */
		} else {
			if (vnode->status.lock_count != 0)
				goto lock_is_contended; /* Locked */
		}
	}

	if (vnode->lock_state != AFS_VNODE_LOCK_NONE)
		goto need_to_wait;

try_to_lock:
	/* We don't have a lock on this vnode and we aren't currently waiting
	 * for one either, so ask the server for a lock.
	 *
	 * Note that we need to be careful if we get interrupted by a signal
	 * after dispatching the request as we may still get the lock, even
	 * though we don't wait for the reply (it's not too bad a problem - the
	 * lock will expire in 5 mins anyway).
	 */
	trace_afs_flock_ev(vnode, fl, afs_flock_try_to_lock, 0);
	vnode->lock_key = key_get(key);
	vnode->lock_type = type;
	afs_set_lock_state(vnode, AFS_VNODE_LOCK_SETTING);
	spin_unlock(&vnode->lock);

	ret = afs_set_lock(vnode, key, type); /* RPC */

	spin_lock(&vnode->lock);
	switch (ret) {
	case -EKEYREJECTED:
	case -EKEYEXPIRED:
	case -EKEYREVOKED:
	case -EPERM:
	case -EACCES:
		fl->fl_u.afs.state = ret;
		trace_afs_flock_ev(vnode, fl, afs_flock_fail_perm, ret);
		list_del_init(&fl->fl_u.afs.link);
		afs_next_locker(vnode, ret);
		goto error_unlock;

	case -ENOENT:
		fl->fl_u.afs.state = ret;
		trace_afs_flock_ev(vnode, fl, afs_flock_fail_other, ret);
		list_del_init(&fl->fl_u.afs.link);
		afs_kill_lockers_enoent(vnode);
		goto error_unlock;

	default:
		fl->fl_u.afs.state = ret;
		trace_afs_flock_ev(vnode, fl, afs_flock_fail_other, ret);
		list_del_init(&fl->fl_u.afs.link);
		afs_next_locker(vnode, 0);
		goto error_unlock;

	case -EWOULDBLOCK:
		/* The server doesn't have a lock-waiting queue, so the client
		 * will have to retry.  The server will break the outstanding
		 * callbacks on a file when a lock is released.
		 */
		ASSERT(list_empty(&vnode->granted_locks));
		ASSERTCMP(vnode->pending_locks.next, ==, &fl->fl_u.afs.link);
		goto lock_is_contended;

	case 0:
		afs_set_lock_state(vnode, AFS_VNODE_LOCK_GRANTED);
		trace_afs_flock_ev(vnode, fl, afs_flock_acquired, type);
		afs_grant_locks(vnode);
		goto vnode_is_locked_u;
	}

vnode_is_locked_u:
	spin_unlock(&vnode->lock);
vnode_is_locked:
	/* the lock has been granted by the server... */
	ASSERTCMP(fl->fl_u.afs.state, ==, AFS_LOCK_GRANTED);

skip_server_lock:
	/* ... but the VFS still needs to distribute access on this client. */
	trace_afs_flock_ev(vnode, fl, afs_flock_vfs_locking, 0);
	ret = locks_lock_file_wait(file, fl);
	trace_afs_flock_ev(vnode, fl, afs_flock_vfs_lock, ret);
	if (ret < 0)
		goto vfs_rejected_lock;

	/* Again, make sure we've got a callback on this file and, again, make
	 * sure that our view of the data version is up to date (we ignore
	 * errors incurred here and deal with the consequences elsewhere).
	 */
	afs_validate(vnode, key);
	_leave(" = 0");
	return 0;

lock_is_contended:
	if (!(fl->fl_flags & FL_SLEEP)) {
		list_del_init(&fl->fl_u.afs.link);
		afs_next_locker(vnode, 0);
		ret = -EAGAIN;
		goto error_unlock;
	}

	afs_set_lock_state(vnode, AFS_VNODE_LOCK_WAITING_FOR_CB);
	trace_afs_flock_ev(vnode, fl, afs_flock_would_block, ret);
	queue_delayed_work(afs_lock_manager, &vnode->lock_work, HZ * 5);

need_to_wait:
	/* We're going to have to wait.  Either this client doesn't have a lock
	 * on the server yet and we need to wait for a callback to occur, or
	 * the client does have a lock on the server, but it's shared and we
	 * need an exclusive lock.
	 */
	spin_unlock(&vnode->lock);

	trace_afs_flock_ev(vnode, fl, afs_flock_waiting, 0);
	ret = wait_event_interruptible(fl->fl_wait,
				       fl->fl_u.afs.state != AFS_LOCK_PENDING);
	trace_afs_flock_ev(vnode, fl, afs_flock_waited, ret);

	if (fl->fl_u.afs.state >= 0 && fl->fl_u.afs.state != AFS_LOCK_GRANTED) {
		spin_lock(&vnode->lock);

		switch (fl->fl_u.afs.state) {
		case AFS_LOCK_YOUR_TRY:
			fl->fl_u.afs.state = AFS_LOCK_PENDING;
			goto try_to_lock;
		case AFS_LOCK_PENDING:
			if (ret > 0) {
				/* We need to retry the lock.  We may not be
				 * notified by the server if it just expired
				 * rather than being released.
				 */
				ASSERTCMP(vnode->lock_state, ==, AFS_VNODE_LOCK_WAITING_FOR_CB);
				afs_set_lock_state(vnode, AFS_VNODE_LOCK_SETTING);
				fl->fl_u.afs.state = AFS_LOCK_PENDING;
				goto try_to_lock;
			}
			goto error_unlock;
		case AFS_LOCK_GRANTED:
		default:
			break;
		}

		spin_unlock(&vnode->lock);
	}

	if (fl->fl_u.afs.state == AFS_LOCK_GRANTED)
		goto vnode_is_locked;
	ret = fl->fl_u.afs.state;
	goto error;

vfs_rejected_lock:
	/* The VFS rejected the lock we just obtained, so we have to discard
	 * what we just got.  We defer this to the lock manager work item to
	 * deal with.
	 */
	_debug("vfs refused %d", ret);
	if (no_server_lock)
		goto error;
	spin_lock(&vnode->lock);
	list_del_init(&fl->fl_u.afs.link);
	afs_defer_unlock(vnode);

error_unlock:
	spin_unlock(&vnode->lock);
error:
	_leave(" = %d", ret);
	return ret;
}

/*
 * unlock on a file on the server
 */
static int afs_do_unlk(struct file *file, struct file_lock *fl)
{
	struct afs_vnode *vnode = AFS_FS_I(locks_inode(file));
	int ret;

	_enter("{%llx:%llu},%u", vnode->fid.vid, vnode->fid.vnode, fl->fl_type);

	trace_afs_flock_op(vnode, fl, afs_flock_op_unlock);

	/* Flush all pending writes before doing anything with locks. */
	vfs_fsync(file, 0);

	ret = locks_lock_file_wait(file, fl);
	_leave(" = %d [%u]", ret, vnode->lock_state);
	return ret;
}

/*
 * return information about a lock we currently hold, if indeed we hold one
 */
static int afs_do_getlk(struct file *file, struct file_lock *fl)
{
	struct afs_vnode *vnode = AFS_FS_I(locks_inode(file));
	struct key *key = afs_file_key(file);
	int ret, lock_count;

	_enter("");

	if (vnode->lock_state == AFS_VNODE_LOCK_DELETED)
		return -ENOENT;

	fl->fl_type = F_UNLCK;

	/* check local lock records first */
	posix_test_lock(file, fl);
	if (fl->fl_type == F_UNLCK) {
		/* no local locks; consult the server */
		ret = afs_fetch_status(vnode, key, false, NULL);
		if (ret < 0)
			goto error;

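		/* The server reports a positive count of read locks or -1 if
		 * the file is write-locked.
		 */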
		lock_count = READ_ONCE(vnode->status.lock_count);
		if (lock_count != 0) {
			if (lock_count > 0)
				fl->fl_type = F_RDLCK;
			else
				fl->fl_type = F_WRLCK;
			fl->fl_start = 0;
			fl->fl_end = OFFSET_MAX;
			fl->fl_pid = 0;
		}
	}

	ret = 0;
error:
	_leave(" = %d [%hd]", ret, fl->fl_type);
	return ret;
}

/*
 * manage POSIX locks on a file
 */
int afs_lock(struct file *file, int cmd, struct file_lock *fl)
{
	struct afs_vnode *vnode = AFS_FS_I(locks_inode(file));
	enum afs_flock_operation op;
	int ret;

	_enter("{%llx:%llu},%d,{t=%x,fl=%x,r=%Ld:%Ld}",
	       vnode->fid.vid, vnode->fid.vnode, cmd,
	       fl->fl_type, fl->fl_flags,
	       (long long) fl->fl_start, (long long) fl->fl_end);

	/* AFS doesn't support mandatory locks */
	if (__mandatory_lock(&vnode->vfs_inode) && fl->fl_type != F_UNLCK)
		return -ENOLCK;

	if (IS_GETLK(cmd))
		return afs_do_getlk(file, fl);

	fl->fl_u.afs.debug_id = atomic_inc_return(&afs_file_lock_debug_id);
	trace_afs_flock_op(vnode, fl, afs_flock_op_lock);

	if (fl->fl_type == F_UNLCK)
		ret = afs_do_unlk(file, fl);
	else
		ret = afs_do_setlk(file, fl);

	switch (ret) {
	case 0:		op = afs_flock_op_return_ok; break;
	case -EAGAIN:	op = afs_flock_op_return_eagain; break;
	case -EDEADLK:	op = afs_flock_op_return_edeadlk; break;
	default:	op = afs_flock_op_return_error; break;
	}
	trace_afs_flock_op(vnode, fl, op);
	return ret;
}

/*
 * manage FLOCK locks on a file
 */
int afs_flock(struct file *file, int cmd, struct file_lock *fl)
{
	struct afs_vnode *vnode = AFS_FS_I(locks_inode(file));
	enum afs_flock_operation op;
	int ret;

	_enter("{%llx:%llu},%d,{t=%x,fl=%x}",
	       vnode->fid.vid, vnode->fid.vnode, cmd,
	       fl->fl_type, fl->fl_flags);

	/*
	 * No BSD flocks over AFS allowed.
	 * Note: we could try to fake a POSIX lock request here by
	 * using ((u32) filp | 0x80000000) or some such as the pid.
	 * Not sure whether that would be unique, though, or whether
	 * that would break in other places.
	 */
	if (!(fl->fl_flags & FL_FLOCK))
		return -ENOLCK;

	fl->fl_u.afs.debug_id = atomic_inc_return(&afs_file_lock_debug_id);
	trace_afs_flock_op(vnode, fl, afs_flock_op_flock);

	/* we're simulating flock() locks using posix locks on the server */
	if (fl->fl_type == F_UNLCK)
		ret = afs_do_unlk(file, fl);
	else
		ret = afs_do_setlk(file, fl);

	switch (ret) {
	case 0:		op = afs_flock_op_return_ok; break;
	case -EAGAIN:	op = afs_flock_op_return_eagain; break;
	case -EDEADLK:	op = afs_flock_op_return_edeadlk; break;
	default:	op = afs_flock_op_return_error; break;
	}
	trace_afs_flock_op(vnode, fl, op);
	return ret;
}

/*
 * the POSIX lock management core VFS code copies the lock record and adds the
 * copy into its own list, so we need to add that copy to the vnode's lock
 * queue in the same place as the original (which will be deleted shortly
 * after)
 */
static void afs_fl_copy_lock(struct file_lock *new, struct file_lock *fl)
{
	struct afs_vnode *vnode = AFS_FS_I(locks_inode(fl->fl_file));

	_enter("");

	new->fl_u.afs.debug_id = atomic_inc_return(&afs_file_lock_debug_id);

	spin_lock(&vnode->lock);
	trace_afs_flock_op(vnode, new, afs_flock_op_copy_lock);
	list_add(&new->fl_u.afs.link, &fl->fl_u.afs.link);
	spin_unlock(&vnode->lock);
}

/*
 * need to remove this lock from the vnode queue when it's removed from the
 * VFS's list
 */
static void afs_fl_release_private(struct file_lock *fl)
{
	struct afs_vnode *vnode = AFS_FS_I(locks_inode(fl->fl_file));

	_enter("");

	spin_lock(&vnode->lock);

	trace_afs_flock_op(vnode, fl, afs_flock_op_release_lock);
	list_del_init(&fl->fl_u.afs.link);
	if (list_empty(&vnode->granted_locks))
		afs_defer_unlock(vnode);

	_debug("state %u for %p", vnode->lock_state, vnode);
	spin_unlock(&vnode->lock);
}
883