xref: /openbmc/linux/fs/afs/flock.c (revision e721eb06)
1 // SPDX-License-Identifier: GPL-2.0-or-later
2 /* AFS file locking support
3  *
4  * Copyright (C) 2007 Red Hat, Inc. All Rights Reserved.
5  * Written by David Howells (dhowells@redhat.com)
6  */
7 
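/*
 * Overview
 *
 * The AFS3 protocol only supports whole-file locks, and a granted server lock
 * times out after five minutes unless it is extended.  Each vnode therefore
 * carries a small lock state machine (vnode->lock_state) and a delayed work
 * item (vnode->lock_work, run on the afs_lock_manager workqueue) that extends
 * or releases the server lock as necessary, whilst the VFS locking core
 * distributes access amongst local processes.
 *
 * A rough sketch of the state transitions handled below (not exhaustive):
 *
 *	NONE -> SETTING -> GRANTED <-> EXTENDING
 *	SETTING -> WAITING_FOR_CB	(server reported contention)
 *	GRANTED -> NEED_UNLOCK -> UNLOCKING -> NONE
 *	any -> DELETED			(file removed on the server)
 */
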
8 #include "internal.h"
9 
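/*
 * Values stored in fl->fl_u.afs.state to track our progress in getting a
 * lock.  The field may also hold a negative errno if the attempt failed (see
 * afs_next_locker() and afs_kill_lockers_enoent()).
 */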
10 #define AFS_LOCK_GRANTED	0
11 #define AFS_LOCK_PENDING	1
12 #define AFS_LOCK_YOUR_TRY	2
13 
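/*
 * Work queue on which each vnode's ->lock_work item runs to extend server
 * locks before they expire, prod waiting lockers to retry and release locks
 * that are no longer needed (see afs_lock_work()).
 */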
14 struct workqueue_struct *afs_lock_manager;
15 
16 static void afs_next_locker(struct afs_vnode *vnode, int error);
17 static void afs_fl_copy_lock(struct file_lock *new, struct file_lock *fl);
18 static void afs_fl_release_private(struct file_lock *fl);
19 
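/*
 * Hooks called by the VFS locking core when it copies or releases one of our
 * file_lock records, so that the vnode's granted/pending queues stay in step
 * (see afs_fl_copy_lock() and afs_fl_release_private() below).
 */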
20 static const struct file_lock_operations afs_lock_ops = {
21 	.fl_copy_lock		= afs_fl_copy_lock,
22 	.fl_release_private	= afs_fl_release_private,
23 };
24 
25 static inline void afs_set_lock_state(struct afs_vnode *vnode, enum afs_lock_state state)
26 {
27 	_debug("STATE %u -> %u", vnode->lock_state, state);
28 	vnode->lock_state = state;
29 }
30 
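/*
 * Source of debug IDs attached to file_lock records so that individual locks
 * can be told apart in traces.
 */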
31 static atomic_t afs_file_lock_debug_id;
32 
33 /*
34  * if the callback is broken on this vnode, then the lock may now be available
35  */
36 void afs_lock_may_be_available(struct afs_vnode *vnode)
37 {
38 	_enter("{%llx:%llu}", vnode->fid.vid, vnode->fid.vnode);
39 
40 	spin_lock(&vnode->lock);
41 	if (vnode->lock_state == AFS_VNODE_LOCK_WAITING_FOR_CB)
42 		afs_next_locker(vnode, 0);
43 	trace_afs_flock_ev(vnode, NULL, afs_flock_callback_break, 0);
44 	spin_unlock(&vnode->lock);
45 }
46 
47 /*
48  * the lock will time out in 5 minutes unless we extend it, so schedule the
49  * extension to be attempted about halfway through that time
50  */
51 static void afs_schedule_lock_extension(struct afs_vnode *vnode)
52 {
53 	ktime_t expires_at, now, duration;
54 	u64 duration_j;
55 
56 	expires_at = ktime_add_ms(vnode->locked_at, AFS_LOCKWAIT * 1000 / 2);
57 	now = ktime_get_real();
58 	duration = ktime_sub(expires_at, now);
59 	if (duration <= 0)
60 		duration_j = 0;
61 	else
62 		duration_j = nsecs_to_jiffies(ktime_to_ns(duration));
63 
64 	queue_delayed_work(afs_lock_manager, &vnode->lock_work, duration_j);
65 }
66 
67 /*
68  * In the case of successful completion of a lock operation, record the time
69  * the reply appeared and start the lock extension timer.
70  */
71 void afs_lock_op_done(struct afs_call *call)
72 {
73 	struct afs_operation *op = call->op;
74 	struct afs_vnode *vnode = op->file[0].vnode;
75 
76 	if (call->error == 0) {
77 		spin_lock(&vnode->lock);
78 		trace_afs_flock_ev(vnode, NULL, afs_flock_timestamp, 0);
79 		vnode->locked_at = call->reply_time;
80 		afs_schedule_lock_extension(vnode);
81 		spin_unlock(&vnode->lock);
82 	}
83 }
84 
85 /*
86  * grant one or more locks (readlocks are allowed to jump the queue if the
87  * first lock in the queue is itself a readlock)
88  * - the caller must hold the vnode lock
89  */
90 static void afs_grant_locks(struct afs_vnode *vnode)
91 {
92 	struct file_lock *p, *_p;
93 	bool exclusive = (vnode->lock_type == AFS_LOCK_WRITE);
94 
95 	list_for_each_entry_safe(p, _p, &vnode->pending_locks, fl_u.afs.link) {
96 		if (!exclusive && p->fl_type == F_WRLCK)
97 			continue;
98 
99 		list_move_tail(&p->fl_u.afs.link, &vnode->granted_locks);
100 		p->fl_u.afs.state = AFS_LOCK_GRANTED;
101 		trace_afs_flock_op(vnode, p, afs_flock_op_grant);
102 		wake_up(&p->fl_wait);
103 	}
104 }
105 
106 /*
107  * If an error is specified, reject every pending lock that matches the
108  * authentication and type of the lock we failed to get.  If there are any
109  * remaining lockers, try to wake up one of them to have a go.
110  */
111 static void afs_next_locker(struct afs_vnode *vnode, int error)
112 {
113 	struct file_lock *p, *_p, *next = NULL;
114 	struct key *key = vnode->lock_key;
115 	unsigned int fl_type = F_RDLCK;
116 
117 	_enter("");
118 
119 	if (vnode->lock_type == AFS_LOCK_WRITE)
120 		fl_type = F_WRLCK;
121 
122 	list_for_each_entry_safe(p, _p, &vnode->pending_locks, fl_u.afs.link) {
123 		if (error &&
124 		    p->fl_type == fl_type &&
125 		    afs_file_key(p->fl_file) == key) {
126 			list_del_init(&p->fl_u.afs.link);
127 			p->fl_u.afs.state = error;
128 			wake_up(&p->fl_wait);
129 		}
130 
131 		/* Select the next locker to hand off to. */
132 		if (next &&
133 		    (next->fl_type == F_WRLCK || p->fl_type == F_RDLCK))
134 			continue;
135 		next = p;
136 	}
137 
138 	vnode->lock_key = NULL;
139 	key_put(key);
140 
141 	if (next) {
142 		afs_set_lock_state(vnode, AFS_VNODE_LOCK_SETTING);
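		/* Hand the selected waiter the server-lock baton: it will see
		 * AFS_LOCK_YOUR_TRY when it wakes and retry the lock RPC
		 * itself (see the AFS_LOCK_YOUR_TRY case in afs_do_setlk()).
		 */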
143 		next->fl_u.afs.state = AFS_LOCK_YOUR_TRY;
144 		trace_afs_flock_op(vnode, next, afs_flock_op_wake);
145 		wake_up(&next->fl_wait);
146 	} else {
147 		afs_set_lock_state(vnode, AFS_VNODE_LOCK_NONE);
148 		trace_afs_flock_ev(vnode, NULL, afs_flock_no_lockers, 0);
149 	}
150 
151 	_leave("");
152 }
153 
154 /*
155  * Kill off all waiters in the pending lock queue due to the vnode being
156  * deleted.
157  */
158 static void afs_kill_lockers_enoent(struct afs_vnode *vnode)
159 {
160 	struct file_lock *p;
161 
162 	afs_set_lock_state(vnode, AFS_VNODE_LOCK_DELETED);
163 
164 	while (!list_empty(&vnode->pending_locks)) {
165 		p = list_entry(vnode->pending_locks.next,
166 			       struct file_lock, fl_u.afs.link);
167 		list_del_init(&p->fl_u.afs.link);
168 		p->fl_u.afs.state = -ENOENT;
169 		wake_up(&p->fl_wait);
170 	}
171 
172 	key_put(vnode->lock_key);
173 	vnode->lock_key = NULL;
174 }
175 
176 static void afs_lock_success(struct afs_operation *op)
177 {
178 	struct afs_vnode *vnode = op->file[0].vnode;
179 
180 	_enter("op=%08x", op->debug_id);
181 	afs_check_for_remote_deletion(op, vnode);
182 	afs_vnode_commit_status(op, &op->file[0]);
183 }
184 
185 static const struct afs_operation_ops afs_set_lock_operation = {
186 	.issue_afs_rpc	= afs_fs_set_lock,
187 	.issue_yfs_rpc	= yfs_fs_set_lock,
188 	.success	= afs_lock_success,
189 };
190 
191 /*
192  * Get a lock on a file
193  */
194 static int afs_set_lock(struct afs_vnode *vnode, struct key *key,
195 			afs_lock_type_t type)
196 {
197 	struct afs_operation *op;
198 
199 	_enter("%s{%llx:%llu.%u},%x,%u",
200 	       vnode->volume->name,
201 	       vnode->fid.vid,
202 	       vnode->fid.vnode,
203 	       vnode->fid.unique,
204 	       key_serial(key), type);
205 
206 	op = afs_alloc_operation(key, vnode->volume);
207 	if (IS_ERR(op))
208 		return PTR_ERR(op);
209 
210 	afs_op_set_vnode(op, 0, vnode);
211 
212 	op->lock.type	= type;
213 	op->ops		= &afs_set_lock_operation;
214 	return afs_do_sync_operation(op);
215 }
216 
217 static const struct afs_operation_ops afs_extend_lock_operation = {
218 	.issue_afs_rpc	= afs_fs_extend_lock,
219 	.issue_yfs_rpc	= yfs_fs_extend_lock,
220 	.success	= afs_lock_success,
221 };
222 
223 /*
224  * Extend a lock on a file
225  */
226 static int afs_extend_lock(struct afs_vnode *vnode, struct key *key)
227 {
228 	struct afs_operation *op;
229 
230 	_enter("%s{%llx:%llu.%u},%x",
231 	       vnode->volume->name,
232 	       vnode->fid.vid,
233 	       vnode->fid.vnode,
234 	       vnode->fid.unique,
235 	       key_serial(key));
236 
237 	op = afs_alloc_operation(key, vnode->volume);
238 	if (IS_ERR(op))
239 		return PTR_ERR(op);
240 
241 	afs_op_set_vnode(op, 0, vnode);
242 
243 	op->flags	|= AFS_OPERATION_UNINTR;
244 	op->ops		= &afs_extend_lock_operation;
245 	return afs_do_sync_operation(op);
246 }
247 
248 static const struct afs_operation_ops afs_release_lock_operation = {
249 	.issue_afs_rpc	= afs_fs_release_lock,
250 	.issue_yfs_rpc	= yfs_fs_release_lock,
251 	.success	= afs_lock_success,
252 };
253 
254 /*
255  * Release a lock on a file
256  */
257 static int afs_release_lock(struct afs_vnode *vnode, struct key *key)
258 {
259 	struct afs_operation *op;
260 
261 	_enter("%s{%llx:%llu.%u},%x",
262 	       vnode->volume->name,
263 	       vnode->fid.vid,
264 	       vnode->fid.vnode,
265 	       vnode->fid.unique,
266 	       key_serial(key));
267 
268 	op = afs_alloc_operation(key, vnode->volume);
269 	if (IS_ERR(op))
270 		return PTR_ERR(op);
271 
272 	afs_op_set_vnode(op, 0, vnode);
273 
274 	op->flags	|= AFS_OPERATION_UNINTR;
275 	op->ops		= &afs_release_lock_operation;
276 	return afs_do_sync_operation(op);
277 }
278 
279 /*
280  * do work for a lock, including:
281  * - probing for a lock we're waiting on but didn't get immediately
282  * - extending a lock that's close to timing out
283  */
284 void afs_lock_work(struct work_struct *work)
285 {
286 	struct afs_vnode *vnode =
287 		container_of(work, struct afs_vnode, lock_work.work);
288 	struct key *key;
289 	int ret;
290 
291 	_enter("{%llx:%llu}", vnode->fid.vid, vnode->fid.vnode);
292 
293 	spin_lock(&vnode->lock);
294 
295 again:
296 	_debug("wstate %u for %p", vnode->lock_state, vnode);
297 	switch (vnode->lock_state) {
298 	case AFS_VNODE_LOCK_NEED_UNLOCK:
299 		afs_set_lock_state(vnode, AFS_VNODE_LOCK_UNLOCKING);
300 		trace_afs_flock_ev(vnode, NULL, afs_flock_work_unlocking, 0);
301 		spin_unlock(&vnode->lock);
302 
303 		/* attempt to release the server lock; if it fails, we just
304 		 * wait 5 minutes and it'll expire anyway */
305 		ret = afs_release_lock(vnode, vnode->lock_key);
306 		if (ret < 0 && vnode->lock_state != AFS_VNODE_LOCK_DELETED) {
307 			trace_afs_flock_ev(vnode, NULL, afs_flock_release_fail,
308 					   ret);
309 			pr_warn("AFS: Failed to release lock on {%llx:%llx} error %d\n",
310 				vnode->fid.vid, vnode->fid.vnode, ret);
312 		}
313 
314 		spin_lock(&vnode->lock);
315 		if (ret == -ENOENT)
316 			afs_kill_lockers_enoent(vnode);
317 		else
318 			afs_next_locker(vnode, 0);
319 		spin_unlock(&vnode->lock);
320 		return;
321 
322 	/* If we've already got a lock, then it must be time to extend that
323 	 * lock as AFS locks time out after 5 minutes.
324 	 */
325 	case AFS_VNODE_LOCK_GRANTED:
326 		_debug("extend");
327 
328 		ASSERT(!list_empty(&vnode->granted_locks));
329 
330 		key = key_get(vnode->lock_key);
331 		afs_set_lock_state(vnode, AFS_VNODE_LOCK_EXTENDING);
332 		trace_afs_flock_ev(vnode, NULL, afs_flock_work_extending, 0);
333 		spin_unlock(&vnode->lock);
334 
335 		ret = afs_extend_lock(vnode, key); /* RPC */
336 		key_put(key);
337 
338 		if (ret < 0) {
339 			trace_afs_flock_ev(vnode, NULL, afs_flock_extend_fail,
340 					   ret);
341 			pr_warn("AFS: Failed to extend lock on {%llx:%llx} error %d\n",
342 				vnode->fid.vid, vnode->fid.vnode, ret);
343 		}
344 
345 		spin_lock(&vnode->lock);
346 
347 		if (ret == -ENOENT) {
348 			afs_kill_lockers_enoent(vnode);
349 			spin_unlock(&vnode->lock);
350 			return;
351 		}
352 
353 		if (vnode->lock_state != AFS_VNODE_LOCK_EXTENDING)
354 			goto again;
355 		afs_set_lock_state(vnode, AFS_VNODE_LOCK_GRANTED);
356 
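		/* If the extension failed for a non-fatal reason, retry in
		 * ten seconds; on success, afs_lock_op_done() will already
		 * have scheduled the next extension.
		 */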
357 		if (ret != 0)
358 			queue_delayed_work(afs_lock_manager, &vnode->lock_work,
359 					   HZ * 10);
360 		spin_unlock(&vnode->lock);
361 		_leave(" [ext]");
362 		return;
363 
364 	/* If we're waiting for a callback to indicate lock release, we can't
365 	 * actually rely on this, so we need to recheck at regular intervals.  The
366 	 * problem is that the server might not notify us if the lock just
367 	 * expires (say because a client died) rather than being explicitly
368 	 * released.
369 	 */
370 	case AFS_VNODE_LOCK_WAITING_FOR_CB:
371 		_debug("retry");
372 		afs_next_locker(vnode, 0);
373 		spin_unlock(&vnode->lock);
374 		return;
375 
376 	case AFS_VNODE_LOCK_DELETED:
377 		afs_kill_lockers_enoent(vnode);
378 		spin_unlock(&vnode->lock);
379 		return;
380 
382 	default:
383 		/* Looks like a lock request was withdrawn. */
384 		spin_unlock(&vnode->lock);
385 		_leave(" [no]");
386 		return;
387 	}
388 }
389 
390 /*
391  * pass responsibility for the unlocking of a vnode on the server to the
392  * manager thread, lest a pending signal in the calling thread interrupt
393  * AF_RXRPC
394  * - the caller must hold the vnode lock
395  */
396 static void afs_defer_unlock(struct afs_vnode *vnode)
397 {
398 	_enter("%u", vnode->lock_state);
399 
400 	if (list_empty(&vnode->granted_locks) &&
401 	    (vnode->lock_state == AFS_VNODE_LOCK_GRANTED ||
402 	     vnode->lock_state == AFS_VNODE_LOCK_EXTENDING)) {
403 		cancel_delayed_work(&vnode->lock_work);
404 
405 		afs_set_lock_state(vnode, AFS_VNODE_LOCK_NEED_UNLOCK);
406 		trace_afs_flock_ev(vnode, NULL, afs_flock_defer_unlock, 0);
407 		queue_delayed_work(afs_lock_manager, &vnode->lock_work, 0);
408 	}
409 }
410 
411 /*
412  * Check that our view of the file metadata is up to date and check to see
413  * whether we think that we have a locking permit.
414  */
415 static int afs_do_setlk_check(struct afs_vnode *vnode, struct key *key,
416 			      enum afs_flock_mode mode, afs_lock_type_t type)
417 {
418 	afs_access_t access;
419 	int ret;
420 
421 	/* Make sure we've got a callback on this file and that our view of the
422 	 * data version is up to date.
423 	 */
424 	ret = afs_validate(vnode, key);
425 	if (ret < 0)
426 		return ret;
427 
428 	/* Check the permission set to see if we're actually going to be
429 	 * allowed to get a lock on this file.
430 	 */
431 	ret = afs_check_permit(vnode, key, &access);
432 	if (ret < 0)
433 		return ret;
434 
435 	/* At a rough estimation, you need LOCK, WRITE or INSERT perm to
436 	 * read-lock a file and WRITE or INSERT perm to write-lock a file.
437 	 *
438 	 * We can't rely on the server to do this for us since if we want to
439 	 * share a read lock that we already have, we won't go to the server.
440 	 */
441 	if (type == AFS_LOCK_READ) {
442 		if (!(access & (AFS_ACE_INSERT | AFS_ACE_WRITE | AFS_ACE_LOCK)))
443 			return -EACCES;
444 	} else {
445 		if (!(access & (AFS_ACE_INSERT | AFS_ACE_WRITE)))
446 			return -EACCES;
447 	}
448 
449 	return 0;
450 }
451 
452 /*
453  * request a lock on a file on the server
454  */
455 static int afs_do_setlk(struct file *file, struct file_lock *fl)
456 {
457 	struct inode *inode = locks_inode(file);
458 	struct afs_vnode *vnode = AFS_FS_I(inode);
459 	enum afs_flock_mode mode = AFS_FS_S(inode->i_sb)->flock_mode;
460 	afs_lock_type_t type;
461 	struct key *key = afs_file_key(file);
462 	bool partial, no_server_lock = false;
463 	int ret;
464 
465 	if (mode == afs_flock_mode_unset)
466 		mode = afs_flock_mode_openafs;
467 
468 	_enter("{%llx:%llu},%llu-%llu,%u,%u",
469 	       vnode->fid.vid, vnode->fid.vnode,
470 	       fl->fl_start, fl->fl_end, fl->fl_type, mode);
471 
472 	fl->fl_ops = &afs_lock_ops;
473 	INIT_LIST_HEAD(&fl->fl_u.afs.link);
474 	fl->fl_u.afs.state = AFS_LOCK_PENDING;
475 
476 	partial = (fl->fl_start != 0 || fl->fl_end != OFFSET_MAX);
477 	type = (fl->fl_type == F_RDLCK) ? AFS_LOCK_READ : AFS_LOCK_WRITE;
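	/* The server only does whole-file locks, so in the "write" flock mode
	 * even a partial read lock is backed by a whole-file write lock on
	 * the server.
	 */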
478 	if (mode == afs_flock_mode_write && partial)
479 		type = AFS_LOCK_WRITE;
480 
481 	ret = afs_do_setlk_check(vnode, key, mode, type);
482 	if (ret < 0)
483 		return ret;
484 
485 	trace_afs_flock_op(vnode, fl, afs_flock_op_set_lock);
486 
487 	/* AFS3 protocol only supports full-file locks and doesn't provide any
488 	 * method of upgrade/downgrade, so we need to emulate for partial-file
489 	 * locks.
490 	 *
491 	 * The OpenAFS client only gets a server lock for a full-file lock and
492 	 * keeps partial-file locks local.  Allow this behaviour to be emulated
493 	 * (as the default).
494 	 */
495 	if (mode == afs_flock_mode_local ||
496 	    (partial && mode == afs_flock_mode_openafs)) {
497 		no_server_lock = true;
498 		goto skip_server_lock;
499 	}
500 
501 	spin_lock(&vnode->lock);
502 	list_add_tail(&fl->fl_u.afs.link, &vnode->pending_locks);
503 
504 	ret = -ENOENT;
505 	if (vnode->lock_state == AFS_VNODE_LOCK_DELETED)
506 		goto error_unlock;
507 
508 	/* If we've already got a lock on the server then try to move to having
509 	 * the VFS grant the requested lock.  Note that this means that other
510 	 * clients may get starved out.
511 	 */
512 	_debug("try %u", vnode->lock_state);
513 	if (vnode->lock_state == AFS_VNODE_LOCK_GRANTED) {
514 		if (type == AFS_LOCK_READ) {
515 			_debug("instant readlock");
516 			list_move_tail(&fl->fl_u.afs.link, &vnode->granted_locks);
517 			fl->fl_u.afs.state = AFS_LOCK_GRANTED;
518 			goto vnode_is_locked_u;
519 		}
520 
521 		if (vnode->lock_type == AFS_LOCK_WRITE) {
522 			_debug("instant writelock");
523 			list_move_tail(&fl->fl_u.afs.link, &vnode->granted_locks);
524 			fl->fl_u.afs.state = AFS_LOCK_GRANTED;
525 			goto vnode_is_locked_u;
526 		}
527 	}
528 
529 	if (vnode->lock_state == AFS_VNODE_LOCK_NONE &&
530 	    !(fl->fl_flags & FL_SLEEP)) {
531 		ret = -EAGAIN;
532 		if (type == AFS_LOCK_READ) {
533 			if (vnode->status.lock_count == -1)
534 				goto lock_is_contended; /* Write locked */
535 		} else {
536 			if (vnode->status.lock_count != 0)
537 				goto lock_is_contended; /* Locked */
538 		}
539 	}
540 
541 	if (vnode->lock_state != AFS_VNODE_LOCK_NONE)
542 		goto need_to_wait;
543 
544 try_to_lock:
545 	/* We don't have a lock on this vnode and we aren't currently waiting
546 	 * for one either, so ask the server for a lock.
547 	 *
548 	 * Note that we need to be careful if we get interrupted by a signal
549 	 * after dispatching the request as we may still get the lock, even
550 	 * though we don't wait for the reply (it's not too bad a problem - the
551 	 * lock will expire in 5 mins anyway).
552 	 */
553 	trace_afs_flock_ev(vnode, fl, afs_flock_try_to_lock, 0);
554 	vnode->lock_key = key_get(key);
555 	vnode->lock_type = type;
556 	afs_set_lock_state(vnode, AFS_VNODE_LOCK_SETTING);
557 	spin_unlock(&vnode->lock);
558 
559 	ret = afs_set_lock(vnode, key, type); /* RPC */
560 
561 	spin_lock(&vnode->lock);
562 	switch (ret) {
563 	case -EKEYREJECTED:
564 	case -EKEYEXPIRED:
565 	case -EKEYREVOKED:
566 	case -EPERM:
567 	case -EACCES:
568 		fl->fl_u.afs.state = ret;
569 		trace_afs_flock_ev(vnode, fl, afs_flock_fail_perm, ret);
570 		list_del_init(&fl->fl_u.afs.link);
571 		afs_next_locker(vnode, ret);
572 		goto error_unlock;
573 
574 	case -ENOENT:
575 		fl->fl_u.afs.state = ret;
576 		trace_afs_flock_ev(vnode, fl, afs_flock_fail_other, ret);
577 		list_del_init(&fl->fl_u.afs.link);
578 		afs_kill_lockers_enoent(vnode);
579 		goto error_unlock;
580 
581 	default:
582 		fl->fl_u.afs.state = ret;
583 		trace_afs_flock_ev(vnode, fl, afs_flock_fail_other, ret);
584 		list_del_init(&fl->fl_u.afs.link);
585 		afs_next_locker(vnode, 0);
586 		goto error_unlock;
587 
588 	case -EWOULDBLOCK:
589 		/* The server doesn't have a lock-waiting queue, so the client
590 		 * will have to retry.  The server will break the outstanding
591 		 * callbacks on a file when a lock is released.
592 		 */
593 		ASSERT(list_empty(&vnode->granted_locks));
594 		ASSERTCMP(vnode->pending_locks.next, ==, &fl->fl_u.afs.link);
595 		goto lock_is_contended;
596 
597 	case 0:
598 		afs_set_lock_state(vnode, AFS_VNODE_LOCK_GRANTED);
599 		trace_afs_flock_ev(vnode, fl, afs_flock_acquired, type);
600 		afs_grant_locks(vnode);
601 		goto vnode_is_locked_u;
602 	}
603 
604 vnode_is_locked_u:
605 	spin_unlock(&vnode->lock);
606 vnode_is_locked:
607 	/* the lock has been granted by the server... */
608 	ASSERTCMP(fl->fl_u.afs.state, ==, AFS_LOCK_GRANTED);
609 
610 skip_server_lock:
611 	/* ... but the VFS still needs to distribute access on this client. */
612 	trace_afs_flock_ev(vnode, fl, afs_flock_vfs_locking, 0);
613 	ret = locks_lock_file_wait(file, fl);
614 	trace_afs_flock_ev(vnode, fl, afs_flock_vfs_lock, ret);
615 	if (ret < 0)
616 		goto vfs_rejected_lock;
617 
618 	/* Again, make sure we've got a callback on this file and, again, make
619 	 * sure that our view of the data version is up to date (we ignore
620 	 * errors incurred here and deal with the consequences elsewhere).
621 	 */
622 	afs_validate(vnode, key);
623 	_leave(" = 0");
624 	return 0;
625 
626 lock_is_contended:
627 	if (!(fl->fl_flags & FL_SLEEP)) {
628 		list_del_init(&fl->fl_u.afs.link);
629 		afs_next_locker(vnode, 0);
630 		ret = -EAGAIN;
631 		goto error_unlock;
632 	}
633 
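	/* Wait for the server to break the callback, but also set a timer in
	 * case the conflicting lock simply expires on the server without a
	 * callback break being sent.
	 */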
634 	afs_set_lock_state(vnode, AFS_VNODE_LOCK_WAITING_FOR_CB);
635 	trace_afs_flock_ev(vnode, fl, afs_flock_would_block, ret);
636 	queue_delayed_work(afs_lock_manager, &vnode->lock_work, HZ * 5);
637 
638 need_to_wait:
639 	/* We're going to have to wait.  Either this client doesn't have a lock
640 	 * on the server yet and we need to wait for a callback to occur, or
641 	 * the client does have a lock on the server, but it's shared and we
642 	 * need an exclusive lock.
643 	 */
644 	spin_unlock(&vnode->lock);
645 
646 	trace_afs_flock_ev(vnode, fl, afs_flock_waiting, 0);
647 	ret = wait_event_interruptible(fl->fl_wait,
648 				       fl->fl_u.afs.state != AFS_LOCK_PENDING);
649 	trace_afs_flock_ev(vnode, fl, afs_flock_waited, ret);
650 
651 	if (fl->fl_u.afs.state >= 0 && fl->fl_u.afs.state != AFS_LOCK_GRANTED) {
652 		spin_lock(&vnode->lock);
653 
654 		switch (fl->fl_u.afs.state) {
655 		case AFS_LOCK_YOUR_TRY:
656 			fl->fl_u.afs.state = AFS_LOCK_PENDING;
657 			goto try_to_lock;
658 		case AFS_LOCK_PENDING:
659 			if (ret > 0) {
660 				/* We need to retry the lock.  We may not be
661 				 * notified by the server if it just expired
662 				 * rather than being released.
663 				 */
664 				ASSERTCMP(vnode->lock_state, ==, AFS_VNODE_LOCK_WAITING_FOR_CB);
665 				afs_set_lock_state(vnode, AFS_VNODE_LOCK_SETTING);
666 				fl->fl_u.afs.state = AFS_LOCK_PENDING;
667 				goto try_to_lock;
668 			}
669 			goto error_unlock;
670 		case AFS_LOCK_GRANTED:
671 		default:
672 			break;
673 		}
674 
675 		spin_unlock(&vnode->lock);
676 	}
677 
678 	if (fl->fl_u.afs.state == AFS_LOCK_GRANTED)
679 		goto vnode_is_locked;
680 	ret = fl->fl_u.afs.state;
681 	goto error;
682 
683 vfs_rejected_lock:
684 	/* The VFS rejected the lock we just obtained, so we have to discard
685 	 * what we just got.  We defer this to the lock manager work item to
686 	 * deal with.
687 	 */
688 	_debug("vfs refused %d", ret);
689 	if (no_server_lock)
690 		goto error;
691 	spin_lock(&vnode->lock);
692 	list_del_init(&fl->fl_u.afs.link);
693 	afs_defer_unlock(vnode);
694 
695 error_unlock:
696 	spin_unlock(&vnode->lock);
697 error:
698 	_leave(" = %d", ret);
699 	return ret;
700 }
701 
702 /*
703  * unlock on a file on the server
704  */
705 static int afs_do_unlk(struct file *file, struct file_lock *fl)
706 {
707 	struct afs_vnode *vnode = AFS_FS_I(locks_inode(file));
708 	int ret;
709 
710 	_enter("{%llx:%llu},%u", vnode->fid.vid, vnode->fid.vnode, fl->fl_type);
711 
712 	trace_afs_flock_op(vnode, fl, afs_flock_op_unlock);
713 
714 	/* Flush all pending writes before doing anything with locks. */
715 	vfs_fsync(file, 0);
716 
717 	ret = locks_lock_file_wait(file, fl);
718 	_leave(" = %d [%u]", ret, vnode->lock_state);
719 	return ret;
720 }
721 
722 /*
723  * return information about a lock we currently hold, if indeed we hold one
724  */
725 static int afs_do_getlk(struct file *file, struct file_lock *fl)
726 {
727 	struct afs_vnode *vnode = AFS_FS_I(locks_inode(file));
728 	struct key *key = afs_file_key(file);
729 	int ret, lock_count;
730 
731 	_enter("");
732 
733 	if (vnode->lock_state == AFS_VNODE_LOCK_DELETED)
734 		return -ENOENT;
735 
736 	fl->fl_type = F_UNLCK;
737 
738 	/* check local lock records first */
739 	posix_test_lock(file, fl);
740 	if (fl->fl_type == F_UNLCK) {
741 		/* no local locks; consult the server */
742 		ret = afs_fetch_status(vnode, key, false, NULL);
743 		if (ret < 0)
744 			goto error;
745 
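		/* The server's lock count is positive for the number of read
		 * locks held and negative if the file is write-locked.
		 */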
746 		lock_count = READ_ONCE(vnode->status.lock_count);
747 		if (lock_count != 0) {
748 			if (lock_count > 0)
749 				fl->fl_type = F_RDLCK;
750 			else
751 				fl->fl_type = F_WRLCK;
752 			fl->fl_start = 0;
753 			fl->fl_end = OFFSET_MAX;
754 			fl->fl_pid = 0;
755 		}
756 	}
757 
758 	ret = 0;
759 error:
760 	_leave(" = %d [%hd]", ret, fl->fl_type);
761 	return ret;
762 }
763 
764 /*
765  * manage POSIX locks on a file
766  */
767 int afs_lock(struct file *file, int cmd, struct file_lock *fl)
768 {
769 	struct afs_vnode *vnode = AFS_FS_I(locks_inode(file));
770 	enum afs_flock_operation op;
771 	int ret;
772 
773 	_enter("{%llx:%llu},%d,{t=%x,fl=%x,r=%Ld:%Ld}",
774 	       vnode->fid.vid, vnode->fid.vnode, cmd,
775 	       fl->fl_type, fl->fl_flags,
776 	       (long long) fl->fl_start, (long long) fl->fl_end);
777 
778 	/* AFS doesn't support mandatory locks */
779 	if (__mandatory_lock(&vnode->vfs_inode) && fl->fl_type != F_UNLCK)
780 		return -ENOLCK;
781 
782 	if (IS_GETLK(cmd))
783 		return afs_do_getlk(file, fl);
784 
785 	fl->fl_u.afs.debug_id = atomic_inc_return(&afs_file_lock_debug_id);
786 	trace_afs_flock_op(vnode, fl, afs_flock_op_lock);
787 
788 	if (fl->fl_type == F_UNLCK)
789 		ret = afs_do_unlk(file, fl);
790 	else
791 		ret = afs_do_setlk(file, fl);
792 
793 	switch (ret) {
794 	case 0:		op = afs_flock_op_return_ok; break;
795 	case -EAGAIN:	op = afs_flock_op_return_eagain; break;
796 	case -EDEADLK:	op = afs_flock_op_return_edeadlk; break;
797 	default:	op = afs_flock_op_return_error; break;
798 	}
799 	trace_afs_flock_op(vnode, fl, op);
800 	return ret;
801 }
802 
803 /*
804  * manage FLOCK locks on a file
805  */
806 int afs_flock(struct file *file, int cmd, struct file_lock *fl)
807 {
808 	struct afs_vnode *vnode = AFS_FS_I(locks_inode(file));
809 	enum afs_flock_operation op;
810 	int ret;
811 
812 	_enter("{%llx:%llu},%d,{t=%x,fl=%x}",
813 	       vnode->fid.vid, vnode->fid.vnode, cmd,
814 	       fl->fl_type, fl->fl_flags);
815 
816 	/*
817 	 * No BSD flocks over AFS allowed.
818 	 * Note: we could try to fake a POSIX lock request here by
819 	 * using ((u32) filp | 0x80000000) or some such as the pid.
820 	 * Not sure whether that would be unique, though, or whether
821 	 * that would break in other places.
822 	 */
823 	if (!(fl->fl_flags & FL_FLOCK))
824 		return -ENOLCK;
825 
826 	fl->fl_u.afs.debug_id = atomic_inc_return(&afs_file_lock_debug_id);
827 	trace_afs_flock_op(vnode, fl, afs_flock_op_flock);
828 
829 	/* we're simulating flock() locks using posix locks on the server */
830 	if (fl->fl_type == F_UNLCK)
831 		ret = afs_do_unlk(file, fl);
832 	else
833 		ret = afs_do_setlk(file, fl);
834 
835 	switch (ret) {
836 	case 0:		op = afs_flock_op_return_ok; break;
837 	case -EAGAIN:	op = afs_flock_op_return_eagain; break;
838 	case -EDEADLK:	op = afs_flock_op_return_edeadlk; break;
839 	default:	op = afs_flock_op_return_error; break;
840 	}
841 	trace_afs_flock_op(vnode, fl, op);
842 	return ret;
843 }
844 
845 /*
846  * the POSIX lock management core VFS code copies the lock record and adds the
847  * copy into its own list, so we need to add that copy to the vnode's lock
848  * queue in the same place as the original (which will be deleted shortly
849  * after)
850  */
851 static void afs_fl_copy_lock(struct file_lock *new, struct file_lock *fl)
852 {
853 	struct afs_vnode *vnode = AFS_FS_I(locks_inode(fl->fl_file));
854 
855 	_enter("");
856 
857 	new->fl_u.afs.debug_id = atomic_inc_return(&afs_file_lock_debug_id);
858 
859 	spin_lock(&vnode->lock);
860 	trace_afs_flock_op(vnode, new, afs_flock_op_copy_lock);
861 	list_add(&new->fl_u.afs.link, &fl->fl_u.afs.link);
862 	spin_unlock(&vnode->lock);
863 }
864 
865 /*
866  * need to remove this lock from the vnode queue when it's removed from the
867  * VFS's list
868  */
869 static void afs_fl_release_private(struct file_lock *fl)
870 {
871 	struct afs_vnode *vnode = AFS_FS_I(locks_inode(fl->fl_file));
872 
873 	_enter("");
874 
875 	spin_lock(&vnode->lock);
876 
877 	trace_afs_flock_op(vnode, fl, afs_flock_op_release_lock);
878 	list_del_init(&fl->fl_u.afs.link);
879 	if (list_empty(&vnode->granted_locks))
880 		afs_defer_unlock(vnode);
881 
882 	_debug("state %u for %p", vnode->lock_state, vnode);
883 	spin_unlock(&vnode->lock);
884 }
885