xref: /openbmc/linux/fs/ceph/caps.c (revision 6a613ac6)
1 #include <linux/ceph/ceph_debug.h>
2 
3 #include <linux/fs.h>
4 #include <linux/kernel.h>
5 #include <linux/sched.h>
6 #include <linux/slab.h>
7 #include <linux/vmalloc.h>
8 #include <linux/wait.h>
9 #include <linux/writeback.h>
10 
11 #include "super.h"
12 #include "mds_client.h"
13 #include "cache.h"
14 #include <linux/ceph/decode.h>
15 #include <linux/ceph/messenger.h>
16 
17 /*
18  * Capability management
19  *
20  * The Ceph metadata servers control client access to inode metadata
21  * and file data by issuing capabilities, granting clients permission
22  * to read and/or write both inode fields and file data to OSDs
23  * (storage nodes).  Each capability consists of a set of bits
24  * indicating which operations are allowed.
25  *
26  * If the client holds a *_SHARED cap, the client has a coherent value
27  * that can be safely read from the cached inode.
28  *
29  * In the case of *_EXCL (exclusive) or FILE_WR capabilities, the
30  * client is allowed to change inode attributes (e.g., file size,
31  * mtime), note its dirty state in the ceph_cap, and asynchronously
32  * flush that metadata change to the MDS.
33  *
34  * In the event of a conflicting operation (perhaps by another
35  * client), the MDS will revoke the conflicting client capabilities.
36  *
37  * In order for a client to cache an inode, it must hold a capability
38  * with at least one MDS server.  When inodes are released, release
39  * notifications are batched and periodically sent en masse to the MDS
40  * cluster to release server state.
41  */
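
/*
 * For example, AUTH_SHARED ("As") lets a client trust the uid/gid/mode
 * cached in its inode, while FILE_EXCL ("Fx") additionally lets it
 * buffer size/mtime updates locally and flush them to the MDS later.
 */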
42 
43 
44 /*
45  * Generate readable cap strings for debugging output.
46  */
47 #define MAX_CAP_STR 20
48 static char cap_str[MAX_CAP_STR][40];
49 static DEFINE_SPINLOCK(cap_str_lock);
50 static int last_cap_str;
51 
52 static char *gcap_string(char *s, int c)
53 {
54 	if (c & CEPH_CAP_GSHARED)
55 		*s++ = 's';
56 	if (c & CEPH_CAP_GEXCL)
57 		*s++ = 'x';
58 	if (c & CEPH_CAP_GCACHE)
59 		*s++ = 'c';
60 	if (c & CEPH_CAP_GRD)
61 		*s++ = 'r';
62 	if (c & CEPH_CAP_GWR)
63 		*s++ = 'w';
64 	if (c & CEPH_CAP_GBUFFER)
65 		*s++ = 'b';
66 	if (c & CEPH_CAP_GLAZYIO)
67 		*s++ = 'l';
68 	return s;
69 }
70 
71 const char *ceph_cap_string(int caps)
72 {
73 	int i;
74 	char *s;
75 	int c;
76 
77 	spin_lock(&cap_str_lock);
78 	i = last_cap_str++;
79 	if (last_cap_str == MAX_CAP_STR)
80 		last_cap_str = 0;
81 	spin_unlock(&cap_str_lock);
82 
83 	s = cap_str[i];
84 
85 	if (caps & CEPH_CAP_PIN)
86 		*s++ = 'p';
87 
88 	c = (caps >> CEPH_CAP_SAUTH) & 3;
89 	if (c) {
90 		*s++ = 'A';
91 		s = gcap_string(s, c);
92 	}
93 
94 	c = (caps >> CEPH_CAP_SLINK) & 3;
95 	if (c) {
96 		*s++ = 'L';
97 		s = gcap_string(s, c);
98 	}
99 
100 	c = (caps >> CEPH_CAP_SXATTR) & 3;
101 	if (c) {
102 		*s++ = 'X';
103 		s = gcap_string(s, c);
104 	}
105 
106 	c = caps >> CEPH_CAP_SFILE;
107 	if (c) {
108 		*s++ = 'F';
109 		s = gcap_string(s, c);
110 	}
111 
112 	if (s == cap_str[i])
113 		*s++ = '-';
114 	*s = 0;
115 	return cap_str[i];
116 }
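
/*
 * Example: a caps value of CEPH_CAP_PIN | CEPH_CAP_AUTH_SHARED |
 * CEPH_CAP_AUTH_EXCL | CEPH_CAP_FILE_CACHE | CEPH_CAP_FILE_RD renders
 * as "pAsxFcr": 'p' for the pin, 'A' plus "sx" for the auth group,
 * and 'F' plus "cr" for the file group, in gcap_string() order.
 */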
117 
118 void ceph_caps_init(struct ceph_mds_client *mdsc)
119 {
120 	INIT_LIST_HEAD(&mdsc->caps_list);
121 	spin_lock_init(&mdsc->caps_list_lock);
122 }
123 
124 void ceph_caps_finalize(struct ceph_mds_client *mdsc)
125 {
126 	struct ceph_cap *cap;
127 
128 	spin_lock(&mdsc->caps_list_lock);
129 	while (!list_empty(&mdsc->caps_list)) {
130 		cap = list_first_entry(&mdsc->caps_list,
131 				       struct ceph_cap, caps_item);
132 		list_del(&cap->caps_item);
133 		kmem_cache_free(ceph_cap_cachep, cap);
134 	}
135 	mdsc->caps_total_count = 0;
136 	mdsc->caps_avail_count = 0;
137 	mdsc->caps_use_count = 0;
138 	mdsc->caps_reserve_count = 0;
139 	mdsc->caps_min_count = 0;
140 	spin_unlock(&mdsc->caps_list_lock);
141 }
142 
143 void ceph_adjust_min_caps(struct ceph_mds_client *mdsc, int delta)
144 {
145 	spin_lock(&mdsc->caps_list_lock);
146 	mdsc->caps_min_count += delta;
147 	BUG_ON(mdsc->caps_min_count < 0);
148 	spin_unlock(&mdsc->caps_list_lock);
149 }
150 
151 void ceph_reserve_caps(struct ceph_mds_client *mdsc,
152 		      struct ceph_cap_reservation *ctx, int need)
153 {
154 	int i;
155 	struct ceph_cap *cap;
156 	int have;
157 	int alloc = 0;
158 	LIST_HEAD(newcaps);
159 
160 	dout("reserve caps ctx=%p need=%d\n", ctx, need);
161 
162 	/* first reserve any caps that are already allocated */
163 	spin_lock(&mdsc->caps_list_lock);
164 	if (mdsc->caps_avail_count >= need)
165 		have = need;
166 	else
167 		have = mdsc->caps_avail_count;
168 	mdsc->caps_avail_count -= have;
169 	mdsc->caps_reserve_count += have;
170 	BUG_ON(mdsc->caps_total_count != mdsc->caps_use_count +
171 					 mdsc->caps_reserve_count +
172 					 mdsc->caps_avail_count);
173 	spin_unlock(&mdsc->caps_list_lock);
174 
175 	for (i = have; i < need; i++) {
176 		cap = kmem_cache_alloc(ceph_cap_cachep, GFP_NOFS);
177 		if (!cap)
178 			break;
179 		list_add(&cap->caps_item, &newcaps);
180 		alloc++;
181 	}
182 	/* we didn't manage to reserve as much as we needed */
183 	if (have + alloc != need)
184 		pr_warn("reserve caps ctx=%p ENOMEM need=%d got=%d\n",
185 			ctx, need, have + alloc);
186 
187 	spin_lock(&mdsc->caps_list_lock);
188 	mdsc->caps_total_count += alloc;
189 	mdsc->caps_reserve_count += alloc;
190 	list_splice(&newcaps, &mdsc->caps_list);
191 
192 	BUG_ON(mdsc->caps_total_count != mdsc->caps_use_count +
193 					 mdsc->caps_reserve_count +
194 					 mdsc->caps_avail_count);
195 	spin_unlock(&mdsc->caps_list_lock);
196 
197 	ctx->count = need;
198 	dout("reserve caps ctx=%p %d = %d used + %d resv + %d avail\n",
199 	     ctx, mdsc->caps_total_count, mdsc->caps_use_count,
200 	     mdsc->caps_reserve_count, mdsc->caps_avail_count);
201 }
202 
203 int ceph_unreserve_caps(struct ceph_mds_client *mdsc,
204 			struct ceph_cap_reservation *ctx)
205 {
206 	dout("unreserve caps ctx=%p count=%d\n", ctx, ctx->count);
207 	if (ctx->count) {
208 		spin_lock(&mdsc->caps_list_lock);
209 		BUG_ON(mdsc->caps_reserve_count < ctx->count);
210 		mdsc->caps_reserve_count -= ctx->count;
211 		mdsc->caps_avail_count += ctx->count;
212 		ctx->count = 0;
213 		dout("unreserve caps %d = %d used + %d resv + %d avail\n",
214 		     mdsc->caps_total_count, mdsc->caps_use_count,
215 		     mdsc->caps_reserve_count, mdsc->caps_avail_count);
216 		BUG_ON(mdsc->caps_total_count != mdsc->caps_use_count +
217 						 mdsc->caps_reserve_count +
218 						 mdsc->caps_avail_count);
219 		spin_unlock(&mdsc->caps_list_lock);
220 	}
221 	return 0;
222 }
223 
224 struct ceph_cap *ceph_get_cap(struct ceph_mds_client *mdsc,
225 			      struct ceph_cap_reservation *ctx)
226 {
227 	struct ceph_cap *cap = NULL;
228 
229 	/* temporary, until we do something about cap import/export */
230 	if (!ctx) {
231 		cap = kmem_cache_alloc(ceph_cap_cachep, GFP_NOFS);
232 		if (cap) {
233 			spin_lock(&mdsc->caps_list_lock);
234 			mdsc->caps_use_count++;
235 			mdsc->caps_total_count++;
236 			spin_unlock(&mdsc->caps_list_lock);
237 		}
238 		return cap;
239 	}
240 
241 	spin_lock(&mdsc->caps_list_lock);
242 	dout("get_cap ctx=%p (%d) %d = %d used + %d resv + %d avail\n",
243 	     ctx, ctx->count, mdsc->caps_total_count, mdsc->caps_use_count,
244 	     mdsc->caps_reserve_count, mdsc->caps_avail_count);
245 	BUG_ON(!ctx->count);
246 	BUG_ON(ctx->count > mdsc->caps_reserve_count);
247 	BUG_ON(list_empty(&mdsc->caps_list));
248 
249 	ctx->count--;
250 	mdsc->caps_reserve_count--;
251 	mdsc->caps_use_count++;
252 
253 	cap = list_first_entry(&mdsc->caps_list, struct ceph_cap, caps_item);
254 	list_del(&cap->caps_item);
255 
256 	BUG_ON(mdsc->caps_total_count != mdsc->caps_use_count +
257 	       mdsc->caps_reserve_count + mdsc->caps_avail_count);
258 	spin_unlock(&mdsc->caps_list_lock);
259 	return cap;
260 }
261 
262 void ceph_put_cap(struct ceph_mds_client *mdsc, struct ceph_cap *cap)
263 {
264 	spin_lock(&mdsc->caps_list_lock);
265 	dout("put_cap %p %d = %d used + %d resv + %d avail\n",
266 	     cap, mdsc->caps_total_count, mdsc->caps_use_count,
267 	     mdsc->caps_reserve_count, mdsc->caps_avail_count);
268 	mdsc->caps_use_count--;
269 	/*
270 	 * Keep some preallocated caps around (ceph_min_count), to
271 	 * avoid lots of free/alloc churn.
272 	 */
273 	if (mdsc->caps_avail_count >= mdsc->caps_reserve_count +
274 				      mdsc->caps_min_count) {
275 		mdsc->caps_total_count--;
276 		kmem_cache_free(ceph_cap_cachep, cap);
277 	} else {
278 		mdsc->caps_avail_count++;
279 		list_add(&cap->caps_item, &mdsc->caps_list);
280 	}
281 
282 	BUG_ON(mdsc->caps_total_count != mdsc->caps_use_count +
283 	       mdsc->caps_reserve_count + mdsc->caps_avail_count);
284 	spin_unlock(&mdsc->caps_list_lock);
285 }
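
/*
 * Sketch of the reservation lifecycle tied together by the helpers
 * above (illustrative, not code from this file):
 *
 *	struct ceph_cap_reservation rsv = {0};
 *	struct ceph_cap *cap;
 *
 *	ceph_reserve_caps(mdsc, &rsv, 2);     preallocate into the pool
 *	cap = ceph_get_cap(mdsc, &rsv);       consume one reserved cap
 *	...
 *	ceph_put_cap(mdsc, cap);              return it to the pool
 *	ceph_unreserve_caps(mdsc, &rsv);      release whatever is left
 *
 * At every step, caps_total_count == caps_use_count +
 * caps_reserve_count + caps_avail_count (see the BUG_ONs above).
 */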
286 
287 void ceph_reservation_status(struct ceph_fs_client *fsc,
288 			     int *total, int *avail, int *used, int *reserved,
289 			     int *min)
290 {
291 	struct ceph_mds_client *mdsc = fsc->mdsc;
292 
293 	if (total)
294 		*total = mdsc->caps_total_count;
295 	if (avail)
296 		*avail = mdsc->caps_avail_count;
297 	if (used)
298 		*used = mdsc->caps_use_count;
299 	if (reserved)
300 		*reserved = mdsc->caps_reserve_count;
301 	if (min)
302 		*min = mdsc->caps_min_count;
303 }
304 
305 /*
306  * Find ceph_cap for given mds, if any.
307  *
308  * Called with i_ceph_lock held.
309  */
310 static struct ceph_cap *__get_cap_for_mds(struct ceph_inode_info *ci, int mds)
311 {
312 	struct ceph_cap *cap;
313 	struct rb_node *n = ci->i_caps.rb_node;
314 
315 	while (n) {
316 		cap = rb_entry(n, struct ceph_cap, ci_node);
317 		if (mds < cap->mds)
318 			n = n->rb_left;
319 		else if (mds > cap->mds)
320 			n = n->rb_right;
321 		else
322 			return cap;
323 	}
324 	return NULL;
325 }
326 
327 struct ceph_cap *ceph_get_cap_for_mds(struct ceph_inode_info *ci, int mds)
328 {
329 	struct ceph_cap *cap;
330 
331 	spin_lock(&ci->i_ceph_lock);
332 	cap = __get_cap_for_mds(ci, mds);
333 	spin_unlock(&ci->i_ceph_lock);
334 	return cap;
335 }
336 
337 /*
338  * Return id of any MDS with a cap, preferably FILE_WR|BUFFER|EXCL, else -1.
339  */
340 static int __ceph_get_cap_mds(struct ceph_inode_info *ci)
341 {
342 	struct ceph_cap *cap;
343 	int mds = -1;
344 	struct rb_node *p;
345 
346 	/* prefer mds with WR|BUFFER|EXCL caps */
347 	for (p = rb_first(&ci->i_caps); p; p = rb_next(p)) {
348 		cap = rb_entry(p, struct ceph_cap, ci_node);
349 		mds = cap->mds;
350 		if (cap->issued & (CEPH_CAP_FILE_WR |
351 				   CEPH_CAP_FILE_BUFFER |
352 				   CEPH_CAP_FILE_EXCL))
353 			break;
354 	}
355 	return mds;
356 }
357 
358 int ceph_get_cap_mds(struct inode *inode)
359 {
360 	struct ceph_inode_info *ci = ceph_inode(inode);
361 	int mds;
362 	spin_lock(&ci->i_ceph_lock);
363 	mds = __ceph_get_cap_mds(ceph_inode(inode));
364 	spin_unlock(&ci->i_ceph_lock);
365 	return mds;
366 }
367 
368 /*
369  * Called under i_ceph_lock.
370  */
371 static void __insert_cap_node(struct ceph_inode_info *ci,
372 			      struct ceph_cap *new)
373 {
374 	struct rb_node **p = &ci->i_caps.rb_node;
375 	struct rb_node *parent = NULL;
376 	struct ceph_cap *cap = NULL;
377 
378 	while (*p) {
379 		parent = *p;
380 		cap = rb_entry(parent, struct ceph_cap, ci_node);
381 		if (new->mds < cap->mds)
382 			p = &(*p)->rb_left;
383 		else if (new->mds > cap->mds)
384 			p = &(*p)->rb_right;
385 		else
386 			BUG();
387 	}
388 
389 	rb_link_node(&new->ci_node, parent, p);
390 	rb_insert_color(&new->ci_node, &ci->i_caps);
391 }
392 
393 /*
394  * (re)set cap hold timeouts, which control the delayed release
395  * of unused caps back to the MDS.  Should be called on cap use.
396  */
397 static void __cap_set_timeouts(struct ceph_mds_client *mdsc,
398 			       struct ceph_inode_info *ci)
399 {
400 	struct ceph_mount_options *ma = mdsc->fsc->mount_options;
401 
402 	ci->i_hold_caps_min = round_jiffies(jiffies +
403 					    ma->caps_wanted_delay_min * HZ);
404 	ci->i_hold_caps_max = round_jiffies(jiffies +
405 					    ma->caps_wanted_delay_max * HZ);
406 	dout("__cap_set_timeouts %p min %lu max %lu\n", &ci->vfs_inode,
407 	     ci->i_hold_caps_min - jiffies, ci->i_hold_caps_max - jiffies);
408 }
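
/*
 * With the default mount options (caps_wanted_delay_min=5,
 * caps_wanted_delay_max=60), for example, a cap used now is held for
 * at least ~5 seconds, and the delayed-release check fires within
 * roughly a minute of last use.
 */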
409 
410 /*
411  * (Re)queue cap at the end of the delayed cap release list.
412  *
413  * If I_FLUSH is set, leave the inode at the front of the list.
414  *
415  * Caller holds i_ceph_lock
416  *    -> we take mdsc->cap_delay_lock
417  */
418 static void __cap_delay_requeue(struct ceph_mds_client *mdsc,
419 				struct ceph_inode_info *ci)
420 {
421 	__cap_set_timeouts(mdsc, ci);
422 	dout("__cap_delay_requeue %p flags %d at %lu\n", &ci->vfs_inode,
423 	     ci->i_ceph_flags, ci->i_hold_caps_max);
424 	if (!mdsc->stopping) {
425 		spin_lock(&mdsc->cap_delay_lock);
426 		if (!list_empty(&ci->i_cap_delay_list)) {
427 			if (ci->i_ceph_flags & CEPH_I_FLUSH)
428 				goto no_change;
429 			list_del_init(&ci->i_cap_delay_list);
430 		}
431 		list_add_tail(&ci->i_cap_delay_list, &mdsc->cap_delay_list);
432 no_change:
433 		spin_unlock(&mdsc->cap_delay_lock);
434 	}
435 }
436 
437 /*
438  * Queue an inode for immediate writeback.  Mark inode with I_FLUSH,
439  * indicating we should send a cap message to flush dirty metadata
440  * asap, and move to the front of the delayed cap list.
441  */
442 static void __cap_delay_requeue_front(struct ceph_mds_client *mdsc,
443 				      struct ceph_inode_info *ci)
444 {
445 	dout("__cap_delay_requeue_front %p\n", &ci->vfs_inode);
446 	spin_lock(&mdsc->cap_delay_lock);
447 	ci->i_ceph_flags |= CEPH_I_FLUSH;
448 	if (!list_empty(&ci->i_cap_delay_list))
449 		list_del_init(&ci->i_cap_delay_list);
450 	list_add(&ci->i_cap_delay_list, &mdsc->cap_delay_list);
451 	spin_unlock(&mdsc->cap_delay_lock);
452 }
453 
454 /*
455  * Cancel delayed work on cap.
456  *
457  * Caller must hold i_ceph_lock.
458  */
459 static void __cap_delay_cancel(struct ceph_mds_client *mdsc,
460 			       struct ceph_inode_info *ci)
461 {
462 	dout("__cap_delay_cancel %p\n", &ci->vfs_inode);
463 	if (list_empty(&ci->i_cap_delay_list))
464 		return;
465 	spin_lock(&mdsc->cap_delay_lock);
466 	list_del_init(&ci->i_cap_delay_list);
467 	spin_unlock(&mdsc->cap_delay_lock);
468 }
469 
470 /*
471  * Common issue checks for add_cap, handle_cap_grant.
472  */
473 static void __check_cap_issue(struct ceph_inode_info *ci, struct ceph_cap *cap,
474 			      unsigned issued)
475 {
476 	unsigned had = __ceph_caps_issued(ci, NULL);
477 
478 	/*
479 	 * Each time we receive FILE_CACHE anew, we increment
480 	 * i_rdcache_gen.
481 	 */
482 	if ((issued & (CEPH_CAP_FILE_CACHE|CEPH_CAP_FILE_LAZYIO)) &&
483 	    (had & (CEPH_CAP_FILE_CACHE|CEPH_CAP_FILE_LAZYIO)) == 0) {
484 		ci->i_rdcache_gen++;
485 	}
486 
487 	/*
488 	 * if we are newly issued FILE_SHARED, mark dir not complete; we
489 	 * don't know what happened to this directory while we didn't
490 	 * have the cap.
491 	 */
492 	if ((issued & CEPH_CAP_FILE_SHARED) &&
493 	    (had & CEPH_CAP_FILE_SHARED) == 0) {
494 		ci->i_shared_gen++;
495 		if (S_ISDIR(ci->vfs_inode.i_mode)) {
496 			dout(" marking %p NOT complete\n", &ci->vfs_inode);
497 			__ceph_dir_clear_complete(ci);
498 		}
499 	}
500 }
501 
502 /*
503  * Add a capability under the given MDS session.
504  *
505  * Caller should hold session snap_rwsem (read) and s_mutex.
506  *
507  * @fmode is the open file mode, if we are opening a file, otherwise
508  * it is < 0.  (This is so we can atomically add the cap and add an
509  * open file reference to it.)
510  */
511 void ceph_add_cap(struct inode *inode,
512 		  struct ceph_mds_session *session, u64 cap_id,
513 		  int fmode, unsigned issued, unsigned wanted,
514 		  unsigned seq, unsigned mseq, u64 realmino, int flags,
515 		  struct ceph_cap **new_cap)
516 {
517 	struct ceph_mds_client *mdsc = ceph_inode_to_client(inode)->mdsc;
518 	struct ceph_inode_info *ci = ceph_inode(inode);
519 	struct ceph_cap *cap;
520 	int mds = session->s_mds;
521 	int actual_wanted;
522 
523 	dout("add_cap %p mds%d cap %llx %s seq %d\n", inode,
524 	     session->s_mds, cap_id, ceph_cap_string(issued), seq);
525 
526 	/*
527 	 * If we are opening the file, include file mode wanted bits
528 	 * in wanted.
529 	 */
530 	if (fmode >= 0)
531 		wanted |= ceph_caps_for_mode(fmode);
532 
533 	cap = __get_cap_for_mds(ci, mds);
534 	if (!cap) {
535 		cap = *new_cap;
536 		*new_cap = NULL;
537 
538 		cap->issued = 0;
539 		cap->implemented = 0;
540 		cap->mds = mds;
541 		cap->mds_wanted = 0;
542 		cap->mseq = 0;
543 
544 		cap->ci = ci;
545 		__insert_cap_node(ci, cap);
546 
547 		/* add to session cap list */
548 		cap->session = session;
549 		spin_lock(&session->s_cap_lock);
550 		list_add_tail(&cap->session_caps, &session->s_caps);
551 		session->s_nr_caps++;
552 		spin_unlock(&session->s_cap_lock);
553 	} else {
554 		/*
555 		 * auth mds of the inode changed. we received the cap export
556 		 * message, but still haven't received the cap import message.
557 		 * handle_cap_export() updated the new auth MDS' cap.
558 		 *
559 		 * "ceph_seq_cmp(seq, cap->seq) <= 0" means we are processing
560 		 * a message that was sent before the cap import message. So
561 		 * don't remove caps.
562 		 */
563 		if (ceph_seq_cmp(seq, cap->seq) <= 0) {
564 			WARN_ON(cap != ci->i_auth_cap);
565 			WARN_ON(cap->cap_id != cap_id);
566 			seq = cap->seq;
567 			mseq = cap->mseq;
568 			issued |= cap->issued;
569 			flags |= CEPH_CAP_FLAG_AUTH;
570 		}
571 	}
572 
573 	if (!ci->i_snap_realm) {
574 		/*
575 		 * add this inode to the appropriate snap realm
576 		 */
577 		struct ceph_snap_realm *realm = ceph_lookup_snap_realm(mdsc,
578 							       realmino);
579 		if (realm) {
580 			spin_lock(&realm->inodes_with_caps_lock);
581 			ci->i_snap_realm = realm;
582 			list_add(&ci->i_snap_realm_item,
583 				 &realm->inodes_with_caps);
584 			spin_unlock(&realm->inodes_with_caps_lock);
585 		} else {
586 			pr_err("ceph_add_cap: couldn't find snap realm %llx\n",
587 			       realmino);
588 			WARN_ON(!realm);
589 		}
590 	}
591 
592 	__check_cap_issue(ci, cap, issued);
593 
594 	/*
595 	 * If we are issued caps we don't want, or the mds' wanted
596 	 * value appears to be off, queue a check so we'll release
597 	 * later and/or update the mds wanted value.
598 	 */
599 	actual_wanted = __ceph_caps_wanted(ci);
600 	if ((wanted & ~actual_wanted) ||
601 	    (issued & ~actual_wanted & CEPH_CAP_ANY_WR)) {
602 		dout(" issued %s, mds wanted %s, actual %s, queueing\n",
603 		     ceph_cap_string(issued), ceph_cap_string(wanted),
604 		     ceph_cap_string(actual_wanted));
605 		__cap_delay_requeue(mdsc, ci);
606 	}
607 
608 	if (flags & CEPH_CAP_FLAG_AUTH) {
609 		if (ci->i_auth_cap == NULL ||
610 		    ceph_seq_cmp(ci->i_auth_cap->mseq, mseq) < 0) {
611 			ci->i_auth_cap = cap;
612 			cap->mds_wanted = wanted;
613 		}
614 	} else {
615 		WARN_ON(ci->i_auth_cap == cap);
616 	}
617 
618 	dout("add_cap inode %p (%llx.%llx) cap %p %s now %s seq %d mds%d\n",
619 	     inode, ceph_vinop(inode), cap, ceph_cap_string(issued),
620 	     ceph_cap_string(issued|cap->issued), seq, mds);
621 	cap->cap_id = cap_id;
622 	cap->issued = issued;
623 	cap->implemented |= issued;
624 	if (ceph_seq_cmp(mseq, cap->mseq) > 0)
625 		cap->mds_wanted = wanted;
626 	else
627 		cap->mds_wanted |= wanted;
628 	cap->seq = seq;
629 	cap->issue_seq = seq;
630 	cap->mseq = mseq;
631 	cap->cap_gen = session->s_cap_gen;
632 
633 	if (fmode >= 0)
634 		__ceph_get_fmode(ci, fmode);
635 }
636 
637 /*
638  * Return true if cap has not timed out and belongs to the current
639  * generation of the MDS session (i.e. has not gone 'stale' due to
640  * us losing touch with the mds).
641  */
642 static int __cap_is_valid(struct ceph_cap *cap)
643 {
644 	unsigned long ttl;
645 	u32 gen;
646 
647 	spin_lock(&cap->session->s_gen_ttl_lock);
648 	gen = cap->session->s_cap_gen;
649 	ttl = cap->session->s_cap_ttl;
650 	spin_unlock(&cap->session->s_gen_ttl_lock);
651 
652 	if (cap->cap_gen < gen || time_after_eq(jiffies, ttl)) {
653 		dout("__cap_is_valid %p cap %p issued %s "
654 		     "but STALE (gen %u vs %u)\n", &cap->ci->vfs_inode,
655 		     cap, ceph_cap_string(cap->issued), cap->cap_gen, gen);
656 		return 0;
657 	}
658 
659 	return 1;
660 }
661 
662 /*
663  * Return set of valid cap bits issued to us.  Note that caps time
664  * out, and may be invalidated in bulk if the client session times out
665  * and session->s_cap_gen is bumped.
666  */
667 int __ceph_caps_issued(struct ceph_inode_info *ci, int *implemented)
668 {
669 	int have = ci->i_snap_caps;
670 	struct ceph_cap *cap;
671 	struct rb_node *p;
672 
673 	if (implemented)
674 		*implemented = 0;
675 	for (p = rb_first(&ci->i_caps); p; p = rb_next(p)) {
676 		cap = rb_entry(p, struct ceph_cap, ci_node);
677 		if (!__cap_is_valid(cap))
678 			continue;
679 		dout("__ceph_caps_issued %p cap %p issued %s\n",
680 		     &ci->vfs_inode, cap, ceph_cap_string(cap->issued));
681 		have |= cap->issued;
682 		if (implemented)
683 			*implemented |= cap->implemented;
684 	}
685 	/*
686 	 * exclude caps issued by a non-auth MDS that are being revoked
687 	 * by the auth MDS. The non-auth MDS should be revoking/exporting
688 	 * these caps, but the message is delayed.
689 	 */
690 	if (ci->i_auth_cap) {
691 		cap = ci->i_auth_cap;
692 		have &= ~cap->implemented | cap->issued;
693 	}
694 	return have;
695 }
696 
697 /*
698  * Get cap bits issued by caps other than @ocap
699  */
700 int __ceph_caps_issued_other(struct ceph_inode_info *ci, struct ceph_cap *ocap)
701 {
702 	int have = ci->i_snap_caps;
703 	struct ceph_cap *cap;
704 	struct rb_node *p;
705 
706 	for (p = rb_first(&ci->i_caps); p; p = rb_next(p)) {
707 		cap = rb_entry(p, struct ceph_cap, ci_node);
708 		if (cap == ocap)
709 			continue;
710 		if (!__cap_is_valid(cap))
711 			continue;
712 		have |= cap->issued;
713 	}
714 	return have;
715 }
716 
717 /*
718  * Move a cap to the end of the LRU (oldest caps at list head, newest
719  * at list tail).
720  */
721 static void __touch_cap(struct ceph_cap *cap)
722 {
723 	struct ceph_mds_session *s = cap->session;
724 
725 	spin_lock(&s->s_cap_lock);
726 	if (s->s_cap_iterator == NULL) {
727 		dout("__touch_cap %p cap %p mds%d\n", &cap->ci->vfs_inode, cap,
728 		     s->s_mds);
729 		list_move_tail(&cap->session_caps, &s->s_caps);
730 	} else {
731 		dout("__touch_cap %p cap %p mds%d NOP, iterating over caps\n",
732 		     &cap->ci->vfs_inode, cap, s->s_mds);
733 	}
734 	spin_unlock(&s->s_cap_lock);
735 }
736 
737 /*
738  * Check if we hold the given mask.  If so, move the cap(s) to the
739  * tail (most recently used end) of their respective LRUs.  (This
740  * is the preferred way for callers to check for caps they want.)
741  */
742 int __ceph_caps_issued_mask(struct ceph_inode_info *ci, int mask, int touch)
743 {
744 	struct ceph_cap *cap;
745 	struct rb_node *p;
746 	int have = ci->i_snap_caps;
747 
748 	if ((have & mask) == mask) {
749 		dout("__ceph_caps_issued_mask %p snap issued %s"
750 		     " (mask %s)\n", &ci->vfs_inode,
751 		     ceph_cap_string(have),
752 		     ceph_cap_string(mask));
753 		return 1;
754 	}
755 
756 	for (p = rb_first(&ci->i_caps); p; p = rb_next(p)) {
757 		cap = rb_entry(p, struct ceph_cap, ci_node);
758 		if (!__cap_is_valid(cap))
759 			continue;
760 		if ((cap->issued & mask) == mask) {
761 			dout("__ceph_caps_issued_mask %p cap %p issued %s"
762 			     " (mask %s)\n", &ci->vfs_inode, cap,
763 			     ceph_cap_string(cap->issued),
764 			     ceph_cap_string(mask));
765 			if (touch)
766 				__touch_cap(cap);
767 			return 1;
768 		}
769 
770 		/* does a combination of caps satisfy mask? */
771 		have |= cap->issued;
772 		if ((have & mask) == mask) {
773 			dout("__ceph_caps_issued_mask %p combo issued %s"
774 			     " (mask %s)\n", &ci->vfs_inode,
775 			     ceph_cap_string(cap->issued),
776 			     ceph_cap_string(mask));
777 			if (touch) {
778 				struct rb_node *q;
779 
780 				/* touch this + preceding caps */
781 				__touch_cap(cap);
782 				for (q = rb_first(&ci->i_caps); q != p;
783 				     q = rb_next(q)) {
784 					cap = rb_entry(q, struct ceph_cap,
785 						       ci_node);
786 					if (!__cap_is_valid(cap))
787 						continue;
788 					__touch_cap(cap);
789 				}
790 			}
791 			return 1;
792 		}
793 	}
794 
795 	return 0;
796 }
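
/*
 * Example: if mds0 has issued Fc and mds1 has issued Fr, a mask of
 * CEPH_CAP_FILE_CACHE | CEPH_CAP_FILE_RD is satisfied only by the
 * combination, so with @touch set both caps get moved in the LRU.
 */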
797 
798 /*
799  * Return true if mask caps are currently being revoked by an MDS.
800  */
801 int __ceph_caps_revoking_other(struct ceph_inode_info *ci,
802 			       struct ceph_cap *ocap, int mask)
803 {
804 	struct ceph_cap *cap;
805 	struct rb_node *p;
806 
807 	for (p = rb_first(&ci->i_caps); p; p = rb_next(p)) {
808 		cap = rb_entry(p, struct ceph_cap, ci_node);
809 		if (cap != ocap &&
810 		    (cap->implemented & ~cap->issued & mask))
811 			return 1;
812 	}
813 	return 0;
814 }
815 
816 int ceph_caps_revoking(struct ceph_inode_info *ci, int mask)
817 {
818 	struct inode *inode = &ci->vfs_inode;
819 	int ret;
820 
821 	spin_lock(&ci->i_ceph_lock);
822 	ret = __ceph_caps_revoking_other(ci, NULL, mask);
823 	spin_unlock(&ci->i_ceph_lock);
824 	dout("ceph_caps_revoking %p %s = %d\n", inode,
825 	     ceph_cap_string(mask), ret);
826 	return ret;
827 }
828 
829 int __ceph_caps_used(struct ceph_inode_info *ci)
830 {
831 	int used = 0;
832 	if (ci->i_pin_ref)
833 		used |= CEPH_CAP_PIN;
834 	if (ci->i_rd_ref)
835 		used |= CEPH_CAP_FILE_RD;
836 	if (ci->i_rdcache_ref ||
837 	    (!S_ISDIR(ci->vfs_inode.i_mode) && /* ignore readdir cache */
838 	     ci->vfs_inode.i_data.nrpages))
839 		used |= CEPH_CAP_FILE_CACHE;
840 	if (ci->i_wr_ref)
841 		used |= CEPH_CAP_FILE_WR;
842 	if (ci->i_wb_ref || ci->i_wrbuffer_ref)
843 		used |= CEPH_CAP_FILE_BUFFER;
844 	return used;
845 }
846 
847 /*
848  * wanted, by virtue of open file modes
849  */
850 int __ceph_caps_file_wanted(struct ceph_inode_info *ci)
851 {
852 	int want = 0;
853 	int mode;
854 	for (mode = 0; mode < CEPH_FILE_MODE_NUM; mode++)
855 		if (ci->i_nr_by_mode[mode])
856 			want |= ceph_caps_for_mode(mode);
857 	return want;
858 }
859 
860 /*
861  * Return caps we have registered with the MDS(s) as 'wanted'.
862  */
863 int __ceph_caps_mds_wanted(struct ceph_inode_info *ci)
864 {
865 	struct ceph_cap *cap;
866 	struct rb_node *p;
867 	int mds_wanted = 0;
868 
869 	for (p = rb_first(&ci->i_caps); p; p = rb_next(p)) {
870 		cap = rb_entry(p, struct ceph_cap, ci_node);
871 		if (!__cap_is_valid(cap))
872 			continue;
873 		if (cap == ci->i_auth_cap)
874 			mds_wanted |= cap->mds_wanted;
875 		else
876 			mds_wanted |= (cap->mds_wanted & ~CEPH_CAP_ANY_FILE_WR);
877 	}
878 	return mds_wanted;
879 }
880 
881 /*
882  * called under i_ceph_lock
883  */
884 static int __ceph_is_any_caps(struct ceph_inode_info *ci)
885 {
886 	return !RB_EMPTY_ROOT(&ci->i_caps);
887 }
888 
889 int ceph_is_any_caps(struct inode *inode)
890 {
891 	struct ceph_inode_info *ci = ceph_inode(inode);
892 	int ret;
893 
894 	spin_lock(&ci->i_ceph_lock);
895 	ret = __ceph_is_any_caps(ci);
896 	spin_unlock(&ci->i_ceph_lock);
897 
898 	return ret;
899 }
900 
901 static void drop_inode_snap_realm(struct ceph_inode_info *ci)
902 {
903 	struct ceph_snap_realm *realm = ci->i_snap_realm;
904 	spin_lock(&realm->inodes_with_caps_lock);
905 	list_del_init(&ci->i_snap_realm_item);
906 	ci->i_snap_realm_counter++;
907 	ci->i_snap_realm = NULL;
908 	spin_unlock(&realm->inodes_with_caps_lock);
909 	ceph_put_snap_realm(ceph_sb_to_client(ci->vfs_inode.i_sb)->mdsc,
910 			    realm);
911 }
912 
913 /*
914  * Remove a cap.  Take steps to deal with a racing iterate_session_caps.
915  *
916  * caller should hold i_ceph_lock.
917  * caller will not hold session s_mutex if called from destroy_inode.
918  */
919 void __ceph_remove_cap(struct ceph_cap *cap, bool queue_release)
920 {
921 	struct ceph_mds_session *session = cap->session;
922 	struct ceph_inode_info *ci = cap->ci;
923 	struct ceph_mds_client *mdsc =
924 		ceph_sb_to_client(ci->vfs_inode.i_sb)->mdsc;
925 	int removed = 0;
926 
927 	dout("__ceph_remove_cap %p from %p\n", cap, &ci->vfs_inode);
928 
929 	/* remove from session list */
930 	spin_lock(&session->s_cap_lock);
931 	if (session->s_cap_iterator == cap) {
932 		/* not yet, we are iterating over this very cap */
933 		dout("__ceph_remove_cap  delaying %p removal from session %p\n",
934 		     cap, cap->session);
935 	} else {
936 		list_del_init(&cap->session_caps);
937 		session->s_nr_caps--;
938 		cap->session = NULL;
939 		removed = 1;
940 	}
941 	/* protect backpointer with s_cap_lock: see iterate_session_caps */
942 	cap->ci = NULL;
943 
944 	/*
945 	 * s_cap_reconnect is protected by s_cap_lock. no one changes
946 	 * s_cap_gen while session is in the reconnect state.
947 	 */
948 	if (queue_release &&
949 	    (!session->s_cap_reconnect || cap->cap_gen == session->s_cap_gen)) {
950 		cap->queue_release = 1;
951 		if (removed) {
952 			list_add_tail(&cap->session_caps,
953 				      &session->s_cap_releases);
954 			session->s_num_cap_releases++;
955 			removed = 0;
956 		}
957 	} else {
958 		cap->queue_release = 0;
959 	}
960 	cap->cap_ino = ci->i_vino.ino;
961 
962 	spin_unlock(&session->s_cap_lock);
963 
964 	/* remove from inode list */
965 	rb_erase(&cap->ci_node, &ci->i_caps);
966 	if (ci->i_auth_cap == cap)
967 		ci->i_auth_cap = NULL;
968 
969 	if (removed)
970 		ceph_put_cap(mdsc, cap);
971 
972 	/* when a reconnect is denied, we remove session caps forcibly;
973 	 * i_wr_ref can be non-zero. If there are ongoing writes,
974 	 * keep i_snap_realm.
975 	 */
976 	if (!__ceph_is_any_caps(ci) && ci->i_wr_ref == 0 && ci->i_snap_realm)
977 		drop_inode_snap_realm(ci);
978 
979 	if (!__ceph_is_any_real_caps(ci))
980 		__cap_delay_cancel(mdsc, ci);
981 }
982 
983 /*
984  * Build and send a cap message to the given MDS.
985  *
986  * Caller should be holding s_mutex.
987  */
988 static int send_cap_msg(struct ceph_mds_session *session,
989 			u64 ino, u64 cid, int op,
990 			int caps, int wanted, int dirty,
991 			u32 seq, u64 flush_tid, u64 oldest_flush_tid,
992 			u32 issue_seq, u32 mseq, u64 size, u64 max_size,
993 			struct timespec *mtime, struct timespec *atime,
994 			u64 time_warp_seq,
995 			kuid_t uid, kgid_t gid, umode_t mode,
996 			u64 xattr_version,
997 			struct ceph_buffer *xattrs_buf,
998 			u64 follows, bool inline_data)
999 {
1000 	struct ceph_mds_caps *fc;
1001 	struct ceph_msg *msg;
1002 	void *p;
1003 	size_t extra_len;
1004 
1005 	dout("send_cap_msg %s %llx %llx caps %s wanted %s dirty %s"
1006 	     " seq %u/%u tid %llu/%llu mseq %u follows %lld size %llu/%llu"
1007 	     " xattr_ver %llu xattr_len %d\n", ceph_cap_op_name(op),
1008 	     cid, ino, ceph_cap_string(caps), ceph_cap_string(wanted),
1009 	     ceph_cap_string(dirty),
1010 	     seq, issue_seq, flush_tid, oldest_flush_tid,
1011 	     mseq, follows, size, max_size,
1012 	     xattr_version, xattrs_buf ? (int)xattrs_buf->vec.iov_len : 0);
1013 
1014 	/* flock buffer size + inline version + inline data size +
1015 	 * osd_epoch_barrier + oldest_flush_tid */
1016 	extra_len = 4 + 8 + 4 + 4 + 8;
1017 	msg = ceph_msg_new(CEPH_MSG_CLIENT_CAPS, sizeof(*fc) + extra_len,
1018 			   GFP_NOFS, false);
1019 	if (!msg)
1020 		return -ENOMEM;
1021 
1022 	msg->hdr.version = cpu_to_le16(6);
1023 	msg->hdr.tid = cpu_to_le64(flush_tid);
1024 
1025 	fc = msg->front.iov_base;
1026 	memset(fc, 0, sizeof(*fc));
1027 
1028 	fc->cap_id = cpu_to_le64(cid);
1029 	fc->op = cpu_to_le32(op);
1030 	fc->seq = cpu_to_le32(seq);
1031 	fc->issue_seq = cpu_to_le32(issue_seq);
1032 	fc->migrate_seq = cpu_to_le32(mseq);
1033 	fc->caps = cpu_to_le32(caps);
1034 	fc->wanted = cpu_to_le32(wanted);
1035 	fc->dirty = cpu_to_le32(dirty);
1036 	fc->ino = cpu_to_le64(ino);
1037 	fc->snap_follows = cpu_to_le64(follows);
1038 
1039 	fc->size = cpu_to_le64(size);
1040 	fc->max_size = cpu_to_le64(max_size);
1041 	if (mtime)
1042 		ceph_encode_timespec(&fc->mtime, mtime);
1043 	if (atime)
1044 		ceph_encode_timespec(&fc->atime, atime);
1045 	fc->time_warp_seq = cpu_to_le32(time_warp_seq);
1046 
1047 	fc->uid = cpu_to_le32(from_kuid(&init_user_ns, uid));
1048 	fc->gid = cpu_to_le32(from_kgid(&init_user_ns, gid));
1049 	fc->mode = cpu_to_le32(mode);
1050 
1051 	p = fc + 1;
1052 	/* flock buffer size */
1053 	ceph_encode_32(&p, 0);
1054 	/* inline version */
1055 	ceph_encode_64(&p, inline_data ? 0 : CEPH_INLINE_NONE);
1056 	/* inline data size */
1057 	ceph_encode_32(&p, 0);
1058 	/* osd_epoch_barrier */
1059 	ceph_encode_32(&p, 0);
1060 	/* oldest_flush_tid */
1061 	ceph_encode_64(&p, oldest_flush_tid);
1062 
1063 	fc->xattr_version = cpu_to_le64(xattr_version);
1064 	if (xattrs_buf) {
1065 		msg->middle = ceph_buffer_get(xattrs_buf);
1066 		fc->xattr_len = cpu_to_le32(xattrs_buf->vec.iov_len);
1067 		msg->hdr.middle_len = cpu_to_le32(xattrs_buf->vec.iov_len);
1068 	}
1069 
1070 	ceph_con_send(&session->s_con, msg);
1071 	return 0;
1072 }
1073 
1074 /*
1075  * Queue cap releases when an inode is dropped from our cache.  Since
1076  * the inode is about to be destroyed, there is no need for i_ceph_lock.
1077  */
1078 void ceph_queue_caps_release(struct inode *inode)
1079 {
1080 	struct ceph_inode_info *ci = ceph_inode(inode);
1081 	struct rb_node *p;
1082 
1083 	p = rb_first(&ci->i_caps);
1084 	while (p) {
1085 		struct ceph_cap *cap = rb_entry(p, struct ceph_cap, ci_node);
1086 		p = rb_next(p);
1087 		__ceph_remove_cap(cap, true);
1088 	}
1089 }
1090 
1091 /*
1092  * Send a cap msg on the given inode.  Update our caps state, then
1093  * drop i_ceph_lock and send the message.
1094  *
1095  * Make note of the max_size reported/requested from the mds, and of
1096  * revoked caps that have now been implemented.
1097  *
1098  * Make a half-hearted attempt to invalidate the page cache if we are
1099  * dropping RDCACHE.  Note that this will leave behind locked pages
1100  * that we'll then need to deal with elsewhere.
1101  *
1102  * Return non-zero if delayed release, or we experienced an error
1103  * such that the caller should requeue + retry later.
1104  *
1105  * called with i_ceph_lock, then drops it.
1106  * caller should hold snap_rwsem (read), s_mutex.
1107  */
1108 static int __send_cap(struct ceph_mds_client *mdsc, struct ceph_cap *cap,
1109 		      int op, int used, int want, int retain, int flushing,
1110 		      u64 flush_tid, u64 oldest_flush_tid)
1111 	__releases(cap->ci->i_ceph_lock)
1112 {
1113 	struct ceph_inode_info *ci = cap->ci;
1114 	struct inode *inode = &ci->vfs_inode;
1115 	u64 cap_id = cap->cap_id;
1116 	int held, revoking, dropping, keep;
1117 	u64 seq, issue_seq, mseq, time_warp_seq, follows;
1118 	u64 size, max_size;
1119 	struct timespec mtime, atime;
1120 	int wake = 0;
1121 	umode_t mode;
1122 	kuid_t uid;
1123 	kgid_t gid;
1124 	struct ceph_mds_session *session;
1125 	u64 xattr_version = 0;
1126 	struct ceph_buffer *xattr_blob = NULL;
1127 	int delayed = 0;
1128 	int ret;
1129 	bool inline_data;
1130 
1131 	held = cap->issued | cap->implemented;
1132 	revoking = cap->implemented & ~cap->issued;
1133 	retain &= ~revoking;
1134 	dropping = cap->issued & ~retain;
1135 
1136 	dout("__send_cap %p cap %p session %p %s -> %s (revoking %s)\n",
1137 	     inode, cap, cap->session,
1138 	     ceph_cap_string(held), ceph_cap_string(held & retain),
1139 	     ceph_cap_string(revoking));
1140 	BUG_ON((retain & CEPH_CAP_PIN) == 0);
1141 
1142 	session = cap->session;
1143 
1144 	/* don't release wanted unless we've waited a bit. */
1145 	if ((ci->i_ceph_flags & CEPH_I_NODELAY) == 0 &&
1146 	    time_before(jiffies, ci->i_hold_caps_min)) {
1147 		dout(" delaying issued %s -> %s, wanted %s -> %s on send\n",
1148 		     ceph_cap_string(cap->issued),
1149 		     ceph_cap_string(cap->issued & retain),
1150 		     ceph_cap_string(cap->mds_wanted),
1151 		     ceph_cap_string(want));
1152 		want |= cap->mds_wanted;
1153 		retain |= cap->issued;
1154 		delayed = 1;
1155 	}
1156 	ci->i_ceph_flags &= ~(CEPH_I_NODELAY | CEPH_I_FLUSH);
1157 
1158 	cap->issued &= retain;  /* drop bits we don't want */
1159 	if (cap->implemented & ~cap->issued) {
1160 		/*
1161 		 * Wake up any waiters on wanted -> needed transition.
1162 		 * This is due to the weird transition from buffered
1163 		 * to sync IO... we need to flush dirty pages _before_
1164 		 * allowing sync writes to avoid reordering.
1165 		 */
1166 		wake = 1;
1167 	}
1168 	cap->implemented &= cap->issued | used;
1169 	cap->mds_wanted = want;
1170 
1171 	follows = flushing ? ci->i_head_snapc->seq : 0;
1172 
1173 	keep = cap->implemented;
1174 	seq = cap->seq;
1175 	issue_seq = cap->issue_seq;
1176 	mseq = cap->mseq;
1177 	size = inode->i_size;
1178 	ci->i_reported_size = size;
1179 	max_size = ci->i_wanted_max_size;
1180 	ci->i_requested_max_size = max_size;
1181 	mtime = inode->i_mtime;
1182 	atime = inode->i_atime;
1183 	time_warp_seq = ci->i_time_warp_seq;
1184 	uid = inode->i_uid;
1185 	gid = inode->i_gid;
1186 	mode = inode->i_mode;
1187 
1188 	if (flushing & CEPH_CAP_XATTR_EXCL) {
1189 		__ceph_build_xattrs_blob(ci);
1190 		xattr_blob = ci->i_xattrs.blob;
1191 		xattr_version = ci->i_xattrs.version;
1192 	}
1193 
1194 	inline_data = ci->i_inline_version != CEPH_INLINE_NONE;
1195 
1196 	spin_unlock(&ci->i_ceph_lock);
1197 
1198 	ret = send_cap_msg(session, ceph_vino(inode).ino, cap_id,
1199 		op, keep, want, flushing, seq,
1200 		flush_tid, oldest_flush_tid, issue_seq, mseq,
1201 		size, max_size, &mtime, &atime, time_warp_seq,
1202 		uid, gid, mode, xattr_version, xattr_blob,
1203 		follows, inline_data);
1204 	if (ret < 0) {
1205 		dout("error sending cap msg, must requeue %p\n", inode);
1206 		delayed = 1;
1207 	}
1208 
1209 	if (wake)
1210 		wake_up_all(&ci->i_cap_wq);
1211 
1212 	return delayed;
1213 }
1214 
1215 /*
1216  * When a snapshot is taken, clients accumulate dirty metadata on
1217  * inodes with capabilities in ceph_cap_snaps to describe the file
1218  * state at the time the snapshot was taken.  This must be flushed
1219  * asynchronously back to the MDS once sync writes complete and dirty
1220  * data is written out.
1221  *
1222  * Unless @kick is true, skip cap_snaps that were already sent to
1223  * the MDS (i.e., during this session).
1224  *
1225  * Called under i_ceph_lock.  Takes s_mutex as needed.
1226  */
1227 void __ceph_flush_snaps(struct ceph_inode_info *ci,
1228 			struct ceph_mds_session **psession,
1229 			int kick)
1230 		__releases(ci->i_ceph_lock)
1231 		__acquires(ci->i_ceph_lock)
1232 {
1233 	struct inode *inode = &ci->vfs_inode;
1234 	int mds;
1235 	struct ceph_cap_snap *capsnap;
1236 	u32 mseq;
1237 	struct ceph_mds_client *mdsc = ceph_inode_to_client(inode)->mdsc;
1238 	struct ceph_mds_session *session = NULL; /* if session != NULL, we hold
1239 						    session->s_mutex */
1240 	u64 next_follows = 0;  /* keep track of how far we've gotten through the
1241 			     i_cap_snaps list, and skip these entries next time
1242 			     around to avoid an infinite loop */
1243 
1244 	if (psession)
1245 		session = *psession;
1246 
1247 	dout("__flush_snaps %p\n", inode);
1248 retry:
1249 	list_for_each_entry(capsnap, &ci->i_cap_snaps, ci_item) {
1250 		/* avoid an infinite loop after retry */
1251 		if (capsnap->follows < next_follows)
1252 			continue;
1253 		/*
1254 		 * we need to wait for sync writes to complete and for dirty
1255 		 * pages to be written out.
1256 		 */
1257 		if (capsnap->dirty_pages || capsnap->writing)
1258 			break;
1259 
1260 		/* should be removed by ceph_try_drop_cap_snap() */
1261 		BUG_ON(!capsnap->need_flush);
1262 
1263 		/* pick mds, take s_mutex */
1264 		if (ci->i_auth_cap == NULL) {
1265 			dout("no auth cap (migrating?), doing nothing\n");
1266 			goto out;
1267 		}
1268 
1269 		/* only flush each capsnap once */
1270 		if (!kick && !list_empty(&capsnap->flushing_item)) {
1271 			dout("already flushed %p, skipping\n", capsnap);
1272 			continue;
1273 		}
1274 
1275 		mds = ci->i_auth_cap->session->s_mds;
1276 		mseq = ci->i_auth_cap->mseq;
1277 
1278 		if (session && session->s_mds != mds) {
1279 			dout("oops, wrong session %p mutex\n", session);
1280 			if (kick)
1281 				goto out;
1282 
1283 			mutex_unlock(&session->s_mutex);
1284 			ceph_put_mds_session(session);
1285 			session = NULL;
1286 		}
1287 		if (!session) {
1288 			spin_unlock(&ci->i_ceph_lock);
1289 			mutex_lock(&mdsc->mutex);
1290 			session = __ceph_lookup_mds_session(mdsc, mds);
1291 			mutex_unlock(&mdsc->mutex);
1292 			if (session) {
1293 				dout("inverting session/ino locks on %p\n",
1294 				     session);
1295 				mutex_lock(&session->s_mutex);
1296 			}
1297 			/*
1298 			 * if session == NULL, we raced against a cap
1299 			 * deletion or migration.  retry, and we'll
1300 			 * get a better @mds value next time.
1301 			 */
1302 			spin_lock(&ci->i_ceph_lock);
1303 			goto retry;
1304 		}
1305 
1306 		spin_lock(&mdsc->cap_dirty_lock);
1307 		capsnap->flush_tid = ++mdsc->last_cap_flush_tid;
1308 		spin_unlock(&mdsc->cap_dirty_lock);
1309 
1310 		atomic_inc(&capsnap->nref);
1311 		if (list_empty(&capsnap->flushing_item))
1312 			list_add_tail(&capsnap->flushing_item,
1313 				      &session->s_cap_snaps_flushing);
1314 		spin_unlock(&ci->i_ceph_lock);
1315 
1316 		dout("flush_snaps %p cap_snap %p follows %lld tid %llu\n",
1317 		     inode, capsnap, capsnap->follows, capsnap->flush_tid);
1318 		send_cap_msg(session, ceph_vino(inode).ino, 0,
1319 			     CEPH_CAP_OP_FLUSHSNAP, capsnap->issued, 0,
1320 			     capsnap->dirty, 0, capsnap->flush_tid, 0,
1321 			     0, mseq, capsnap->size, 0,
1322 			     &capsnap->mtime, &capsnap->atime,
1323 			     capsnap->time_warp_seq,
1324 			     capsnap->uid, capsnap->gid, capsnap->mode,
1325 			     capsnap->xattr_version, capsnap->xattr_blob,
1326 			     capsnap->follows, capsnap->inline_data);
1327 
1328 		next_follows = capsnap->follows + 1;
1329 		ceph_put_cap_snap(capsnap);
1330 
1331 		spin_lock(&ci->i_ceph_lock);
1332 		goto retry;
1333 	}
1334 
1335 	/* we flushed them all; remove this inode from the queue */
1336 	spin_lock(&mdsc->snap_flush_lock);
1337 	list_del_init(&ci->i_snap_flush_item);
1338 	spin_unlock(&mdsc->snap_flush_lock);
1339 
1340 out:
1341 	if (psession)
1342 		*psession = session;
1343 	else if (session) {
1344 		mutex_unlock(&session->s_mutex);
1345 		ceph_put_mds_session(session);
1346 	}
1347 }
1348 
1349 static void ceph_flush_snaps(struct ceph_inode_info *ci)
1350 {
1351 	spin_lock(&ci->i_ceph_lock);
1352 	__ceph_flush_snaps(ci, NULL, 0);
1353 	spin_unlock(&ci->i_ceph_lock);
1354 }
1355 
1356 /*
1357  * Mark caps dirty.  If inode is newly dirty, return the dirty flags.
1358  * Caller is then responsible for calling __mark_inode_dirty with the
1359  * returned flags value.
1360  */
1361 int __ceph_mark_dirty_caps(struct ceph_inode_info *ci, int mask,
1362 			   struct ceph_cap_flush **pcf)
1363 {
1364 	struct ceph_mds_client *mdsc =
1365 		ceph_sb_to_client(ci->vfs_inode.i_sb)->mdsc;
1366 	struct inode *inode = &ci->vfs_inode;
1367 	int was = ci->i_dirty_caps;
1368 	int dirty = 0;
1369 
1370 	if (!ci->i_auth_cap) {
1371 		pr_warn("__mark_dirty_caps %p %llx mask %s, "
1372 			"but no auth cap (session was closed?)\n",
1373 			inode, ceph_ino(inode), ceph_cap_string(mask));
1374 		return 0;
1375 	}
1376 
1377 	dout("__mark_dirty_caps %p %s dirty %s -> %s\n", &ci->vfs_inode,
1378 	     ceph_cap_string(mask), ceph_cap_string(was),
1379 	     ceph_cap_string(was | mask));
1380 	ci->i_dirty_caps |= mask;
1381 	if (was == 0) {
1382 		WARN_ON_ONCE(ci->i_prealloc_cap_flush);
1383 		swap(ci->i_prealloc_cap_flush, *pcf);
1384 
1385 		if (!ci->i_head_snapc) {
1386 			WARN_ON_ONCE(!rwsem_is_locked(&mdsc->snap_rwsem));
1387 			ci->i_head_snapc = ceph_get_snap_context(
1388 				ci->i_snap_realm->cached_context);
1389 		}
1390 		dout(" inode %p now dirty snapc %p auth cap %p\n",
1391 		     &ci->vfs_inode, ci->i_head_snapc, ci->i_auth_cap);
1392 		BUG_ON(!list_empty(&ci->i_dirty_item));
1393 		spin_lock(&mdsc->cap_dirty_lock);
1394 		list_add(&ci->i_dirty_item, &mdsc->cap_dirty);
1395 		spin_unlock(&mdsc->cap_dirty_lock);
1396 		if (ci->i_flushing_caps == 0) {
1397 			ihold(inode);
1398 			dirty |= I_DIRTY_SYNC;
1399 		}
1400 	} else {
1401 		WARN_ON_ONCE(!ci->i_prealloc_cap_flush);
1402 	}
1403 	BUG_ON(list_empty(&ci->i_dirty_item));
1404 	if (((was | ci->i_flushing_caps) & CEPH_CAP_FILE_BUFFER) &&
1405 	    (mask & CEPH_CAP_FILE_BUFFER))
1406 		dirty |= I_DIRTY_DATASYNC;
1407 	__cap_delay_requeue(mdsc, ci);
1408 	return dirty;
1409 }
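
/*
 * A typical caller pattern (illustrative sketch; prealloc_cf comes
 * from ceph_alloc_cap_flush()):
 *
 *	spin_lock(&ci->i_ceph_lock);
 *	dirty = __ceph_mark_dirty_caps(ci, CEPH_CAP_FILE_WR, &prealloc_cf);
 *	spin_unlock(&ci->i_ceph_lock);
 *	if (dirty)
 *		__mark_inode_dirty(inode, dirty);
 */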
1410 
1411 static void __add_cap_flushing_to_inode(struct ceph_inode_info *ci,
1412 					struct ceph_cap_flush *cf)
1413 {
1414 	struct rb_node **p = &ci->i_cap_flush_tree.rb_node;
1415 	struct rb_node *parent = NULL;
1416 	struct ceph_cap_flush *other = NULL;
1417 
1418 	while (*p) {
1419 		parent = *p;
1420 		other = rb_entry(parent, struct ceph_cap_flush, i_node);
1421 
1422 		if (cf->tid < other->tid)
1423 			p = &(*p)->rb_left;
1424 		else if (cf->tid > other->tid)
1425 			p = &(*p)->rb_right;
1426 		else
1427 			BUG();
1428 	}
1429 
1430 	rb_link_node(&cf->i_node, parent, p);
1431 	rb_insert_color(&cf->i_node, &ci->i_cap_flush_tree);
1432 }
1433 
1434 static void __add_cap_flushing_to_mdsc(struct ceph_mds_client *mdsc,
1435 				       struct ceph_cap_flush *cf)
1436 {
1437 	struct rb_node **p = &mdsc->cap_flush_tree.rb_node;
1438 	struct rb_node *parent = NULL;
1439 	struct ceph_cap_flush *other = NULL;
1440 
1441 	while (*p) {
1442 		parent = *p;
1443 		other = rb_entry(parent, struct ceph_cap_flush, g_node);
1444 
1445 		if (cf->tid < other->tid)
1446 			p = &(*p)->rb_left;
1447 		else if (cf->tid > other->tid)
1448 			p = &(*p)->rb_right;
1449 		else
1450 			BUG();
1451 	}
1452 
1453 	rb_link_node(&cf->g_node, parent, p);
1454 	rb_insert_color(&cf->g_node, &mdsc->cap_flush_tree);
1455 }
1456 
1457 struct ceph_cap_flush *ceph_alloc_cap_flush(void)
1458 {
1459 	return kmem_cache_alloc(ceph_cap_flush_cachep, GFP_KERNEL);
1460 }
1461 
1462 void ceph_free_cap_flush(struct ceph_cap_flush *cf)
1463 {
1464 	if (cf)
1465 		kmem_cache_free(ceph_cap_flush_cachep, cf);
1466 }
1467 
1468 static u64 __get_oldest_flush_tid(struct ceph_mds_client *mdsc)
1469 {
1470 	struct rb_node *n = rb_first(&mdsc->cap_flush_tree);
1471 	if (n) {
1472 		struct ceph_cap_flush *cf =
1473 			rb_entry(n, struct ceph_cap_flush, g_node);
1474 		return cf->tid;
1475 	}
1476 	return 0;
1477 }
1478 
1479 /*
1480  * Add dirty inode to the flushing list.  Assign a seq number so we
1481  * can wait for caps to flush without starving.
1482  *
1483  * Called under i_ceph_lock.
1484  */
1485 static int __mark_caps_flushing(struct inode *inode,
1486 				struct ceph_mds_session *session,
1487 				u64 *flush_tid, u64 *oldest_flush_tid)
1488 {
1489 	struct ceph_mds_client *mdsc = ceph_sb_to_client(inode->i_sb)->mdsc;
1490 	struct ceph_inode_info *ci = ceph_inode(inode);
1491 	struct ceph_cap_flush *cf = NULL;
1492 	int flushing;
1493 
1494 	BUG_ON(ci->i_dirty_caps == 0);
1495 	BUG_ON(list_empty(&ci->i_dirty_item));
1496 	BUG_ON(!ci->i_prealloc_cap_flush);
1497 
1498 	flushing = ci->i_dirty_caps;
1499 	dout("__mark_caps_flushing flushing %s, flushing_caps %s -> %s\n",
1500 	     ceph_cap_string(flushing),
1501 	     ceph_cap_string(ci->i_flushing_caps),
1502 	     ceph_cap_string(ci->i_flushing_caps | flushing));
1503 	ci->i_flushing_caps |= flushing;
1504 	ci->i_dirty_caps = 0;
1505 	dout(" inode %p now !dirty\n", inode);
1506 
1507 	swap(cf, ci->i_prealloc_cap_flush);
1508 	cf->caps = flushing;
1509 
1510 	spin_lock(&mdsc->cap_dirty_lock);
1511 	list_del_init(&ci->i_dirty_item);
1512 
1513 	cf->tid = ++mdsc->last_cap_flush_tid;
1514 	__add_cap_flushing_to_mdsc(mdsc, cf);
1515 	*oldest_flush_tid = __get_oldest_flush_tid(mdsc);
1516 
1517 	if (list_empty(&ci->i_flushing_item)) {
1518 		list_add_tail(&ci->i_flushing_item, &session->s_cap_flushing);
1519 		mdsc->num_cap_flushing++;
1520 		dout(" inode %p now flushing tid %llu\n", inode, cf->tid);
1521 	} else {
1522 		list_move_tail(&ci->i_flushing_item, &session->s_cap_flushing);
1523 		dout(" inode %p now flushing (more) tid %llu\n",
1524 		     inode, cf->tid);
1525 	}
1526 	spin_unlock(&mdsc->cap_dirty_lock);
1527 
1528 	__add_cap_flushing_to_inode(ci, cf);
1529 
1530 	*flush_tid = cf->tid;
1531 	return flushing;
1532 }
1533 
1534 /*
1535  * try to invalidate mapping pages without blocking.
1536  */
1537 static int try_nonblocking_invalidate(struct inode *inode)
1538 {
1539 	struct ceph_inode_info *ci = ceph_inode(inode);
1540 	u32 invalidating_gen = ci->i_rdcache_gen;
1541 
1542 	spin_unlock(&ci->i_ceph_lock);
1543 	invalidate_mapping_pages(&inode->i_data, 0, -1);
1544 	spin_lock(&ci->i_ceph_lock);
1545 
1546 	if (inode->i_data.nrpages == 0 &&
1547 	    invalidating_gen == ci->i_rdcache_gen) {
1548 		/* success. */
1549 		dout("try_nonblocking_invalidate %p success\n", inode);
1550 		/* save any racing async invalidate some trouble */
1551 		ci->i_rdcache_revoking = ci->i_rdcache_gen - 1;
1552 		return 0;
1553 	}
1554 	dout("try_nonblocking_invalidate %p failed\n", inode);
1555 	return -1;
1556 }
1557 
1558 /*
1559  * Swiss army knife function to examine currently used and wanted
1560  * versus held caps.  Release, flush, ack revoked caps to mds as
1561  * appropriate.
1562  *
1563  *  CHECK_CAPS_NODELAY - caller is delayed work and we should not delay
1564  *    cap release further.
1565  *  CHECK_CAPS_AUTHONLY - we should only check the auth cap
1566  *  CHECK_CAPS_FLUSH - we should flush any dirty caps immediately, without
1567  *    further delay.
1568  */
1569 void ceph_check_caps(struct ceph_inode_info *ci, int flags,
1570 		     struct ceph_mds_session *session)
1571 {
1572 	struct ceph_fs_client *fsc = ceph_inode_to_client(&ci->vfs_inode);
1573 	struct ceph_mds_client *mdsc = fsc->mdsc;
1574 	struct inode *inode = &ci->vfs_inode;
1575 	struct ceph_cap *cap;
1576 	u64 flush_tid, oldest_flush_tid;
1577 	int file_wanted, used, cap_used;
1578 	int took_snap_rwsem = 0;             /* true if mdsc->snap_rwsem held */
1579 	int issued, implemented, want, retain, revoking, flushing = 0;
1580 	int mds = -1;   /* keep track of how far we've gone through i_caps list
1581 			   to avoid an infinite loop on retry */
1582 	struct rb_node *p;
1583 	int tried_invalidate = 0;
1584 	int delayed = 0, sent = 0, force_requeue = 0, num;
1585 	int queue_invalidate = 0;
1586 	int is_delayed = flags & CHECK_CAPS_NODELAY;
1587 
1588 	/* if we are unmounting, flush any unused caps immediately. */
1589 	if (mdsc->stopping)
1590 		is_delayed = 1;
1591 
1592 	spin_lock(&ci->i_ceph_lock);
1593 
1594 	if (ci->i_ceph_flags & CEPH_I_FLUSH)
1595 		flags |= CHECK_CAPS_FLUSH;
1596 
1597 	/* flush snaps first time around only */
1598 	if (!list_empty(&ci->i_cap_snaps))
1599 		__ceph_flush_snaps(ci, &session, 0);
1600 	goto retry_locked;
1601 retry:
1602 	spin_lock(&ci->i_ceph_lock);
1603 retry_locked:
1604 	file_wanted = __ceph_caps_file_wanted(ci);
1605 	used = __ceph_caps_used(ci);
1606 	issued = __ceph_caps_issued(ci, &implemented);
1607 	revoking = implemented & ~issued;
1608 
1609 	want = file_wanted;
1610 	retain = file_wanted | used | CEPH_CAP_PIN;
1611 	if (!mdsc->stopping && inode->i_nlink > 0) {
1612 		if (file_wanted) {
1613 			retain |= CEPH_CAP_ANY;       /* be greedy */
1614 		} else if (S_ISDIR(inode->i_mode) &&
1615 			   (issued & CEPH_CAP_FILE_SHARED) &&
1616 			    __ceph_dir_is_complete(ci)) {
1617 			/*
1618 			 * If a directory is complete, we want to keep
1619 			 * the exclusive cap. So that MDS does not end up
1620 			 * revoking the shared cap on every create/unlink
1621 			 * operation.
1622 			 */
1623 			want = CEPH_CAP_ANY_SHARED | CEPH_CAP_FILE_EXCL;
1624 			retain |= want;
1625 		} else {
1626 
1627 			retain |= CEPH_CAP_ANY_SHARED;
1628 			/*
1629 			 * keep RD only if we didn't have the file open RW,
1630 			 * because then the mds would revoke it anyway to
1631 			 * journal max_size=0.
1632 			 */
1633 			if (ci->i_max_size == 0)
1634 				retain |= CEPH_CAP_ANY_RD;
1635 		}
1636 	}
1637 
1638 	dout("check_caps %p file_want %s used %s dirty %s flushing %s"
1639 	     " issued %s revoking %s retain %s %s%s%s\n", inode,
1640 	     ceph_cap_string(file_wanted),
1641 	     ceph_cap_string(used), ceph_cap_string(ci->i_dirty_caps),
1642 	     ceph_cap_string(ci->i_flushing_caps),
1643 	     ceph_cap_string(issued), ceph_cap_string(revoking),
1644 	     ceph_cap_string(retain),
1645 	     (flags & CHECK_CAPS_AUTHONLY) ? " AUTHONLY" : "",
1646 	     (flags & CHECK_CAPS_NODELAY) ? " NODELAY" : "",
1647 	     (flags & CHECK_CAPS_FLUSH) ? " FLUSH" : "");
1648 
1649 	/*
1650 	 * If we no longer need to hold onto our old caps, and we may
1651 	 * have cached pages, but don't want them, then try to invalidate.
1652 	 * If we fail, it's because pages are locked.... try again later.
1653 	 */
1654 	if ((!is_delayed || mdsc->stopping) &&
1655 	    !S_ISDIR(inode->i_mode) &&		/* ignore readdir cache */
1656 	    ci->i_wrbuffer_ref == 0 &&		/* no dirty pages... */
1657 	    inode->i_data.nrpages &&		/* have cached pages */
1658 	    (revoking & (CEPH_CAP_FILE_CACHE|
1659 			 CEPH_CAP_FILE_LAZYIO)) && /*  or revoking cache */
1660 	    !tried_invalidate) {
1661 		dout("check_caps trying to invalidate on %p\n", inode);
1662 		if (try_nonblocking_invalidate(inode) < 0) {
1663 			if (revoking & (CEPH_CAP_FILE_CACHE|
1664 					CEPH_CAP_FILE_LAZYIO)) {
1665 				dout("check_caps queuing invalidate\n");
1666 				queue_invalidate = 1;
1667 				ci->i_rdcache_revoking = ci->i_rdcache_gen;
1668 			} else {
1669 				dout("check_caps failed to invalidate pages\n");
1670 				/* we failed to invalidate pages.  check these
1671 				   caps again later. */
1672 				force_requeue = 1;
1673 				__cap_set_timeouts(mdsc, ci);
1674 			}
1675 		}
1676 		tried_invalidate = 1;
1677 		goto retry_locked;
1678 	}
1679 
1680 	num = 0;
1681 	for (p = rb_first(&ci->i_caps); p; p = rb_next(p)) {
1682 		cap = rb_entry(p, struct ceph_cap, ci_node);
1683 		num++;
1684 
1685 		/* avoid looping forever */
1686 		if (mds >= cap->mds ||
1687 		    ((flags & CHECK_CAPS_AUTHONLY) && cap != ci->i_auth_cap))
1688 			continue;
1689 
1690 		/* NOTE: no side-effects allowed, until we take s_mutex */
1691 
1692 		cap_used = used;
1693 		if (ci->i_auth_cap && cap != ci->i_auth_cap)
1694 			cap_used &= ~ci->i_auth_cap->issued;
1695 
1696 		revoking = cap->implemented & ~cap->issued;
1697 		dout(" mds%d cap %p used %s issued %s implemented %s revoking %s\n",
1698 		     cap->mds, cap, ceph_cap_string(cap->issued),
1699 		     ceph_cap_string(cap_used),
1700 		     ceph_cap_string(cap->implemented),
1701 		     ceph_cap_string(revoking));
1702 
1703 		if (cap == ci->i_auth_cap &&
1704 		    (cap->issued & CEPH_CAP_FILE_WR)) {
1705 			/* request larger max_size from MDS? */
1706 			if (ci->i_wanted_max_size > ci->i_max_size &&
1707 			    ci->i_wanted_max_size > ci->i_requested_max_size) {
1708 				dout("requesting new max_size\n");
1709 				goto ack;
1710 			}
1711 
1712 			/* approaching file_max? */
1713 			if ((inode->i_size << 1) >= ci->i_max_size &&
1714 			    (ci->i_reported_size << 1) < ci->i_max_size) {
1715 				dout("i_size approaching max_size\n");
1716 				goto ack;
1717 			}
1718 		}
1719 		/* flush anything dirty? */
1720 		if (cap == ci->i_auth_cap && (flags & CHECK_CAPS_FLUSH) &&
1721 		    ci->i_dirty_caps) {
1722 			dout("flushing dirty caps\n");
1723 			goto ack;
1724 		}
1725 
1726 		/* completed revocation? going down and there are no caps? */
1727 		if (revoking && (revoking & cap_used) == 0) {
1728 			dout("completed revocation of %s\n",
1729 			     ceph_cap_string(cap->implemented & ~cap->issued));
1730 			goto ack;
1731 		}
1732 
1733 		/* want more caps from mds? */
1734 		if (want & ~(cap->mds_wanted | cap->issued))
1735 			goto ack;
1736 
1737 		/* things we might delay */
1738 		if ((cap->issued & ~retain) == 0 &&
1739 		    cap->mds_wanted == want)
1740 			continue;     /* nope, all good */
1741 
1742 		if (is_delayed)
1743 			goto ack;
1744 
1745 		/* delay? */
1746 		if ((ci->i_ceph_flags & CEPH_I_NODELAY) == 0 &&
1747 		    time_before(jiffies, ci->i_hold_caps_max)) {
1748 			dout(" delaying issued %s -> %s, wanted %s -> %s\n",
1749 			     ceph_cap_string(cap->issued),
1750 			     ceph_cap_string(cap->issued & retain),
1751 			     ceph_cap_string(cap->mds_wanted),
1752 			     ceph_cap_string(want));
1753 			delayed++;
1754 			continue;
1755 		}
1756 
1757 ack:
1758 		if (ci->i_ceph_flags & CEPH_I_NOFLUSH) {
1759 			dout(" skipping %p I_NOFLUSH set\n", inode);
1760 			continue;
1761 		}
1762 
1763 		if (session && session != cap->session) {
1764 			dout("oops, wrong session %p mutex\n", session);
1765 			mutex_unlock(&session->s_mutex);
1766 			session = NULL;
1767 		}
1768 		if (!session) {
1769 			session = cap->session;
1770 			if (mutex_trylock(&session->s_mutex) == 0) {
1771 				dout("inverting session/ino locks on %p\n",
1772 				     session);
1773 				spin_unlock(&ci->i_ceph_lock);
1774 				if (took_snap_rwsem) {
1775 					up_read(&mdsc->snap_rwsem);
1776 					took_snap_rwsem = 0;
1777 				}
1778 				mutex_lock(&session->s_mutex);
1779 				goto retry;
1780 			}
1781 		}
1782 		/* take snap_rwsem after session mutex */
1783 		if (!took_snap_rwsem) {
1784 			if (down_read_trylock(&mdsc->snap_rwsem) == 0) {
1785 				dout("inverting snap/ino locks on %p\n",
1786 				     inode);
1787 				spin_unlock(&ci->i_ceph_lock);
1788 				down_read(&mdsc->snap_rwsem);
1789 				took_snap_rwsem = 1;
1790 				goto retry;
1791 			}
1792 			took_snap_rwsem = 1;
1793 		}
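		/*
		 * Lock ordering is s_mutex, then snap_rwsem, then
		 * i_ceph_lock.  Because we already hold i_ceph_lock
		 * here, both locks above are only trylocked; on
		 * failure we drop i_ceph_lock, take the missing lock
		 * blocking, and retry the whole cap scan.
		 */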
1794 
1795 		if (cap == ci->i_auth_cap && ci->i_dirty_caps) {
1796 			flushing = __mark_caps_flushing(inode, session,
1797 							&flush_tid,
1798 							&oldest_flush_tid);
1799 		} else {
1800 			flushing = 0;
1801 			flush_tid = 0;
1802 			spin_lock(&mdsc->cap_dirty_lock);
1803 			oldest_flush_tid = __get_oldest_flush_tid(mdsc);
1804 			spin_unlock(&mdsc->cap_dirty_lock);
1805 		}
1806 
1807 		mds = cap->mds;  /* remember mds, so we don't repeat */
1808 		sent++;
1809 
1810 		/* __send_cap drops i_ceph_lock */
1811 		delayed += __send_cap(mdsc, cap, CEPH_CAP_OP_UPDATE, cap_used,
1812 				      want, retain, flushing,
1813 				      flush_tid, oldest_flush_tid);
1814 		goto retry; /* retake i_ceph_lock and restart our cap scan. */
1815 	}
1816 
1817 	/*
1818 	 * Reschedule delayed caps release if we delayed anything,
1819 	 * otherwise cancel.
1820 	 */
1821 	if (delayed && is_delayed)
1822 		force_requeue = 1;   /* __send_cap delayed release; requeue */
1823 	if (!delayed && !is_delayed)
1824 		__cap_delay_cancel(mdsc, ci);
1825 	else if (!is_delayed || force_requeue)
1826 		__cap_delay_requeue(mdsc, ci);
1827 
1828 	spin_unlock(&ci->i_ceph_lock);
1829 
1830 	if (queue_invalidate)
1831 		ceph_queue_invalidate(inode);
1832 
1833 	if (session)
1834 		mutex_unlock(&session->s_mutex);
1835 	if (took_snap_rwsem)
1836 		up_read(&mdsc->snap_rwsem);
1837 }
1838 
1839 /*
1840  * Try to flush dirty caps back to the auth mds.
1841  */
1842 static int try_flush_caps(struct inode *inode, u64 *ptid)
1843 {
1844 	struct ceph_mds_client *mdsc = ceph_sb_to_client(inode->i_sb)->mdsc;
1845 	struct ceph_inode_info *ci = ceph_inode(inode);
1846 	struct ceph_mds_session *session = NULL;
1847 	int flushing = 0;
1848 	u64 flush_tid = 0, oldest_flush_tid = 0;
1849 
1850 retry:
1851 	spin_lock(&ci->i_ceph_lock);
1852 	if (ci->i_ceph_flags & CEPH_I_NOFLUSH) {
1853 		dout("try_flush_caps skipping %p I_NOFLUSH set\n", inode);
1854 		goto out;
1855 	}
1856 	if (ci->i_dirty_caps && ci->i_auth_cap) {
1857 		struct ceph_cap *cap = ci->i_auth_cap;
1858 		int used = __ceph_caps_used(ci);
1859 		int want = __ceph_caps_wanted(ci);
1860 		int delayed;
1861 
1862 		if (!session || session != cap->session) {
1863 			spin_unlock(&ci->i_ceph_lock);
1864 			if (session)
1865 				mutex_unlock(&session->s_mutex);
1866 			session = cap->session;
1867 			mutex_lock(&session->s_mutex);
1868 			goto retry;
1869 		}
1870 		if (cap->session->s_state < CEPH_MDS_SESSION_OPEN)
1871 			goto out;
1872 
1873 		flushing = __mark_caps_flushing(inode, session, &flush_tid,
1874 						&oldest_flush_tid);
1875 
1876 		/* __send_cap drops i_ceph_lock */
1877 		delayed = __send_cap(mdsc, cap, CEPH_CAP_OP_FLUSH, used, want,
1878 				     (cap->issued | cap->implemented),
1879 				     flushing, flush_tid, oldest_flush_tid);
1880 
1881 		if (delayed) {
1882 			spin_lock(&ci->i_ceph_lock);
1883 			__cap_delay_requeue(mdsc, ci);
1884 			spin_unlock(&ci->i_ceph_lock);
1885 		}
1886 	} else {
1887 		struct rb_node *n = rb_last(&ci->i_cap_flush_tree);
1888 		if (n) {
1889 			struct ceph_cap_flush *cf =
1890 				rb_entry(n, struct ceph_cap_flush, i_node);
1891 			flush_tid = cf->tid;
1892 		}
1893 		flushing = ci->i_flushing_caps;
1894 		spin_unlock(&ci->i_ceph_lock);
1895 	}
1896 out:
1897 	if (session)
1898 		mutex_unlock(&session->s_mutex);
1899 
1900 	*ptid = flush_tid;
1901 	return flushing;
1902 }
1903 
1904 /*
1905  * Return true if we've flushed caps through the given flush_tid.
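 *
 * That is, return true iff no entry in i_cap_flush_tree has a
 * tid <= flush_tid (the tree's first entry has the smallest tid).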
1906  */
1907 static int caps_are_flushed(struct inode *inode, u64 flush_tid)
1908 {
1909 	struct ceph_inode_info *ci = ceph_inode(inode);
1910 	struct ceph_cap_flush *cf;
1911 	struct rb_node *n;
1912 	int ret = 1;
1913 
1914 	spin_lock(&ci->i_ceph_lock);
1915 	n = rb_first(&ci->i_cap_flush_tree);
1916 	if (n) {
1917 		cf = rb_entry(n, struct ceph_cap_flush, i_node);
1918 		if (cf->tid <= flush_tid)
1919 			ret = 0;
1920 	}
1921 	spin_unlock(&ci->i_ceph_lock);
1922 	return ret;
1923 }
1924 
1925 /*
1926  * Wait on any unsafe replies for the given inode.  First wait on the
1927  * newest request, and make that the upper bound.  Then, if there are
1928  * more requests, keep waiting on the oldest as long as it is still older
1929  * than the original request.
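 *
 * For example (illustrative), with unsafe write tids [5, 9, 12]:
 * we first wait on 12 (the newest, our upper bound), then keep
 * waiting on whatever is oldest (5, then 9) until the head of the
 * list has a tid >= 12.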
1930  */
1931 static void sync_write_wait(struct inode *inode)
1932 {
1933 	struct ceph_inode_info *ci = ceph_inode(inode);
1934 	struct list_head *head = &ci->i_unsafe_writes;
1935 	struct ceph_osd_request *req;
1936 	u64 last_tid;
1937 
1938 	if (!S_ISREG(inode->i_mode))
1939 		return;
1940 
1941 	spin_lock(&ci->i_unsafe_lock);
1942 	if (list_empty(head))
1943 		goto out;
1944 
1945 	/* set upper bound as _last_ entry in chain */
1946 	req = list_last_entry(head, struct ceph_osd_request,
1947 			      r_unsafe_item);
1948 	last_tid = req->r_tid;
1949 
1950 	do {
1951 		ceph_osdc_get_request(req);
1952 		spin_unlock(&ci->i_unsafe_lock);
1953 		dout("sync_write_wait on tid %llu (until %llu)\n",
1954 		     req->r_tid, last_tid);
1955 		wait_for_completion(&req->r_safe_completion);
1956 		spin_lock(&ci->i_unsafe_lock);
1957 		ceph_osdc_put_request(req);
1958 
1959 		/*
1960 		 * from here on look at first entry in chain, since we
1961 		 * only want to wait for anything older than last_tid
1962 		 */
1963 		if (list_empty(head))
1964 			break;
1965 		req = list_first_entry(head, struct ceph_osd_request,
1966 				       r_unsafe_item);
1967 	} while (req->r_tid < last_tid);
1968 out:
1969 	spin_unlock(&ci->i_unsafe_lock);
1970 }
1971 
1972 /*
1973  * wait for any unsafe requests to complete.
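 *
 * We only wait on the newest unsafe dir op and the newest unsafe
 * inode op; a timeout (ceph_timeout_jiffies()) turns into -EIO.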
1974  */
1975 static int unsafe_request_wait(struct inode *inode)
1976 {
1977 	struct ceph_inode_info *ci = ceph_inode(inode);
1978 	struct ceph_mds_request *req1 = NULL, *req2 = NULL;
1979 	int ret, err = 0;
1980 
1981 	spin_lock(&ci->i_unsafe_lock);
1982 	if (S_ISDIR(inode->i_mode) && !list_empty(&ci->i_unsafe_dirops)) {
1983 		req1 = list_last_entry(&ci->i_unsafe_dirops,
1984 					struct ceph_mds_request,
1985 					r_unsafe_dir_item);
1986 		ceph_mdsc_get_request(req1);
1987 	}
1988 	if (!list_empty(&ci->i_unsafe_iops)) {
1989 		req2 = list_last_entry(&ci->i_unsafe_iops,
1990 					struct ceph_mds_request,
1991 					r_unsafe_target_item);
1992 		ceph_mdsc_get_request(req2);
1993 	}
1994 	spin_unlock(&ci->i_unsafe_lock);
1995 
1996 	dout("unsafe_request_wait %p wait on tid %llu %llu\n",
1997 	     inode, req1 ? req1->r_tid : 0ULL, req2 ? req2->r_tid : 0ULL);
1998 	if (req1) {
1999 		ret = !wait_for_completion_timeout(&req1->r_safe_completion,
2000 					ceph_timeout_jiffies(req1->r_timeout));
2001 		if (ret)
2002 			err = -EIO;
2003 		ceph_mdsc_put_request(req1);
2004 	}
2005 	if (req2) {
2006 		ret = !wait_for_completion_timeout(&req2->r_safe_completion,
2007 					ceph_timeout_jiffies(req2->r_timeout));
2008 		if (ret)
2009 			err = -EIO;
2010 		ceph_mdsc_put_request(req2);
2011 	}
2012 	return err;
2013 }
2014 
2015 int ceph_fsync(struct file *file, loff_t start, loff_t end, int datasync)
2016 {
2017 	struct inode *inode = file->f_mapping->host;
2018 	struct ceph_inode_info *ci = ceph_inode(inode);
2019 	u64 flush_tid;
2020 	int ret;
2021 	int dirty;
2022 
2023 	dout("fsync %p%s\n", inode, datasync ? " datasync" : "");
2024 	sync_write_wait(inode);
2025 
2026 	ret = filemap_write_and_wait_range(inode->i_mapping, start, end);
2027 	if (ret < 0)
2028 		goto out;
2029 
2030 	if (datasync)
2031 		goto out;
2032 
2033 	mutex_lock(&inode->i_mutex);
2034 
2035 	dirty = try_flush_caps(inode, &flush_tid);
2036 	dout("fsync dirty caps are %s\n", ceph_cap_string(dirty));
2037 
2038 	ret = unsafe_request_wait(inode);
2039 
2040 	/*
2041 	 * only wait on non-file metadata writeback (the mds
2042 	 * can recover size and mtime, so we don't need to
2043 	 * wait for that)
2044 	 */
2045 	if (!ret && (dirty & ~CEPH_CAP_ANY_FILE_WR)) {
2046 		ret = wait_event_interruptible(ci->i_cap_wq,
2047 					caps_are_flushed(inode, flush_tid));
2048 	}
2049 	mutex_unlock(&inode->i_mutex);
2050 out:
2051 	dout("fsync %p%s result=%d\n", inode, datasync ? " datasync" : "", ret);
2052 	return ret;
2053 }
2054 
2055 /*
2056  * Flush any dirty caps back to the mds.  If we aren't asked to wait,
2057  * queue inode for flush but don't do so immediately, because we can
2058  * get by with fewer MDS messages if we wait for data writeback to
2059  * complete first.
2060  */
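 *
 * Concretely: wbc->sync_mode == WB_SYNC_ALL flushes now and waits
 * for the flush to be acked; otherwise we just requeue the inode
 * at the front of the cap delay queue.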
2061 int ceph_write_inode(struct inode *inode, struct writeback_control *wbc)
2062 {
2063 	struct ceph_inode_info *ci = ceph_inode(inode);
2064 	u64 flush_tid;
2065 	int err = 0;
2066 	int dirty;
2067 	int wait = wbc->sync_mode == WB_SYNC_ALL;
2068 
2069 	dout("write_inode %p wait=%d\n", inode, wait);
2070 	if (wait) {
2071 		dirty = try_flush_caps(inode, &flush_tid);
2072 		if (dirty)
2073 			err = wait_event_interruptible(ci->i_cap_wq,
2074 				       caps_are_flushed(inode, flush_tid));
2075 	} else {
2076 		struct ceph_mds_client *mdsc =
2077 			ceph_sb_to_client(inode->i_sb)->mdsc;
2078 
2079 		spin_lock(&ci->i_ceph_lock);
2080 		if (__ceph_caps_dirty(ci))
2081 			__cap_delay_requeue_front(mdsc, ci);
2082 		spin_unlock(&ci->i_ceph_lock);
2083 	}
2084 	return err;
2085 }
2086 
2087 /*
2088  * After a recovering MDS goes active, we need to resend any caps
2089  * we were flushing.
2090  *
2091  * Caller holds session->s_mutex.
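 *
 * Flushes that were in flight when the MDS failed may never have
 * been committed, so we replay them once the recovering MDS goes
 * active (here for snap caps; see ceph_kick_flushing_caps() below
 * for regular caps).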
2092  */
2093 static void kick_flushing_capsnaps(struct ceph_mds_client *mdsc,
2094 				   struct ceph_mds_session *session)
2095 {
2096 	struct ceph_cap_snap *capsnap;
2097 
2098 	dout("kick_flushing_capsnaps mds%d\n", session->s_mds);
2099 	list_for_each_entry(capsnap, &session->s_cap_snaps_flushing,
2100 			    flushing_item) {
2101 		struct ceph_inode_info *ci = capsnap->ci;
2102 		struct inode *inode = &ci->vfs_inode;
2103 		struct ceph_cap *cap;
2104 
2105 		spin_lock(&ci->i_ceph_lock);
2106 		cap = ci->i_auth_cap;
2107 		if (cap && cap->session == session) {
2108 			dout("kick_flushing_capsnaps %p cap %p capsnap %p\n", inode,
2109 			     cap, capsnap);
2110 			__ceph_flush_snaps(ci, &session, 1);
2111 		} else {
2112 			pr_err("%p auth cap %p not mds%d ???\n", inode,
2113 			       cap, session->s_mds);
2114 		}
2115 		spin_unlock(&ci->i_ceph_lock);
2116 	}
2117 }
2118 
2119 static int __kick_flushing_caps(struct ceph_mds_client *mdsc,
2120 				struct ceph_mds_session *session,
2121 				struct ceph_inode_info *ci)
2122 {
2123 	struct inode *inode = &ci->vfs_inode;
2124 	struct ceph_cap *cap;
2125 	struct ceph_cap_flush *cf;
2126 	struct rb_node *n;
2127 	int delayed = 0;
2128 	u64 first_tid = 0;
2129 	u64 oldest_flush_tid;
2130 
2131 	spin_lock(&mdsc->cap_dirty_lock);
2132 	oldest_flush_tid = __get_oldest_flush_tid(mdsc);
2133 	spin_unlock(&mdsc->cap_dirty_lock);
2134 
2135 	while (true) {
2136 		spin_lock(&ci->i_ceph_lock);
2137 		cap = ci->i_auth_cap;
2138 		if (!(cap && cap->session == session)) {
2139 			pr_err("%p auth cap %p not mds%d ???\n", inode,
2140 					cap, session->s_mds);
2141 			spin_unlock(&ci->i_ceph_lock);
2142 			break;
2143 		}
2144 
2145 		for (n = rb_first(&ci->i_cap_flush_tree); n; n = rb_next(n)) {
2146 			cf = rb_entry(n, struct ceph_cap_flush, i_node);
2147 			if (cf->tid >= first_tid)
2148 				break;
2149 		}
2150 		if (!n) {
2151 			spin_unlock(&ci->i_ceph_lock);
2152 			break;
2153 		}
2154 
2155 		cf = rb_entry(n, struct ceph_cap_flush, i_node);
2156 
2157 		first_tid = cf->tid + 1;
2158 
2159 		dout("kick_flushing_caps %p cap %p tid %llu %s\n", inode,
2160 		     cap, cf->tid, ceph_cap_string(cf->caps));
2161 		delayed |= __send_cap(mdsc, cap, CEPH_CAP_OP_FLUSH,
2162 				      __ceph_caps_used(ci),
2163 				      __ceph_caps_wanted(ci),
2164 				      cap->issued | cap->implemented,
2165 				      cf->caps, cf->tid, oldest_flush_tid);
2166 	}
2167 	return delayed;
2168 }
2169 
2170 void ceph_early_kick_flushing_caps(struct ceph_mds_client *mdsc,
2171 				   struct ceph_mds_session *session)
2172 {
2173 	struct ceph_inode_info *ci;
2174 	struct ceph_cap *cap;
2175 
2176 	dout("early_kick_flushing_caps mds%d\n", session->s_mds);
2177 	list_for_each_entry(ci, &session->s_cap_flushing, i_flushing_item) {
2178 		spin_lock(&ci->i_ceph_lock);
2179 		cap = ci->i_auth_cap;
2180 		if (!(cap && cap->session == session)) {
2181 			pr_err("%p auth cap %p not mds%d ???\n",
2182 				&ci->vfs_inode, cap, session->s_mds);
2183 			spin_unlock(&ci->i_ceph_lock);
2184 			continue;
2185 		}
2186 
2187 
2188 		/*
2189 		 * if flushing caps were revoked, we re-send the cap flush
2190 		 * in client reconnect stage. This guarantees the MDS
2191 		 * processes the cap flush message before issuing the
2192 		 * flushing caps to other clients.
2193 		 */
2194 		if ((cap->issued & ci->i_flushing_caps) !=
2195 		    ci->i_flushing_caps) {
2196 			spin_unlock(&ci->i_ceph_lock);
2197 			if (!__kick_flushing_caps(mdsc, session, ci))
2198 				continue;
2199 			spin_lock(&ci->i_ceph_lock);
2200 		}
2201 
2202 		spin_unlock(&ci->i_ceph_lock);
2203 	}
2204 }
2205 
2206 void ceph_kick_flushing_caps(struct ceph_mds_client *mdsc,
2207 			     struct ceph_mds_session *session)
2208 {
2209 	struct ceph_inode_info *ci;
2210 
2211 	kick_flushing_capsnaps(mdsc, session);
2212 
2213 	dout("kick_flushing_caps mds%d\n", session->s_mds);
2214 	list_for_each_entry(ci, &session->s_cap_flushing, i_flushing_item) {
2215 		int delayed = __kick_flushing_caps(mdsc, session, ci);
2216 		if (delayed) {
2217 			spin_lock(&ci->i_ceph_lock);
2218 			__cap_delay_requeue(mdsc, ci);
2219 			spin_unlock(&ci->i_ceph_lock);
2220 		}
2221 	}
2222 }
2223 
2224 static void kick_flushing_inode_caps(struct ceph_mds_client *mdsc,
2225 				     struct ceph_mds_session *session,
2226 				     struct inode *inode)
2227 {
2228 	struct ceph_inode_info *ci = ceph_inode(inode);
2229 	struct ceph_cap *cap;
2230 
2231 	spin_lock(&ci->i_ceph_lock);
2232 	cap = ci->i_auth_cap;
2233 	dout("kick_flushing_inode_caps %p flushing %s\n", inode,
2234 	     ceph_cap_string(ci->i_flushing_caps));
2235 
2236 	__ceph_flush_snaps(ci, &session, 1);
2237 
2238 	if (ci->i_flushing_caps) {
2239 		int delayed;
2240 
2241 		spin_lock(&mdsc->cap_dirty_lock);
2242 		list_move_tail(&ci->i_flushing_item,
2243 			       &cap->session->s_cap_flushing);
2244 		spin_unlock(&mdsc->cap_dirty_lock);
2245 
2246 		spin_unlock(&ci->i_ceph_lock);
2247 
2248 		delayed = __kick_flushing_caps(mdsc, session, ci);
2249 		if (delayed) {
2250 			spin_lock(&ci->i_ceph_lock);
2251 			__cap_delay_requeue(mdsc, ci);
2252 			spin_unlock(&ci->i_ceph_lock);
2253 		}
2254 	} else {
2255 		spin_unlock(&ci->i_ceph_lock);
2256 	}
2257 }
2258 
2259 
2260 /*
2261  * Take references to capabilities we hold, so that we don't release
2262  * them to the MDS prematurely.
2263  *
2264  * Protected by i_ceph_lock.
2265  */
2266 static void __take_cap_refs(struct ceph_inode_info *ci, int got,
2267 			    bool snap_rwsem_locked)
2268 {
2269 	if (got & CEPH_CAP_PIN)
2270 		ci->i_pin_ref++;
2271 	if (got & CEPH_CAP_FILE_RD)
2272 		ci->i_rd_ref++;
2273 	if (got & CEPH_CAP_FILE_CACHE)
2274 		ci->i_rdcache_ref++;
2275 	if (got & CEPH_CAP_FILE_WR) {
2276 		if (ci->i_wr_ref == 0 && !ci->i_head_snapc) {
2277 			BUG_ON(!snap_rwsem_locked);
2278 			ci->i_head_snapc = ceph_get_snap_context(
2279 					ci->i_snap_realm->cached_context);
2280 		}
2281 		ci->i_wr_ref++;
2282 	}
2283 	if (got & CEPH_CAP_FILE_BUFFER) {
2284 		if (ci->i_wb_ref == 0)
2285 			ihold(&ci->vfs_inode);
2286 		ci->i_wb_ref++;
2287 		dout("__take_cap_refs %p wb %d -> %d (?)\n",
2288 		     &ci->vfs_inode, ci->i_wb_ref-1, ci->i_wb_ref);
2289 	}
2290 }
2291 
2292 /*
2293  * Try to grab cap references.  Specify those refs we @want, and the
2294  * minimal set we @need.  Also include the larger offset we are writing
2295  * to (when applicable), and check against max_size here as well.
2296  * Note that caller is responsible for ensuring max_size increases are
2297  * requested from the MDS.
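 *
 * For example (a hypothetical caller), a buffered write would
 * @need CEPH_CAP_FILE_WR and @want CEPH_CAP_FILE_BUFFER: the call
 * fails without WR, but opportunistically also grabs BUFFER when
 * it is issued, setting *got = need | (have & want).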
2298  */
2299 static int try_get_cap_refs(struct ceph_inode_info *ci, int need, int want,
2300 			    loff_t endoff, bool nonblock, int *got, int *err)
2301 {
2302 	struct inode *inode = &ci->vfs_inode;
2303 	struct ceph_mds_client *mdsc = ceph_inode_to_client(inode)->mdsc;
2304 	int ret = 0;
2305 	int have, implemented;
2306 	int file_wanted;
2307 	bool snap_rwsem_locked = false;
2308 
2309 	dout("get_cap_refs %p need %s want %s\n", inode,
2310 	     ceph_cap_string(need), ceph_cap_string(want));
2311 
2312 again:
2313 	spin_lock(&ci->i_ceph_lock);
2314 
2315 	/* make sure file is actually open */
2316 	file_wanted = __ceph_caps_file_wanted(ci);
2317 	if ((file_wanted & need) == 0) {
2318 		dout("try_get_cap_refs need %s file_wanted %s, EBADF\n",
2319 		     ceph_cap_string(need), ceph_cap_string(file_wanted));
2320 		*err = -EBADF;
2321 		ret = 1;
2322 		goto out_unlock;
2323 	}
2324 
2325 	/* finish pending truncate */
2326 	while (ci->i_truncate_pending) {
2327 		spin_unlock(&ci->i_ceph_lock);
2328 		if (snap_rwsem_locked) {
2329 			up_read(&mdsc->snap_rwsem);
2330 			snap_rwsem_locked = false;
2331 		}
2332 		__ceph_do_pending_vmtruncate(inode);
2333 		spin_lock(&ci->i_ceph_lock);
2334 	}
2335 
2336 	have = __ceph_caps_issued(ci, &implemented);
2337 
2338 	if (have & need & CEPH_CAP_FILE_WR) {
2339 		if (endoff >= 0 && endoff > (loff_t)ci->i_max_size) {
2340 			dout("get_cap_refs %p endoff %llu > maxsize %llu\n",
2341 			     inode, endoff, ci->i_max_size);
2342 			if (endoff > ci->i_requested_max_size) {
2343 				*err = -EAGAIN;
2344 				ret = 1;
2345 			}
2346 			goto out_unlock;
2347 		}
2348 		/*
2349 		 * If a sync write is in progress, we must wait, so that we
2350 		 * can get a final snapshot value for size+mtime.
2351 		 */
2352 		if (__ceph_have_pending_cap_snap(ci)) {
2353 			dout("get_cap_refs %p cap_snap_pending\n", inode);
2354 			goto out_unlock;
2355 		}
2356 	}
2357 
2358 	if ((have & need) == need) {
2359 		/*
2360 		 * Look at (implemented & ~have & not) so that we keep waiting
2361 		 * on transition from wanted -> needed caps.  This is needed
2362 		 * for WRBUFFER|WR -> WR to avoid a new WR sync write from
2363 		 * going before a prior buffered writeback happens.
2364 		 */
2365 		int not = want & ~(have & need);
2366 		int revoking = implemented & ~have;
2367 		dout("get_cap_refs %p have %s but not %s (revoking %s)\n",
2368 		     inode, ceph_cap_string(have), ceph_cap_string(not),
2369 		     ceph_cap_string(revoking));
2370 		if ((revoking & not) == 0) {
2371 			if (!snap_rwsem_locked &&
2372 			    !ci->i_head_snapc &&
2373 			    (need & CEPH_CAP_FILE_WR)) {
2374 				if (!down_read_trylock(&mdsc->snap_rwsem)) {
2375 					/*
2376 					 * we cannot call down_read() when the
2377 					 * task isn't in TASK_RUNNING state
2378 					 */
2379 					if (nonblock) {
2380 						*err = -EAGAIN;
2381 						ret = 1;
2382 						goto out_unlock;
2383 					}
2384 
2385 					spin_unlock(&ci->i_ceph_lock);
2386 					down_read(&mdsc->snap_rwsem);
2387 					snap_rwsem_locked = true;
2388 					goto again;
2389 				}
2390 				snap_rwsem_locked = true;
2391 			}
2392 			*got = need | (have & want);
2393 			__take_cap_refs(ci, *got, true);
2394 			ret = 1;
2395 		}
2396 	} else {
2397 		int session_readonly = false;
2398 		if ((need & CEPH_CAP_FILE_WR) && ci->i_auth_cap) {
2399 			struct ceph_mds_session *s = ci->i_auth_cap->session;
2400 			spin_lock(&s->s_cap_lock);
2401 			session_readonly = s->s_readonly;
2402 			spin_unlock(&s->s_cap_lock);
2403 		}
2404 		if (session_readonly) {
2405 			dout("get_cap_refs %p needed %s but mds%d readonly\n",
2406 			     inode, ceph_cap_string(need), ci->i_auth_cap->mds);
2407 			*err = -EROFS;
2408 			ret = 1;
2409 			goto out_unlock;
2410 		}
2411 
2412 		if (!__ceph_is_any_caps(ci) &&
2413 		    ACCESS_ONCE(mdsc->fsc->mount_state) == CEPH_MOUNT_SHUTDOWN) {
2414 			dout("get_cap_refs %p forced umount\n", inode);
2415 			*err = -EIO;
2416 			ret = 1;
2417 			goto out_unlock;
2418 		}
2419 
2420 		dout("get_cap_refs %p have %s needed %s\n", inode,
2421 		     ceph_cap_string(have), ceph_cap_string(need));
2422 	}
2423 out_unlock:
2424 	spin_unlock(&ci->i_ceph_lock);
2425 	if (snap_rwsem_locked)
2426 		up_read(&mdsc->snap_rwsem);
2427 
2428 	dout("get_cap_refs %p ret %d got %s\n", inode,
2429 	     ret, ceph_cap_string(*got));
2430 	return ret;
2431 }
2432 
2433 /*
2434  * Check the offset we are writing up to against our current
2435  * max_size.  If necessary, tell the MDS we want to write to
2436  * a larger offset.
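 *
 * For example, a write extending the file to @endoff beyond the
 * current max_size records i_wanted_max_size = endoff and, if we
 * have not already asked for that much, pokes the auth MDS via
 * ceph_check_caps(CHECK_CAPS_AUTHONLY).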
2437  */
2438 static void check_max_size(struct inode *inode, loff_t endoff)
2439 {
2440 	struct ceph_inode_info *ci = ceph_inode(inode);
2441 	int check = 0;
2442 
2443 	/* do we need to explicitly request a larger max_size? */
2444 	spin_lock(&ci->i_ceph_lock);
2445 	if (endoff >= ci->i_max_size && endoff > ci->i_wanted_max_size) {
2446 		dout("write %p at large endoff %llu, req max_size\n",
2447 		     inode, endoff);
2448 		ci->i_wanted_max_size = endoff;
2449 	}
2450 	/* duplicate ceph_check_caps()'s logic */
2451 	if (ci->i_auth_cap &&
2452 	    (ci->i_auth_cap->issued & CEPH_CAP_FILE_WR) &&
2453 	    ci->i_wanted_max_size > ci->i_max_size &&
2454 	    ci->i_wanted_max_size > ci->i_requested_max_size)
2455 		check = 1;
2456 	spin_unlock(&ci->i_ceph_lock);
2457 	if (check)
2458 		ceph_check_caps(ci, CHECK_CAPS_AUTHONLY, NULL);
2459 }
2460 
2461 /*
2462  * Wait for caps, and take cap references.  If we can't get a WR cap
2463  * due to a small max_size, make sure we check_max_size (and possibly
2464  * ask the mds) so we don't get hung up indefinitely.
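 *
 * A minimal usage sketch (illustrative only; the real call sites
 * live in the read/write paths, e.g. fs/ceph/file.c):
 *
 *	int got = 0, err;
 *
 *	err = ceph_get_caps(ci, CEPH_CAP_FILE_WR,
 *			    CEPH_CAP_FILE_BUFFER, endoff,
 *			    &got, &pinned_page);
 *	if (err < 0)
 *		return err;
 *	...do the I/O while holding the cap refs...
 *	ceph_put_cap_refs(ci, got);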
2465  */
2466 int ceph_get_caps(struct ceph_inode_info *ci, int need, int want,
2467 		  loff_t endoff, int *got, struct page **pinned_page)
2468 {
2469 	int _got, ret, err = 0;
2470 
2471 	ret = ceph_pool_perm_check(ci, need);
2472 	if (ret < 0)
2473 		return ret;
2474 
2475 	while (true) {
2476 		if (endoff > 0)
2477 			check_max_size(&ci->vfs_inode, endoff);
2478 
2479 		err = 0;
2480 		_got = 0;
2481 		ret = try_get_cap_refs(ci, need, want, endoff,
2482 				       false, &_got, &err);
2483 		if (ret) {
2484 			if (err == -EAGAIN)
2485 				continue;
2486 			if (err < 0)
2487 				return err;
2488 		} else {
2489 			ret = wait_event_interruptible(ci->i_cap_wq,
2490 					try_get_cap_refs(ci, need, want, endoff,
2491 							 true, &_got, &err));
2492 			if (err == -EAGAIN)
2493 				continue;
2494 			if (err < 0)
2495 				ret = err;
2496 			if (ret < 0)
2497 				return ret;
2498 		}
2499 
2500 		if (ci->i_inline_version != CEPH_INLINE_NONE &&
2501 		    (_got & (CEPH_CAP_FILE_CACHE|CEPH_CAP_FILE_LAZYIO)) &&
2502 		    i_size_read(&ci->vfs_inode) > 0) {
2503 			struct page *page =
2504 				find_get_page(ci->vfs_inode.i_mapping, 0);
2505 			if (page) {
2506 				if (PageUptodate(page)) {
2507 					*pinned_page = page;
2508 					break;
2509 				}
2510 				page_cache_release(page);
2511 			}
2512 			/*
2513 			 * drop cap refs first because getattr while
2514 			 * holding caps refs can cause deadlock.
2515 			 */
2516 			ceph_put_cap_refs(ci, _got);
2517 			_got = 0;
2518 
2519 			/*
2520 			 * getattr request will bring inline data into
2521 			 * page cache
2522 			 */
2523 			ret = __ceph_do_getattr(&ci->vfs_inode, NULL,
2524 						CEPH_STAT_CAP_INLINE_DATA,
2525 						true);
2526 			if (ret < 0)
2527 				return ret;
2528 			continue;
2529 		}
2530 		break;
2531 	}
2532 
2533 	*got = _got;
2534 	return 0;
2535 }
2536 
2537 /*
2538  * Take cap refs.  Caller must already know we hold at least one ref
2539  * on the caps in question or we don't know this is safe.
2540  */
2541 void ceph_get_cap_refs(struct ceph_inode_info *ci, int caps)
2542 {
2543 	spin_lock(&ci->i_ceph_lock);
2544 	__take_cap_refs(ci, caps, false);
2545 	spin_unlock(&ci->i_ceph_lock);
2546 }
2547 
2548 
2549 /*
2550  * Drop a cap_snap that is not associated with any snapshot.
2551  * We don't need to send a FLUSHSNAP message for it.
2552  */
2553 static int ceph_try_drop_cap_snap(struct ceph_cap_snap *capsnap)
2554 {
2555 	if (!capsnap->need_flush &&
2556 	    !capsnap->writing && !capsnap->dirty_pages) {
2557 
2558 		dout("dropping cap_snap %p follows %llu\n",
2559 		     capsnap, capsnap->follows);
2560 		ceph_put_snap_context(capsnap->context);
2561 		list_del(&capsnap->ci_item);
2562 		list_del(&capsnap->flushing_item);
2563 		ceph_put_cap_snap(capsnap);
2564 		return 1;
2565 	}
2566 	return 0;
2567 }
2568 
2569 /*
2570  * Release cap refs.
2571  *
2572  * If we released the last ref on any given cap, call ceph_check_caps
2573  * to release (or schedule a release).
2574  *
2575  * If we are releasing a WR cap (from a sync write), finalize any affected
2576  * cap_snap, and wake up any waiters.
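 *
 * Each reference taken via ceph_get_caps() or ceph_get_cap_refs()
 * must eventually be dropped here with the same cap bits in @had.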
2577  */
2578 void ceph_put_cap_refs(struct ceph_inode_info *ci, int had)
2579 {
2580 	struct inode *inode = &ci->vfs_inode;
2581 	int last = 0, put = 0, flushsnaps = 0, wake = 0;
2582 
2583 	spin_lock(&ci->i_ceph_lock);
2584 	if (had & CEPH_CAP_PIN)
2585 		--ci->i_pin_ref;
2586 	if (had & CEPH_CAP_FILE_RD)
2587 		if (--ci->i_rd_ref == 0)
2588 			last++;
2589 	if (had & CEPH_CAP_FILE_CACHE)
2590 		if (--ci->i_rdcache_ref == 0)
2591 			last++;
2592 	if (had & CEPH_CAP_FILE_BUFFER) {
2593 		if (--ci->i_wb_ref == 0) {
2594 			last++;
2595 			put++;
2596 		}
2597 		dout("put_cap_refs %p wb %d -> %d (?)\n",
2598 		     inode, ci->i_wb_ref+1, ci->i_wb_ref);
2599 	}
2600 	if (had & CEPH_CAP_FILE_WR)
2601 		if (--ci->i_wr_ref == 0) {
2602 			last++;
2603 			if (__ceph_have_pending_cap_snap(ci)) {
2604 				struct ceph_cap_snap *capsnap =
2605 					list_last_entry(&ci->i_cap_snaps,
2606 							struct ceph_cap_snap,
2607 							ci_item);
2608 				capsnap->writing = 0;
2609 				if (ceph_try_drop_cap_snap(capsnap))
2610 					put++;
2611 				else if (__ceph_finish_cap_snap(ci, capsnap))
2612 					flushsnaps = 1;
2613 				wake = 1;
2614 			}
2615 			if (ci->i_wrbuffer_ref_head == 0 &&
2616 			    ci->i_dirty_caps == 0 &&
2617 			    ci->i_flushing_caps == 0) {
2618 				BUG_ON(!ci->i_head_snapc);
2619 				ceph_put_snap_context(ci->i_head_snapc);
2620 				ci->i_head_snapc = NULL;
2621 			}
2622 			/* see comment in __ceph_remove_cap() */
2623 			if (!__ceph_is_any_caps(ci) && ci->i_snap_realm)
2624 				drop_inode_snap_realm(ci);
2625 		}
2626 	spin_unlock(&ci->i_ceph_lock);
2627 
2628 	dout("put_cap_refs %p had %s%s%s\n", inode, ceph_cap_string(had),
2629 	     last ? " last" : "", put ? " put" : "");
2630 
2631 	if (last && !flushsnaps)
2632 		ceph_check_caps(ci, 0, NULL);
2633 	else if (flushsnaps)
2634 		ceph_flush_snaps(ci);
2635 	if (wake)
2636 		wake_up_all(&ci->i_cap_wq);
2637 	while (put-- > 0)
2638 		iput(inode);
2639 }
2640 
2641 /*
2642  * Release @nr WRBUFFER refs on dirty pages for the given @snapc snap
2643  * context.  Adjust per-snap dirty page accounting as appropriate.
2644  * Once all dirty data for a cap_snap is flushed, flush snapped file
2645  * metadata back to the MDS.  If we dropped the last ref, call
2646  * ceph_check_caps.
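 *
 * @snapc selects which accounting to adjust: the live head
 * (i_wrbuffer_ref_head) when it matches i_head_snapc, otherwise
 * the matching cap_snap's dirty_pages count.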
2647  */
2648 void ceph_put_wrbuffer_cap_refs(struct ceph_inode_info *ci, int nr,
2649 				struct ceph_snap_context *snapc)
2650 {
2651 	struct inode *inode = &ci->vfs_inode;
2652 	int last = 0;
2653 	int complete_capsnap = 0;
2654 	int drop_capsnap = 0;
2655 	int found = 0;
2656 	struct ceph_cap_snap *capsnap = NULL;
2657 
2658 	spin_lock(&ci->i_ceph_lock);
2659 	ci->i_wrbuffer_ref -= nr;
2660 	last = !ci->i_wrbuffer_ref;
2661 
2662 	if (ci->i_head_snapc == snapc) {
2663 		ci->i_wrbuffer_ref_head -= nr;
2664 		if (ci->i_wrbuffer_ref_head == 0 &&
2665 		    ci->i_wr_ref == 0 &&
2666 		    ci->i_dirty_caps == 0 &&
2667 		    ci->i_flushing_caps == 0) {
2668 			BUG_ON(!ci->i_head_snapc);
2669 			ceph_put_snap_context(ci->i_head_snapc);
2670 			ci->i_head_snapc = NULL;
2671 		}
2672 		dout("put_wrbuffer_cap_refs on %p head %d/%d -> %d/%d %s\n",
2673 		     inode,
2674 		     ci->i_wrbuffer_ref+nr, ci->i_wrbuffer_ref_head+nr,
2675 		     ci->i_wrbuffer_ref, ci->i_wrbuffer_ref_head,
2676 		     last ? " LAST" : "");
2677 	} else {
2678 		list_for_each_entry(capsnap, &ci->i_cap_snaps, ci_item) {
2679 			if (capsnap->context == snapc) {
2680 				found = 1;
2681 				break;
2682 			}
2683 		}
2684 		BUG_ON(!found);
2685 		capsnap->dirty_pages -= nr;
2686 		if (capsnap->dirty_pages == 0) {
2687 			complete_capsnap = 1;
2688 			drop_capsnap = ceph_try_drop_cap_snap(capsnap);
2689 		}
2690 		dout("put_wrbuffer_cap_refs on %p cap_snap %p "
2691 		     " snap %lld %d/%d -> %d/%d %s%s\n",
2692 		     inode, capsnap, capsnap->context->seq,
2693 		     ci->i_wrbuffer_ref+nr, capsnap->dirty_pages + nr,
2694 		     ci->i_wrbuffer_ref, capsnap->dirty_pages,
2695 		     last ? " (wrbuffer last)" : "",
2696 		     complete_capsnap ? " (complete capsnap)" : "");
2697 	}
2698 
2699 	spin_unlock(&ci->i_ceph_lock);
2700 
2701 	if (last) {
2702 		ceph_check_caps(ci, CHECK_CAPS_AUTHONLY, NULL);
2703 		iput(inode);
2704 	} else if (complete_capsnap) {
2705 		ceph_flush_snaps(ci);
2706 		wake_up_all(&ci->i_cap_wq);
2707 	}
2708 	if (drop_capsnap)
2709 		iput(inode);
2710 }
2711 
2712 /*
2713  * Invalidate unlinked inode's aliases, so we can drop the inode ASAP.
2714  */
2715 static void invalidate_aliases(struct inode *inode)
2716 {
2717 	struct dentry *dn, *prev = NULL;
2718 
2719 	dout("invalidate_aliases inode %p\n", inode);
2720 	d_prune_aliases(inode);
2721 	/*
2722 	 * For a non-directory inode, d_find_alias() only returns a
2723 	 * hashed dentry. After calling d_invalidate(), the
2724 	 * dentry becomes unhashed.
2725 	 *
2726 	 * For a directory inode, d_find_alias() can return an
2727 	 * unhashed dentry, but a directory inode should have
2728 	 * at most one alias.
2729 	 */
2730 	while ((dn = d_find_alias(inode))) {
2731 		if (dn == prev) {
2732 			dput(dn);
2733 			break;
2734 		}
2735 		d_invalidate(dn);
2736 		if (prev)
2737 			dput(prev);
2738 		prev = dn;
2739 	}
2740 	if (prev)
2741 		dput(prev);
2742 }
2743 
2744 /*
2745  * Handle a cap GRANT message from the MDS.  (Note that a GRANT may
2746  * actually be a revocation if it specifies a smaller cap set.)
2747  *
2748  * caller holds s_mutex and i_ceph_lock, we drop both.
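 * On CEPH_CAP_OP_IMPORT we additionally drop mdsc->snap_rwsem
 * (see the __releases() annotations below).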
2749  */
2750 static void handle_cap_grant(struct ceph_mds_client *mdsc,
2751 			     struct inode *inode, struct ceph_mds_caps *grant,
2752 			     u64 inline_version,
2753 			     void *inline_data, int inline_len,
2754 			     struct ceph_buffer *xattr_buf,
2755 			     struct ceph_mds_session *session,
2756 			     struct ceph_cap *cap, int issued)
2757 	__releases(ci->i_ceph_lock)
2758 	__releases(mdsc->snap_rwsem)
2759 {
2760 	struct ceph_inode_info *ci = ceph_inode(inode);
2761 	int mds = session->s_mds;
2762 	int seq = le32_to_cpu(grant->seq);
2763 	int newcaps = le32_to_cpu(grant->caps);
2764 	int used, wanted, dirty;
2765 	u64 size = le64_to_cpu(grant->size);
2766 	u64 max_size = le64_to_cpu(grant->max_size);
2767 	struct timespec mtime, atime, ctime;
2768 	int check_caps = 0;
2769 	bool wake = false;
2770 	bool writeback = false;
2771 	bool queue_trunc = false;
2772 	bool queue_invalidate = false;
2773 	bool queue_revalidate = false;
2774 	bool deleted_inode = false;
2775 	bool fill_inline = false;
2776 
2777 	dout("handle_cap_grant inode %p cap %p mds%d seq %d %s\n",
2778 	     inode, cap, mds, seq, ceph_cap_string(newcaps));
2779 	dout(" size %llu max_size %llu, i_size %llu\n", size, max_size,
2780 		inode->i_size);
2781 
2782 
2783 	/*
2784 	 * auth mds of the inode changed. we received the cap export message,
2785 	 * but still haven't received the cap import message. handle_cap_export
2786 	 * updated the new auth MDS' cap.
2787 	 *
2788 	 * "ceph_seq_cmp(seq, cap->seq) <= 0" means we are processing a message
2789 	 * that was sent before the cap import message. So don't remove caps.
2790 	 */
2791 	if (ceph_seq_cmp(seq, cap->seq) <= 0) {
2792 		WARN_ON(cap != ci->i_auth_cap);
2793 		WARN_ON(cap->cap_id != le64_to_cpu(grant->cap_id));
2794 		seq = cap->seq;
2795 		newcaps |= cap->issued;
2796 	}
2797 
2798 	/*
2799 	 * If CACHE is being revoked, and we have no dirty buffers,
2800 	 * try to invalidate (once).  (If there are dirty buffers, we
2801 	 * will invalidate _after_ writeback.)
2802 	 */
2803 	if (!S_ISDIR(inode->i_mode) && /* don't invalidate readdir cache */
2804 	    ((cap->issued & ~newcaps) & CEPH_CAP_FILE_CACHE) &&
2805 	    (newcaps & CEPH_CAP_FILE_LAZYIO) == 0 &&
2806 	    !ci->i_wrbuffer_ref) {
2807 		if (try_nonblocking_invalidate(inode)) {
2808 			/* there were locked pages... invalidate later
2809 			   in a separate thread. */
2810 			if (ci->i_rdcache_revoking != ci->i_rdcache_gen) {
2811 				queue_invalidate = true;
2812 				ci->i_rdcache_revoking = ci->i_rdcache_gen;
2813 			}
2814 		}
2815 
2816 		ceph_fscache_invalidate(inode);
2817 	}
2818 
2819 	/* side effects now are allowed */
2820 	cap->cap_gen = session->s_cap_gen;
2821 	cap->seq = seq;
2822 
2823 	__check_cap_issue(ci, cap, newcaps);
2824 
2825 	if ((newcaps & CEPH_CAP_AUTH_SHARED) &&
2826 	    (issued & CEPH_CAP_AUTH_EXCL) == 0) {
2827 		inode->i_mode = le32_to_cpu(grant->mode);
2828 		inode->i_uid = make_kuid(&init_user_ns, le32_to_cpu(grant->uid));
2829 		inode->i_gid = make_kgid(&init_user_ns, le32_to_cpu(grant->gid));
2830 		dout("%p mode 0%o uid.gid %d.%d\n", inode, inode->i_mode,
2831 		     from_kuid(&init_user_ns, inode->i_uid),
2832 		     from_kgid(&init_user_ns, inode->i_gid));
2833 	}
2834 
2835 	if ((newcaps & CEPH_CAP_LINK_SHARED) &&
2836 	    (issued & CEPH_CAP_LINK_EXCL) == 0) {
2837 		set_nlink(inode, le32_to_cpu(grant->nlink));
2838 		if (inode->i_nlink == 0 &&
2839 		    (newcaps & (CEPH_CAP_LINK_SHARED | CEPH_CAP_LINK_EXCL)))
2840 			deleted_inode = true;
2841 	}
2842 
2843 	if ((issued & CEPH_CAP_XATTR_EXCL) == 0 && grant->xattr_len) {
2844 		int len = le32_to_cpu(grant->xattr_len);
2845 		u64 version = le64_to_cpu(grant->xattr_version);
2846 
2847 		if (version > ci->i_xattrs.version) {
2848 			dout(" got new xattrs v%llu on %p len %d\n",
2849 			     version, inode, len);
2850 			if (ci->i_xattrs.blob)
2851 				ceph_buffer_put(ci->i_xattrs.blob);
2852 			ci->i_xattrs.blob = ceph_buffer_get(xattr_buf);
2853 			ci->i_xattrs.version = version;
2854 			ceph_forget_all_cached_acls(inode);
2855 		}
2856 	}
2857 
2858 	/* Do we need to revalidate our fscache cookie? Don't bother on the
2859 	 * first cache cap, as we already validate at cookie creation time. */
2860 	if ((issued & CEPH_CAP_FILE_CACHE) && ci->i_rdcache_gen > 1)
2861 		queue_revalidate = true;
2862 
2863 	if (newcaps & CEPH_CAP_ANY_RD) {
2864 		/* ctime/mtime/atime? */
2865 		ceph_decode_timespec(&mtime, &grant->mtime);
2866 		ceph_decode_timespec(&atime, &grant->atime);
2867 		ceph_decode_timespec(&ctime, &grant->ctime);
2868 		ceph_fill_file_time(inode, issued,
2869 				    le32_to_cpu(grant->time_warp_seq),
2870 				    &ctime, &mtime, &atime);
2871 	}
2872 
2873 	if (newcaps & (CEPH_CAP_ANY_FILE_RD | CEPH_CAP_ANY_FILE_WR)) {
2874 		/* file layout may have changed */
2875 		ci->i_layout = grant->layout;
2876 		/* size/truncate_seq? */
2877 		queue_trunc = ceph_fill_file_size(inode, issued,
2878 					le32_to_cpu(grant->truncate_seq),
2879 					le64_to_cpu(grant->truncate_size),
2880 					size);
2881 		/* max size increase? */
2882 		if (ci->i_auth_cap == cap && max_size != ci->i_max_size) {
2883 			dout("max_size %lld -> %llu\n",
2884 			     ci->i_max_size, max_size);
2885 			ci->i_max_size = max_size;
2886 			if (max_size >= ci->i_wanted_max_size) {
2887 				ci->i_wanted_max_size = 0;  /* reset */
2888 				ci->i_requested_max_size = 0;
2889 			}
2890 			wake = true;
2891 		}
2892 	}
2893 
2894 	/* check cap bits */
2895 	wanted = __ceph_caps_wanted(ci);
2896 	used = __ceph_caps_used(ci);
2897 	dirty = __ceph_caps_dirty(ci);
2898 	dout(" my wanted = %s, used = %s, dirty %s\n",
2899 	     ceph_cap_string(wanted),
2900 	     ceph_cap_string(used),
2901 	     ceph_cap_string(dirty));
2902 	if (wanted != le32_to_cpu(grant->wanted)) {
2903 		dout("mds wanted %s -> %s\n",
2904 		     ceph_cap_string(le32_to_cpu(grant->wanted)),
2905 		     ceph_cap_string(wanted));
2906 		/* imported cap may not have correct mds_wanted */
2907 		if (le32_to_cpu(grant->op) == CEPH_CAP_OP_IMPORT)
2908 			check_caps = 1;
2909 	}
2910 
2911 	/* revocation, grant, or no-op? */
2912 	if (cap->issued & ~newcaps) {
2913 		int revoking = cap->issued & ~newcaps;
2914 
2915 		dout("revocation: %s -> %s (revoking %s)\n",
2916 		     ceph_cap_string(cap->issued),
2917 		     ceph_cap_string(newcaps),
2918 		     ceph_cap_string(revoking));
2919 		if (revoking & used & CEPH_CAP_FILE_BUFFER)
2920 			writeback = true;  /* initiate writeback; will delay ack */
2921 		else if (revoking == CEPH_CAP_FILE_CACHE &&
2922 			 (newcaps & CEPH_CAP_FILE_LAZYIO) == 0 &&
2923 			 queue_invalidate)
2924 			; /* do nothing yet, invalidation will be queued */
2925 		else if (cap == ci->i_auth_cap)
2926 			check_caps = 1; /* check auth cap only */
2927 		else
2928 			check_caps = 2; /* check all caps */
2929 		cap->issued = newcaps;
2930 		cap->implemented |= newcaps;
2931 	} else if (cap->issued == newcaps) {
2932 		dout("caps unchanged: %s -> %s\n",
2933 		     ceph_cap_string(cap->issued), ceph_cap_string(newcaps));
2934 	} else {
2935 		dout("grant: %s -> %s\n", ceph_cap_string(cap->issued),
2936 		     ceph_cap_string(newcaps));
2937 		/* non-auth MDS is revoking the newly granted caps? */
2938 		if (cap == ci->i_auth_cap &&
2939 		    __ceph_caps_revoking_other(ci, cap, newcaps))
2940 		    check_caps = 2;
2941 
2942 		cap->issued = newcaps;
2943 		cap->implemented |= newcaps; /* add bits only, to
2944 					      * avoid stepping on a
2945 					      * pending revocation */
2946 		wake = true;
2947 	}
2948 	BUG_ON(cap->issued & ~cap->implemented);
2949 
2950 	if (inline_version > 0 && inline_version >= ci->i_inline_version) {
2951 		ci->i_inline_version = inline_version;
2952 		if (ci->i_inline_version != CEPH_INLINE_NONE &&
2953 		    (newcaps & (CEPH_CAP_FILE_CACHE|CEPH_CAP_FILE_LAZYIO)))
2954 			fill_inline = true;
2955 	}
2956 
2957 	spin_unlock(&ci->i_ceph_lock);
2958 
2959 	if (le32_to_cpu(grant->op) == CEPH_CAP_OP_IMPORT) {
2960 		kick_flushing_inode_caps(mdsc, session, inode);
2961 		up_read(&mdsc->snap_rwsem);
2962 		if (newcaps & ~issued)
2963 			wake = true;
2964 	}
2965 
2966 	if (fill_inline)
2967 		ceph_fill_inline_data(inode, NULL, inline_data, inline_len);
2968 
2969 	if (queue_trunc) {
2970 		ceph_queue_vmtruncate(inode);
2971 		ceph_queue_revalidate(inode);
2972 	} else if (queue_revalidate)
2973 		ceph_queue_revalidate(inode);
2974 
2975 	if (writeback)
2976 		/*
2977 		 * queue inode for writeback: we can't actually call
2978 		 * filemap_write_and_wait, etc. from message handler
2979 		 * context.
2980 		 */
2981 		ceph_queue_writeback(inode);
2982 	if (queue_invalidate)
2983 		ceph_queue_invalidate(inode);
2984 	if (deleted_inode)
2985 		invalidate_aliases(inode);
2986 	if (wake)
2987 		wake_up_all(&ci->i_cap_wq);
2988 
2989 	if (check_caps == 1)
2990 		ceph_check_caps(ci, CHECK_CAPS_NODELAY|CHECK_CAPS_AUTHONLY,
2991 				session);
2992 	else if (check_caps == 2)
2993 		ceph_check_caps(ci, CHECK_CAPS_NODELAY, session);
2994 	else
2995 		mutex_unlock(&session->s_mutex);
2996 }
2997 
2998 /*
2999  * Handle FLUSH_ACK from MDS, indicating that metadata we sent to the
3000  * MDS has been safely committed.
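 *
 * An ack for @flush_tid completes every pending flush with
 * tid <= flush_tid; cap bits still covered by a newer pending
 * flush are masked back out of the "cleaned" set below.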
3001  */
3002 static void handle_cap_flush_ack(struct inode *inode, u64 flush_tid,
3003 				 struct ceph_mds_caps *m,
3004 				 struct ceph_mds_session *session,
3005 				 struct ceph_cap *cap)
3006 	__releases(ci->i_ceph_lock)
3007 {
3008 	struct ceph_inode_info *ci = ceph_inode(inode);
3009 	struct ceph_mds_client *mdsc = ceph_sb_to_client(inode->i_sb)->mdsc;
3010 	struct ceph_cap_flush *cf;
3011 	struct rb_node *n;
3012 	LIST_HEAD(to_remove);
3013 	unsigned seq = le32_to_cpu(m->seq);
3014 	int dirty = le32_to_cpu(m->dirty);
3015 	int cleaned = 0;
3016 	int drop = 0;
3017 
3018 	n = rb_first(&ci->i_cap_flush_tree);
3019 	while (n) {
3020 		cf = rb_entry(n, struct ceph_cap_flush, i_node);
3021 		n = rb_next(&cf->i_node);
3022 		if (cf->tid == flush_tid)
3023 			cleaned = cf->caps;
3024 		if (cf->tid <= flush_tid) {
3025 			rb_erase(&cf->i_node, &ci->i_cap_flush_tree);
3026 			list_add_tail(&cf->list, &to_remove);
3027 		} else {
3028 			cleaned &= ~cf->caps;
3029 			if (!cleaned)
3030 				break;
3031 		}
3032 	}
3033 
3034 	dout("handle_cap_flush_ack inode %p mds%d seq %d on %s cleaned %s,"
3035 	     " flushing %s -> %s\n",
3036 	     inode, session->s_mds, seq, ceph_cap_string(dirty),
3037 	     ceph_cap_string(cleaned), ceph_cap_string(ci->i_flushing_caps),
3038 	     ceph_cap_string(ci->i_flushing_caps & ~cleaned));
3039 
3040 	if (list_empty(&to_remove) && !cleaned)
3041 		goto out;
3042 
3043 	ci->i_flushing_caps &= ~cleaned;
3044 
3045 	spin_lock(&mdsc->cap_dirty_lock);
3046 
3047 	if (!list_empty(&to_remove)) {
3048 		list_for_each_entry(cf, &to_remove, list)
3049 			rb_erase(&cf->g_node, &mdsc->cap_flush_tree);
3050 
3051 		n = rb_first(&mdsc->cap_flush_tree);
3052 		cf = n ? rb_entry(n, struct ceph_cap_flush, g_node) : NULL;
3053 		if (!cf || cf->tid > flush_tid)
3054 			wake_up_all(&mdsc->cap_flushing_wq);
3055 	}
3056 
3057 	if (ci->i_flushing_caps == 0) {
3058 		list_del_init(&ci->i_flushing_item);
3059 		if (!list_empty(&session->s_cap_flushing))
3060 			dout(" mds%d still flushing cap on %p\n",
3061 			     session->s_mds,
3062 			     &list_entry(session->s_cap_flushing.next,
3063 					 struct ceph_inode_info,
3064 					 i_flushing_item)->vfs_inode);
3065 		mdsc->num_cap_flushing--;
3066 		dout(" inode %p now !flushing\n", inode);
3067 
3068 		if (ci->i_dirty_caps == 0) {
3069 			dout(" inode %p now clean\n", inode);
3070 			BUG_ON(!list_empty(&ci->i_dirty_item));
3071 			drop = 1;
3072 			if (ci->i_wr_ref == 0 &&
3073 			    ci->i_wrbuffer_ref_head == 0) {
3074 				BUG_ON(!ci->i_head_snapc);
3075 				ceph_put_snap_context(ci->i_head_snapc);
3076 				ci->i_head_snapc = NULL;
3077 			}
3078 		} else {
3079 			BUG_ON(list_empty(&ci->i_dirty_item));
3080 		}
3081 	}
3082 	spin_unlock(&mdsc->cap_dirty_lock);
3083 	wake_up_all(&ci->i_cap_wq);
3084 
3085 out:
3086 	spin_unlock(&ci->i_ceph_lock);
3087 
3088 	while (!list_empty(&to_remove)) {
3089 		cf = list_first_entry(&to_remove,
3090 				      struct ceph_cap_flush, list);
3091 		list_del(&cf->list);
3092 		ceph_free_cap_flush(cf);
3093 	}
3094 	if (drop)
3095 		iput(inode);
3096 }
3097 
3098 /*
3099  * Handle FLUSHSNAP_ACK.  MDS has flushed snap data to disk and we can
3100  * throw away our cap_snap.
3101  *
3102  * Caller holds s_mutex.
3103  */
3104 static void handle_cap_flushsnap_ack(struct inode *inode, u64 flush_tid,
3105 				     struct ceph_mds_caps *m,
3106 				     struct ceph_mds_session *session)
3107 {
3108 	struct ceph_inode_info *ci = ceph_inode(inode);
3109 	struct ceph_mds_client *mdsc = ceph_sb_to_client(inode->i_sb)->mdsc;
3110 	u64 follows = le64_to_cpu(m->snap_follows);
3111 	struct ceph_cap_snap *capsnap;
3112 	int drop = 0;
3113 
3114 	dout("handle_cap_flushsnap_ack inode %p ci %p mds%d follows %lld\n",
3115 	     inode, ci, session->s_mds, follows);
3116 
3117 	spin_lock(&ci->i_ceph_lock);
3118 	list_for_each_entry(capsnap, &ci->i_cap_snaps, ci_item) {
3119 		if (capsnap->follows == follows) {
3120 			if (capsnap->flush_tid != flush_tid) {
3121 				dout(" cap_snap %p follows %lld tid %lld !="
3122 				     " %lld\n", capsnap, follows,
3123 				     flush_tid, capsnap->flush_tid);
3124 				break;
3125 			}
3126 			WARN_ON(capsnap->dirty_pages || capsnap->writing);
3127 			dout(" removing %p cap_snap %p follows %lld\n",
3128 			     inode, capsnap, follows);
3129 			ceph_put_snap_context(capsnap->context);
3130 			list_del(&capsnap->ci_item);
3131 			list_del(&capsnap->flushing_item);
3132 			ceph_put_cap_snap(capsnap);
3133 			wake_up_all(&mdsc->cap_flushing_wq);
3134 			drop = 1;
3135 			break;
3136 		} else {
3137 			dout(" skipping cap_snap %p follows %lld\n",
3138 			     capsnap, capsnap->follows);
3139 		}
3140 	}
3141 	spin_unlock(&ci->i_ceph_lock);
3142 	if (drop)
3143 		iput(inode);
3144 }
3145 
3146 /*
3147  * Handle TRUNC from MDS, indicating file truncation.
3148  *
3149  * caller holds s_mutex.
3150  */
3151 static void handle_cap_trunc(struct inode *inode,
3152 			     struct ceph_mds_caps *trunc,
3153 			     struct ceph_mds_session *session)
3154 	__releases(ci->i_ceph_lock)
3155 {
3156 	struct ceph_inode_info *ci = ceph_inode(inode);
3157 	int mds = session->s_mds;
3158 	int seq = le32_to_cpu(trunc->seq);
3159 	u32 truncate_seq = le32_to_cpu(trunc->truncate_seq);
3160 	u64 truncate_size = le64_to_cpu(trunc->truncate_size);
3161 	u64 size = le64_to_cpu(trunc->size);
3162 	int implemented = 0;
3163 	int dirty = __ceph_caps_dirty(ci);
3164 	int issued = __ceph_caps_issued(ceph_inode(inode), &implemented);
3165 	int queue_trunc = 0;
3166 
3167 	issued |= implemented | dirty;
3168 
3169 	dout("handle_cap_trunc inode %p mds%d seq %d to %lld seq %d\n",
3170 	     inode, mds, seq, truncate_size, truncate_seq);
3171 	queue_trunc = ceph_fill_file_size(inode, issued,
3172 					  truncate_seq, truncate_size, size);
3173 	spin_unlock(&ci->i_ceph_lock);
3174 
3175 	if (queue_trunc) {
3176 		ceph_queue_vmtruncate(inode);
3177 		ceph_fscache_invalidate(inode);
3178 	}
3179 }
3180 
3181 /*
3182  * Handle EXPORT from MDS.  Cap is being migrated _from_ this mds to a
3183  * different one.  If we are the most recent migration we've seen (as
3184  * indicated by mseq), make note of the migrating cap bits for the
3185  * duration (until we see the corresponding IMPORT).
3186  *
3187  * caller holds s_mutex
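 *
 * When the target session must be opened, the two session mutexes
 * are then retaken in a consistent mds-rank order (the second via
 * mutex_lock_nested()) to avoid deadlock; see the retry path below.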
3188  */
3189 static void handle_cap_export(struct inode *inode, struct ceph_mds_caps *ex,
3190 			      struct ceph_mds_cap_peer *ph,
3191 			      struct ceph_mds_session *session)
3192 {
3193 	struct ceph_mds_client *mdsc = ceph_inode_to_client(inode)->mdsc;
3194 	struct ceph_mds_session *tsession = NULL;
3195 	struct ceph_cap *cap, *tcap, *new_cap = NULL;
3196 	struct ceph_inode_info *ci = ceph_inode(inode);
3197 	u64 t_cap_id;
3198 	unsigned mseq = le32_to_cpu(ex->migrate_seq);
3199 	unsigned t_seq, t_mseq;
3200 	int target, issued;
3201 	int mds = session->s_mds;
3202 
3203 	if (ph) {
3204 		t_cap_id = le64_to_cpu(ph->cap_id);
3205 		t_seq = le32_to_cpu(ph->seq);
3206 		t_mseq = le32_to_cpu(ph->mseq);
3207 		target = le32_to_cpu(ph->mds);
3208 	} else {
3209 		t_cap_id = t_seq = t_mseq = 0;
3210 		target = -1;
3211 	}
3212 
3213 	dout("handle_cap_export inode %p ci %p mds%d mseq %d target %d\n",
3214 	     inode, ci, mds, mseq, target);
3215 retry:
3216 	spin_lock(&ci->i_ceph_lock);
3217 	cap = __get_cap_for_mds(ci, mds);
3218 	if (!cap || cap->cap_id != le64_to_cpu(ex->cap_id))
3219 		goto out_unlock;
3220 
3221 	if (target < 0) {
3222 		__ceph_remove_cap(cap, false);
3223 		goto out_unlock;
3224 	}
3225 
3226 	/*
3227 	 * now we know we haven't received the cap import message yet
3228 	 * because the exported cap still exists.
3229 	 */
3230 
3231 	issued = cap->issued;
3232 	WARN_ON(issued != cap->implemented);
3233 
3234 	tcap = __get_cap_for_mds(ci, target);
3235 	if (tcap) {
3236 		/* already have caps from the target */
3237 		if (tcap->cap_id != t_cap_id ||
3238 		    ceph_seq_cmp(tcap->seq, t_seq) < 0) {
3239 			dout(" updating import cap %p mds%d\n", tcap, target);
3240 			tcap->cap_id = t_cap_id;
3241 			tcap->seq = t_seq - 1;
3242 			tcap->issue_seq = t_seq - 1;
3243 			tcap->mseq = t_mseq;
3244 			tcap->issued |= issued;
3245 			tcap->implemented |= issued;
3246 			if (cap == ci->i_auth_cap)
3247 				ci->i_auth_cap = tcap;
3248 			if (ci->i_flushing_caps && ci->i_auth_cap == tcap) {
3249 				spin_lock(&mdsc->cap_dirty_lock);
3250 				list_move_tail(&ci->i_flushing_item,
3251 					       &tcap->session->s_cap_flushing);
3252 				spin_unlock(&mdsc->cap_dirty_lock);
3253 			}
3254 		}
3255 		__ceph_remove_cap(cap, false);
3256 		goto out_unlock;
3257 	} else if (tsession) {
3258 		/* add placeholder for the export target */
3259 		int flag = (cap == ci->i_auth_cap) ? CEPH_CAP_FLAG_AUTH : 0;
3260 		ceph_add_cap(inode, tsession, t_cap_id, -1, issued, 0,
3261 			     t_seq - 1, t_mseq, (u64)-1, flag, &new_cap);
3262 
3263 		__ceph_remove_cap(cap, false);
3264 		goto out_unlock;
3265 	}
3266 
3267 	spin_unlock(&ci->i_ceph_lock);
3268 	mutex_unlock(&session->s_mutex);
3269 
3270 	/* open target session */
3271 	tsession = ceph_mdsc_open_export_target_session(mdsc, target);
3272 	if (!IS_ERR(tsession)) {
3273 		if (mds > target) {
3274 			mutex_lock(&session->s_mutex);
3275 			mutex_lock_nested(&tsession->s_mutex,
3276 					  SINGLE_DEPTH_NESTING);
3277 		} else {
3278 			mutex_lock(&tsession->s_mutex);
3279 			mutex_lock_nested(&session->s_mutex,
3280 					  SINGLE_DEPTH_NESTING);
3281 		}
3282 		new_cap = ceph_get_cap(mdsc, NULL);
3283 	} else {
3284 		WARN_ON(1);
3285 		tsession = NULL;
3286 		target = -1;
3287 	}
3288 	goto retry;
3289 
3290 out_unlock:
3291 	spin_unlock(&ci->i_ceph_lock);
3292 	mutex_unlock(&session->s_mutex);
3293 	if (tsession) {
3294 		mutex_unlock(&tsession->s_mutex);
3295 		ceph_put_mds_session(tsession);
3296 	}
3297 	if (new_cap)
3298 		ceph_put_cap(mdsc, new_cap);
3299 }
3300 
3301 /*
3302  * Handle cap IMPORT.
3303  *
3304  * caller holds s_mutex. acquires i_ceph_lock
3305  */
3306 static void handle_cap_import(struct ceph_mds_client *mdsc,
3307 			      struct inode *inode, struct ceph_mds_caps *im,
3308 			      struct ceph_mds_cap_peer *ph,
3309 			      struct ceph_mds_session *session,
3310 			      struct ceph_cap **target_cap, int *old_issued)
3311 	__acquires(ci->i_ceph_lock)
3312 {
3313 	struct ceph_inode_info *ci = ceph_inode(inode);
3314 	struct ceph_cap *cap, *ocap, *new_cap = NULL;
3315 	int mds = session->s_mds;
3316 	int issued;
3317 	unsigned caps = le32_to_cpu(im->caps);
3318 	unsigned wanted = le32_to_cpu(im->wanted);
3319 	unsigned seq = le32_to_cpu(im->seq);
3320 	unsigned mseq = le32_to_cpu(im->migrate_seq);
3321 	u64 realmino = le64_to_cpu(im->realm);
3322 	u64 cap_id = le64_to_cpu(im->cap_id);
3323 	u64 p_cap_id;
3324 	int peer;
3325 
3326 	if (ph) {
3327 		p_cap_id = le64_to_cpu(ph->cap_id);
3328 		peer = le32_to_cpu(ph->mds);
3329 	} else {
3330 		p_cap_id = 0;
3331 		peer = -1;
3332 	}
3333 
3334 	dout("handle_cap_import inode %p ci %p mds%d mseq %d peer %d\n",
3335 	     inode, ci, mds, mseq, peer);
3336 
3337 retry:
3338 	spin_lock(&ci->i_ceph_lock);
3339 	cap = __get_cap_for_mds(ci, mds);
3340 	if (!cap) {
3341 		if (!new_cap) {
3342 			spin_unlock(&ci->i_ceph_lock);
3343 			new_cap = ceph_get_cap(mdsc, NULL);
3344 			goto retry;
3345 		}
3346 		cap = new_cap;
3347 	} else {
3348 		if (new_cap) {
3349 			ceph_put_cap(mdsc, new_cap);
3350 			new_cap = NULL;
3351 		}
3352 	}
3353 
3354 	__ceph_caps_issued(ci, &issued);
3355 	issued |= __ceph_caps_dirty(ci);
3356 
3357 	ceph_add_cap(inode, session, cap_id, -1, caps, wanted, seq, mseq,
3358 		     realmino, CEPH_CAP_FLAG_AUTH, &new_cap);
3359 
3360 	ocap = peer >= 0 ? __get_cap_for_mds(ci, peer) : NULL;
3361 	if (ocap && ocap->cap_id == p_cap_id) {
3362 		dout(" remove export cap %p mds%d flags %d\n",
3363 		     ocap, peer, ph->flags);
3364 		if ((ph->flags & CEPH_CAP_FLAG_AUTH) &&
3365 		    (ocap->seq != le32_to_cpu(ph->seq) ||
3366 		     ocap->mseq != le32_to_cpu(ph->mseq))) {
3367 			pr_err("handle_cap_import: mismatched seq/mseq: "
3368 			       "ino (%llx.%llx) mds%d seq %d mseq %d "
3369 			       "importer mds%d has peer seq %d mseq %d\n",
3370 			       ceph_vinop(inode), peer, ocap->seq,
3371 			       ocap->mseq, mds, le32_to_cpu(ph->seq),
3372 			       le32_to_cpu(ph->mseq));
3373 		}
3374 		__ceph_remove_cap(ocap, (ph->flags & CEPH_CAP_FLAG_RELEASE));
3375 	}
3376 
3377 	/* make sure we re-request max_size, if necessary */
3378 	ci->i_wanted_max_size = 0;
3379 	ci->i_requested_max_size = 0;
3380 
3381 	*old_issued = issued;
3382 	*target_cap = cap;
3383 }
3384 
3385 /*
3386  * Handle a caps message from the MDS.
3387  *
3388  * Identify the appropriate session, inode, and call the right handler
3389  * based on the cap op.
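 *
 * Message layout by version (as decoded below): v2+ appends a
 * flock blob after the snap trace; v3+ carries peer info for
 * IMPORT/EXPORT; v4+ adds the inline data version and payload.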
3390  */
3391 void ceph_handle_caps(struct ceph_mds_session *session,
3392 		      struct ceph_msg *msg)
3393 {
3394 	struct ceph_mds_client *mdsc = session->s_mdsc;
3395 	struct super_block *sb = mdsc->fsc->sb;
3396 	struct inode *inode;
3397 	struct ceph_inode_info *ci;
3398 	struct ceph_cap *cap;
3399 	struct ceph_mds_caps *h;
3400 	struct ceph_mds_cap_peer *peer = NULL;
3401 	struct ceph_snap_realm *realm;
3402 	int mds = session->s_mds;
3403 	int op, issued;
3404 	u32 seq, mseq;
3405 	struct ceph_vino vino;
3406 	u64 cap_id;
3407 	u64 size, max_size;
3408 	u64 tid;
3409 	u64 inline_version = 0;
3410 	void *inline_data = NULL;
3411 	u32  inline_len = 0;
3412 	void *snaptrace;
3413 	size_t snaptrace_len;
3414 	void *p, *end;
3415 
3416 	dout("handle_caps from mds%d\n", mds);
3417 
3418 	/* decode */
3419 	end = msg->front.iov_base + msg->front.iov_len;
3420 	tid = le64_to_cpu(msg->hdr.tid);
3421 	if (msg->front.iov_len < sizeof(*h))
3422 		goto bad;
3423 	h = msg->front.iov_base;
3424 	op = le32_to_cpu(h->op);
3425 	vino.ino = le64_to_cpu(h->ino);
3426 	vino.snap = CEPH_NOSNAP;
3427 	cap_id = le64_to_cpu(h->cap_id);
3428 	seq = le32_to_cpu(h->seq);
3429 	mseq = le32_to_cpu(h->migrate_seq);
3430 	size = le64_to_cpu(h->size);
3431 	max_size = le64_to_cpu(h->max_size);
3432 
3433 	snaptrace = h + 1;
3434 	snaptrace_len = le32_to_cpu(h->snap_trace_len);
3435 	p = snaptrace + snaptrace_len;
3436 
3437 	if (le16_to_cpu(msg->hdr.version) >= 2) {
3438 		u32 flock_len;
3439 		ceph_decode_32_safe(&p, end, flock_len, bad);
3440 		if (p + flock_len > end)
3441 			goto bad;
3442 		p += flock_len;
3443 	}
3444 
3445 	if (le16_to_cpu(msg->hdr.version) >= 3) {
3446 		if (op == CEPH_CAP_OP_IMPORT) {
3447 			if (p + sizeof(*peer) > end)
3448 				goto bad;
3449 			peer = p;
3450 			p += sizeof(*peer);
3451 		} else if (op == CEPH_CAP_OP_EXPORT) {
3452 			/* recorded in unused fields */
3453 			peer = (void *)&h->size;
3454 		}
3455 	}
3456 
3457 	if (le16_to_cpu(msg->hdr.version) >= 4) {
3458 		ceph_decode_64_safe(&p, end, inline_version, bad);
3459 		ceph_decode_32_safe(&p, end, inline_len, bad);
3460 		if (p + inline_len > end)
3461 			goto bad;
3462 		inline_data = p;
3463 		p += inline_len;
3464 	}
3465 
3466 	/* lookup ino */
3467 	inode = ceph_find_inode(sb, vino);
3468 	ci = ceph_inode(inode);
3469 	dout(" op %s ino %llx.%llx inode %p\n", ceph_cap_op_name(op), vino.ino,
3470 	     vino.snap, inode);
3471 
3472 	mutex_lock(&session->s_mutex);
3473 	session->s_seq++;
3474 	dout(" mds%d seq %lld cap seq %u\n", session->s_mds, session->s_seq,
3475 	     (unsigned)seq);
3476 
3477 	if (!inode) {
3478 		dout(" i don't have ino %llx\n", vino.ino);
3479 
3480 		if (op == CEPH_CAP_OP_IMPORT) {
3481 			cap = ceph_get_cap(mdsc, NULL);
3482 			cap->cap_ino = vino.ino;
3483 			cap->queue_release = 1;
3484 			cap->cap_id = cap_id;
3485 			cap->mseq = mseq;
3486 			cap->seq = seq;
3487 			spin_lock(&session->s_cap_lock);
3488 			list_add_tail(&cap->session_caps,
3489 					&session->s_cap_releases);
3490 			session->s_num_cap_releases++;
3491 			spin_unlock(&session->s_cap_lock);
3492 		}
3493 		goto flush_cap_releases;
3494 	}
3495 
3496 	/* these will work even if we don't have a cap yet */
3497 	switch (op) {
3498 	case CEPH_CAP_OP_FLUSHSNAP_ACK:
3499 		handle_cap_flushsnap_ack(inode, tid, h, session);
3500 		goto done;
3501 
3502 	case CEPH_CAP_OP_EXPORT:
3503 		handle_cap_export(inode, h, peer, session);
3504 		goto done_unlocked;
3505 
3506 	case CEPH_CAP_OP_IMPORT:
3507 		realm = NULL;
3508 		if (snaptrace_len) {
3509 			down_write(&mdsc->snap_rwsem);
3510 			ceph_update_snap_trace(mdsc, snaptrace,
3511 					       snaptrace + snaptrace_len,
3512 					       false, &realm);
3513 			downgrade_write(&mdsc->snap_rwsem);
3514 		} else {
3515 			down_read(&mdsc->snap_rwsem);
3516 		}
3517 		handle_cap_import(mdsc, inode, h, peer, session,
3518 				  &cap, &issued);
3519 		handle_cap_grant(mdsc, inode, h,
3520 				 inline_version, inline_data, inline_len,
3521 				 msg->middle, session, cap, issued);
3522 		if (realm)
3523 			ceph_put_snap_realm(mdsc, realm);
3524 		goto done_unlocked;
3525 	}
3526 
3527 	/* the rest require a cap */
3528 	spin_lock(&ci->i_ceph_lock);
3529 	cap = __get_cap_for_mds(ceph_inode(inode), mds);
3530 	if (!cap) {
3531 		dout(" no cap on %p ino %llx.%llx from mds%d\n",
3532 		     inode, ceph_ino(inode), ceph_snap(inode), mds);
3533 		spin_unlock(&ci->i_ceph_lock);
3534 		goto flush_cap_releases;
3535 	}
3536 
3537 	/* note that each of these drops i_ceph_lock for us */
3538 	switch (op) {
3539 	case CEPH_CAP_OP_REVOKE:
3540 	case CEPH_CAP_OP_GRANT:
3541 		__ceph_caps_issued(ci, &issued);
3542 		issued |= __ceph_caps_dirty(ci);
3543 		handle_cap_grant(mdsc, inode, h,
3544 				 inline_version, inline_data, inline_len,
3545 				 msg->middle, session, cap, issued);
3546 		goto done_unlocked;
3547 
3548 	case CEPH_CAP_OP_FLUSH_ACK:
3549 		handle_cap_flush_ack(inode, tid, h, session, cap);
3550 		break;
3551 
3552 	case CEPH_CAP_OP_TRUNC:
3553 		handle_cap_trunc(inode, h, session);
3554 		break;
3555 
3556 	default:
3557 		spin_unlock(&ci->i_ceph_lock);
3558 		pr_err("ceph_handle_caps: unknown cap op %d %s\n", op,
3559 		       ceph_cap_op_name(op));
3560 	}
3561 
3562 	goto done;
3563 
3564 flush_cap_releases:
3565 	/*
3566 	 * send any cap release message to try to move things
3567 	 * along for the mds (who clearly thinks we still have this
3568 	 * cap).
3569 	 */
3570 	ceph_send_cap_releases(mdsc, session);
3571 
3572 done:
3573 	mutex_unlock(&session->s_mutex);
3574 done_unlocked:
3575 	iput(inode);
3576 	return;
3577 
3578 bad:
3579 	pr_err("ceph_handle_caps: corrupt message\n");
3580 	ceph_msg_dump(msg);
3581 	return;
3582 }
3583 
3584 /*
3585  * Delayed work handler to process the end of the delayed cap release LRU list.
3586  */
3587 void ceph_check_delayed_caps(struct ceph_mds_client *mdsc)
3588 {
3589 	struct ceph_inode_info *ci;
3590 	int flags = CHECK_CAPS_NODELAY;
3591 
3592 	dout("check_delayed_caps\n");
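	/*
	 * note: both break paths below exit the loop with cap_delay_lock
	 * held; the unlock after the loop releases it.
	 */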
3593 	while (1) {
3594 		spin_lock(&mdsc->cap_delay_lock);
3595 		if (list_empty(&mdsc->cap_delay_list))
3596 			break;
3597 		ci = list_first_entry(&mdsc->cap_delay_list,
3598 				      struct ceph_inode_info,
3599 				      i_cap_delay_list);
3600 		if ((ci->i_ceph_flags & CEPH_I_FLUSH) == 0 &&
3601 		    time_before(jiffies, ci->i_hold_caps_max))
3602 			break;
3603 		list_del_init(&ci->i_cap_delay_list);
3604 		spin_unlock(&mdsc->cap_delay_lock);
3605 		dout("check_delayed_caps on %p\n", &ci->vfs_inode);
3606 		ceph_check_caps(ci, flags, NULL);
3607 	}
3608 	spin_unlock(&mdsc->cap_delay_lock);
3609 }
3610 
3611 /*
3612  * Flush all dirty caps to the mds
3613  */
3614 void ceph_flush_dirty_caps(struct ceph_mds_client *mdsc)
3615 {
3616 	struct ceph_inode_info *ci;
3617 	struct inode *inode;
3618 
3619 	dout("flush_dirty_caps\n");
3620 	spin_lock(&mdsc->cap_dirty_lock);
3621 	while (!list_empty(&mdsc->cap_dirty)) {
3622 		ci = list_first_entry(&mdsc->cap_dirty, struct ceph_inode_info,
3623 				      i_dirty_item);
3624 		inode = &ci->vfs_inode;
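		/* take a ref so the inode survives dropping cap_dirty_lock */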
3625 		ihold(inode);
3626 		dout("flush_dirty_caps %p\n", inode);
3627 		spin_unlock(&mdsc->cap_dirty_lock);
3628 		ceph_check_caps(ci, CHECK_CAPS_NODELAY|CHECK_CAPS_FLUSH, NULL);
3629 		iput(inode);
3630 		spin_lock(&mdsc->cap_dirty_lock);
3631 	}
3632 	spin_unlock(&mdsc->cap_dirty_lock);
3633 	dout("flush_dirty_caps done\n");
3634 }
3635 
3636 /*
3637  * Drop an open-file reference.  If it was the last open reference,
3638  * we may need to release capabilities to the MDS (or schedule
3639  * their delayed release).
3640  */
3641 void ceph_put_fmode(struct ceph_inode_info *ci, int fmode)
3642 {
3643 	struct inode *inode = &ci->vfs_inode;
3644 	int last = 0;
3645 
3646 	spin_lock(&ci->i_ceph_lock);
3647 	dout("put_fmode %p fmode %d %d -> %d\n", inode, fmode,
3648 	     ci->i_nr_by_mode[fmode], ci->i_nr_by_mode[fmode]-1);
3649 	BUG_ON(ci->i_nr_by_mode[fmode] == 0);
3650 	if (--ci->i_nr_by_mode[fmode] == 0)
3651 		last++;
3652 	spin_unlock(&ci->i_ceph_lock);
3653 
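	/*
	 * snapped inodes are immutable, so there is no cap state to write
	 * back; only head (NOSNAP) inodes need a cap check here.
	 */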
3654 	if (last && ci->i_vino.snap == CEPH_NOSNAP)
3655 		ceph_check_caps(ci, 0, NULL);
3656 }
3657 
3658 /*
3659  * Helpers for embedding cap and dentry lease releases into mds
3660  * requests.
3661  *
3662  * @force is used by dentry_release (below) to force inclusion of a
3663  * record for the directory inode, even when there aren't any caps to
3664  * drop.
3665  */
3666 int ceph_encode_inode_release(void **p, struct inode *inode,
3667 			      int mds, int drop, int unless, int force)
3668 {
3669 	struct ceph_inode_info *ci = ceph_inode(inode);
3670 	struct ceph_cap *cap;
3671 	struct ceph_mds_request_release *rel = *p;
3672 	int used, dirty;
3673 	int ret = 0;
3674 
3675 	spin_lock(&ci->i_ceph_lock);
3676 	used = __ceph_caps_used(ci);
3677 	dirty = __ceph_caps_dirty(ci);
3678 
3679 	dout("encode_inode_release %p mds%d used|dirty %s drop %s unless %s\n",
3680 	     inode, mds, ceph_cap_string(used|dirty), ceph_cap_string(drop),
3681 	     ceph_cap_string(unless));
3682 
3683 	/* only drop unused, clean caps */
3684 	drop &= ~(used | dirty);
3685 
3686 	cap = __get_cap_for_mds(ci, mds);
3687 	if (cap && __cap_is_valid(cap)) {
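		/*
		 * include a release record either because we are actually
		 * dropping caps, or because the caller forced one; the
		 * inner check below distinguishes the two cases.
		 */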
3688 		if (force ||
3689 		    ((cap->issued & drop) &&
3690 		     (cap->issued & unless) == 0)) {
3691 			if ((cap->issued & drop) &&
3692 			    (cap->issued & unless) == 0) {
3693 				int wanted = __ceph_caps_wanted(ci);
3694 				if ((ci->i_ceph_flags & CEPH_I_NODELAY) == 0)
3695 					wanted |= cap->mds_wanted;
3696 				dout("encode_inode_release %p cap %p "
3697 				     "%s -> %s, wanted %s -> %s\n", inode, cap,
3698 				     ceph_cap_string(cap->issued),
3699 				     ceph_cap_string(cap->issued & ~drop),
3700 				     ceph_cap_string(cap->mds_wanted),
3701 				     ceph_cap_string(wanted));
3702 
3703 				cap->issued &= ~drop;
3704 				cap->implemented &= ~drop;
3705 				cap->mds_wanted = wanted;
3706 			} else {
3707 				dout("encode_inode_release %p cap %p %s"
3708 				     " (force)\n", inode, cap,
3709 				     ceph_cap_string(cap->issued));
3710 			}
3711 
3712 			rel->ino = cpu_to_le64(ceph_ino(inode));
3713 			rel->cap_id = cpu_to_le64(cap->cap_id);
3714 			rel->seq = cpu_to_le32(cap->seq);
3715 			rel->issue_seq = cpu_to_le32(cap->issue_seq);
3716 			rel->mseq = cpu_to_le32(cap->mseq);
3717 			rel->caps = cpu_to_le32(cap->implemented);
3718 			rel->wanted = cpu_to_le32(cap->mds_wanted);
3719 			rel->dname_len = 0;
3720 			rel->dname_seq = 0;
3721 			*p += sizeof(*rel);
3722 			ret = 1;
3723 		} else {
3724 			dout("encode_inode_release %p cap %p %s\n",
3725 			     inode, cap, ceph_cap_string(cap->issued));
3726 		}
3727 	}
3728 	spin_unlock(&ci->i_ceph_lock);
3729 	return ret;
3730 }
3731 
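/*
 * A dentry release is an inode release record for the parent directory,
 * followed immediately by the dentry name; dname_len and dname_seq in
 * the record describe the appended name.
 */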
3732 int ceph_encode_dentry_release(void **p, struct dentry *dentry,
3733 			       int mds, int drop, int unless)
3734 {
3735 	struct inode *dir = d_inode(dentry->d_parent);
3736 	struct ceph_mds_request_release *rel = *p;
3737 	struct ceph_dentry_info *di = ceph_dentry(dentry);
3738 	int force = 0;
3739 	int ret;
3740 
3741 	/*
3742 	 * force a record for the directory caps if we have a dentry lease.
3743 	 * this is racy (we can't take i_ceph_lock and d_lock together), but it
3744 	 * doesn't have to be perfect; the mds will revoke anything we don't
3745 	 * release.
3746 	 */
3747 	spin_lock(&dentry->d_lock);
3748 	if (di->lease_session && di->lease_session->s_mds == mds)
3749 		force = 1;
3750 	spin_unlock(&dentry->d_lock);
3751 
3752 	ret = ceph_encode_inode_release(p, dir, mds, drop, unless, force);
3753 
3754 	spin_lock(&dentry->d_lock);
3755 	if (ret && di->lease_session && di->lease_session->s_mds == mds) {
3756 		dout("encode_dentry_release %p mds%d seq %d\n",
3757 		     dentry, mds, (int)di->lease_seq);
3758 		rel->dname_len = cpu_to_le32(dentry->d_name.len);
3759 		memcpy(*p, dentry->d_name.name, dentry->d_name.len);
3760 		*p += dentry->d_name.len;
3761 		rel->dname_seq = cpu_to_le32(di->lease_seq);
3762 		__ceph_mdsc_drop_dentry_lease(dentry);
3763 	}
3764 	spin_unlock(&dentry->d_lock);
3765 	return ret;
3766 }
3767