// SPDX-License-Identifier: GPL-2.0
#include <linux/ceph/ceph_debug.h>

#include <linux/module.h>
#include <linux/fs.h>
#include <linux/slab.h>
#include <linux/string.h>
#include <linux/uaccess.h>
#include <linux/kernel.h>
#include <linux/writeback.h>
#include <linux/vmalloc.h>
#include <linux/xattr.h>
#include <linux/posix_acl.h>
#include <linux/random.h>
#include <linux/sort.h>
#include <linux/iversion.h>

#include "super.h"
#include "mds_client.h"
#include "cache.h"
#include <linux/ceph/decode.h>

/*
 * Ceph inode operations
 *
 * Implement basic inode helpers (get, alloc) and inode ops (getattr,
 * setattr, etc.), xattr helpers, and helpers for assimilating
 * metadata returned by the MDS into our cache.
 *
 * Also define helpers for doing asynchronous writeback, invalidation,
 * and truncation for the benefit of those who can't afford to block
 * (typically because they are in the message handler path).
 */

static const struct inode_operations ceph_symlink_iops;

static void ceph_inode_work(struct work_struct *work);

/*
 * find or create an inode, given the ceph ino number
 */
static int ceph_set_ino_cb(struct inode *inode, void *data)
{
	struct ceph_inode_info *ci = ceph_inode(inode);
	struct ceph_mds_client *mdsc = ceph_sb_to_mdsc(inode->i_sb);

	ci->i_vino = *(struct ceph_vino *)data;
	inode->i_ino = ceph_vino_to_ino_t(ci->i_vino);
	inode_set_iversion_raw(inode, 0);
	percpu_counter_inc(&mdsc->metric.total_inodes);

	return 0;
}

struct inode *ceph_get_inode(struct super_block *sb, struct ceph_vino vino)
{
	struct inode *inode;

	if (ceph_vino_is_reserved(vino))
		return ERR_PTR(-EREMOTEIO);

	inode = iget5_locked(sb, (unsigned long)vino.ino, ceph_ino_compare,
			     ceph_set_ino_cb, &vino);
	if (!inode)
		return ERR_PTR(-ENOMEM);

	dout("get_inode on %llu=%llx.%llx got %p new %d\n", ceph_present_inode(inode),
	     ceph_vinop(inode), inode, !!(inode->i_state & I_NEW));
	return inode;
}
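
/*
 * Typical caller pattern, as an illustrative sketch (see
 * readdir_prepopulate_inodes_only() below for a real user): an inode
 * returned with I_NEW set must be filled in and then published with
 * unlock_new_inode(), or thrown away with discard_new_inode():
 *
 *	struct inode *in = ceph_get_inode(sb, vino);
 *	if (IS_ERR(in))
 *		return PTR_ERR(in);
 *	... fill in the inode from MDS metadata ...
 *	if (in->i_state & I_NEW)
 *		unlock_new_inode(in);
 *	iput(in);
 */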

/*
 * get/construct snapdir inode for a given directory
 */
struct inode *ceph_get_snapdir(struct inode *parent)
{
	struct ceph_vino vino = {
		.ino = ceph_ino(parent),
		.snap = CEPH_SNAPDIR,
	};
	struct inode *inode = ceph_get_inode(parent->i_sb, vino);
	struct ceph_inode_info *ci = ceph_inode(inode);

	if (IS_ERR(inode))
		return inode;

	if (!S_ISDIR(parent->i_mode)) {
		pr_warn_once("bad snapdir parent type (mode=0%o)\n",
			     parent->i_mode);
		return ERR_PTR(-ENOTDIR);
	}

	if (!(inode->i_state & I_NEW) && !S_ISDIR(inode->i_mode)) {
		pr_warn_once("bad snapdir inode type (mode=0%o)\n",
			     inode->i_mode);
		return ERR_PTR(-ENOTDIR);
	}

	inode->i_mode = parent->i_mode;
	inode->i_uid = parent->i_uid;
	inode->i_gid = parent->i_gid;
	inode->i_mtime = parent->i_mtime;
	inode->i_ctime = parent->i_ctime;
	inode->i_atime = parent->i_atime;
	ci->i_rbytes = 0;
	ci->i_btime = ceph_inode(parent)->i_btime;

	if (inode->i_state & I_NEW) {
		inode->i_op = &ceph_snapdir_iops;
		inode->i_fop = &ceph_snapdir_fops;
		ci->i_snap_caps = CEPH_CAP_PIN; /* so we can open */
		unlock_new_inode(inode);
	}

	return inode;
}

const struct inode_operations ceph_file_iops = {
	.permission = ceph_permission,
	.setattr = ceph_setattr,
	.getattr = ceph_getattr,
	.listxattr = ceph_listxattr,
	.get_acl = ceph_get_acl,
	.set_acl = ceph_set_acl,
};

/*
 * We use a 'frag tree' to keep track of the MDS's directory fragments
 * for a given inode (usually there is just a single fragment).  We
 * need to know when a child frag is delegated to a new MDS, or when
 * it is flagged as replicated, so we can direct our requests
 * accordingly.
 */

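/*
 * Worked example (assuming the frag encoding in ceph_frag.h, where the
 * top 8 bits of a u32 frag hold the prefix length and the low 24 bits
 * hold the value prefix): the root frag ceph_frag_make(0, 0) covers the
 * whole 24-bit hash space.  If the MDS splits it by 2, its children are
 * ceph_frag_make_child(root, 2, 0..3), and a name whose hash starts
 * with binary 10 falls into child 2.  Mapping a dentry name to its leaf
 * frag is then roughly:
 *
 *	u32 hash = ceph_frag_value(ceph_str_hash(ci->i_dir_layout.dl_dir_hash,
 *						 name, name_len));
 *	u32 leaf = ceph_choose_frag(ci, hash, NULL, NULL);
 */
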
/*
 * find/create a frag in the tree
 */
static struct ceph_inode_frag *__get_or_create_frag(struct ceph_inode_info *ci,
						    u32 f)
{
	struct rb_node **p;
	struct rb_node *parent = NULL;
	struct ceph_inode_frag *frag;
	int c;

	p = &ci->i_fragtree.rb_node;
	while (*p) {
		parent = *p;
		frag = rb_entry(parent, struct ceph_inode_frag, node);
		c = ceph_frag_compare(f, frag->frag);
		if (c < 0)
			p = &(*p)->rb_left;
		else if (c > 0)
			p = &(*p)->rb_right;
		else
			return frag;
	}

	frag = kmalloc(sizeof(*frag), GFP_NOFS);
	if (!frag)
		return ERR_PTR(-ENOMEM);

	frag->frag = f;
	frag->split_by = 0;
	frag->mds = -1;
	frag->ndist = 0;

	rb_link_node(&frag->node, parent, p);
	rb_insert_color(&frag->node, &ci->i_fragtree);

	dout("get_or_create_frag added %llx.%llx frag %x\n",
	     ceph_vinop(&ci->vfs_inode), f);
	return frag;
}

/*
 * find a specific frag @f
 */
struct ceph_inode_frag *__ceph_find_frag(struct ceph_inode_info *ci, u32 f)
{
	struct rb_node *n = ci->i_fragtree.rb_node;

	while (n) {
		struct ceph_inode_frag *frag =
			rb_entry(n, struct ceph_inode_frag, node);
		int c = ceph_frag_compare(f, frag->frag);
		if (c < 0)
			n = n->rb_left;
		else if (c > 0)
			n = n->rb_right;
		else
			return frag;
	}
	return NULL;
}

/*
 * Choose frag containing the given value @v.  If @pfrag is
 * specified, copy the frag delegation info to the caller if
 * it is present.
 */
static u32 __ceph_choose_frag(struct ceph_inode_info *ci, u32 v,
			      struct ceph_inode_frag *pfrag, int *found)
{
	u32 t = ceph_frag_make(0, 0);
	struct ceph_inode_frag *frag;
	unsigned nway, i;
	u32 n;

	if (found)
		*found = 0;

	while (1) {
		WARN_ON(!ceph_frag_contains_value(t, v));
		frag = __ceph_find_frag(ci, t);
		if (!frag)
			break; /* t is a leaf */
		if (frag->split_by == 0) {
			if (pfrag)
				memcpy(pfrag, frag, sizeof(*pfrag));
			if (found)
				*found = 1;
			break;
		}

		/* choose child */
		nway = 1 << frag->split_by;
		dout("choose_frag(%x) %x splits by %d (%d ways)\n", v, t,
		     frag->split_by, nway);
		for (i = 0; i < nway; i++) {
			n = ceph_frag_make_child(t, frag->split_by, i);
			if (ceph_frag_contains_value(n, v)) {
				t = n;
				break;
			}
		}
		BUG_ON(i == nway);
	}
	dout("choose_frag(%x) = %x\n", v, t);

	return t;
}

u32 ceph_choose_frag(struct ceph_inode_info *ci, u32 v,
		     struct ceph_inode_frag *pfrag, int *found)
{
	u32 ret;
	mutex_lock(&ci->i_fragtree_mutex);
	ret = __ceph_choose_frag(ci, v, pfrag, found);
	mutex_unlock(&ci->i_fragtree_mutex);
	return ret;
}

/*
 * Process dirfrag (delegation) info from the mds.  Include a leaf
 * fragment in the tree ONLY if ndist > 0.  Otherwise, only
 * branches/splits are included in i_fragtree.
 */
static int ceph_fill_dirfrag(struct inode *inode,
			     struct ceph_mds_reply_dirfrag *dirinfo)
{
	struct ceph_inode_info *ci = ceph_inode(inode);
	struct ceph_inode_frag *frag;
	u32 id = le32_to_cpu(dirinfo->frag);
	int mds = le32_to_cpu(dirinfo->auth);
	int ndist = le32_to_cpu(dirinfo->ndist);
	int diri_auth = -1;
	int i;
	int err = 0;

	spin_lock(&ci->i_ceph_lock);
	if (ci->i_auth_cap)
		diri_auth = ci->i_auth_cap->mds;
	spin_unlock(&ci->i_ceph_lock);

	if (mds == -1) /* CDIR_AUTH_PARENT */
		mds = diri_auth;

	mutex_lock(&ci->i_fragtree_mutex);
	if (ndist == 0 && mds == diri_auth) {
		/* no delegation info needed. */
		frag = __ceph_find_frag(ci, id);
		if (!frag)
			goto out;
		if (frag->split_by == 0) {
			/* tree leaf, remove */
			dout("fill_dirfrag removed %llx.%llx frag %x"
			     " (no ref)\n", ceph_vinop(inode), id);
			rb_erase(&frag->node, &ci->i_fragtree);
			kfree(frag);
		} else {
			/* tree branch, keep and clear */
			dout("fill_dirfrag cleared %llx.%llx frag %x"
			     " referral\n", ceph_vinop(inode), id);
			frag->mds = -1;
			frag->ndist = 0;
		}
		goto out;
	}

	/* find/add this frag to store mds delegation info */
	frag = __get_or_create_frag(ci, id);
	if (IS_ERR(frag)) {
		/* this is not the end of the world; we can continue
		   with bad/inaccurate delegation info */
		pr_err("fill_dirfrag ENOMEM on mds ref %llx.%llx fg %x\n",
		       ceph_vinop(inode), le32_to_cpu(dirinfo->frag));
		err = -ENOMEM;
		goto out;
	}

	frag->mds = mds;
	frag->ndist = min_t(u32, ndist, CEPH_MAX_DIRFRAG_REP);
	for (i = 0; i < frag->ndist; i++)
		frag->dist[i] = le32_to_cpu(dirinfo->dist[i]);
	dout("fill_dirfrag %llx.%llx frag %x ndist=%d\n",
	     ceph_vinop(inode), frag->frag, frag->ndist);

out:
	mutex_unlock(&ci->i_fragtree_mutex);
	return err;
}
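
/*
 * Hedged sketch of how the stored delegation info is later consumed
 * when picking an MDS for a request on this directory (the real logic
 * lives in __choose_mds() in mds_client.c):
 *
 *	struct ceph_inode_frag frag;
 *	int found, mds = -1;
 *
 *	ceph_choose_frag(ci, hash, &frag, &found);
 *	if (found && frag.mds >= 0)
 *		mds = frag.mds;			// explicit referral
 *	else if (ci->i_auth_cap)
 *		mds = ci->i_auth_cap->mds;	// fall back to auth MDS
 */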

static int frag_tree_split_cmp(const void *l, const void *r)
{
	struct ceph_frag_tree_split *ls = (struct ceph_frag_tree_split*)l;
	struct ceph_frag_tree_split *rs = (struct ceph_frag_tree_split*)r;
	return ceph_frag_compare(le32_to_cpu(ls->frag),
				 le32_to_cpu(rs->frag));
}

static bool is_frag_child(u32 f, struct ceph_inode_frag *frag)
{
	if (!frag)
		return f == ceph_frag_make(0, 0);
	if (ceph_frag_bits(f) != ceph_frag_bits(frag->frag) + frag->split_by)
		return false;
	return ceph_frag_contains_value(frag->frag, ceph_frag_value(f));
}

static int ceph_fill_fragtree(struct inode *inode,
			      struct ceph_frag_tree_head *fragtree,
			      struct ceph_mds_reply_dirfrag *dirinfo)
{
	struct ceph_inode_info *ci = ceph_inode(inode);
	struct ceph_inode_frag *frag, *prev_frag = NULL;
	struct rb_node *rb_node;
	unsigned i, split_by, nsplits;
	u32 id;
	bool update = false;

	mutex_lock(&ci->i_fragtree_mutex);
	nsplits = le32_to_cpu(fragtree->nsplits);
	if (nsplits != ci->i_fragtree_nsplits) {
		update = true;
	} else if (nsplits) {
		i = prandom_u32() % nsplits;
		id = le32_to_cpu(fragtree->splits[i].frag);
		if (!__ceph_find_frag(ci, id))
			update = true;
	} else if (!RB_EMPTY_ROOT(&ci->i_fragtree)) {
		rb_node = rb_first(&ci->i_fragtree);
		frag = rb_entry(rb_node, struct ceph_inode_frag, node);
		if (frag->frag != ceph_frag_make(0, 0) || rb_next(rb_node))
			update = true;
	}
	if (!update && dirinfo) {
		id = le32_to_cpu(dirinfo->frag);
		if (id != __ceph_choose_frag(ci, id, NULL, NULL))
			update = true;
	}
	if (!update)
		goto out_unlock;

	if (nsplits > 1) {
		sort(fragtree->splits, nsplits, sizeof(fragtree->splits[0]),
		     frag_tree_split_cmp, NULL);
	}

	dout("fill_fragtree %llx.%llx\n", ceph_vinop(inode));
	rb_node = rb_first(&ci->i_fragtree);
	for (i = 0; i < nsplits; i++) {
		id = le32_to_cpu(fragtree->splits[i].frag);
		split_by = le32_to_cpu(fragtree->splits[i].by);
		if (split_by == 0 || ceph_frag_bits(id) + split_by > 24) {
			pr_err("fill_fragtree %llx.%llx invalid split %d/%u, "
			       "frag %x split by %d\n", ceph_vinop(inode),
			       i, nsplits, id, split_by);
			continue;
		}
		frag = NULL;
		while (rb_node) {
			frag = rb_entry(rb_node, struct ceph_inode_frag, node);
			if (ceph_frag_compare(frag->frag, id) >= 0) {
				if (frag->frag != id)
					frag = NULL;
				else
					rb_node = rb_next(rb_node);
				break;
			}
			rb_node = rb_next(rb_node);
			/* delete stale split/leaf node */
			if (frag->split_by > 0 ||
			    !is_frag_child(frag->frag, prev_frag)) {
				rb_erase(&frag->node, &ci->i_fragtree);
				if (frag->split_by > 0)
					ci->i_fragtree_nsplits--;
				kfree(frag);
			}
			frag = NULL;
		}
		if (!frag) {
			frag = __get_or_create_frag(ci, id);
			if (IS_ERR(frag))
				continue;
		}
		if (frag->split_by == 0)
			ci->i_fragtree_nsplits++;
		frag->split_by = split_by;
		dout(" frag %x split by %d\n", frag->frag, frag->split_by);
		prev_frag = frag;
	}
	while (rb_node) {
		frag = rb_entry(rb_node, struct ceph_inode_frag, node);
		rb_node = rb_next(rb_node);
		/* delete stale split/leaf node */
		if (frag->split_by > 0 ||
		    !is_frag_child(frag->frag, prev_frag)) {
			rb_erase(&frag->node, &ci->i_fragtree);
			if (frag->split_by > 0)
				ci->i_fragtree_nsplits--;
			kfree(frag);
		}
	}
out_unlock:
	mutex_unlock(&ci->i_fragtree_mutex);
	return 0;
}

/*
 * initialize a newly allocated inode.
 */
struct inode *ceph_alloc_inode(struct super_block *sb)
{
	struct ceph_inode_info *ci;
	int i;

	ci = kmem_cache_alloc(ceph_inode_cachep, GFP_NOFS);
	if (!ci)
		return NULL;

	dout("alloc_inode %p\n", &ci->vfs_inode);

	spin_lock_init(&ci->i_ceph_lock);

	ci->i_version = 0;
	ci->i_inline_version = 0;
	ci->i_time_warp_seq = 0;
	ci->i_ceph_flags = 0;
	atomic64_set(&ci->i_ordered_count, 1);
	atomic64_set(&ci->i_release_count, 1);
	atomic64_set(&ci->i_complete_seq[0], 0);
	atomic64_set(&ci->i_complete_seq[1], 0);
	ci->i_symlink = NULL;

	ci->i_max_bytes = 0;
	ci->i_max_files = 0;

	memset(&ci->i_dir_layout, 0, sizeof(ci->i_dir_layout));
	memset(&ci->i_cached_layout, 0, sizeof(ci->i_cached_layout));
	RCU_INIT_POINTER(ci->i_layout.pool_ns, NULL);

	ci->i_fragtree = RB_ROOT;
	mutex_init(&ci->i_fragtree_mutex);

	ci->i_xattrs.blob = NULL;
	ci->i_xattrs.prealloc_blob = NULL;
	ci->i_xattrs.dirty = false;
	ci->i_xattrs.index = RB_ROOT;
	ci->i_xattrs.count = 0;
	ci->i_xattrs.names_size = 0;
	ci->i_xattrs.vals_size = 0;
	ci->i_xattrs.version = 0;
	ci->i_xattrs.index_version = 0;

	ci->i_caps = RB_ROOT;
	ci->i_auth_cap = NULL;
	ci->i_dirty_caps = 0;
	ci->i_flushing_caps = 0;
	INIT_LIST_HEAD(&ci->i_dirty_item);
	INIT_LIST_HEAD(&ci->i_flushing_item);
	ci->i_prealloc_cap_flush = NULL;
	INIT_LIST_HEAD(&ci->i_cap_flush_list);
	init_waitqueue_head(&ci->i_cap_wq);
	ci->i_hold_caps_max = 0;
	INIT_LIST_HEAD(&ci->i_cap_delay_list);
	INIT_LIST_HEAD(&ci->i_cap_snaps);
	ci->i_head_snapc = NULL;
	ci->i_snap_caps = 0;

	ci->i_last_rd = ci->i_last_wr = jiffies - 3600 * HZ;
	for (i = 0; i < CEPH_FILE_MODE_BITS; i++)
		ci->i_nr_by_mode[i] = 0;

	mutex_init(&ci->i_truncate_mutex);
	ci->i_truncate_seq = 0;
	ci->i_truncate_size = 0;
	ci->i_truncate_pending = 0;

	ci->i_max_size = 0;
	ci->i_reported_size = 0;
	ci->i_wanted_max_size = 0;
	ci->i_requested_max_size = 0;

	ci->i_pin_ref = 0;
	ci->i_rd_ref = 0;
	ci->i_rdcache_ref = 0;
	ci->i_wr_ref = 0;
	ci->i_wb_ref = 0;
	ci->i_fx_ref = 0;
	ci->i_wrbuffer_ref = 0;
	ci->i_wrbuffer_ref_head = 0;
	atomic_set(&ci->i_filelock_ref, 0);
	atomic_set(&ci->i_shared_gen, 1);
	ci->i_rdcache_gen = 0;
	ci->i_rdcache_revoking = 0;

	INIT_LIST_HEAD(&ci->i_unsafe_dirops);
	INIT_LIST_HEAD(&ci->i_unsafe_iops);
	spin_lock_init(&ci->i_unsafe_lock);

	ci->i_snap_realm = NULL;
	INIT_LIST_HEAD(&ci->i_snap_realm_item);
	INIT_LIST_HEAD(&ci->i_snap_flush_item);

	INIT_WORK(&ci->i_work, ceph_inode_work);
	ci->i_work_mask = 0;
	memset(&ci->i_btime, '\0', sizeof(ci->i_btime));

	ceph_fscache_inode_init(ci);

	return &ci->vfs_inode;
}

void ceph_free_inode(struct inode *inode)
{
	struct ceph_inode_info *ci = ceph_inode(inode);

	kfree(ci->i_symlink);
	kmem_cache_free(ceph_inode_cachep, ci);
}

void ceph_evict_inode(struct inode *inode)
{
	struct ceph_inode_info *ci = ceph_inode(inode);
	struct ceph_mds_client *mdsc = ceph_sb_to_mdsc(inode->i_sb);
	struct ceph_inode_frag *frag;
	struct rb_node *n;

	dout("evict_inode %p ino %llx.%llx\n", inode, ceph_vinop(inode));

	percpu_counter_dec(&mdsc->metric.total_inodes);

	truncate_inode_pages_final(&inode->i_data);
	clear_inode(inode);

	ceph_fscache_unregister_inode_cookie(ci);

	__ceph_remove_caps(ci);

	if (__ceph_has_any_quota(ci))
		ceph_adjust_quota_realms_count(inode, false);

	/*
	 * we may still have a snap_realm reference if there are stray
	 * caps in i_snap_caps.
	 */
	if (ci->i_snap_realm) {
		if (ceph_snap(inode) == CEPH_NOSNAP) {
			dout(" dropping residual ref to snap realm %p\n",
			     ci->i_snap_realm);
			ceph_change_snap_realm(inode, NULL);
		} else {
			ceph_put_snapid_map(mdsc, ci->i_snapid_map);
			ci->i_snap_realm = NULL;
		}
	}

	while ((n = rb_first(&ci->i_fragtree)) != NULL) {
		frag = rb_entry(n, struct ceph_inode_frag, node);
		rb_erase(n, &ci->i_fragtree);
		kfree(frag);
	}
	ci->i_fragtree_nsplits = 0;

	__ceph_destroy_xattrs(ci);
	if (ci->i_xattrs.blob)
		ceph_buffer_put(ci->i_xattrs.blob);
	if (ci->i_xattrs.prealloc_blob)
		ceph_buffer_put(ci->i_xattrs.prealloc_blob);

	ceph_put_string(rcu_dereference_raw(ci->i_layout.pool_ns));
	ceph_put_string(rcu_dereference_raw(ci->i_cached_layout.pool_ns));
}

static inline blkcnt_t calc_inode_blocks(u64 size)
{
	return (size + (1<<9) - 1) >> 9;
}
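
/*
 * i_blocks is counted in 512-byte sectors, rounded up: for example,
 * calc_inode_blocks(0) == 0, calc_inode_blocks(1) == 1,
 * calc_inode_blocks(512) == 1 and calc_inode_blocks(513) == 2.
 */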

/*
 * Helpers to fill in size, ctime, mtime, and atime.  We have to be
 * careful because either the client or MDS may have more up to date
 * info, depending on which capabilities are held, and whether
 * time_warp_seq or truncate_seq have increased.  (Ordinarily, mtime
 * and size are monotonically increasing, except when utimes() or
 * truncate() increments the corresponding _seq values.)
 */
int ceph_fill_file_size(struct inode *inode, int issued,
			u32 truncate_seq, u64 truncate_size, u64 size)
{
	struct ceph_inode_info *ci = ceph_inode(inode);
	int queue_trunc = 0;
	loff_t isize = i_size_read(inode);

	if (ceph_seq_cmp(truncate_seq, ci->i_truncate_seq) > 0 ||
	    (truncate_seq == ci->i_truncate_seq && size > isize)) {
		dout("size %lld -> %llu\n", isize, size);
		if (size > 0 && S_ISDIR(inode->i_mode)) {
			pr_err("fill_file_size non-zero size for directory\n");
			size = 0;
		}
		i_size_write(inode, size);
		inode->i_blocks = calc_inode_blocks(size);
		ci->i_reported_size = size;
		if (truncate_seq != ci->i_truncate_seq) {
			dout("truncate_seq %u -> %u\n",
			     ci->i_truncate_seq, truncate_seq);
			ci->i_truncate_seq = truncate_seq;

			/* the MDS should have revoked these caps */
			WARN_ON_ONCE(issued & (CEPH_CAP_FILE_EXCL |
					       CEPH_CAP_FILE_RD |
					       CEPH_CAP_FILE_WR |
					       CEPH_CAP_FILE_LAZYIO));
			/*
			 * If we hold the relevant caps, or if the file is
			 * opened or mmapped (i.e. we may not be the only
			 * client referencing it), the pending truncate
			 * must be queued.
			 */
			if ((issued & (CEPH_CAP_FILE_CACHE|
				       CEPH_CAP_FILE_BUFFER)) ||
			    mapping_mapped(inode->i_mapping) ||
			    __ceph_is_file_opened(ci)) {
				ci->i_truncate_pending++;
				queue_trunc = 1;
			}
		}
	}
	if (ceph_seq_cmp(truncate_seq, ci->i_truncate_seq) >= 0 &&
	    ci->i_truncate_size != truncate_size) {
		dout("truncate_size %lld -> %llu\n", ci->i_truncate_size,
		     truncate_size);
		ci->i_truncate_size = truncate_size;
	}

	if (queue_trunc)
		ceph_fscache_invalidate(inode);

	return queue_trunc;
}
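
/*
 * Illustrative example of the truncate_seq interplay above: if the
 * client knows truncate_seq 3 and size 100, an MDS report of
 * (seq 3, size 200) simply grows the file, while (seq 4, size 0)
 * reflects a truncate() by another client: the smaller size is
 * applied, i_truncate_seq is bumped, and pagecache truncation is
 * queued if the file is open, mmapped, or we hold Fc/Fb caps.
 */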

void ceph_fill_file_time(struct inode *inode, int issued,
			 u64 time_warp_seq, struct timespec64 *ctime,
			 struct timespec64 *mtime, struct timespec64 *atime)
{
	struct ceph_inode_info *ci = ceph_inode(inode);
	int warn = 0;

	if (issued & (CEPH_CAP_FILE_EXCL|
		      CEPH_CAP_FILE_WR|
		      CEPH_CAP_FILE_BUFFER|
		      CEPH_CAP_AUTH_EXCL|
		      CEPH_CAP_XATTR_EXCL)) {
		if (ci->i_version == 0 ||
		    timespec64_compare(ctime, &inode->i_ctime) > 0) {
			dout("ctime %lld.%09ld -> %lld.%09ld inc w/ cap\n",
			     inode->i_ctime.tv_sec, inode->i_ctime.tv_nsec,
			     ctime->tv_sec, ctime->tv_nsec);
			inode->i_ctime = *ctime;
		}
		if (ci->i_version == 0 ||
		    ceph_seq_cmp(time_warp_seq, ci->i_time_warp_seq) > 0) {
			/* the MDS did a utimes() */
			dout("mtime %lld.%09ld -> %lld.%09ld "
			     "tw %d -> %d\n",
			     inode->i_mtime.tv_sec, inode->i_mtime.tv_nsec,
			     mtime->tv_sec, mtime->tv_nsec,
			     ci->i_time_warp_seq, (int)time_warp_seq);

			inode->i_mtime = *mtime;
			inode->i_atime = *atime;
			ci->i_time_warp_seq = time_warp_seq;
		} else if (time_warp_seq == ci->i_time_warp_seq) {
			/* nobody did utimes(); take the max */
			if (timespec64_compare(mtime, &inode->i_mtime) > 0) {
				dout("mtime %lld.%09ld -> %lld.%09ld inc\n",
				     inode->i_mtime.tv_sec,
				     inode->i_mtime.tv_nsec,
				     mtime->tv_sec, mtime->tv_nsec);
				inode->i_mtime = *mtime;
			}
			if (timespec64_compare(atime, &inode->i_atime) > 0) {
				dout("atime %lld.%09ld -> %lld.%09ld inc\n",
				     inode->i_atime.tv_sec,
				     inode->i_atime.tv_nsec,
				     atime->tv_sec, atime->tv_nsec);
				inode->i_atime = *atime;
			}
		} else if (issued & CEPH_CAP_FILE_EXCL) {
			/* we did a utimes(); ignore mds values */
		} else {
			warn = 1;
		}
	} else {
		/* we have no write|excl caps; whatever the MDS says is true */
		if (ceph_seq_cmp(time_warp_seq, ci->i_time_warp_seq) >= 0) {
			inode->i_ctime = *ctime;
			inode->i_mtime = *mtime;
			inode->i_atime = *atime;
			ci->i_time_warp_seq = time_warp_seq;
		} else {
			warn = 1;
		}
	}
	if (warn) /* time_warp_seq shouldn't go backwards */
		dout("%p mds time_warp_seq %llu < %u\n",
		     inode, time_warp_seq, ci->i_time_warp_seq);
}

/*
 * Populate an inode based on info from mds.  May be called on new or
 * existing inodes.
 */
int ceph_fill_inode(struct inode *inode, struct page *locked_page,
		    struct ceph_mds_reply_info_in *iinfo,
		    struct ceph_mds_reply_dirfrag *dirinfo,
		    struct ceph_mds_session *session, int cap_fmode,
		    struct ceph_cap_reservation *caps_reservation)
{
	struct ceph_mds_client *mdsc = ceph_sb_to_mdsc(inode->i_sb);
	struct ceph_mds_reply_inode *info = iinfo->in;
	struct ceph_inode_info *ci = ceph_inode(inode);
	int issued, new_issued, info_caps;
	struct timespec64 mtime, atime, ctime;
	struct ceph_buffer *xattr_blob = NULL;
	struct ceph_buffer *old_blob = NULL;
	struct ceph_string *pool_ns = NULL;
	struct ceph_cap *new_cap = NULL;
	int err = 0;
	bool wake = false;
	bool queue_trunc = false;
	bool new_version = false;
	bool fill_inline = false;
	umode_t mode = le32_to_cpu(info->mode);
	dev_t rdev = le32_to_cpu(info->rdev);

	lockdep_assert_held(&mdsc->snap_rwsem);

	dout("%s %p ino %llx.%llx v %llu had %llu\n", __func__,
	     inode, ceph_vinop(inode), le64_to_cpu(info->version),
	     ci->i_version);

	/* Once I_NEW is cleared, we can't change type or dev numbers */
	if (inode->i_state & I_NEW) {
		inode->i_mode = mode;
	} else {
		if (inode_wrong_type(inode, mode)) {
			pr_warn_once("inode type changed! (ino %llx.%llx is 0%o, mds says 0%o)\n",
				     ceph_vinop(inode), inode->i_mode, mode);
			return -ESTALE;
		}

		if ((S_ISCHR(mode) || S_ISBLK(mode)) && inode->i_rdev != rdev) {
			pr_warn_once("dev inode rdev changed! (ino %llx.%llx is %u:%u, mds says %u:%u)\n",
				     ceph_vinop(inode), MAJOR(inode->i_rdev),
				     MINOR(inode->i_rdev), MAJOR(rdev),
				     MINOR(rdev));
			return -ESTALE;
		}
	}

	info_caps = le32_to_cpu(info->cap.caps);

	/* prealloc new cap struct */
	if (info_caps && ceph_snap(inode) == CEPH_NOSNAP) {
		new_cap = ceph_get_cap(mdsc, caps_reservation);
		if (!new_cap)
			return -ENOMEM;
	}

	/*
	 * prealloc xattr data, if it looks like we'll need it.  only
	 * if len > 4 (meaning there are actually xattrs; the first 4
	 * bytes are the xattr count).
	 */
	if (iinfo->xattr_len > 4) {
		xattr_blob = ceph_buffer_new(iinfo->xattr_len, GFP_NOFS);
		if (!xattr_blob)
			pr_err("%s ENOMEM xattr blob %d bytes\n", __func__,
			       iinfo->xattr_len);
	}

	if (iinfo->pool_ns_len > 0)
		pool_ns = ceph_find_or_create_string(iinfo->pool_ns_data,
						     iinfo->pool_ns_len);

	if (ceph_snap(inode) != CEPH_NOSNAP && !ci->i_snapid_map)
		ci->i_snapid_map = ceph_get_snapid_map(mdsc, ceph_snap(inode));

	spin_lock(&ci->i_ceph_lock);

	/*
	 * provided version will be odd if the inode value is projected
	 * (unstable), and even if it is stable.  skip the update if we
	 * have newer stable info (ours>=theirs, e.g. due to racing mds
	 * replies), unless we are getting projected (unstable) info (in
	 * which case the version is odd, and we want ours>theirs).
	 *   us   them
	 *   2    2     skip
	 *   3    2     skip
	 *   3    3     update
	 */
	if (ci->i_version == 0 ||
	    ((info->cap.flags & CEPH_CAP_FLAG_AUTH) &&
	     le64_to_cpu(info->version) > (ci->i_version & ~1)))
		new_version = true;

	/* Update change_attribute */
	inode_set_max_iversion_raw(inode, iinfo->change_attr);

	__ceph_caps_issued(ci, &issued);
	issued |= __ceph_caps_dirty(ci);
	new_issued = ~issued & info_caps;

	/* directories have fl_stripe_unit set to zero */
	if (le32_to_cpu(info->layout.fl_stripe_unit))
		inode->i_blkbits =
			fls(le32_to_cpu(info->layout.fl_stripe_unit)) - 1;
	else
		inode->i_blkbits = CEPH_BLOCK_SHIFT;

	__ceph_update_quota(ci, iinfo->max_bytes, iinfo->max_files);

	if ((new_version || (new_issued & CEPH_CAP_AUTH_SHARED)) &&
	    (issued & CEPH_CAP_AUTH_EXCL) == 0) {
		inode->i_mode = mode;
		inode->i_uid = make_kuid(&init_user_ns, le32_to_cpu(info->uid));
		inode->i_gid = make_kgid(&init_user_ns, le32_to_cpu(info->gid));
		dout("%p mode 0%o uid.gid %d.%d\n", inode, inode->i_mode,
		     from_kuid(&init_user_ns, inode->i_uid),
		     from_kgid(&init_user_ns, inode->i_gid));
		ceph_decode_timespec64(&ci->i_btime, &iinfo->btime);
		ceph_decode_timespec64(&ci->i_snap_btime, &iinfo->snap_btime);
	}

	if ((new_version || (new_issued & CEPH_CAP_LINK_SHARED)) &&
	    (issued & CEPH_CAP_LINK_EXCL) == 0)
		set_nlink(inode, le32_to_cpu(info->nlink));

	if (new_version || (new_issued & CEPH_CAP_ANY_RD)) {
		/* be careful with mtime, atime, size */
		ceph_decode_timespec64(&atime, &info->atime);
		ceph_decode_timespec64(&mtime, &info->mtime);
		ceph_decode_timespec64(&ctime, &info->ctime);
		ceph_fill_file_time(inode, issued,
				le32_to_cpu(info->time_warp_seq),
				&ctime, &mtime, &atime);
	}

	if (new_version || (info_caps & CEPH_CAP_FILE_SHARED)) {
		ci->i_files = le64_to_cpu(info->files);
		ci->i_subdirs = le64_to_cpu(info->subdirs);
	}

	if (new_version ||
	    (new_issued & (CEPH_CAP_ANY_FILE_RD | CEPH_CAP_ANY_FILE_WR))) {
		s64 old_pool = ci->i_layout.pool_id;
		struct ceph_string *old_ns;

		ceph_file_layout_from_legacy(&ci->i_layout, &info->layout);
		old_ns = rcu_dereference_protected(ci->i_layout.pool_ns,
					lockdep_is_held(&ci->i_ceph_lock));
		rcu_assign_pointer(ci->i_layout.pool_ns, pool_ns);

		if (ci->i_layout.pool_id != old_pool || pool_ns != old_ns)
			ci->i_ceph_flags &= ~CEPH_I_POOL_PERM;

		pool_ns = old_ns;

		queue_trunc = ceph_fill_file_size(inode, issued,
					le32_to_cpu(info->truncate_seq),
					le64_to_cpu(info->truncate_size),
					le64_to_cpu(info->size));
		/* only update max_size on auth cap */
		if ((info->cap.flags & CEPH_CAP_FLAG_AUTH) &&
		    ci->i_max_size != le64_to_cpu(info->max_size)) {
			dout("max_size %lld -> %llu\n", ci->i_max_size,
					le64_to_cpu(info->max_size));
			ci->i_max_size = le64_to_cpu(info->max_size);
		}
	}

	/* layout and rstat are not tracked by capabilities; update them
	 * if the inode info is from the auth mds */
	if (new_version || (info->cap.flags & CEPH_CAP_FLAG_AUTH)) {
		if (S_ISDIR(inode->i_mode)) {
			ci->i_dir_layout = iinfo->dir_layout;
			ci->i_rbytes = le64_to_cpu(info->rbytes);
			ci->i_rfiles = le64_to_cpu(info->rfiles);
			ci->i_rsubdirs = le64_to_cpu(info->rsubdirs);
			ci->i_dir_pin = iinfo->dir_pin;
			ci->i_rsnaps = iinfo->rsnaps;
			ceph_decode_timespec64(&ci->i_rctime, &info->rctime);
		}
	}

	/* xattrs */
	/* note that if i_xattrs.len <= 4, i_xattrs.data will still be NULL. */
	if ((ci->i_xattrs.version == 0 || !(issued & CEPH_CAP_XATTR_EXCL))  &&
	    le64_to_cpu(info->xattr_version) > ci->i_xattrs.version) {
		if (ci->i_xattrs.blob)
			old_blob = ci->i_xattrs.blob;
		ci->i_xattrs.blob = xattr_blob;
		if (xattr_blob)
			memcpy(ci->i_xattrs.blob->vec.iov_base,
			       iinfo->xattr_data, iinfo->xattr_len);
		ci->i_xattrs.version = le64_to_cpu(info->xattr_version);
		ceph_forget_all_cached_acls(inode);
		ceph_security_invalidate_secctx(inode);
		xattr_blob = NULL;
	}

	/* finally update i_version */
	if (le64_to_cpu(info->version) > ci->i_version)
		ci->i_version = le64_to_cpu(info->version);

	inode->i_mapping->a_ops = &ceph_aops;

	switch (inode->i_mode & S_IFMT) {
	case S_IFIFO:
	case S_IFBLK:
	case S_IFCHR:
	case S_IFSOCK:
		inode->i_blkbits = PAGE_SHIFT;
		init_special_inode(inode, inode->i_mode, rdev);
		inode->i_op = &ceph_file_iops;
		break;
	case S_IFREG:
		inode->i_op = &ceph_file_iops;
		inode->i_fop = &ceph_file_fops;
		break;
	case S_IFLNK:
		inode->i_op = &ceph_symlink_iops;
		if (!ci->i_symlink) {
			u32 symlen = iinfo->symlink_len;
			char *sym;

			spin_unlock(&ci->i_ceph_lock);

			if (symlen != i_size_read(inode)) {
				pr_err("%s %llx.%llx BAD symlink "
					"size %lld\n", __func__,
					ceph_vinop(inode),
					i_size_read(inode));
				i_size_write(inode, symlen);
				inode->i_blocks = calc_inode_blocks(symlen);
			}

			err = -ENOMEM;
			sym = kstrndup(iinfo->symlink, symlen, GFP_NOFS);
			if (!sym)
				goto out;

			spin_lock(&ci->i_ceph_lock);
			if (!ci->i_symlink)
				ci->i_symlink = sym;
			else
				kfree(sym); /* lost a race */
		}
		inode->i_link = ci->i_symlink;
		break;
	case S_IFDIR:
		inode->i_op = &ceph_dir_iops;
		inode->i_fop = &ceph_dir_fops;
		break;
	default:
		pr_err("%s %llx.%llx BAD mode 0%o\n", __func__,
		       ceph_vinop(inode), inode->i_mode);
	}

	/* were we issued a capability? */
	if (info_caps) {
		if (ceph_snap(inode) == CEPH_NOSNAP) {
			ceph_add_cap(inode, session,
				     le64_to_cpu(info->cap.cap_id),
				     info_caps,
				     le32_to_cpu(info->cap.wanted),
				     le32_to_cpu(info->cap.seq),
				     le32_to_cpu(info->cap.mseq),
				     le64_to_cpu(info->cap.realm),
				     info->cap.flags, &new_cap);

			/* set dir completion flag? */
			if (S_ISDIR(inode->i_mode) &&
			    ci->i_files == 0 && ci->i_subdirs == 0 &&
			    (info_caps & CEPH_CAP_FILE_SHARED) &&
			    (issued & CEPH_CAP_FILE_EXCL) == 0 &&
			    !__ceph_dir_is_complete(ci)) {
				dout(" marking %p complete (empty)\n", inode);
				i_size_write(inode, 0);
				__ceph_dir_set_complete(ci,
					atomic64_read(&ci->i_release_count),
					atomic64_read(&ci->i_ordered_count));
			}

			wake = true;
		} else {
			dout(" %p got snap_caps %s\n", inode,
			     ceph_cap_string(info_caps));
			ci->i_snap_caps |= info_caps;
		}
	}

	if (iinfo->inline_version > 0 &&
	    iinfo->inline_version >= ci->i_inline_version) {
		int cache_caps = CEPH_CAP_FILE_CACHE | CEPH_CAP_FILE_LAZYIO;
		ci->i_inline_version = iinfo->inline_version;
		if (ci->i_inline_version != CEPH_INLINE_NONE &&
		    (locked_page || (info_caps & cache_caps)))
			fill_inline = true;
	}

	if (cap_fmode >= 0) {
		if (!info_caps)
			pr_warn("mds issued no caps on %llx.%llx\n",
				ceph_vinop(inode));
		__ceph_touch_fmode(ci, mdsc, cap_fmode);
	}

	spin_unlock(&ci->i_ceph_lock);

	if (fill_inline)
		ceph_fill_inline_data(inode, locked_page,
				      iinfo->inline_data, iinfo->inline_len);

	if (wake)
		wake_up_all(&ci->i_cap_wq);

	/* queue truncate if we saw i_size decrease */
	if (queue_trunc)
		ceph_queue_vmtruncate(inode);

	/* populate frag tree */
	if (S_ISDIR(inode->i_mode))
		ceph_fill_fragtree(inode, &info->fragtree, dirinfo);

	/* update delegation info? */
	if (dirinfo)
		ceph_fill_dirfrag(inode, dirinfo);

	err = 0;
out:
	if (new_cap)
		ceph_put_cap(mdsc, new_cap);
	ceph_buffer_put(old_blob);
	ceph_buffer_put(xattr_blob);
	ceph_put_string(pool_ns);
	return err;
}

/*
 * caller should hold session s_mutex and dentry->d_lock.
 */
static void __update_dentry_lease(struct inode *dir, struct dentry *dentry,
				  struct ceph_mds_reply_lease *lease,
				  struct ceph_mds_session *session,
				  unsigned long from_time,
				  struct ceph_mds_session **old_lease_session)
{
	struct ceph_dentry_info *di = ceph_dentry(dentry);
	unsigned mask = le16_to_cpu(lease->mask);
	long unsigned duration = le32_to_cpu(lease->duration_ms);
	long unsigned ttl = from_time + (duration * HZ) / 1000;
	long unsigned half_ttl = from_time + (duration * HZ / 2) / 1000;

	dout("update_dentry_lease %p duration %lu ms ttl %lu\n",
	     dentry, duration, ttl);

	/* only track leases on regular dentries */
	if (ceph_snap(dir) != CEPH_NOSNAP)
		return;

	if (mask & CEPH_LEASE_PRIMARY_LINK)
		di->flags |= CEPH_DENTRY_PRIMARY_LINK;
	else
		di->flags &= ~CEPH_DENTRY_PRIMARY_LINK;

	di->lease_shared_gen = atomic_read(&ceph_inode(dir)->i_shared_gen);
	if (!(mask & CEPH_LEASE_VALID)) {
		__ceph_dentry_dir_lease_touch(di);
		return;
	}

	if (di->lease_gen == atomic_read(&session->s_cap_gen) &&
	    time_before(ttl, di->time))
		return;  /* we already have a newer lease. */

	if (di->lease_session && di->lease_session != session) {
		*old_lease_session = di->lease_session;
		di->lease_session = NULL;
	}

	if (!di->lease_session)
		di->lease_session = ceph_get_mds_session(session);
	di->lease_gen = atomic_read(&session->s_cap_gen);
	di->lease_seq = le32_to_cpu(lease->seq);
	di->lease_renew_after = half_ttl;
	di->lease_renew_from = 0;
	di->time = ttl;

	__ceph_dentry_lease_touch(di);
}

static inline void update_dentry_lease(struct inode *dir, struct dentry *dentry,
					struct ceph_mds_reply_lease *lease,
					struct ceph_mds_session *session,
					unsigned long from_time)
{
	struct ceph_mds_session *old_lease_session = NULL;
	spin_lock(&dentry->d_lock);
	__update_dentry_lease(dir, dentry, lease, session, from_time,
			      &old_lease_session);
	spin_unlock(&dentry->d_lock);
	ceph_put_mds_session(old_lease_session);
}

/*
 * update dentry lease without having parent inode locked
 */
static void update_dentry_lease_careful(struct dentry *dentry,
					struct ceph_mds_reply_lease *lease,
					struct ceph_mds_session *session,
					unsigned long from_time,
					char *dname, u32 dname_len,
					struct ceph_vino *pdvino,
					struct ceph_vino *ptvino)
{
	struct inode *dir;
	struct ceph_mds_session *old_lease_session = NULL;

	spin_lock(&dentry->d_lock);
	/* make sure dentry's name matches target */
	if (dentry->d_name.len != dname_len ||
	    memcmp(dentry->d_name.name, dname, dname_len))
		goto out_unlock;

	dir = d_inode(dentry->d_parent);
	/* make sure parent matches dvino */
	if (!ceph_ino_compare(dir, pdvino))
		goto out_unlock;

	/* make sure dentry's inode matches target. NULL ptvino means that
	 * we expect a negative dentry */
	if (ptvino) {
		if (d_really_is_negative(dentry))
			goto out_unlock;
		if (!ceph_ino_compare(d_inode(dentry), ptvino))
			goto out_unlock;
	} else {
		if (d_really_is_positive(dentry))
			goto out_unlock;
	}

	__update_dentry_lease(dir, dentry, lease, session,
			      from_time, &old_lease_session);
out_unlock:
	spin_unlock(&dentry->d_lock);
	ceph_put_mds_session(old_lease_session);
}

/*
 * splice a dentry to an inode.
 * caller must hold directory i_mutex for this to be safe.
 */
static int splice_dentry(struct dentry **pdn, struct inode *in)
{
	struct dentry *dn = *pdn;
	struct dentry *realdn;

	BUG_ON(d_inode(dn));

	if (S_ISDIR(in->i_mode)) {
		/* If the inode is a directory, d_splice_alias() below will
		 * remove 'realdn' from its original parent. We need to
		 * ensure that the original parent's readdir cache will not
		 * reference 'realdn'
		 */
		realdn = d_find_any_alias(in);
		if (realdn) {
			struct ceph_dentry_info *di = ceph_dentry(realdn);
			spin_lock(&realdn->d_lock);

			realdn->d_op->d_prune(realdn);

			di->time = jiffies;
			di->lease_shared_gen = 0;
			di->offset = 0;

			spin_unlock(&realdn->d_lock);
			dput(realdn);
		}
	}

	/* dn must be unhashed */
	if (!d_unhashed(dn))
		d_drop(dn);
	realdn = d_splice_alias(in, dn);
	if (IS_ERR(realdn)) {
		pr_err("splice_dentry error %ld %p inode %p ino %llx.%llx\n",
		       PTR_ERR(realdn), dn, in, ceph_vinop(in));
		return PTR_ERR(realdn);
	}

	if (realdn) {
		dout("dn %p (%d) spliced with %p (%d) "
		     "inode %p ino %llx.%llx\n",
		     dn, d_count(dn),
		     realdn, d_count(realdn),
		     d_inode(realdn), ceph_vinop(d_inode(realdn)));
		dput(dn);
		*pdn = realdn;
	} else {
		BUG_ON(!ceph_dentry(dn));
		dout("dn %p attached to %p ino %llx.%llx\n",
		     dn, d_inode(dn), ceph_vinop(d_inode(dn)));
	}
	return 0;
}

/*
 * Incorporate results into the local cache.  This is either just
 * one inode, or a directory, dentry, and possibly linked-to inode (e.g.,
 * after a lookup).
 *
 * A reply may contain
 *         a directory inode along with a dentry.
 *  and/or a target inode
 *
 * Called with snap_rwsem (read).
 */
int ceph_fill_trace(struct super_block *sb, struct ceph_mds_request *req)
{
	struct ceph_mds_session *session = req->r_session;
	struct ceph_mds_reply_info_parsed *rinfo = &req->r_reply_info;
	struct inode *in = NULL;
	struct ceph_vino tvino, dvino;
	struct ceph_fs_client *fsc = ceph_sb_to_client(sb);
	int err = 0;

	dout("fill_trace %p is_dentry %d is_target %d\n", req,
	     rinfo->head->is_dentry, rinfo->head->is_target);

	if (!rinfo->head->is_target && !rinfo->head->is_dentry) {
		dout("fill_trace reply is empty!\n");
		if (rinfo->head->result == 0 && req->r_parent)
			ceph_invalidate_dir_request(req);
		return 0;
	}

	if (rinfo->head->is_dentry) {
		struct inode *dir = req->r_parent;

		if (dir) {
			err = ceph_fill_inode(dir, NULL, &rinfo->diri,
					      rinfo->dirfrag, session, -1,
					      &req->r_caps_reservation);
			if (err < 0)
				goto done;
		} else {
			WARN_ON_ONCE(1);
		}

		if (dir && req->r_op == CEPH_MDS_OP_LOOKUPNAME &&
		    test_bit(CEPH_MDS_R_PARENT_LOCKED, &req->r_req_flags) &&
		    !test_bit(CEPH_MDS_R_ABORTED, &req->r_req_flags)) {
			struct qstr dname;
			struct dentry *dn, *parent;

			BUG_ON(!rinfo->head->is_target);
			BUG_ON(req->r_dentry);

			parent = d_find_any_alias(dir);
			BUG_ON(!parent);

			dname.name = rinfo->dname;
			dname.len = rinfo->dname_len;
			dname.hash = full_name_hash(parent, dname.name, dname.len);
			tvino.ino = le64_to_cpu(rinfo->targeti.in->ino);
			tvino.snap = le64_to_cpu(rinfo->targeti.in->snapid);
retry_lookup:
			dn = d_lookup(parent, &dname);
			dout("d_lookup on parent=%p name=%.*s got %p\n",
			     parent, dname.len, dname.name, dn);

			if (!dn) {
				dn = d_alloc(parent, &dname);
				dout("d_alloc %p '%.*s' = %p\n", parent,
				     dname.len, dname.name, dn);
				if (!dn) {
					dput(parent);
					err = -ENOMEM;
					goto done;
				}
				err = 0;
			} else if (d_really_is_positive(dn) &&
				   (ceph_ino(d_inode(dn)) != tvino.ino ||
				    ceph_snap(d_inode(dn)) != tvino.snap)) {
				dout(" dn %p points to wrong inode %p\n",
				     dn, d_inode(dn));
				ceph_dir_clear_ordered(dir);
				d_delete(dn);
				dput(dn);
				goto retry_lookup;
			}

			req->r_dentry = dn;
			dput(parent);
		}
	}

	if (rinfo->head->is_target) {
		/* Should be filled in by handle_reply */
		BUG_ON(!req->r_target_inode);

		in = req->r_target_inode;
		err = ceph_fill_inode(in, req->r_locked_page, &rinfo->targeti,
				NULL, session,
				(!test_bit(CEPH_MDS_R_ABORTED, &req->r_req_flags) &&
				 !test_bit(CEPH_MDS_R_ASYNC, &req->r_req_flags) &&
				 rinfo->head->result == 0) ?  req->r_fmode : -1,
				&req->r_caps_reservation);
		if (err < 0) {
			pr_err("ceph_fill_inode badness %p %llx.%llx\n",
				in, ceph_vinop(in));
			req->r_target_inode = NULL;
			if (in->i_state & I_NEW)
				discard_new_inode(in);
			else
				iput(in);
			goto done;
		}
		if (in->i_state & I_NEW)
			unlock_new_inode(in);
	}

	/*
	 * ignore null lease/binding on snapdir ENOENT, or else we
	 * will have trouble splicing in the virtual snapdir later
	 */
	if (rinfo->head->is_dentry &&
	    !test_bit(CEPH_MDS_R_ABORTED, &req->r_req_flags) &&
	    test_bit(CEPH_MDS_R_PARENT_LOCKED, &req->r_req_flags) &&
	    (rinfo->head->is_target || strncmp(req->r_dentry->d_name.name,
					       fsc->mount_options->snapdir_name,
					       req->r_dentry->d_name.len))) {
		/*
		 * lookup link rename   : null -> possibly existing inode
		 * mknod symlink mkdir  : null -> new inode
		 * unlink               : linked -> null
		 */
		struct inode *dir = req->r_parent;
		struct dentry *dn = req->r_dentry;
		bool have_dir_cap, have_lease;

		BUG_ON(!dn);
		BUG_ON(!dir);
		BUG_ON(d_inode(dn->d_parent) != dir);

		dvino.ino = le64_to_cpu(rinfo->diri.in->ino);
		dvino.snap = le64_to_cpu(rinfo->diri.in->snapid);

		BUG_ON(ceph_ino(dir) != dvino.ino);
		BUG_ON(ceph_snap(dir) != dvino.snap);

		/* do we have a lease on the whole dir? */
		have_dir_cap =
			(le32_to_cpu(rinfo->diri.in->cap.caps) &
			 CEPH_CAP_FILE_SHARED);

		/* do we have a dn lease? */
		have_lease = have_dir_cap ||
			le32_to_cpu(rinfo->dlease->duration_ms);
		if (!have_lease)
			dout("fill_trace  no dentry lease or dir cap\n");

		/* rename? */
		if (req->r_old_dentry && req->r_op == CEPH_MDS_OP_RENAME) {
			struct inode *olddir = req->r_old_dentry_dir;
			BUG_ON(!olddir);

			dout(" src %p '%pd' dst %p '%pd'\n",
			     req->r_old_dentry,
			     req->r_old_dentry,
			     dn, dn);
			dout("fill_trace doing d_move %p -> %p\n",
			     req->r_old_dentry, dn);

			/* d_move screws up sibling dentries' offsets */
			ceph_dir_clear_ordered(dir);
			ceph_dir_clear_ordered(olddir);

			d_move(req->r_old_dentry, dn);
			dout(" src %p '%pd' dst %p '%pd'\n",
			     req->r_old_dentry,
			     req->r_old_dentry,
			     dn, dn);

			/* ensure target dentry is invalidated, despite
			   rehashing bug in vfs_rename_dir */
			ceph_invalidate_dentry_lease(dn);

			dout("dn %p gets new offset %lld\n", req->r_old_dentry,
			     ceph_dentry(req->r_old_dentry)->offset);

			/* swap r_dentry and r_old_dentry in case
			 * splice_dentry() gets called later. This is safe
			 * because no other place will use them */
			req->r_dentry = req->r_old_dentry;
			req->r_old_dentry = dn;
			dn = req->r_dentry;
		}

		/* null dentry? */
		if (!rinfo->head->is_target) {
			dout("fill_trace null dentry\n");
			if (d_really_is_positive(dn)) {
				dout("d_delete %p\n", dn);
				ceph_dir_clear_ordered(dir);
				d_delete(dn);
			} else if (have_lease) {
				if (d_unhashed(dn))
					d_add(dn, NULL);
				update_dentry_lease(dir, dn,
						    rinfo->dlease, session,
						    req->r_request_started);
			}
			goto done;
		}

		/* attach proper inode */
		if (d_really_is_negative(dn)) {
			ceph_dir_clear_ordered(dir);
			ihold(in);
			err = splice_dentry(&req->r_dentry, in);
			if (err < 0)
				goto done;
			dn = req->r_dentry;  /* may have spliced */
		} else if (d_really_is_positive(dn) && d_inode(dn) != in) {
			dout(" %p links to %p %llx.%llx, not %llx.%llx\n",
			     dn, d_inode(dn), ceph_vinop(d_inode(dn)),
			     ceph_vinop(in));
			d_invalidate(dn);
			have_lease = false;
		}

		if (have_lease) {
			update_dentry_lease(dir, dn,
					    rinfo->dlease, session,
					    req->r_request_started);
		}
		dout(" final dn %p\n", dn);
	} else if ((req->r_op == CEPH_MDS_OP_LOOKUPSNAP ||
		    req->r_op == CEPH_MDS_OP_MKSNAP) &&
		   test_bit(CEPH_MDS_R_PARENT_LOCKED, &req->r_req_flags) &&
		   !test_bit(CEPH_MDS_R_ABORTED, &req->r_req_flags)) {
		struct inode *dir = req->r_parent;

		/* fill out a snapdir LOOKUPSNAP dentry */
		BUG_ON(!dir);
		BUG_ON(ceph_snap(dir) != CEPH_SNAPDIR);
		BUG_ON(!req->r_dentry);
		dout(" linking snapped dir %p to dn %p\n", in, req->r_dentry);
		ceph_dir_clear_ordered(dir);
		ihold(in);
		err = splice_dentry(&req->r_dentry, in);
		if (err < 0)
			goto done;
	} else if (rinfo->head->is_dentry && req->r_dentry) {
		/* parent inode is not locked; be careful */
		struct ceph_vino *ptvino = NULL;
		dvino.ino = le64_to_cpu(rinfo->diri.in->ino);
		dvino.snap = le64_to_cpu(rinfo->diri.in->snapid);
		if (rinfo->head->is_target) {
			tvino.ino = le64_to_cpu(rinfo->targeti.in->ino);
			tvino.snap = le64_to_cpu(rinfo->targeti.in->snapid);
			ptvino = &tvino;
		}
		update_dentry_lease_careful(req->r_dentry, rinfo->dlease,
					    session, req->r_request_started,
					    rinfo->dname, rinfo->dname_len,
					    &dvino, ptvino);
	}
done:
	dout("fill_trace done err=%d\n", err);
	return err;
}

/*
 * Prepopulate our cache with readdir results, leases, etc.
 */
static int readdir_prepopulate_inodes_only(struct ceph_mds_request *req,
					   struct ceph_mds_session *session)
{
	struct ceph_mds_reply_info_parsed *rinfo = &req->r_reply_info;
	int i, err = 0;

	for (i = 0; i < rinfo->dir_nr; i++) {
		struct ceph_mds_reply_dir_entry *rde = rinfo->dir_entries + i;
		struct ceph_vino vino;
		struct inode *in;
		int rc;

		vino.ino = le64_to_cpu(rde->inode.in->ino);
		vino.snap = le64_to_cpu(rde->inode.in->snapid);

		in = ceph_get_inode(req->r_dentry->d_sb, vino);
		if (IS_ERR(in)) {
			err = PTR_ERR(in);
			dout("new_inode badness got %d\n", err);
			continue;
		}
		rc = ceph_fill_inode(in, NULL, &rde->inode, NULL, session,
				     -1, &req->r_caps_reservation);
		if (rc < 0) {
			pr_err("ceph_fill_inode badness on %p got %d\n",
			       in, rc);
			err = rc;
			if (in->i_state & I_NEW) {
				ihold(in);
				discard_new_inode(in);
			}
		} else if (in->i_state & I_NEW) {
			unlock_new_inode(in);
		}

		iput(in);
	}

	return err;
}

void ceph_readdir_cache_release(struct ceph_readdir_cache_control *ctl)
{
	if (ctl->page) {
		kunmap(ctl->page);
		put_page(ctl->page);
		ctl->page = NULL;
	}
}

static int fill_readdir_cache(struct inode *dir, struct dentry *dn,
			      struct ceph_readdir_cache_control *ctl,
			      struct ceph_mds_request *req)
{
	struct ceph_inode_info *ci = ceph_inode(dir);
	unsigned nsize = PAGE_SIZE / sizeof(struct dentry*);
	unsigned idx = ctl->index % nsize;
	pgoff_t pgoff = ctl->index / nsize;

	if (!ctl->page || pgoff != page_index(ctl->page)) {
		ceph_readdir_cache_release(ctl);
		if (idx == 0)
			ctl->page = grab_cache_page(&dir->i_data, pgoff);
		else
			ctl->page = find_lock_page(&dir->i_data, pgoff);
		if (!ctl->page) {
			ctl->index = -1;
			return idx == 0 ? -ENOMEM : 0;
		}
		/* reading/filling the cache is serialized by
		 * i_mutex; no need to take the page lock */
		unlock_page(ctl->page);
		ctl->dentries = kmap(ctl->page);
		if (idx == 0)
			memset(ctl->dentries, 0, PAGE_SIZE);
	}

	if (req->r_dir_release_cnt == atomic64_read(&ci->i_release_count) &&
	    req->r_dir_ordered_cnt == atomic64_read(&ci->i_ordered_count)) {
		dout("readdir cache dn %p idx %d\n", dn, ctl->index);
		ctl->dentries[idx] = dn;
		ctl->index++;
	} else {
		dout("disable readdir cache\n");
		ctl->index = -1;
	}
	return 0;
}
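
/*
 * Example of the index math above: with 4K pages and 8-byte dentry
 * pointers, nsize == 512, so cache index 1000 lands in slot 488 of
 * page 1.
 */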
1613 
1614 int ceph_readdir_prepopulate(struct ceph_mds_request *req,
1615 			     struct ceph_mds_session *session)
1616 {
1617 	struct dentry *parent = req->r_dentry;
1618 	struct ceph_inode_info *ci = ceph_inode(d_inode(parent));
1619 	struct ceph_mds_reply_info_parsed *rinfo = &req->r_reply_info;
1620 	struct qstr dname;
1621 	struct dentry *dn;
1622 	struct inode *in;
1623 	int err = 0, skipped = 0, ret, i;
1624 	u32 frag = le32_to_cpu(req->r_args.readdir.frag);
1625 	u32 last_hash = 0;
1626 	u32 fpos_offset;
1627 	struct ceph_readdir_cache_control cache_ctl = {};
1628 
1629 	if (test_bit(CEPH_MDS_R_ABORTED, &req->r_req_flags))
1630 		return readdir_prepopulate_inodes_only(req, session);
1631 
1632 	if (rinfo->hash_order) {
1633 		if (req->r_path2) {
1634 			last_hash = ceph_str_hash(ci->i_dir_layout.dl_dir_hash,
1635 						  req->r_path2,
1636 						  strlen(req->r_path2));
1637 			last_hash = ceph_frag_value(last_hash);
1638 		} else if (rinfo->offset_hash) {
1639 			/* mds understands offset_hash */
1640 			WARN_ON_ONCE(req->r_readdir_offset != 2);
1641 			last_hash = le32_to_cpu(req->r_args.readdir.offset_hash);
1642 		}
1643 	}
1644 
1645 	if (rinfo->dir_dir &&
1646 	    le32_to_cpu(rinfo->dir_dir->frag) != frag) {
1647 		dout("readdir_prepopulate got new frag %x -> %x\n",
1648 		     frag, le32_to_cpu(rinfo->dir_dir->frag));
1649 		frag = le32_to_cpu(rinfo->dir_dir->frag);
1650 		if (!rinfo->hash_order)
1651 			req->r_readdir_offset = 2;
1652 	}
1653 
1654 	if (le32_to_cpu(rinfo->head->op) == CEPH_MDS_OP_LSSNAP) {
1655 		dout("readdir_prepopulate %d items under SNAPDIR dn %p\n",
1656 		     rinfo->dir_nr, parent);
1657 	} else {
1658 		dout("readdir_prepopulate %d items under dn %p\n",
1659 		     rinfo->dir_nr, parent);
1660 		if (rinfo->dir_dir)
1661 			ceph_fill_dirfrag(d_inode(parent), rinfo->dir_dir);
1662 
1663 		if (ceph_frag_is_leftmost(frag) &&
1664 		    req->r_readdir_offset == 2 &&
1665 		    !(rinfo->hash_order && last_hash)) {
1666 			/* note dir version at start of readdir so we can
1667 			 * tell if any dentries get dropped */
1668 			req->r_dir_release_cnt =
1669 				atomic64_read(&ci->i_release_count);
1670 			req->r_dir_ordered_cnt =
1671 				atomic64_read(&ci->i_ordered_count);
1672 			req->r_readdir_cache_idx = 0;
1673 		}
1674 	}
1675 
1676 	cache_ctl.index = req->r_readdir_cache_idx;
1677 	fpos_offset = req->r_readdir_offset;
1678 
1679 	/* FIXME: release caps/leases if error occurs */
1680 	for (i = 0; i < rinfo->dir_nr; i++) {
1681 		struct ceph_mds_reply_dir_entry *rde = rinfo->dir_entries + i;
1682 		struct ceph_vino tvino;
1683 
1684 		dname.name = rde->name;
1685 		dname.len = rde->name_len;
1686 		dname.hash = full_name_hash(parent, dname.name, dname.len);
1687 
1688 		tvino.ino = le64_to_cpu(rde->inode.in->ino);
1689 		tvino.snap = le64_to_cpu(rde->inode.in->snapid);
1690 
1691 		if (rinfo->hash_order) {
1692 			u32 hash = ceph_str_hash(ci->i_dir_layout.dl_dir_hash,
1693 						 rde->name, rde->name_len);
1694 			hash = ceph_frag_value(hash);
1695 			if (hash != last_hash)
1696 				fpos_offset = 2;
1697 			last_hash = hash;
1698 			rde->offset = ceph_make_fpos(hash, fpos_offset++, true);
1699 		} else {
1700 			rde->offset = ceph_make_fpos(frag, fpos_offset++, false);
1701 		}
1702 
1703 retry_lookup:
1704 		dn = d_lookup(parent, &dname);
1705 		dout("d_lookup on parent=%p name=%.*s got %p\n",
1706 		     parent, dname.len, dname.name, dn);
1707 
1708 		if (!dn) {
1709 			dn = d_alloc(parent, &dname);
1710 			dout("d_alloc %p '%.*s' = %p\n", parent,
1711 			     dname.len, dname.name, dn);
1712 			if (!dn) {
1713 				dout("d_alloc badness\n");
1714 				err = -ENOMEM;
1715 				goto out;
1716 			}
1717 		} else if (d_really_is_positive(dn) &&
1718 			   (ceph_ino(d_inode(dn)) != tvino.ino ||
1719 			    ceph_snap(d_inode(dn)) != tvino.snap)) {
1720 			struct ceph_dentry_info *di = ceph_dentry(dn);
1721 			dout(" dn %p points to wrong inode %p\n",
1722 			     dn, d_inode(dn));
1723 
1724 			spin_lock(&dn->d_lock);
1725 			if (di->offset > 0 &&
1726 			    di->lease_shared_gen ==
1727 			    atomic_read(&ci->i_shared_gen)) {
1728 				__ceph_dir_clear_ordered(ci);
1729 				di->offset = 0;
1730 			}
1731 			spin_unlock(&dn->d_lock);
1732 
1733 			d_delete(dn);
1734 			dput(dn);
1735 			goto retry_lookup;
1736 		}
1737 
1738 		/* inode */
1739 		if (d_really_is_positive(dn)) {
1740 			in = d_inode(dn);
1741 		} else {
1742 			in = ceph_get_inode(parent->d_sb, tvino);
1743 			if (IS_ERR(in)) {
1744 				dout("new_inode badness\n");
1745 				d_drop(dn);
1746 				dput(dn);
1747 				err = PTR_ERR(in);
1748 				goto out;
1749 			}
1750 		}
1751 
1752 		ret = ceph_fill_inode(in, NULL, &rde->inode, NULL, session,
1753 				      -1, &req->r_caps_reservation);
1754 		if (ret < 0) {
1755 			pr_err("ceph_fill_inode badness on %p\n", in);
1756 			if (d_really_is_negative(dn)) {
1757 				if (in->i_state & I_NEW) {
1758 					ihold(in);
1759 					discard_new_inode(in);
1760 				}
1761 				iput(in);
1762 			}
1763 			d_drop(dn);
1764 			err = ret;
1765 			goto next_item;
1766 		}
1767 		if (in->i_state & I_NEW)
1768 			unlock_new_inode(in);
1769 
1770 		if (d_really_is_negative(dn)) {
1771 			if (ceph_security_xattr_deadlock(in)) {
1772 				dout(" skip splicing dn %p to inode %p"
1773 				     " (security xattr deadlock)\n", dn, in);
1774 				iput(in);
1775 				skipped++;
1776 				goto next_item;
1777 			}
1778 
1779 			err = splice_dentry(&dn, in);
1780 			if (err < 0)
1781 				goto next_item;
1782 		}
1783 
1784 		ceph_dentry(dn)->offset = rde->offset;
1785 
1786 		update_dentry_lease(d_inode(parent), dn,
1787 				    rde->lease, req->r_session,
1788 				    req->r_request_started);
1789 
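		/*
		 * Only fill the readdir cache while no entry has been
		 * skipped and no error has occurred; after a skip, the
		 * cache index would no longer line up with the dentry
		 * offsets.
		 */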
1790 		if (err == 0 && skipped == 0 && cache_ctl.index >= 0) {
1791 			ret = fill_readdir_cache(d_inode(parent), dn,
1792 						 &cache_ctl, req);
1793 			if (ret < 0)
1794 				err = ret;
1795 		}
1796 next_item:
1797 		dput(dn);
1798 	}
1799 out:
1800 	if (err == 0 && skipped == 0) {
1801 		set_bit(CEPH_MDS_R_DID_PREPOPULATE, &req->r_req_flags);
1802 		req->r_readdir_cache_idx = cache_ctl.index;
1803 	}
1804 	ceph_readdir_cache_release(&cache_ctl);
1805 	dout("readdir_prepopulate done\n");
1806 	return err;
1807 }
1808 
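/*
 * Update the locally cached i_size/i_blocks after learning of a size
 * change (typically from a completed write).  Returns true if the new
 * size should be reported to the MDS.  An illustrative caller sketch
 * (not a verbatim caller from this tree) would be:
 *
 *	if (ceph_inode_set_size(inode, new_size))
 *		ceph_check_caps(ceph_inode(inode), 0, NULL);
 */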
1809 bool ceph_inode_set_size(struct inode *inode, loff_t size)
1810 {
1811 	struct ceph_inode_info *ci = ceph_inode(inode);
1812 	bool ret;
1813 
1814 	spin_lock(&ci->i_ceph_lock);
1815 	dout("set_size %p %llu -> %llu\n", inode, i_size_read(inode), size);
1816 	i_size_write(inode, size);
1817 	inode->i_blocks = calc_inode_blocks(size);
1818 
1819 	ret = __ceph_should_report_size(ci);
1820 
1821 	spin_unlock(&ci->i_ceph_lock);
1822 	return ret;
1823 }
1824 
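/*
 * Queue deferred work for this inode on the per-fs inode_wq.  An inode
 * reference is taken up front; if the work was already queued
 * (queue_work() returns false) the reference is dropped here, otherwise
 * ceph_inode_work() drops it after the work runs.
 */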
1825 void ceph_queue_inode_work(struct inode *inode, int work_bit)
1826 {
1827 	struct ceph_fs_client *fsc = ceph_inode_to_client(inode);
1828 	struct ceph_inode_info *ci = ceph_inode(inode);

1829 	set_bit(work_bit, &ci->i_work_mask);
1830 
1831 	ihold(inode);
1832 	if (queue_work(fsc->inode_wq, &ci->i_work)) {
1833 		dout("queue_inode_work %p, mask=%lx\n", inode, ci->i_work_mask);
1834 	} else {
1835 		dout("queue_inode_work %p already queued, mask=%lx\n",
1836 		     inode, ci->i_work_mask);
1837 		iput(inode);
1838 	}
1839 }
1840 
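/*
 * Invalidate clean pagecache pages after the MDS revokes FILE_CACHE.
 * i_rdcache_gen is bumped each time the FILE_CACHE cap is (re)issued,
 * and i_rdcache_revoking records the generation being revoked, so
 * re-checking the generation after the invalidation detects a racing
 * re-grant; in that case the invalidation is not treated as complete.
 */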
1841 static void ceph_do_invalidate_pages(struct inode *inode)
1842 {
1843 	struct ceph_inode_info *ci = ceph_inode(inode);
1844 	u32 orig_gen;
1845 	int check = 0;
1846 
1847 	mutex_lock(&ci->i_truncate_mutex);
1848 
1849 	if (ceph_inode_is_shutdown(inode)) {
1850 		pr_warn_ratelimited("%s: inode %llx.%llx is shut down\n",
1851 				    __func__, ceph_vinop(inode));
1852 		mapping_set_error(inode->i_mapping, -EIO);
1853 		truncate_pagecache(inode, 0);
1854 		mutex_unlock(&ci->i_truncate_mutex);
1855 		goto out;
1856 	}
1857 
1858 	spin_lock(&ci->i_ceph_lock);
1859 	dout("invalidate_pages %p gen %d revoking %d\n", inode,
1860 	     ci->i_rdcache_gen, ci->i_rdcache_revoking);
1861 	if (ci->i_rdcache_revoking != ci->i_rdcache_gen) {
1862 		if (__ceph_caps_revoking_other(ci, NULL, CEPH_CAP_FILE_CACHE))
1863 			check = 1;
1864 		spin_unlock(&ci->i_ceph_lock);
1865 		mutex_unlock(&ci->i_truncate_mutex);
1866 		goto out;
1867 	}
1868 	orig_gen = ci->i_rdcache_gen;
1869 	spin_unlock(&ci->i_ceph_lock);
1870 
1871 	ceph_fscache_invalidate(inode);
1872 	if (invalidate_inode_pages2(inode->i_mapping) < 0) {
1873 		pr_err("invalidate_inode_pages2 %llx.%llx failed\n",
1874 		       ceph_vinop(inode));
1875 	}
1876 
1877 	spin_lock(&ci->i_ceph_lock);
1878 	if (orig_gen == ci->i_rdcache_gen &&
1879 	    orig_gen == ci->i_rdcache_revoking) {
1880 		dout("invalidate_pages %p gen %d successful\n", inode,
1881 		     ci->i_rdcache_gen);
1882 		ci->i_rdcache_revoking--;
1883 		check = 1;
1884 	} else {
1885 		dout("invalidate_pages %p gen %d raced, now %d revoking %d\n",
1886 		     inode, orig_gen, ci->i_rdcache_gen,
1887 		     ci->i_rdcache_revoking);
1888 		if (__ceph_caps_revoking_other(ci, NULL, CEPH_CAP_FILE_CACHE))
1889 			check = 1;
1890 	}
1891 	spin_unlock(&ci->i_ceph_lock);
1892 	mutex_unlock(&ci->i_truncate_mutex);
1893 out:
1894 	if (check)
1895 		ceph_check_caps(ci, 0, NULL);
1896 }
1897 
1898 /*
1899  * Make sure any pending truncation is applied before doing anything
1900  * that may depend on it.
1901  */
1902 void __ceph_do_pending_vmtruncate(struct inode *inode)
1903 {
1904 	struct ceph_inode_info *ci = ceph_inode(inode);
1905 	u64 to;
1906 	int wrbuffer_refs, finish = 0;
1907 
1908 	mutex_lock(&ci->i_truncate_mutex);
1909 retry:
1910 	spin_lock(&ci->i_ceph_lock);
1911 	if (ci->i_truncate_pending == 0) {
1912 		dout("__do_pending_vmtruncate %p none pending\n", inode);
1913 		spin_unlock(&ci->i_ceph_lock);
1914 		mutex_unlock(&ci->i_truncate_mutex);
1915 		return;
1916 	}
1917 
1918 	/*
1919 	 * Make sure any dirty snapped pages are flushed before we
1920 	 * possibly truncate them, so write AND block!
1921 	 */
1922 	if (ci->i_wrbuffer_ref_head < ci->i_wrbuffer_ref) {
1923 		spin_unlock(&ci->i_ceph_lock);
1924 		dout("__do_pending_vmtruncate %p flushing snaps first\n",
1925 		     inode);
1926 		filemap_write_and_wait_range(&inode->i_data, 0,
1927 					     inode->i_sb->s_maxbytes);
1928 		goto retry;
1929 	}
1930 
1931 	/* there should be no reader or writer */
1932 	WARN_ON_ONCE(ci->i_rd_ref || ci->i_wr_ref);
1933 
1934 	to = ci->i_truncate_size;
1935 	wrbuffer_refs = ci->i_wrbuffer_ref;
1936 	dout("__do_pending_vmtruncate %p (%d) to %lld\n", inode,
1937 	     ci->i_truncate_pending, to);
1938 	spin_unlock(&ci->i_ceph_lock);
1939 
1940 	truncate_pagecache(inode, to);
1941 
1942 	spin_lock(&ci->i_ceph_lock);
1943 	if (to == ci->i_truncate_size) {
1944 		ci->i_truncate_pending = 0;
1945 		finish = 1;
1946 	}
1947 	spin_unlock(&ci->i_ceph_lock);
1948 	if (!finish)
1949 		goto retry;
1950 
1951 	mutex_unlock(&ci->i_truncate_mutex);
1952 
1953 	if (wrbuffer_refs == 0)
1954 		ceph_check_caps(ci, 0, NULL);
1955 
1956 	wake_up_all(&ci->i_cap_wq);
1957 }
1958 
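/*
 * Run deferred inode work queued via ceph_queue_inode_work().  Each
 * pending CEPH_I_WORK_* bit is tested and cleared atomically before its
 * handler runs, and the inode reference taken at queue time is dropped
 * at the end.
 */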
1959 static void ceph_inode_work(struct work_struct *work)
1960 {
1961 	struct ceph_inode_info *ci = container_of(work, struct ceph_inode_info,
1962 						 i_work);
1963 	struct inode *inode = &ci->vfs_inode;
1964 
1965 	if (test_and_clear_bit(CEPH_I_WORK_WRITEBACK, &ci->i_work_mask)) {
1966 		dout("writeback %p\n", inode);
1967 		filemap_fdatawrite(&inode->i_data);
1968 	}
1969 	if (test_and_clear_bit(CEPH_I_WORK_INVALIDATE_PAGES, &ci->i_work_mask))
1970 		ceph_do_invalidate_pages(inode);
1971 
1972 	if (test_and_clear_bit(CEPH_I_WORK_VMTRUNCATE, &ci->i_work_mask))
1973 		__ceph_do_pending_vmtruncate(inode);
1974 
1975 	if (test_and_clear_bit(CEPH_I_WORK_CHECK_CAPS, &ci->i_work_mask))
1976 		ceph_check_caps(ci, 0, NULL);
1977 
1978 	if (test_and_clear_bit(CEPH_I_WORK_FLUSH_SNAPS, &ci->i_work_mask))
1979 		ceph_flush_snaps(ci, NULL);
1980 
1981 	iput(inode);
1982 }
1983 
1984 /*
1985  * symlinks
1986  */
1987 static const struct inode_operations ceph_symlink_iops = {
1988 	.get_link = simple_get_link,
1989 	.setattr = ceph_setattr,
1990 	.getattr = ceph_getattr,
1991 	.listxattr = ceph_listxattr,
1992 };
1993 
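/*
 * Core of setattr: for each requested attribute, either apply the
 * change locally and mark the corresponding cap dirty (when the needed
 * EXCL cap is issued), or encode it into an MDS SETATTR request and
 * note which SHARED caps to release along with it.  The request is
 * sent only if at least one attribute could not be handled locally
 * (i.e. mask is non-zero).
 */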
1994 int __ceph_setattr(struct inode *inode, struct iattr *attr)
1995 {
1996 	struct ceph_inode_info *ci = ceph_inode(inode);
1997 	unsigned int ia_valid = attr->ia_valid;
1998 	struct ceph_mds_request *req;
1999 	struct ceph_mds_client *mdsc = ceph_sb_to_client(inode->i_sb)->mdsc;
2000 	struct ceph_cap_flush *prealloc_cf;
2001 	int issued;
2002 	int release = 0, dirtied = 0;
2003 	int mask = 0;
2004 	int err = 0;
2005 	int inode_dirty_flags = 0;
2006 	bool lock_snap_rwsem = false;
2007 
2008 	prealloc_cf = ceph_alloc_cap_flush();
2009 	if (!prealloc_cf)
2010 		return -ENOMEM;
2011 
2012 	req = ceph_mdsc_create_request(mdsc, CEPH_MDS_OP_SETATTR,
2013 				       USE_AUTH_MDS);
2014 	if (IS_ERR(req)) {
2015 		ceph_free_cap_flush(prealloc_cf);
2016 		return PTR_ERR(req);
2017 	}
2018 
2019 	spin_lock(&ci->i_ceph_lock);
2020 	issued = __ceph_caps_issued(ci, NULL);
2021 
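	/*
	 * Lock ordering: snap_rwsem must not be acquired while holding
	 * i_ceph_lock, so if the trylock fails, drop the spinlock, block
	 * on the rwsem, and then re-sample the issued caps, which may
	 * have changed in the window.
	 */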
2022 	if (!ci->i_head_snapc &&
2023 	    (issued & (CEPH_CAP_ANY_EXCL | CEPH_CAP_FILE_WR))) {
2024 		lock_snap_rwsem = true;
2025 		if (!down_read_trylock(&mdsc->snap_rwsem)) {
2026 			spin_unlock(&ci->i_ceph_lock);
2027 			down_read(&mdsc->snap_rwsem);
2028 			spin_lock(&ci->i_ceph_lock);
2029 			issued = __ceph_caps_issued(ci, NULL);
2030 		}
2031 	}
2032 
2033 	dout("setattr %p issued %s\n", inode, ceph_cap_string(issued));
2034 
2035 	if (ia_valid & ATTR_UID) {
2036 		dout("setattr %p uid %d -> %d\n", inode,
2037 		     from_kuid(&init_user_ns, inode->i_uid),
2038 		     from_kuid(&init_user_ns, attr->ia_uid));
2039 		if (issued & CEPH_CAP_AUTH_EXCL) {
2040 			inode->i_uid = attr->ia_uid;
2041 			dirtied |= CEPH_CAP_AUTH_EXCL;
2042 		} else if ((issued & CEPH_CAP_AUTH_SHARED) == 0 ||
2043 			   !uid_eq(attr->ia_uid, inode->i_uid)) {
2044 			req->r_args.setattr.uid = cpu_to_le32(
2045 				from_kuid(&init_user_ns, attr->ia_uid));
2046 			mask |= CEPH_SETATTR_UID;
2047 			release |= CEPH_CAP_AUTH_SHARED;
2048 		}
2049 	}
2050 	if (ia_valid & ATTR_GID) {
2051 		dout("setattr %p gid %d -> %d\n", inode,
2052 		     from_kgid(&init_user_ns, inode->i_gid),
2053 		     from_kgid(&init_user_ns, attr->ia_gid));
2054 		if (issued & CEPH_CAP_AUTH_EXCL) {
2055 			inode->i_gid = attr->ia_gid;
2056 			dirtied |= CEPH_CAP_AUTH_EXCL;
2057 		} else if ((issued & CEPH_CAP_AUTH_SHARED) == 0 ||
2058 			   !gid_eq(attr->ia_gid, inode->i_gid)) {
2059 			req->r_args.setattr.gid = cpu_to_le32(
2060 				from_kgid(&init_user_ns, attr->ia_gid));
2061 			mask |= CEPH_SETATTR_GID;
2062 			release |= CEPH_CAP_AUTH_SHARED;
2063 		}
2064 	}
2065 	if (ia_valid & ATTR_MODE) {
2066 		dout("setattr %p mode 0%o -> 0%o\n", inode, inode->i_mode,
2067 		     attr->ia_mode);
2068 		if (issued & CEPH_CAP_AUTH_EXCL) {
2069 			inode->i_mode = attr->ia_mode;
2070 			dirtied |= CEPH_CAP_AUTH_EXCL;
2071 		} else if ((issued & CEPH_CAP_AUTH_SHARED) == 0 ||
2072 			   attr->ia_mode != inode->i_mode) {
2073 			inode->i_mode = attr->ia_mode;
2074 			req->r_args.setattr.mode = cpu_to_le32(attr->ia_mode);
2075 			mask |= CEPH_SETATTR_MODE;
2076 			release |= CEPH_CAP_AUTH_SHARED;
2077 		}
2078 	}
2079 
2080 	if (ia_valid & ATTR_ATIME) {
2081 		dout("setattr %p atime %lld.%ld -> %lld.%ld\n", inode,
2082 		     inode->i_atime.tv_sec, inode->i_atime.tv_nsec,
2083 		     attr->ia_atime.tv_sec, attr->ia_atime.tv_nsec);
2084 		if (issued & CEPH_CAP_FILE_EXCL) {
2085 			ci->i_time_warp_seq++;
2086 			inode->i_atime = attr->ia_atime;
2087 			dirtied |= CEPH_CAP_FILE_EXCL;
2088 		} else if ((issued & CEPH_CAP_FILE_WR) &&
2089 			   timespec64_compare(&inode->i_atime,
2090 					    &attr->ia_atime) < 0) {
2091 			inode->i_atime = attr->ia_atime;
2092 			dirtied |= CEPH_CAP_FILE_WR;
2093 		} else if ((issued & CEPH_CAP_FILE_SHARED) == 0 ||
2094 			   !timespec64_equal(&inode->i_atime, &attr->ia_atime)) {
2095 			ceph_encode_timespec64(&req->r_args.setattr.atime,
2096 					       &attr->ia_atime);
2097 			mask |= CEPH_SETATTR_ATIME;
2098 			release |= CEPH_CAP_FILE_SHARED |
2099 				   CEPH_CAP_FILE_RD | CEPH_CAP_FILE_WR;
2100 		}
2101 	}
2102 	if (ia_valid & ATTR_SIZE) {
2103 		loff_t isize = i_size_read(inode);
2104 
2105 		dout("setattr %p size %lld -> %lld\n", inode, isize, attr->ia_size);
2106 		if ((issued & CEPH_CAP_FILE_EXCL) && attr->ia_size >= isize) {
2107 			if (attr->ia_size > isize) {
2108 				i_size_write(inode, attr->ia_size);
2109 				inode->i_blocks = calc_inode_blocks(attr->ia_size);
2110 				ci->i_reported_size = attr->ia_size;
2111 				dirtied |= CEPH_CAP_FILE_EXCL;
2112 				ia_valid |= ATTR_MTIME;
2113 			}
2114 		} else if ((issued & CEPH_CAP_FILE_SHARED) == 0 ||
2115 			   attr->ia_size != isize) {
2116 			req->r_args.setattr.size = cpu_to_le64(attr->ia_size);
2117 			req->r_args.setattr.old_size = cpu_to_le64(isize);
2118 			mask |= CEPH_SETATTR_SIZE;
2119 			release |= CEPH_CAP_FILE_SHARED | CEPH_CAP_FILE_EXCL |
2120 				   CEPH_CAP_FILE_RD | CEPH_CAP_FILE_WR;
2121 		}
2122 	}
2123 	if (ia_valid & ATTR_MTIME) {
2124 		dout("setattr %p mtime %lld.%ld -> %lld.%ld\n", inode,
2125 		     inode->i_mtime.tv_sec, inode->i_mtime.tv_nsec,
2126 		     attr->ia_mtime.tv_sec, attr->ia_mtime.tv_nsec);
2127 		if (issued & CEPH_CAP_FILE_EXCL) {
2128 			ci->i_time_warp_seq++;
2129 			inode->i_mtime = attr->ia_mtime;
2130 			dirtied |= CEPH_CAP_FILE_EXCL;
2131 		} else if ((issued & CEPH_CAP_FILE_WR) &&
2132 			   timespec64_compare(&inode->i_mtime,
2133 					    &attr->ia_mtime) < 0) {
2134 			inode->i_mtime = attr->ia_mtime;
2135 			dirtied |= CEPH_CAP_FILE_WR;
2136 		} else if ((issued & CEPH_CAP_FILE_SHARED) == 0 ||
2137 			   !timespec64_equal(&inode->i_mtime, &attr->ia_mtime)) {
2138 			ceph_encode_timespec64(&req->r_args.setattr.mtime,
2139 					       &attr->ia_mtime);
2140 			mask |= CEPH_SETATTR_MTIME;
2141 			release |= CEPH_CAP_FILE_SHARED |
2142 				   CEPH_CAP_FILE_RD | CEPH_CAP_FILE_WR;
2143 		}
2144 	}
2145 
2146 	/* these do nothing by themselves */
2147 	if (ia_valid & ATTR_CTIME) {
2148 		bool only = (ia_valid & (ATTR_SIZE|ATTR_MTIME|ATTR_ATIME|
2149 					 ATTR_MODE|ATTR_UID|ATTR_GID)) == 0;
2150 		dout("setattr %p ctime %lld.%ld -> %lld.%ld (%s)\n", inode,
2151 		     inode->i_ctime.tv_sec, inode->i_ctime.tv_nsec,
2152 		     attr->ia_ctime.tv_sec, attr->ia_ctime.tv_nsec,
2153 		     only ? "ctime only" : "ignored");
2154 		if (only) {
2155 			/*
2156 			 * If the kernel wants to dirty ctime but nothing
2157 			 * else, we need to choose a cap to dirty under, or
2158 			 * do an almost-no-op setattr.
2159 			 */
2160 			if (issued & CEPH_CAP_AUTH_EXCL)
2161 				dirtied |= CEPH_CAP_AUTH_EXCL;
2162 			else if (issued & CEPH_CAP_FILE_EXCL)
2163 				dirtied |= CEPH_CAP_FILE_EXCL;
2164 			else if (issued & CEPH_CAP_XATTR_EXCL)
2165 				dirtied |= CEPH_CAP_XATTR_EXCL;
2166 			else
2167 				mask |= CEPH_SETATTR_CTIME;
2168 		}
2169 	}
2170 	if (ia_valid & ATTR_FILE)
2171 		dout("setattr %p ATTR_FILE ... hrm!\n", inode);
2172 
2173 	if (dirtied) {
2174 		inode_dirty_flags = __ceph_mark_dirty_caps(ci, dirtied,
2175 							   &prealloc_cf);
2176 		inode->i_ctime = attr->ia_ctime;
2177 	}
2178 
2179 	release &= issued;
2180 	spin_unlock(&ci->i_ceph_lock);
2181 	if (lock_snap_rwsem)
2182 		up_read(&mdsc->snap_rwsem);
2183 
2184 	if (inode_dirty_flags)
2185 		__mark_inode_dirty(inode, inode_dirty_flags);
2186 
2188 	if (mask) {
2189 		req->r_inode = inode;
2190 		ihold(inode);
2191 		req->r_inode_drop = release;
2192 		req->r_args.setattr.mask = cpu_to_le32(mask);
2193 		req->r_num_caps = 1;
2194 		req->r_stamp = attr->ia_ctime;
2195 		err = ceph_mdsc_do_request(mdsc, NULL, req);
2196 	}
2197 	dout("setattr %p result=%d (%s locally, %d remote)\n", inode, err,
2198 	     ceph_cap_string(dirtied), mask);
2199 
2200 	ceph_mdsc_put_request(req);
2201 	ceph_free_cap_flush(prealloc_cf);
2202 
2203 	if (err >= 0 && (mask & CEPH_SETATTR_SIZE))
2204 		__ceph_do_pending_vmtruncate(inode);
2205 
2206 	return err;
2207 }
2208 
2209 /*
2210  * setattr: validate limits, then apply the change via __ceph_setattr()
2211  */
2212 int ceph_setattr(struct user_namespace *mnt_userns, struct dentry *dentry,
2213 		 struct iattr *attr)
2214 {
2215 	struct inode *inode = d_inode(dentry);
2216 	struct ceph_fs_client *fsc = ceph_inode_to_client(inode);
2217 	int err;
2218 
2219 	if (ceph_snap(inode) != CEPH_NOSNAP)
2220 		return -EROFS;
2221 
2222 	if (ceph_inode_is_shutdown(inode))
2223 		return -ESTALE;
2224 
2225 	err = setattr_prepare(&init_user_ns, dentry, attr);
2226 	if (err != 0)
2227 		return err;
2228 
2229 	if ((attr->ia_valid & ATTR_SIZE) &&
2230 	    attr->ia_size > max(i_size_read(inode), fsc->max_file_size))
2231 		return -EFBIG;
2232 
2233 	if ((attr->ia_valid & ATTR_SIZE) &&
2234 	    ceph_quota_is_max_bytes_exceeded(inode, attr->ia_size))
2235 		return -EDQUOT;
2236 
2237 	err = __ceph_setattr(inode, attr);
2238 
2239 	if (err >= 0 && (attr->ia_valid & ATTR_MODE))
2240 		err = posix_acl_chmod(&init_user_ns, inode, attr->ia_mode);
2241 
2242 	return err;
2243 }
2244 
2245 /*
2246  * Verify that we have a lease on the given mask.  If not,
2247  * do a getattr against an mds.
2248  */
2249 int __ceph_do_getattr(struct inode *inode, struct page *locked_page,
2250 		      int mask, bool force)
2251 {
2252 	struct ceph_fs_client *fsc = ceph_sb_to_client(inode->i_sb);
2253 	struct ceph_mds_client *mdsc = fsc->mdsc;
2254 	struct ceph_mds_request *req;
2255 	int mode;
2256 	int err;
2257 
2258 	if (ceph_snap(inode) == CEPH_SNAPDIR) {
2259 		dout("do_getattr inode %p SNAPDIR\n", inode);
2260 		return 0;
2261 	}
2262 
2263 	dout("do_getattr inode %p mask %s mode 0%o\n",
2264 	     inode, ceph_cap_string(mask), inode->i_mode);
2265 	if (!force && ceph_caps_issued_mask_metric(ceph_inode(inode), mask, 1))
2266 		return 0;
2267 
2268 	mode = (mask & CEPH_STAT_RSTAT) ? USE_AUTH_MDS : USE_ANY_MDS;
2269 	req = ceph_mdsc_create_request(mdsc, CEPH_MDS_OP_GETATTR, mode);
2270 	if (IS_ERR(req))
2271 		return PTR_ERR(req);
2272 	req->r_inode = inode;
2273 	ihold(inode);
2274 	req->r_num_caps = 1;
2275 	req->r_args.getattr.mask = cpu_to_le32(mask);
2276 	req->r_locked_page = locked_page;
2277 	err = ceph_mdsc_do_request(mdsc, NULL, req);
2278 	if (locked_page && err == 0) {
2279 		u64 inline_version = req->r_reply_info.targeti.inline_version;
2280 		if (inline_version == 0) {
2281 			/* the reply is supposed to contain inline data */
2282 			err = -EINVAL;
2283 		} else if (inline_version == CEPH_INLINE_NONE) {
2284 			err = -ENODATA;
2285 		} else {
2286 			err = req->r_reply_info.targeti.inline_len;
2287 		}
2288 	}
2289 	ceph_mdsc_put_request(req);
2290 	dout("do_getattr result=%d\n", err);
2291 	return err;
2292 }
2293 
2295 /*
2296  * Check inode permissions.  We verify we have a valid value for
2297  * the AUTH cap, then call the generic handler.
2298  */
2299 int ceph_permission(struct user_namespace *mnt_userns, struct inode *inode,
2300 		    int mask)
2301 {
2302 	int err;
2303 
2304 	if (mask & MAY_NOT_BLOCK)
2305 		return -ECHILD;
2306 
2307 	err = ceph_do_getattr(inode, CEPH_CAP_AUTH_SHARED, false);
2308 
2309 	if (!err)
2310 		err = generic_permission(&init_user_ns, inode, mask);
2311 	return err;
2312 }
2313 
2314 /* Craft a mask of needed caps given a set of requested statx attrs. */
2315 static int statx_to_caps(u32 want, umode_t mode)
2316 {
2317 	int mask = 0;
2318 
2319 	if (want & (STATX_MODE|STATX_UID|STATX_GID|STATX_CTIME|STATX_BTIME))
2320 		mask |= CEPH_CAP_AUTH_SHARED;
2321 
2322 	if (want & (STATX_NLINK|STATX_CTIME)) {
2323 		/*
2324 		 * The link count for directories depends on inode->i_subdirs,
2325 		 * and that is only updated when Fs caps are held.
2326 		 */
2327 		if (S_ISDIR(mode))
2328 			mask |= CEPH_CAP_FILE_SHARED;
2329 		else
2330 			mask |= CEPH_CAP_LINK_SHARED;
2331 	}
2332 
2333 	if (want & (STATX_ATIME|STATX_MTIME|STATX_CTIME|STATX_SIZE|
2334 		    STATX_BLOCKS))
2335 		mask |= CEPH_CAP_FILE_SHARED;
2336 
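	/* ctime is also affected by xattr changes */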
2337 	if (want & (STATX_CTIME))
2338 		mask |= CEPH_CAP_XATTR_SHARED;
2339 
2340 	return mask;
2341 }
2342 
2343 /*
2344  * Get all the attributes. If we have sufficient caps for the requested attrs,
2345  * then we can avoid talking to the MDS at all.
2346  */
2347 int ceph_getattr(struct user_namespace *mnt_userns, const struct path *path,
2348 		 struct kstat *stat, u32 request_mask, unsigned int flags)
2349 {
2350 	struct inode *inode = d_inode(path->dentry);
2351 	struct ceph_inode_info *ci = ceph_inode(inode);
2352 	u32 valid_mask = STATX_BASIC_STATS;
2353 	int err = 0;
2354 
2355 	if (ceph_inode_is_shutdown(inode))
2356 		return -ESTALE;
2357 
2358 	/* Skip the getattr altogether if we're asked not to sync */
2359 	if (!(flags & AT_STATX_DONT_SYNC)) {
2360 		err = ceph_do_getattr(inode,
2361 				statx_to_caps(request_mask, inode->i_mode),
2362 				flags & AT_STATX_FORCE_SYNC);
2363 		if (err)
2364 			return err;
2365 	}
2366 
2367 	generic_fillattr(&init_user_ns, inode, stat);
2368 	stat->ino = ceph_present_inode(inode);
2369 
2370 	/*
2371 	 * btime on newly-allocated inodes is 0; if it is still 0 here,
2372 	 * assume it was never set and is not valid.
2373 	 */
2374 	if (ci->i_btime.tv_sec || ci->i_btime.tv_nsec) {
2375 		stat->btime = ci->i_btime;
2376 		valid_mask |= STATX_BTIME;
2377 	}
2378 
2379 	if (ceph_snap(inode) == CEPH_NOSNAP)
2380 		stat->dev = inode->i_sb->s_dev;
2381 	else
2382 		stat->dev = ci->i_snapid_map ? ci->i_snapid_map->dev : 0;
2383 
2384 	if (S_ISDIR(inode->i_mode)) {
2385 		if (ceph_test_mount_opt(ceph_sb_to_client(inode->i_sb),
2386 					RBYTES))
2387 			stat->size = ci->i_rbytes;
2388 		else
2389 			stat->size = ci->i_files + ci->i_subdirs;
2390 		stat->blocks = 0;
2391 		stat->blksize = 65536;
2392 		/*
2393 		 * Some applications rely on the st_nlink value of
2394 		 * directories being either 0 (if unlinked) or
2395 		 * 2 + the number of subdirectories.
2396 		 */
2397 		if (stat->nlink == 1)
2398 			/* '.' + '..' + subdirs */
2399 			stat->nlink = 1 + 1 + ci->i_subdirs;
2400 	}
2401 
2402 	stat->result_mask = request_mask & valid_mask;
2403 	return err;
2404 }
2405 
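/*
 * Mark an inode as shut down and purge its caps, so that further
 * operations on it fail fast (the entry points above return -ESTALE
 * once CEPH_I_SHUTDOWN is set) instead of touching the cluster.
 */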
2406 void ceph_inode_shutdown(struct inode *inode)
2407 {
2408 	struct ceph_inode_info *ci = ceph_inode(inode);
2409 	struct rb_node *p;
2410 	int iputs = 0;
2411 	bool invalidate = false;
2412 
2413 	spin_lock(&ci->i_ceph_lock);
2414 	ci->i_ceph_flags |= CEPH_I_SHUTDOWN;
2415 	p = rb_first(&ci->i_caps);
2416 	while (p) {
2417 		struct ceph_cap *cap = rb_entry(p, struct ceph_cap, ci_node);
2418 
2419 		p = rb_next(p);
2420 		iputs += ceph_purge_inode_cap(inode, cap, &invalidate);
2421 	}
2422 	spin_unlock(&ci->i_ceph_lock);
2423 
2424 	if (invalidate)
2425 		ceph_queue_invalidate(inode);
2426 	while (iputs--)
2427 		iput(inode);
2428 }
2429