xref: /openbmc/linux/fs/ceph/inode.c (revision 507160f46c55913955d272ebf559d63809a8e560)
1 // SPDX-License-Identifier: GPL-2.0
2 #include <linux/ceph/ceph_debug.h>
3 
4 #include <linux/module.h>
5 #include <linux/fs.h>
6 #include <linux/slab.h>
7 #include <linux/string.h>
8 #include <linux/uaccess.h>
9 #include <linux/kernel.h>
10 #include <linux/writeback.h>
11 #include <linux/vmalloc.h>
12 #include <linux/xattr.h>
13 #include <linux/posix_acl.h>
14 #include <linux/random.h>
15 #include <linux/sort.h>
16 #include <linux/iversion.h>
17 
18 #include "super.h"
19 #include "mds_client.h"
20 #include "cache.h"
21 #include <linux/ceph/decode.h>
22 
23 // Temporary: netfs does disgusting things with inode pointers
24 #pragma GCC diagnostic ignored "-Wattribute-warning"
25 
26 /*
27  * Ceph inode operations
28  *
29  * Implement basic inode helpers (get, alloc) and inode ops (getattr,
30  * setattr, etc.), xattr helpers, and helpers for assimilating
31  * metadata returned by the MDS into our cache.
32  *
33  * Also define helpers for doing asynchronous writeback, invalidation,
34  * and truncation for the benefit of those who can't afford to block
35  * (typically because they are in the message handler path).
36  */
37 
38 static const struct inode_operations ceph_symlink_iops;
39 
40 static void ceph_inode_work(struct work_struct *work);
41 
42 /*
43  * find or create an inode, given the ceph ino number
44  */
45 static int ceph_set_ino_cb(struct inode *inode, void *data)
46 {
47 	struct ceph_inode_info *ci = ceph_inode(inode);
48 	struct ceph_mds_client *mdsc = ceph_sb_to_mdsc(inode->i_sb);
49 
50 	ci->i_vino = *(struct ceph_vino *)data;
51 	inode->i_ino = ceph_vino_to_ino_t(ci->i_vino);
52 	inode_set_iversion_raw(inode, 0);
53 	percpu_counter_inc(&mdsc->metric.total_inodes);
54 
55 	return 0;
56 }
57 
58 struct inode *ceph_get_inode(struct super_block *sb, struct ceph_vino vino)
59 {
60 	struct inode *inode;
61 
62 	if (ceph_vino_is_reserved(vino))
63 		return ERR_PTR(-EREMOTEIO);
64 
65 	inode = iget5_locked(sb, (unsigned long)vino.ino, ceph_ino_compare,
66 			     ceph_set_ino_cb, &vino);
67 	if (!inode)
68 		return ERR_PTR(-ENOMEM);
69 
70 	dout("get_inode on %llu=%llx.%llx got %p new %d\n", ceph_present_inode(inode),
71 	     ceph_vinop(inode), inode, !!(inode->i_state & I_NEW));
72 	return inode;
73 }
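
/*
 * Usage sketch (illustrative; the ino value is made up): the inode
 * cache is keyed on the full ceph_vino, not just i_ino, which is why
 * iget5_locked() above needs both a test callback (ceph_ino_compare)
 * and a set callback (ceph_set_ino_cb):
 *
 *	struct ceph_vino vino = { .ino = 0x100, .snap = CEPH_NOSNAP };
 *	struct inode *inode = ceph_get_inode(sb, vino);
 *
 *	if (!IS_ERR(inode) && (inode->i_state & I_NEW)) {
 *		... fill in the new inode ...
 *		unlock_new_inode(inode);    (discard_new_inode() on error)
 *	}
 */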
74 
75 /*
76  * get/construct snapdir inode for a given directory
77  */
78 struct inode *ceph_get_snapdir(struct inode *parent)
79 {
80 	struct ceph_vino vino = {
81 		.ino = ceph_ino(parent),
82 		.snap = CEPH_SNAPDIR,
83 	};
84 	struct inode *inode = ceph_get_inode(parent->i_sb, vino);
85 	struct ceph_inode_info *ci = ceph_inode(inode);
86 
87 	if (IS_ERR(inode))
88 		return inode;
89 
90 	if (!S_ISDIR(parent->i_mode)) {
91 		pr_warn_once("bad snapdir parent type (mode=0%o)\n",
92 			     parent->i_mode);
93 		goto err;
94 	}
95 
96 	if (!(inode->i_state & I_NEW) && !S_ISDIR(inode->i_mode)) {
97 		pr_warn_once("bad snapdir inode type (mode=0%o)\n",
98 			     inode->i_mode);
99 		goto err;
100 	}
101 
102 	inode->i_mode = parent->i_mode;
103 	inode->i_uid = parent->i_uid;
104 	inode->i_gid = parent->i_gid;
105 	inode->i_mtime = parent->i_mtime;
106 	inode->i_ctime = parent->i_ctime;
107 	inode->i_atime = parent->i_atime;
108 	ci->i_rbytes = 0;
109 	ci->i_btime = ceph_inode(parent)->i_btime;
110 
111 	if (inode->i_state & I_NEW) {
112 		inode->i_op = &ceph_snapdir_iops;
113 		inode->i_fop = &ceph_snapdir_fops;
114 		ci->i_snap_caps = CEPH_CAP_PIN; /* so we can open */
115 		unlock_new_inode(inode);
116 	}
117 
118 	return inode;
119 err:
120 	if ((inode->i_state & I_NEW))
121 		discard_new_inode(inode);
122 	else
123 		iput(inode);
124 	return ERR_PTR(-ENOTDIR);
125 }
126 
127 const struct inode_operations ceph_file_iops = {
128 	.permission = ceph_permission,
129 	.setattr = ceph_setattr,
130 	.getattr = ceph_getattr,
131 	.listxattr = ceph_listxattr,
132 	.get_acl = ceph_get_acl,
133 	.set_acl = ceph_set_acl,
134 };
135 
136 
137 /*
138  * We use a 'frag tree' to keep track of the MDS's directory fragments
139  * for a given inode (usually there is just a single fragment).  We
140  * need to know when a child frag is delegated to a new MDS, or when
141  * it is flagged as replicated, so we can direct our requests
142  * accordingly.
143  */
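
/*
 * Worked example (assuming the encoding in include/linux/ceph/ceph_frag.h:
 * a frag packs the number of significant bits in the top byte and a
 * left-justified 24-bit value below it):
 *
 *	ceph_frag_make(0, 0)        = 0x00000000  whole directory (root)
 *	ceph_frag_make(1, 0x000000) = 0x01000000  left half of a 1-bit split
 *	ceph_frag_make(1, 0x800000) = 0x01800000  right half
 *
 * A hash value v with bit 23 set satisfies
 * ceph_frag_contains_value(0x01800000, v).
 */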
144 
145 /*
146  * find/create a frag in the tree
147  */
148 static struct ceph_inode_frag *__get_or_create_frag(struct ceph_inode_info *ci,
149 						    u32 f)
150 {
151 	struct rb_node **p;
152 	struct rb_node *parent = NULL;
153 	struct ceph_inode_frag *frag;
154 	int c;
155 
156 	p = &ci->i_fragtree.rb_node;
157 	while (*p) {
158 		parent = *p;
159 		frag = rb_entry(parent, struct ceph_inode_frag, node);
160 		c = ceph_frag_compare(f, frag->frag);
161 		if (c < 0)
162 			p = &(*p)->rb_left;
163 		else if (c > 0)
164 			p = &(*p)->rb_right;
165 		else
166 			return frag;
167 	}
168 
169 	frag = kmalloc(sizeof(*frag), GFP_NOFS);
170 	if (!frag)
171 		return ERR_PTR(-ENOMEM);
172 
173 	frag->frag = f;
174 	frag->split_by = 0;
175 	frag->mds = -1;
176 	frag->ndist = 0;
177 
178 	rb_link_node(&frag->node, parent, p);
179 	rb_insert_color(&frag->node, &ci->i_fragtree);
180 
181 	dout("get_or_create_frag added %llx.%llx frag %x\n",
182 	     ceph_vinop(&ci->vfs_inode), f);
183 	return frag;
184 }
185 
186 /*
187  * find a specific frag @f
188  */
189 struct ceph_inode_frag *__ceph_find_frag(struct ceph_inode_info *ci, u32 f)
190 {
191 	struct rb_node *n = ci->i_fragtree.rb_node;
192 
193 	while (n) {
194 		struct ceph_inode_frag *frag =
195 			rb_entry(n, struct ceph_inode_frag, node);
196 		int c = ceph_frag_compare(f, frag->frag);
197 		if (c < 0)
198 			n = n->rb_left;
199 		else if (c > 0)
200 			n = n->rb_right;
201 		else
202 			return frag;
203 	}
204 	return NULL;
205 }
206 
207 /*
208  * Choose frag containing the given value @v.  If @pfrag is
209  * specified, copy the frag delegation info to the caller if
210  * it is present.
211  */
212 static u32 __ceph_choose_frag(struct ceph_inode_info *ci, u32 v,
213 			      struct ceph_inode_frag *pfrag, int *found)
214 {
215 	u32 t = ceph_frag_make(0, 0);
216 	struct ceph_inode_frag *frag;
217 	unsigned nway, i;
218 	u32 n;
219 
220 	if (found)
221 		*found = 0;
222 
223 	while (1) {
224 		WARN_ON(!ceph_frag_contains_value(t, v));
225 		frag = __ceph_find_frag(ci, t);
226 		if (!frag)
227 			break; /* t is a leaf */
228 		if (frag->split_by == 0) {
229 			if (pfrag)
230 				memcpy(pfrag, frag, sizeof(*pfrag));
231 			if (found)
232 				*found = 1;
233 			break;
234 		}
235 
236 		/* choose child */
237 		nway = 1 << frag->split_by;
238 		dout("choose_frag(%x) %x splits by %d (%d ways)\n", v, t,
239 		     frag->split_by, nway);
240 		for (i = 0; i < nway; i++) {
241 			n = ceph_frag_make_child(t, frag->split_by, i);
242 			if (ceph_frag_contains_value(n, v)) {
243 				t = n;
244 				break;
245 			}
246 		}
247 		BUG_ON(i == nway);
248 	}
249 	dout("choose_frag(%x) = %x\n", v, t);
250 
251 	return t;
252 }
253 
254 u32 ceph_choose_frag(struct ceph_inode_info *ci, u32 v,
255 		     struct ceph_inode_frag *pfrag, int *found)
256 {
257 	u32 ret;
258 	mutex_lock(&ci->i_fragtree_mutex);
259 	ret = __ceph_choose_frag(ci, v, pfrag, found);
260 	mutex_unlock(&ci->i_fragtree_mutex);
261 	return ret;
262 }
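
/*
 * Example descent (illustrative): with only the root split 2 ways
 * (frag 0x00000000, split_by = 1), __ceph_choose_frag() starts at the
 * root, finds its split entry, and steps into whichever 1-bit child
 * contains v, returning 0x01000000 or 0x01800000.  With an empty
 * fragtree the loop exits on the first iteration and the root frag is
 * returned: the directory is unfragmented.
 */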
263 
264 /*
265  * Process dirfrag (delegation) info from the mds.  Include leaf
266  * fragment in tree ONLY if ndist > 0.  Otherwise, only
267  * branches/splits are included in i_fragtree.
268  */
269 static int ceph_fill_dirfrag(struct inode *inode,
270 			     struct ceph_mds_reply_dirfrag *dirinfo)
271 {
272 	struct ceph_inode_info *ci = ceph_inode(inode);
273 	struct ceph_inode_frag *frag;
274 	u32 id = le32_to_cpu(dirinfo->frag);
275 	int mds = le32_to_cpu(dirinfo->auth);
276 	int ndist = le32_to_cpu(dirinfo->ndist);
277 	int diri_auth = -1;
278 	int i;
279 	int err = 0;
280 
281 	spin_lock(&ci->i_ceph_lock);
282 	if (ci->i_auth_cap)
283 		diri_auth = ci->i_auth_cap->mds;
284 	spin_unlock(&ci->i_ceph_lock);
285 
286 	if (mds == -1) /* CDIR_AUTH_PARENT */
287 		mds = diri_auth;
288 
289 	mutex_lock(&ci->i_fragtree_mutex);
290 	if (ndist == 0 && mds == diri_auth) {
291 		/* no delegation info needed. */
292 		frag = __ceph_find_frag(ci, id);
293 		if (!frag)
294 			goto out;
295 		if (frag->split_by == 0) {
296 			/* tree leaf, remove */
297 			dout("fill_dirfrag removed %llx.%llx frag %x"
298 			     " (no ref)\n", ceph_vinop(inode), id);
299 			rb_erase(&frag->node, &ci->i_fragtree);
300 			kfree(frag);
301 		} else {
302 			/* tree branch, keep and clear */
303 			dout("fill_dirfrag cleared %llx.%llx frag %x"
304 			     " referral\n", ceph_vinop(inode), id);
305 			frag->mds = -1;
306 			frag->ndist = 0;
307 		}
308 		goto out;
309 	}
310 
311 
312 	/* find/add this frag to store mds delegation info */
313 	frag = __get_or_create_frag(ci, id);
314 	if (IS_ERR(frag)) {
315 		/* this is not the end of the world; we can continue
316 		   with bad/inaccurate delegation info */
317 		pr_err("fill_dirfrag ENOMEM on mds ref %llx.%llx fg %x\n",
318 		       ceph_vinop(inode), le32_to_cpu(dirinfo->frag));
319 		err = -ENOMEM;
320 		goto out;
321 	}
322 
323 	frag->mds = mds;
324 	frag->ndist = min_t(u32, ndist, CEPH_MAX_DIRFRAG_REP);
325 	for (i = 0; i < frag->ndist; i++)
326 		frag->dist[i] = le32_to_cpu(dirinfo->dist[i]);
327 	dout("fill_dirfrag %llx.%llx frag %x ndist=%d\n",
328 	     ceph_vinop(inode), frag->frag, frag->ndist);
329 
330 out:
331 	mutex_unlock(&ci->i_fragtree_mutex);
332 	return err;
333 }
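
/*
 * Example: if the reply says frag 0x01000000 is served by the same mds
 * as the inode's auth cap and ndist == 0, there is no delegation worth
 * remembering: a leaf entry is deleted outright and a branch entry only
 * has its referral (mds/ndist) cleared.  Any other combination is
 * stored so requests for names falling in that frag can be directed to
 * the right mds.
 */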
334 
335 static int frag_tree_split_cmp(const void *l, const void *r)
336 {
337 	struct ceph_frag_tree_split *ls = (struct ceph_frag_tree_split*)l;
338 	struct ceph_frag_tree_split *rs = (struct ceph_frag_tree_split*)r;
339 	return ceph_frag_compare(le32_to_cpu(ls->frag),
340 				 le32_to_cpu(rs->frag));
341 }
342 
343 static bool is_frag_child(u32 f, struct ceph_inode_frag *frag)
344 {
345 	if (!frag)
346 		return f == ceph_frag_make(0, 0);
347 	if (ceph_frag_bits(f) != ceph_frag_bits(frag->frag) + frag->split_by)
348 		return false;
349 	return ceph_frag_contains_value(frag->frag, ceph_frag_value(f));
350 }
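
/*
 * E.g. (same encoding assumption as above): for a parent frag
 * 0x00000000 with split_by = 1, is_frag_child() accepts exactly
 * 0x01000000 and 0x01800000: one more significant bit than the parent,
 * with a value the parent contains.
 */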
351 
352 static int ceph_fill_fragtree(struct inode *inode,
353 			      struct ceph_frag_tree_head *fragtree,
354 			      struct ceph_mds_reply_dirfrag *dirinfo)
355 {
356 	struct ceph_inode_info *ci = ceph_inode(inode);
357 	struct ceph_inode_frag *frag, *prev_frag = NULL;
358 	struct rb_node *rb_node;
359 	unsigned i, split_by, nsplits;
360 	u32 id;
361 	bool update = false;
362 
363 	mutex_lock(&ci->i_fragtree_mutex);
364 	nsplits = le32_to_cpu(fragtree->nsplits);
365 	if (nsplits != ci->i_fragtree_nsplits) {
366 		update = true;
367 	} else if (nsplits) {
368 		i = prandom_u32() % nsplits;
369 		id = le32_to_cpu(fragtree->splits[i].frag);
370 		if (!__ceph_find_frag(ci, id))
371 			update = true;
372 	} else if (!RB_EMPTY_ROOT(&ci->i_fragtree)) {
373 		rb_node = rb_first(&ci->i_fragtree);
374 		frag = rb_entry(rb_node, struct ceph_inode_frag, node);
375 		if (frag->frag != ceph_frag_make(0, 0) || rb_next(rb_node))
376 			update = true;
377 	}
378 	if (!update && dirinfo) {
379 		id = le32_to_cpu(dirinfo->frag);
380 		if (id != __ceph_choose_frag(ci, id, NULL, NULL))
381 			update = true;
382 	}
383 	if (!update)
384 		goto out_unlock;
385 
386 	if (nsplits > 1) {
387 		sort(fragtree->splits, nsplits, sizeof(fragtree->splits[0]),
388 		     frag_tree_split_cmp, NULL);
389 	}
390 
391 	dout("fill_fragtree %llx.%llx\n", ceph_vinop(inode));
392 	rb_node = rb_first(&ci->i_fragtree);
393 	for (i = 0; i < nsplits; i++) {
394 		id = le32_to_cpu(fragtree->splits[i].frag);
395 		split_by = le32_to_cpu(fragtree->splits[i].by);
396 		if (split_by == 0 || ceph_frag_bits(id) + split_by > 24) {
397 			pr_err("fill_fragtree %llx.%llx invalid split %d/%u, "
398 			       "frag %x split by %d\n", ceph_vinop(inode),
399 			       i, nsplits, id, split_by);
400 			continue;
401 		}
402 		frag = NULL;
403 		while (rb_node) {
404 			frag = rb_entry(rb_node, struct ceph_inode_frag, node);
405 			if (ceph_frag_compare(frag->frag, id) >= 0) {
406 				if (frag->frag != id)
407 					frag = NULL;
408 				else
409 					rb_node = rb_next(rb_node);
410 				break;
411 			}
412 			rb_node = rb_next(rb_node);
413 			/* delete stale split/leaf node */
414 			if (frag->split_by > 0 ||
415 			    !is_frag_child(frag->frag, prev_frag)) {
416 				rb_erase(&frag->node, &ci->i_fragtree);
417 				if (frag->split_by > 0)
418 					ci->i_fragtree_nsplits--;
419 				kfree(frag);
420 			}
421 			frag = NULL;
422 		}
423 		if (!frag) {
424 			frag = __get_or_create_frag(ci, id);
425 			if (IS_ERR(frag))
426 				continue;
427 		}
428 		if (frag->split_by == 0)
429 			ci->i_fragtree_nsplits++;
430 		frag->split_by = split_by;
431 		dout(" frag %x split by %d\n", frag->frag, frag->split_by);
432 		prev_frag = frag;
433 	}
434 	while (rb_node) {
435 		frag = rb_entry(rb_node, struct ceph_inode_frag, node);
436 		rb_node = rb_next(rb_node);
437 		/* delete stale split/leaf node */
438 		if (frag->split_by > 0 ||
439 		    !is_frag_child(frag->frag, prev_frag)) {
440 			rb_erase(&frag->node, &ci->i_fragtree);
441 			if (frag->split_by > 0)
442 				ci->i_fragtree_nsplits--;
443 			kfree(frag);
444 		}
445 	}
446 out_unlock:
447 	mutex_unlock(&ci->i_fragtree_mutex);
448 	return 0;
449 }
450 
451 /*
452  * initialize a newly allocated inode.
453  */
454 struct inode *ceph_alloc_inode(struct super_block *sb)
455 {
456 	struct ceph_inode_info *ci;
457 	int i;
458 
459 	ci = alloc_inode_sb(sb, ceph_inode_cachep, GFP_NOFS);
460 	if (!ci)
461 		return NULL;
462 
463 	dout("alloc_inode %p\n", &ci->vfs_inode);
464 
465 	/* Set parameters for the netfs library */
466 	netfs_i_context_init(&ci->vfs_inode, &ceph_netfs_ops);
467 
468 	spin_lock_init(&ci->i_ceph_lock);
469 
470 	ci->i_version = 0;
471 	ci->i_inline_version = 0;
472 	ci->i_time_warp_seq = 0;
473 	ci->i_ceph_flags = 0;
474 	atomic64_set(&ci->i_ordered_count, 1);
475 	atomic64_set(&ci->i_release_count, 1);
476 	atomic64_set(&ci->i_complete_seq[0], 0);
477 	atomic64_set(&ci->i_complete_seq[1], 0);
478 	ci->i_symlink = NULL;
479 
480 	ci->i_max_bytes = 0;
481 	ci->i_max_files = 0;
482 
483 	memset(&ci->i_dir_layout, 0, sizeof(ci->i_dir_layout));
484 	memset(&ci->i_cached_layout, 0, sizeof(ci->i_cached_layout));
485 	RCU_INIT_POINTER(ci->i_layout.pool_ns, NULL);
486 
487 	ci->i_fragtree = RB_ROOT;
488 	mutex_init(&ci->i_fragtree_mutex);
489 
490 	ci->i_xattrs.blob = NULL;
491 	ci->i_xattrs.prealloc_blob = NULL;
492 	ci->i_xattrs.dirty = false;
493 	ci->i_xattrs.index = RB_ROOT;
494 	ci->i_xattrs.count = 0;
495 	ci->i_xattrs.names_size = 0;
496 	ci->i_xattrs.vals_size = 0;
497 	ci->i_xattrs.version = 0;
498 	ci->i_xattrs.index_version = 0;
499 
500 	ci->i_caps = RB_ROOT;
501 	ci->i_auth_cap = NULL;
502 	ci->i_dirty_caps = 0;
503 	ci->i_flushing_caps = 0;
504 	INIT_LIST_HEAD(&ci->i_dirty_item);
505 	INIT_LIST_HEAD(&ci->i_flushing_item);
506 	ci->i_prealloc_cap_flush = NULL;
507 	INIT_LIST_HEAD(&ci->i_cap_flush_list);
508 	init_waitqueue_head(&ci->i_cap_wq);
509 	ci->i_hold_caps_max = 0;
510 	INIT_LIST_HEAD(&ci->i_cap_delay_list);
511 	INIT_LIST_HEAD(&ci->i_cap_snaps);
512 	ci->i_head_snapc = NULL;
513 	ci->i_snap_caps = 0;
514 
515 	ci->i_last_rd = ci->i_last_wr = jiffies - 3600 * HZ;
516 	for (i = 0; i < CEPH_FILE_MODE_BITS; i++)
517 		ci->i_nr_by_mode[i] = 0;
518 
519 	mutex_init(&ci->i_truncate_mutex);
520 	ci->i_truncate_seq = 0;
521 	ci->i_truncate_size = 0;
522 	ci->i_truncate_pending = 0;
523 
524 	ci->i_max_size = 0;
525 	ci->i_reported_size = 0;
526 	ci->i_wanted_max_size = 0;
527 	ci->i_requested_max_size = 0;
528 
529 	ci->i_pin_ref = 0;
530 	ci->i_rd_ref = 0;
531 	ci->i_rdcache_ref = 0;
532 	ci->i_wr_ref = 0;
533 	ci->i_wb_ref = 0;
534 	ci->i_fx_ref = 0;
535 	ci->i_wrbuffer_ref = 0;
536 	ci->i_wrbuffer_ref_head = 0;
537 	atomic_set(&ci->i_filelock_ref, 0);
538 	atomic_set(&ci->i_shared_gen, 1);
539 	ci->i_rdcache_gen = 0;
540 	ci->i_rdcache_revoking = 0;
541 
542 	INIT_LIST_HEAD(&ci->i_unsafe_dirops);
543 	INIT_LIST_HEAD(&ci->i_unsafe_iops);
544 	spin_lock_init(&ci->i_unsafe_lock);
545 
546 	ci->i_snap_realm = NULL;
547 	INIT_LIST_HEAD(&ci->i_snap_realm_item);
548 	INIT_LIST_HEAD(&ci->i_snap_flush_item);
549 
550 	INIT_WORK(&ci->i_work, ceph_inode_work);
551 	ci->i_work_mask = 0;
552 	memset(&ci->i_btime, '\0', sizeof(ci->i_btime));
553 	return &ci->vfs_inode;
554 }
555 
556 void ceph_free_inode(struct inode *inode)
557 {
558 	struct ceph_inode_info *ci = ceph_inode(inode);
559 
560 	kfree(ci->i_symlink);
561 	kmem_cache_free(ceph_inode_cachep, ci);
562 }
563 
564 void ceph_evict_inode(struct inode *inode)
565 {
566 	struct ceph_inode_info *ci = ceph_inode(inode);
567 	struct ceph_mds_client *mdsc = ceph_sb_to_mdsc(inode->i_sb);
568 	struct ceph_inode_frag *frag;
569 	struct rb_node *n;
570 
571 	dout("evict_inode %p ino %llx.%llx\n", inode, ceph_vinop(inode));
572 
573 	percpu_counter_dec(&mdsc->metric.total_inodes);
574 
575 	truncate_inode_pages_final(&inode->i_data);
576 	if (inode->i_state & I_PINNING_FSCACHE_WB)
577 		ceph_fscache_unuse_cookie(inode, true);
578 	clear_inode(inode);
579 
580 	ceph_fscache_unregister_inode_cookie(ci);
581 
582 	__ceph_remove_caps(ci);
583 
584 	if (__ceph_has_quota(ci, QUOTA_GET_ANY))
585 		ceph_adjust_quota_realms_count(inode, false);
586 
587 	/*
588 	 * we may still have a snap_realm reference if there are stray
589 	 * caps in i_snap_caps.
590 	 */
591 	if (ci->i_snap_realm) {
592 		if (ceph_snap(inode) == CEPH_NOSNAP) {
593 			dout(" dropping residual ref to snap realm %p\n",
594 			     ci->i_snap_realm);
595 			ceph_change_snap_realm(inode, NULL);
596 		} else {
597 			ceph_put_snapid_map(mdsc, ci->i_snapid_map);
598 			ci->i_snap_realm = NULL;
599 		}
600 	}
601 
602 	while ((n = rb_first(&ci->i_fragtree)) != NULL) {
603 		frag = rb_entry(n, struct ceph_inode_frag, node);
604 		rb_erase(n, &ci->i_fragtree);
605 		kfree(frag);
606 	}
607 	ci->i_fragtree_nsplits = 0;
608 
609 	__ceph_destroy_xattrs(ci);
610 	if (ci->i_xattrs.blob)
611 		ceph_buffer_put(ci->i_xattrs.blob);
612 	if (ci->i_xattrs.prealloc_blob)
613 		ceph_buffer_put(ci->i_xattrs.prealloc_blob);
614 
615 	ceph_put_string(rcu_dereference_raw(ci->i_layout.pool_ns));
616 	ceph_put_string(rcu_dereference_raw(ci->i_cached_layout.pool_ns));
617 }
618 
619 static inline blkcnt_t calc_inode_blocks(u64 size)
620 {
621 	return (size + (1<<9) - 1) >> 9;
622 }
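
/*
 * i_blocks is in 512-byte sectors, rounded up: a 1-byte file counts as
 * 1 block, a 4096-byte file as 8, and an empty file as 0.
 */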
623 
624 /*
625  * Helpers to fill in size, ctime, mtime, and atime.  We have to be
626  * careful because either the client or MDS may have more up to date
627  * info, depending on which capabilities are held, and whether
628  * time_warp_seq or truncate_seq have increased.  (Ordinarily, mtime
629  * and size are monotonically increasing, except when utimes() or
630  * truncate() increments the corresponding _seq values.)
631  */
632 int ceph_fill_file_size(struct inode *inode, int issued,
633 			u32 truncate_seq, u64 truncate_size, u64 size)
634 {
635 	struct ceph_inode_info *ci = ceph_inode(inode);
636 	int queue_trunc = 0;
637 	loff_t isize = i_size_read(inode);
638 
639 	if (ceph_seq_cmp(truncate_seq, ci->i_truncate_seq) > 0 ||
640 	    (truncate_seq == ci->i_truncate_seq && size > isize)) {
641 		dout("size %lld -> %llu\n", isize, size);
642 		if (size > 0 && S_ISDIR(inode->i_mode)) {
643 			pr_err("fill_file_size non-zero size for directory\n");
644 			size = 0;
645 		}
646 		i_size_write(inode, size);
647 		inode->i_blocks = calc_inode_blocks(size);
648 		/*
649 		 * If we're expanding, then we should be able to just update
650 		 * the existing cookie.
651 		 */
652 		if (size > isize)
653 			ceph_fscache_update(inode);
654 		ci->i_reported_size = size;
655 		if (truncate_seq != ci->i_truncate_seq) {
656 			dout("truncate_seq %u -> %u\n",
657 			     ci->i_truncate_seq, truncate_seq);
658 			ci->i_truncate_seq = truncate_seq;
659 
660 			/* the MDS should have revoked these caps */
661 			WARN_ON_ONCE(issued & (CEPH_CAP_FILE_EXCL |
662 					       CEPH_CAP_FILE_RD |
663 					       CEPH_CAP_FILE_WR |
664 					       CEPH_CAP_FILE_LAZYIO));
665 			/*
666 			 * If we hold relevant caps, or in the case where we're
667 			 * not the only client referencing this file and we
668 			 * don't hold those caps, then we need to check whether
669 			 * the file is either opened or mmaped
670 			 */
671 			if ((issued & (CEPH_CAP_FILE_CACHE|
672 				       CEPH_CAP_FILE_BUFFER)) ||
673 			    mapping_mapped(inode->i_mapping) ||
674 			    __ceph_is_file_opened(ci)) {
675 				ci->i_truncate_pending++;
676 				queue_trunc = 1;
677 			}
678 		}
679 	}
680 	if (ceph_seq_cmp(truncate_seq, ci->i_truncate_seq) >= 0 &&
681 	    ci->i_truncate_size != truncate_size) {
682 		dout("truncate_size %lld -> %llu\n", ci->i_truncate_size,
683 		     truncate_size);
684 		ci->i_truncate_size = truncate_size;
685 	}
686 	return queue_trunc;
687 }
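
/*
 * Worked example: if we last saw truncate_seq 2 with i_size 4096 and
 * the MDS now reports truncate_seq 3 with size 0, the smaller size
 * still wins (a truncate happened); i_truncate_seq becomes 3 and, if
 * the file is open, mmapped, or we hold Fc/Fb caps, queue_trunc is
 * returned so the caller queues a vmtruncate.  Had the MDS reported
 * truncate_seq 1, its stale size would have been ignored.
 */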
688 
689 void ceph_fill_file_time(struct inode *inode, int issued,
690 			 u64 time_warp_seq, struct timespec64 *ctime,
691 			 struct timespec64 *mtime, struct timespec64 *atime)
692 {
693 	struct ceph_inode_info *ci = ceph_inode(inode);
694 	int warn = 0;
695 
696 	if (issued & (CEPH_CAP_FILE_EXCL|
697 		      CEPH_CAP_FILE_WR|
698 		      CEPH_CAP_FILE_BUFFER|
699 		      CEPH_CAP_AUTH_EXCL|
700 		      CEPH_CAP_XATTR_EXCL)) {
701 		if (ci->i_version == 0 ||
702 		    timespec64_compare(ctime, &inode->i_ctime) > 0) {
703 			dout("ctime %lld.%09ld -> %lld.%09ld inc w/ cap\n",
704 			     inode->i_ctime.tv_sec, inode->i_ctime.tv_nsec,
705 			     ctime->tv_sec, ctime->tv_nsec);
706 			inode->i_ctime = *ctime;
707 		}
708 		if (ci->i_version == 0 ||
709 		    ceph_seq_cmp(time_warp_seq, ci->i_time_warp_seq) > 0) {
710 			/* the MDS did a utimes() */
711 			dout("mtime %lld.%09ld -> %lld.%09ld "
712 			     "tw %d -> %d\n",
713 			     inode->i_mtime.tv_sec, inode->i_mtime.tv_nsec,
714 			     mtime->tv_sec, mtime->tv_nsec,
715 			     ci->i_time_warp_seq, (int)time_warp_seq);
716 
717 			inode->i_mtime = *mtime;
718 			inode->i_atime = *atime;
719 			ci->i_time_warp_seq = time_warp_seq;
720 		} else if (time_warp_seq == ci->i_time_warp_seq) {
721 			/* nobody did utimes(); take the max */
722 			if (timespec64_compare(mtime, &inode->i_mtime) > 0) {
723 				dout("mtime %lld.%09ld -> %lld.%09ld inc\n",
724 				     inode->i_mtime.tv_sec,
725 				     inode->i_mtime.tv_nsec,
726 				     mtime->tv_sec, mtime->tv_nsec);
727 				inode->i_mtime = *mtime;
728 			}
729 			if (timespec64_compare(atime, &inode->i_atime) > 0) {
730 				dout("atime %lld.%09ld -> %lld.%09ld inc\n",
731 				     inode->i_atime.tv_sec,
732 				     inode->i_atime.tv_nsec,
733 				     atime->tv_sec, atime->tv_nsec);
734 				inode->i_atime = *atime;
735 			}
736 		} else if (issued & CEPH_CAP_FILE_EXCL) {
737 			/* we did a utimes(); ignore mds values */
738 		} else {
739 			warn = 1;
740 		}
741 	} else {
742 		/* we have no write|excl caps; whatever the MDS says is true */
743 		if (ceph_seq_cmp(time_warp_seq, ci->i_time_warp_seq) >= 0) {
744 			inode->i_ctime = *ctime;
745 			inode->i_mtime = *mtime;
746 			inode->i_atime = *atime;
747 			ci->i_time_warp_seq = time_warp_seq;
748 		} else {
749 			warn = 1;
750 		}
751 	}
752 	if (warn) /* time_warp_seq shouldn't go backwards */
753 		dout("%p mds time_warp_seq %llu < %u\n",
754 		     inode, time_warp_seq, ci->i_time_warp_seq);
755 }
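
/*
 * Example: with none of the write/excl caps held, an MDS report whose
 * time_warp_seq is >= ours is authoritative and all three timestamps
 * are copied verbatim.  If we do hold such caps and the seqs are
 * equal, mtime/atime only ever move forward, since local updates may
 * legitimately be ahead of the MDS.
 */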
756 
757 /*
758  * Populate an inode based on info from mds.  May be called on new or
759  * existing inodes.
760  */
761 int ceph_fill_inode(struct inode *inode, struct page *locked_page,
762 		    struct ceph_mds_reply_info_in *iinfo,
763 		    struct ceph_mds_reply_dirfrag *dirinfo,
764 		    struct ceph_mds_session *session, int cap_fmode,
765 		    struct ceph_cap_reservation *caps_reservation)
766 {
767 	struct ceph_mds_client *mdsc = ceph_sb_to_mdsc(inode->i_sb);
768 	struct ceph_mds_reply_inode *info = iinfo->in;
769 	struct ceph_inode_info *ci = ceph_inode(inode);
770 	int issued, new_issued, info_caps;
771 	struct timespec64 mtime, atime, ctime;
772 	struct ceph_buffer *xattr_blob = NULL;
773 	struct ceph_buffer *old_blob = NULL;
774 	struct ceph_string *pool_ns = NULL;
775 	struct ceph_cap *new_cap = NULL;
776 	int err = 0;
777 	bool wake = false;
778 	bool queue_trunc = false;
779 	bool new_version = false;
780 	bool fill_inline = false;
781 	umode_t mode = le32_to_cpu(info->mode);
782 	dev_t rdev = le32_to_cpu(info->rdev);
783 
784 	lockdep_assert_held(&mdsc->snap_rwsem);
785 
786 	dout("%s %p ino %llx.%llx v %llu had %llu\n", __func__,
787 	     inode, ceph_vinop(inode), le64_to_cpu(info->version),
788 	     ci->i_version);
789 
790 	/* Once I_NEW is cleared, we can't change type or dev numbers */
791 	if (inode->i_state & I_NEW) {
792 		inode->i_mode = mode;
793 	} else {
794 		if (inode_wrong_type(inode, mode)) {
795 			pr_warn_once("inode type changed! (ino %llx.%llx is 0%o, mds says 0%o)\n",
796 				     ceph_vinop(inode), inode->i_mode, mode);
797 			return -ESTALE;
798 		}
799 
800 		if ((S_ISCHR(mode) || S_ISBLK(mode)) && inode->i_rdev != rdev) {
801 			pr_warn_once("dev inode rdev changed! (ino %llx.%llx is %u:%u, mds says %u:%u)\n",
802 				     ceph_vinop(inode), MAJOR(inode->i_rdev),
803 				     MINOR(inode->i_rdev), MAJOR(rdev),
804 				     MINOR(rdev));
805 			return -ESTALE;
806 		}
807 	}
808 
809 	info_caps = le32_to_cpu(info->cap.caps);
810 
811 	/* prealloc new cap struct */
812 	if (info_caps && ceph_snap(inode) == CEPH_NOSNAP) {
813 		new_cap = ceph_get_cap(mdsc, caps_reservation);
814 		if (!new_cap)
815 			return -ENOMEM;
816 	}
817 
818 	/*
819 	 * prealloc xattr data, if it looks like we'll need it.  only
820 	 * if len > 4 (meaning there are actually xattrs; the first 4
821 	 * bytes are the xattr count).
822 	 */
823 	if (iinfo->xattr_len > 4) {
824 		xattr_blob = ceph_buffer_new(iinfo->xattr_len, GFP_NOFS);
825 		if (!xattr_blob)
826 			pr_err("%s ENOMEM xattr blob %d bytes\n", __func__,
827 			       iinfo->xattr_len);
828 	}
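
	/*
	 * (Blob layout sketch, an assumption about the MDS xattr map
	 * encoding rather than something verified here: a __le32 entry
	 * count followed by length-prefixed name/value pairs.  An empty
	 * map is just the 4-byte count, hence the "> 4" test above.)
	 */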
829 
830 	if (iinfo->pool_ns_len > 0)
831 		pool_ns = ceph_find_or_create_string(iinfo->pool_ns_data,
832 						     iinfo->pool_ns_len);
833 
834 	if (ceph_snap(inode) != CEPH_NOSNAP && !ci->i_snapid_map)
835 		ci->i_snapid_map = ceph_get_snapid_map(mdsc, ceph_snap(inode));
836 
837 	spin_lock(&ci->i_ceph_lock);
838 
839 	/*
840 	 * the provided version is odd for projected (unstable) inode
841 	 * values and even for stable ones.  skip the update if we have newer stable
842 	 * info (ours>=theirs, e.g. due to racing mds replies), unless
843 	 * we are getting projected (unstable) info (in which case the
844 	 * version is odd, and we want ours>theirs).
845 	 *   us   them
846 	 *   2    2     skip
847 	 *   3    2     skip
848 	 *   3    3     update
849 	 */
850 	if (ci->i_version == 0 ||
851 	    ((info->cap.flags & CEPH_CAP_FLAG_AUTH) &&
852 	     le64_to_cpu(info->version) > (ci->i_version & ~1)))
853 		new_version = true;
854 
855 	/* Update change_attribute */
856 	inode_set_max_iversion_raw(inode, iinfo->change_attr);
857 
858 	__ceph_caps_issued(ci, &issued);
859 	issued |= __ceph_caps_dirty(ci);
860 	new_issued = ~issued & info_caps;
861 
862 	/* directories have fl_stripe_unit set to zero */
863 	if (le32_to_cpu(info->layout.fl_stripe_unit))
864 		inode->i_blkbits =
865 			fls(le32_to_cpu(info->layout.fl_stripe_unit)) - 1;
866 	else
867 		inode->i_blkbits = CEPH_BLOCK_SHIFT;
868 
869 	__ceph_update_quota(ci, iinfo->max_bytes, iinfo->max_files);
870 
871 	if ((new_version || (new_issued & CEPH_CAP_AUTH_SHARED)) &&
872 	    (issued & CEPH_CAP_AUTH_EXCL) == 0) {
873 		inode->i_mode = mode;
874 		inode->i_uid = make_kuid(&init_user_ns, le32_to_cpu(info->uid));
875 		inode->i_gid = make_kgid(&init_user_ns, le32_to_cpu(info->gid));
876 		dout("%p mode 0%o uid.gid %d.%d\n", inode, inode->i_mode,
877 		     from_kuid(&init_user_ns, inode->i_uid),
878 		     from_kgid(&init_user_ns, inode->i_gid));
879 		ceph_decode_timespec64(&ci->i_btime, &iinfo->btime);
880 		ceph_decode_timespec64(&ci->i_snap_btime, &iinfo->snap_btime);
881 	}
882 
883 	if ((new_version || (new_issued & CEPH_CAP_LINK_SHARED)) &&
884 	    (issued & CEPH_CAP_LINK_EXCL) == 0)
885 		set_nlink(inode, le32_to_cpu(info->nlink));
886 
887 	if (new_version || (new_issued & CEPH_CAP_ANY_RD)) {
888 		/* be careful with mtime, atime, size */
889 		ceph_decode_timespec64(&atime, &info->atime);
890 		ceph_decode_timespec64(&mtime, &info->mtime);
891 		ceph_decode_timespec64(&ctime, &info->ctime);
892 		ceph_fill_file_time(inode, issued,
893 				le32_to_cpu(info->time_warp_seq),
894 				&ctime, &mtime, &atime);
895 	}
896 
897 	if (new_version || (info_caps & CEPH_CAP_FILE_SHARED)) {
898 		ci->i_files = le64_to_cpu(info->files);
899 		ci->i_subdirs = le64_to_cpu(info->subdirs);
900 	}
901 
902 	if (new_version ||
903 	    (new_issued & (CEPH_CAP_ANY_FILE_RD | CEPH_CAP_ANY_FILE_WR))) {
904 		s64 old_pool = ci->i_layout.pool_id;
905 		struct ceph_string *old_ns;
906 
907 		ceph_file_layout_from_legacy(&ci->i_layout, &info->layout);
908 		old_ns = rcu_dereference_protected(ci->i_layout.pool_ns,
909 					lockdep_is_held(&ci->i_ceph_lock));
910 		rcu_assign_pointer(ci->i_layout.pool_ns, pool_ns);
911 
912 		if (ci->i_layout.pool_id != old_pool || pool_ns != old_ns)
913 			ci->i_ceph_flags &= ~CEPH_I_POOL_PERM;
914 
915 		pool_ns = old_ns;
916 
917 		queue_trunc = ceph_fill_file_size(inode, issued,
918 					le32_to_cpu(info->truncate_seq),
919 					le64_to_cpu(info->truncate_size),
920 					le64_to_cpu(info->size));
921 		/* only update max_size on auth cap */
922 		if ((info->cap.flags & CEPH_CAP_FLAG_AUTH) &&
923 		    ci->i_max_size != le64_to_cpu(info->max_size)) {
924 			dout("max_size %lld -> %llu\n", ci->i_max_size,
925 					le64_to_cpu(info->max_size));
926 			ci->i_max_size = le64_to_cpu(info->max_size);
927 		}
928 	}
929 
930 	/* layout and rstat are not tracked by capability; update them if
931 	 * the inode info is from the auth mds */
932 	if (new_version || (info->cap.flags & CEPH_CAP_FLAG_AUTH)) {
933 		if (S_ISDIR(inode->i_mode)) {
934 			ci->i_dir_layout = iinfo->dir_layout;
935 			ci->i_rbytes = le64_to_cpu(info->rbytes);
936 			ci->i_rfiles = le64_to_cpu(info->rfiles);
937 			ci->i_rsubdirs = le64_to_cpu(info->rsubdirs);
938 			ci->i_dir_pin = iinfo->dir_pin;
939 			ci->i_rsnaps = iinfo->rsnaps;
940 			ceph_decode_timespec64(&ci->i_rctime, &info->rctime);
941 		}
942 	}
943 
944 	/* xattrs */
945 	/* note that if i_xattrs.len <= 4, i_xattrs.data will still be NULL. */
946 	if ((ci->i_xattrs.version == 0 || !(issued & CEPH_CAP_XATTR_EXCL))  &&
947 	    le64_to_cpu(info->xattr_version) > ci->i_xattrs.version) {
948 		if (ci->i_xattrs.blob)
949 			old_blob = ci->i_xattrs.blob;
950 		ci->i_xattrs.blob = xattr_blob;
951 		if (xattr_blob)
952 			memcpy(ci->i_xattrs.blob->vec.iov_base,
953 			       iinfo->xattr_data, iinfo->xattr_len);
954 		ci->i_xattrs.version = le64_to_cpu(info->xattr_version);
955 		ceph_forget_all_cached_acls(inode);
956 		ceph_security_invalidate_secctx(inode);
957 		xattr_blob = NULL;
958 	}
959 
960 	/* finally update i_version */
961 	if (le64_to_cpu(info->version) > ci->i_version)
962 		ci->i_version = le64_to_cpu(info->version);
963 
964 	inode->i_mapping->a_ops = &ceph_aops;
965 
966 	switch (inode->i_mode & S_IFMT) {
967 	case S_IFIFO:
968 	case S_IFBLK:
969 	case S_IFCHR:
970 	case S_IFSOCK:
971 		inode->i_blkbits = PAGE_SHIFT;
972 		init_special_inode(inode, inode->i_mode, rdev);
973 		inode->i_op = &ceph_file_iops;
974 		break;
975 	case S_IFREG:
976 		inode->i_op = &ceph_file_iops;
977 		inode->i_fop = &ceph_file_fops;
978 		break;
979 	case S_IFLNK:
980 		inode->i_op = &ceph_symlink_iops;
981 		if (!ci->i_symlink) {
982 			u32 symlen = iinfo->symlink_len;
983 			char *sym;
984 
985 			spin_unlock(&ci->i_ceph_lock);
986 
987 			if (symlen != i_size_read(inode)) {
988 				pr_err("%s %llx.%llx BAD symlink "
989 					"size %lld\n", __func__,
990 					ceph_vinop(inode),
991 					i_size_read(inode));
992 				i_size_write(inode, symlen);
993 				inode->i_blocks = calc_inode_blocks(symlen);
994 			}
995 
996 			err = -ENOMEM;
997 			sym = kstrndup(iinfo->symlink, symlen, GFP_NOFS);
998 			if (!sym)
999 				goto out;
1000 
1001 			spin_lock(&ci->i_ceph_lock);
1002 			if (!ci->i_symlink)
1003 				ci->i_symlink = sym;
1004 			else
1005 				kfree(sym); /* lost a race */
1006 		}
1007 		inode->i_link = ci->i_symlink;
1008 		break;
1009 	case S_IFDIR:
1010 		inode->i_op = &ceph_dir_iops;
1011 		inode->i_fop = &ceph_dir_fops;
1012 		break;
1013 	default:
1014 		pr_err("%s %llx.%llx BAD mode 0%o\n", __func__,
1015 		       ceph_vinop(inode), inode->i_mode);
1016 	}
1017 
1018 	/* were we issued a capability? */
1019 	if (info_caps) {
1020 		if (ceph_snap(inode) == CEPH_NOSNAP) {
1021 			ceph_add_cap(inode, session,
1022 				     le64_to_cpu(info->cap.cap_id),
1023 				     info_caps,
1024 				     le32_to_cpu(info->cap.wanted),
1025 				     le32_to_cpu(info->cap.seq),
1026 				     le32_to_cpu(info->cap.mseq),
1027 				     le64_to_cpu(info->cap.realm),
1028 				     info->cap.flags, &new_cap);
1029 
1030 			/* set dir completion flag? */
1031 			if (S_ISDIR(inode->i_mode) &&
1032 			    ci->i_files == 0 && ci->i_subdirs == 0 &&
1033 			    (info_caps & CEPH_CAP_FILE_SHARED) &&
1034 			    (issued & CEPH_CAP_FILE_EXCL) == 0 &&
1035 			    !__ceph_dir_is_complete(ci)) {
1036 				dout(" marking %p complete (empty)\n", inode);
1037 				i_size_write(inode, 0);
1038 				__ceph_dir_set_complete(ci,
1039 					atomic64_read(&ci->i_release_count),
1040 					atomic64_read(&ci->i_ordered_count));
1041 			}
1042 
1043 			wake = true;
1044 		} else {
1045 			dout(" %p got snap_caps %s\n", inode,
1046 			     ceph_cap_string(info_caps));
1047 			ci->i_snap_caps |= info_caps;
1048 		}
1049 	}
1050 
1051 	if (iinfo->inline_version > 0 &&
1052 	    iinfo->inline_version >= ci->i_inline_version) {
1053 		int cache_caps = CEPH_CAP_FILE_CACHE | CEPH_CAP_FILE_LAZYIO;
1054 		ci->i_inline_version = iinfo->inline_version;
1055 		if (ci->i_inline_version != CEPH_INLINE_NONE &&
1056 		    (locked_page || (info_caps & cache_caps)))
1057 			fill_inline = true;
1058 	}
1059 
1060 	if (cap_fmode >= 0) {
1061 		if (!info_caps)
1062 			pr_warn("mds issued no caps on %llx.%llx\n",
1063 				ceph_vinop(inode));
1064 		__ceph_touch_fmode(ci, mdsc, cap_fmode);
1065 	}
1066 
1067 	spin_unlock(&ci->i_ceph_lock);
1068 
1069 	ceph_fscache_register_inode_cookie(inode);
1070 
1071 	if (fill_inline)
1072 		ceph_fill_inline_data(inode, locked_page,
1073 				      iinfo->inline_data, iinfo->inline_len);
1074 
1075 	if (wake)
1076 		wake_up_all(&ci->i_cap_wq);
1077 
1078 	/* queue truncate if we saw i_size decrease */
1079 	if (queue_trunc)
1080 		ceph_queue_vmtruncate(inode);
1081 
1082 	/* populate frag tree */
1083 	if (S_ISDIR(inode->i_mode))
1084 		ceph_fill_fragtree(inode, &info->fragtree, dirinfo);
1085 
1086 	/* update delegation info? */
1087 	if (dirinfo)
1088 		ceph_fill_dirfrag(inode, dirinfo);
1089 
1090 	err = 0;
1091 out:
1092 	if (new_cap)
1093 		ceph_put_cap(mdsc, new_cap);
1094 	ceph_buffer_put(old_blob);
1095 	ceph_buffer_put(xattr_blob);
1096 	ceph_put_string(pool_ns);
1097 	return err;
1098 }
1099 
1100 /*
1101  * caller should hold session s_mutex and dentry->d_lock.
1102  */
1103 static void __update_dentry_lease(struct inode *dir, struct dentry *dentry,
1104 				  struct ceph_mds_reply_lease *lease,
1105 				  struct ceph_mds_session *session,
1106 				  unsigned long from_time,
1107 				  struct ceph_mds_session **old_lease_session)
1108 {
1109 	struct ceph_dentry_info *di = ceph_dentry(dentry);
1110 	unsigned mask = le16_to_cpu(lease->mask);
1111 	long unsigned duration = le32_to_cpu(lease->duration_ms);
1112 	long unsigned ttl = from_time + (duration * HZ) / 1000;
1113 	long unsigned half_ttl = from_time + (duration * HZ / 2) / 1000;
1114 
1115 	dout("update_dentry_lease %p duration %lu ms ttl %lu\n",
1116 	     dentry, duration, ttl);
1117 
1118 	/* only track leases on regular dentries */
1119 	if (ceph_snap(dir) != CEPH_NOSNAP)
1120 		return;
1121 
1122 	if (mask & CEPH_LEASE_PRIMARY_LINK)
1123 		di->flags |= CEPH_DENTRY_PRIMARY_LINK;
1124 	else
1125 		di->flags &= ~CEPH_DENTRY_PRIMARY_LINK;
1126 
1127 	di->lease_shared_gen = atomic_read(&ceph_inode(dir)->i_shared_gen);
1128 	if (!(mask & CEPH_LEASE_VALID)) {
1129 		__ceph_dentry_dir_lease_touch(di);
1130 		return;
1131 	}
1132 
1133 	if (di->lease_gen == atomic_read(&session->s_cap_gen) &&
1134 	    time_before(ttl, di->time))
1135 		return;  /* we already have a newer lease. */
1136 
1137 	if (di->lease_session && di->lease_session != session) {
1138 		*old_lease_session = di->lease_session;
1139 		di->lease_session = NULL;
1140 	}
1141 
1142 	if (!di->lease_session)
1143 		di->lease_session = ceph_get_mds_session(session);
1144 	di->lease_gen = atomic_read(&session->s_cap_gen);
1145 	di->lease_seq = le32_to_cpu(lease->seq);
1146 	di->lease_renew_after = half_ttl;
1147 	di->lease_renew_from = 0;
1148 	di->time = ttl;
1149 
1150 	__ceph_dentry_lease_touch(di);
1151 }
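
/*
 * Timing example: with HZ=1000 and duration_ms=30000, a lease received
 * at from_time=j expires at ttl = j + 30000 jiffies and becomes
 * eligible for renewal at half_ttl = j + 15000.  A reply without
 * CEPH_LEASE_VALID in the mask (typically duration 0) only refreshes
 * lease_shared_gen and the dir-lease LRU above; no timed lease is kept.
 */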
1152 
1153 static inline void update_dentry_lease(struct inode *dir, struct dentry *dentry,
1154 					struct ceph_mds_reply_lease *lease,
1155 					struct ceph_mds_session *session,
1156 					unsigned long from_time)
1157 {
1158 	struct ceph_mds_session *old_lease_session = NULL;
1159 	spin_lock(&dentry->d_lock);
1160 	__update_dentry_lease(dir, dentry, lease, session, from_time,
1161 			      &old_lease_session);
1162 	spin_unlock(&dentry->d_lock);
1163 	ceph_put_mds_session(old_lease_session);
1164 }
1165 
1166 /*
1167  * update dentry lease without having parent inode locked
1168  */
1169 static void update_dentry_lease_careful(struct dentry *dentry,
1170 					struct ceph_mds_reply_lease *lease,
1171 					struct ceph_mds_session *session,
1172 					unsigned long from_time,
1173 					char *dname, u32 dname_len,
1174 					struct ceph_vino *pdvino,
1175 					struct ceph_vino *ptvino)
1176 
1177 {
1178 	struct inode *dir;
1179 	struct ceph_mds_session *old_lease_session = NULL;
1180 
1181 	spin_lock(&dentry->d_lock);
1182 	/* make sure dentry's name matches target */
1183 	if (dentry->d_name.len != dname_len ||
1184 	    memcmp(dentry->d_name.name, dname, dname_len))
1185 		goto out_unlock;
1186 
1187 	dir = d_inode(dentry->d_parent);
1188 	/* make sure parent matches dvino */
1189 	if (!ceph_ino_compare(dir, pdvino))
1190 		goto out_unlock;
1191 
1192 	/* make sure dentry's inode matches target. NULL ptvino means that
1193 	 * we expect a negative dentry */
1194 	if (ptvino) {
1195 		if (d_really_is_negative(dentry))
1196 			goto out_unlock;
1197 		if (!ceph_ino_compare(d_inode(dentry), ptvino))
1198 			goto out_unlock;
1199 	} else {
1200 		if (d_really_is_positive(dentry))
1201 			goto out_unlock;
1202 	}
1203 
1204 	__update_dentry_lease(dir, dentry, lease, session,
1205 			      from_time, &old_lease_session);
1206 out_unlock:
1207 	spin_unlock(&dentry->d_lock);
1208 	ceph_put_mds_session(old_lease_session);
1209 }
1210 
1211 /*
1212  * splice a dentry to an inode.
1213  * caller must hold directory i_rwsem for this to be safe.
1214  */
1215 static int splice_dentry(struct dentry **pdn, struct inode *in)
1216 {
1217 	struct dentry *dn = *pdn;
1218 	struct dentry *realdn;
1219 
1220 	BUG_ON(d_inode(dn));
1221 
1222 	if (S_ISDIR(in->i_mode)) {
1223 		/* If inode is directory, d_splice_alias() below will remove
1224 		 * 'realdn' from its origin parent. We need to ensure that
1225 		 * origin parent's readdir cache will not reference 'realdn'
1226 		 */
1227 		realdn = d_find_any_alias(in);
1228 		if (realdn) {
1229 			struct ceph_dentry_info *di = ceph_dentry(realdn);
1230 			spin_lock(&realdn->d_lock);
1231 
1232 			realdn->d_op->d_prune(realdn);
1233 
1234 			di->time = jiffies;
1235 			di->lease_shared_gen = 0;
1236 			di->offset = 0;
1237 
1238 			spin_unlock(&realdn->d_lock);
1239 			dput(realdn);
1240 		}
1241 	}
1242 
1243 	/* dn must be unhashed */
1244 	if (!d_unhashed(dn))
1245 		d_drop(dn);
1246 	realdn = d_splice_alias(in, dn);
1247 	if (IS_ERR(realdn)) {
1248 		pr_err("splice_dentry error %ld %p inode %p ino %llx.%llx\n",
1249 		       PTR_ERR(realdn), dn, in, ceph_vinop(in));
1250 		return PTR_ERR(realdn);
1251 	}
1252 
1253 	if (realdn) {
1254 		dout("dn %p (%d) spliced with %p (%d) "
1255 		     "inode %p ino %llx.%llx\n",
1256 		     dn, d_count(dn),
1257 		     realdn, d_count(realdn),
1258 		     d_inode(realdn), ceph_vinop(d_inode(realdn)));
1259 		dput(dn);
1260 		*pdn = realdn;
1261 	} else {
1262 		BUG_ON(!ceph_dentry(dn));
1263 		dout("dn %p attached to %p ino %llx.%llx\n",
1264 		     dn, d_inode(dn), ceph_vinop(d_inode(dn)));
1265 	}
1266 	return 0;
1267 }
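
/*
 * Contract note: *pdn may be replaced by an existing alias.  Callers
 * pass the dentry by reference and re-read it afterwards, e.g. (from
 * ceph_fill_trace() below):
 *
 *	err = splice_dentry(&req->r_dentry, in);
 *	if (err < 0)
 *		goto done;
 *	dn = req->r_dentry;	(may have been spliced)
 */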
1268 
1269 /*
1270  * Incorporate results into the local cache.  This is either just
1271  * one inode, or a directory, dentry, and possibly linked-to inode (e.g.,
1272  * after a lookup).
1273  *
1274  * A reply may contain
1275  *         a directory inode along with a dentry.
1276  *  and/or a target inode
1277  *
1278  * Called with snap_rwsem (read).
1279  */
1280 int ceph_fill_trace(struct super_block *sb, struct ceph_mds_request *req)
1281 {
1282 	struct ceph_mds_session *session = req->r_session;
1283 	struct ceph_mds_reply_info_parsed *rinfo = &req->r_reply_info;
1284 	struct inode *in = NULL;
1285 	struct ceph_vino tvino, dvino;
1286 	struct ceph_fs_client *fsc = ceph_sb_to_client(sb);
1287 	int err = 0;
1288 
1289 	dout("fill_trace %p is_dentry %d is_target %d\n", req,
1290 	     rinfo->head->is_dentry, rinfo->head->is_target);
1291 
1292 	if (!rinfo->head->is_target && !rinfo->head->is_dentry) {
1293 		dout("fill_trace reply is empty!\n");
1294 		if (rinfo->head->result == 0 && req->r_parent)
1295 			ceph_invalidate_dir_request(req);
1296 		return 0;
1297 	}
1298 
1299 	if (rinfo->head->is_dentry) {
1300 		struct inode *dir = req->r_parent;
1301 
1302 		if (dir) {
1303 			err = ceph_fill_inode(dir, NULL, &rinfo->diri,
1304 					      rinfo->dirfrag, session, -1,
1305 					      &req->r_caps_reservation);
1306 			if (err < 0)
1307 				goto done;
1308 		} else {
1309 			WARN_ON_ONCE(1);
1310 		}
1311 
1312 		if (dir && req->r_op == CEPH_MDS_OP_LOOKUPNAME &&
1313 		    test_bit(CEPH_MDS_R_PARENT_LOCKED, &req->r_req_flags) &&
1314 		    !test_bit(CEPH_MDS_R_ABORTED, &req->r_req_flags)) {
1315 			struct qstr dname;
1316 			struct dentry *dn, *parent;
1317 
1318 			BUG_ON(!rinfo->head->is_target);
1319 			BUG_ON(req->r_dentry);
1320 
1321 			parent = d_find_any_alias(dir);
1322 			BUG_ON(!parent);
1323 
1324 			dname.name = rinfo->dname;
1325 			dname.len = rinfo->dname_len;
1326 			dname.hash = full_name_hash(parent, dname.name, dname.len);
1327 			tvino.ino = le64_to_cpu(rinfo->targeti.in->ino);
1328 			tvino.snap = le64_to_cpu(rinfo->targeti.in->snapid);
1329 retry_lookup:
1330 			dn = d_lookup(parent, &dname);
1331 			dout("d_lookup on parent=%p name=%.*s got %p\n",
1332 			     parent, dname.len, dname.name, dn);
1333 
1334 			if (!dn) {
1335 				dn = d_alloc(parent, &dname);
1336 				dout("d_alloc %p '%.*s' = %p\n", parent,
1337 				     dname.len, dname.name, dn);
1338 				if (!dn) {
1339 					dput(parent);
1340 					err = -ENOMEM;
1341 					goto done;
1342 				}
1343 				err = 0;
1344 			} else if (d_really_is_positive(dn) &&
1345 				   (ceph_ino(d_inode(dn)) != tvino.ino ||
1346 				    ceph_snap(d_inode(dn)) != tvino.snap)) {
1347 				dout(" dn %p points to wrong inode %p\n",
1348 				     dn, d_inode(dn));
1349 				ceph_dir_clear_ordered(dir);
1350 				d_delete(dn);
1351 				dput(dn);
1352 				goto retry_lookup;
1353 			}
1354 
1355 			req->r_dentry = dn;
1356 			dput(parent);
1357 		}
1358 	}
1359 
1360 	if (rinfo->head->is_target) {
1361 		/* Should be filled in by handle_reply */
1362 		BUG_ON(!req->r_target_inode);
1363 
1364 		in = req->r_target_inode;
1365 		err = ceph_fill_inode(in, req->r_locked_page, &rinfo->targeti,
1366 				NULL, session,
1367 				(!test_bit(CEPH_MDS_R_ABORTED, &req->r_req_flags) &&
1368 				 !test_bit(CEPH_MDS_R_ASYNC, &req->r_req_flags) &&
1369 				 rinfo->head->result == 0) ?  req->r_fmode : -1,
1370 				&req->r_caps_reservation);
1371 		if (err < 0) {
1372 			pr_err("ceph_fill_inode badness %p %llx.%llx\n",
1373 				in, ceph_vinop(in));
1374 			req->r_target_inode = NULL;
1375 			if (in->i_state & I_NEW)
1376 				discard_new_inode(in);
1377 			else
1378 				iput(in);
1379 			goto done;
1380 		}
1381 		if (in->i_state & I_NEW)
1382 			unlock_new_inode(in);
1383 	}
1384 
1385 	/*
1386 	 * ignore null lease/binding on snapdir ENOENT, or else we
1387 	 * will have trouble splicing in the virtual snapdir later
1388 	 */
1389 	if (rinfo->head->is_dentry &&
1390 	    !test_bit(CEPH_MDS_R_ABORTED, &req->r_req_flags) &&
1391 	    test_bit(CEPH_MDS_R_PARENT_LOCKED, &req->r_req_flags) &&
1392 	    (rinfo->head->is_target || strncmp(req->r_dentry->d_name.name,
1393 					       fsc->mount_options->snapdir_name,
1394 					       req->r_dentry->d_name.len))) {
1395 		/*
1396 		 * lookup link rename   : null -> possibly existing inode
1397 		 * mknod symlink mkdir  : null -> new inode
1398 		 * unlink               : linked -> null
1399 		 */
1400 		struct inode *dir = req->r_parent;
1401 		struct dentry *dn = req->r_dentry;
1402 		bool have_dir_cap, have_lease;
1403 
1404 		BUG_ON(!dn);
1405 		BUG_ON(!dir);
1406 		BUG_ON(d_inode(dn->d_parent) != dir);
1407 
1408 		dvino.ino = le64_to_cpu(rinfo->diri.in->ino);
1409 		dvino.snap = le64_to_cpu(rinfo->diri.in->snapid);
1410 
1411 		BUG_ON(ceph_ino(dir) != dvino.ino);
1412 		BUG_ON(ceph_snap(dir) != dvino.snap);
1413 
1414 		/* do we have a lease on the whole dir? */
1415 		have_dir_cap =
1416 			(le32_to_cpu(rinfo->diri.in->cap.caps) &
1417 			 CEPH_CAP_FILE_SHARED);
1418 
1419 		/* do we have a dn lease? */
1420 		have_lease = have_dir_cap ||
1421 			le32_to_cpu(rinfo->dlease->duration_ms);
1422 		if (!have_lease)
1423 			dout("fill_trace  no dentry lease or dir cap\n");
1424 
1425 		/* rename? */
1426 		if (req->r_old_dentry && req->r_op == CEPH_MDS_OP_RENAME) {
1427 			struct inode *olddir = req->r_old_dentry_dir;
1428 			BUG_ON(!olddir);
1429 
1430 			dout(" src %p '%pd' dst %p '%pd'\n",
1431 			     req->r_old_dentry,
1432 			     req->r_old_dentry,
1433 			     dn, dn);
1434 			dout("fill_trace doing d_move %p -> %p\n",
1435 			     req->r_old_dentry, dn);
1436 
1437 			/* d_move screws up sibling dentries' offsets */
1438 			ceph_dir_clear_ordered(dir);
1439 			ceph_dir_clear_ordered(olddir);
1440 
1441 			d_move(req->r_old_dentry, dn);
1442 			dout(" src %p '%pd' dst %p '%pd'\n",
1443 			     req->r_old_dentry,
1444 			     req->r_old_dentry,
1445 			     dn, dn);
1446 
1447 			/* ensure target dentry is invalidated, despite
1448 			   rehashing bug in vfs_rename_dir */
1449 			ceph_invalidate_dentry_lease(dn);
1450 
1451 			dout("dn %p gets new offset %lld\n", req->r_old_dentry,
1452 			     ceph_dentry(req->r_old_dentry)->offset);
1453 
1454 		/* swap r_dentry and r_old_dentry in case
1455 			 * splice_dentry() gets called later. This is safe
1456 			 * because no other place will use them */
1457 			req->r_dentry = req->r_old_dentry;
1458 			req->r_old_dentry = dn;
1459 			dn = req->r_dentry;
1460 		}
1461 
1462 		/* null dentry? */
1463 		if (!rinfo->head->is_target) {
1464 			dout("fill_trace null dentry\n");
1465 			if (d_really_is_positive(dn)) {
1466 				dout("d_delete %p\n", dn);
1467 				ceph_dir_clear_ordered(dir);
1468 				d_delete(dn);
1469 			} else if (have_lease) {
1470 				if (d_unhashed(dn))
1471 					d_add(dn, NULL);
1472 			}
1473 
1474 			if (!d_unhashed(dn) && have_lease)
1475 				update_dentry_lease(dir, dn,
1476 						    rinfo->dlease, session,
1477 						    req->r_request_started);
1478 			goto done;
1479 		}
1480 
1481 		/* attach proper inode */
1482 		if (d_really_is_negative(dn)) {
1483 			ceph_dir_clear_ordered(dir);
1484 			ihold(in);
1485 			err = splice_dentry(&req->r_dentry, in);
1486 			if (err < 0)
1487 				goto done;
1488 			dn = req->r_dentry;  /* may have spliced */
1489 		} else if (d_really_is_positive(dn) && d_inode(dn) != in) {
1490 			dout(" %p links to %p %llx.%llx, not %llx.%llx\n",
1491 			     dn, d_inode(dn), ceph_vinop(d_inode(dn)),
1492 			     ceph_vinop(in));
1493 			d_invalidate(dn);
1494 			have_lease = false;
1495 		}
1496 
1497 		if (have_lease) {
1498 			update_dentry_lease(dir, dn,
1499 					    rinfo->dlease, session,
1500 					    req->r_request_started);
1501 		}
1502 		dout(" final dn %p\n", dn);
1503 	} else if ((req->r_op == CEPH_MDS_OP_LOOKUPSNAP ||
1504 		    req->r_op == CEPH_MDS_OP_MKSNAP) &&
1505 	           test_bit(CEPH_MDS_R_PARENT_LOCKED, &req->r_req_flags) &&
1506 		   !test_bit(CEPH_MDS_R_ABORTED, &req->r_req_flags)) {
1507 		struct inode *dir = req->r_parent;
1508 
1509 		/* fill out a snapdir LOOKUPSNAP dentry */
1510 		BUG_ON(!dir);
1511 		BUG_ON(ceph_snap(dir) != CEPH_SNAPDIR);
1512 		BUG_ON(!req->r_dentry);
1513 		dout(" linking snapped dir %p to dn %p\n", in, req->r_dentry);
1514 		ceph_dir_clear_ordered(dir);
1515 		ihold(in);
1516 		err = splice_dentry(&req->r_dentry, in);
1517 		if (err < 0)
1518 			goto done;
1519 	} else if (rinfo->head->is_dentry && req->r_dentry) {
1520 		/* parent inode is not locked, be careful */
1521 		struct ceph_vino *ptvino = NULL;
1522 		dvino.ino = le64_to_cpu(rinfo->diri.in->ino);
1523 		dvino.snap = le64_to_cpu(rinfo->diri.in->snapid);
1524 		if (rinfo->head->is_target) {
1525 			tvino.ino = le64_to_cpu(rinfo->targeti.in->ino);
1526 			tvino.snap = le64_to_cpu(rinfo->targeti.in->snapid);
1527 			ptvino = &tvino;
1528 		}
1529 		update_dentry_lease_careful(req->r_dentry, rinfo->dlease,
1530 					    session, req->r_request_started,
1531 					    rinfo->dname, rinfo->dname_len,
1532 					    &dvino, ptvino);
1533 	}
1534 done:
1535 	dout("fill_trace done err=%d\n", err);
1536 	return err;
1537 }
1538 
1539 /*
1540  * Prepopulate our cache with readdir results, leases, etc.
1541  */
1542 static int readdir_prepopulate_inodes_only(struct ceph_mds_request *req,
1543 					   struct ceph_mds_session *session)
1544 {
1545 	struct ceph_mds_reply_info_parsed *rinfo = &req->r_reply_info;
1546 	int i, err = 0;
1547 
1548 	for (i = 0; i < rinfo->dir_nr; i++) {
1549 		struct ceph_mds_reply_dir_entry *rde = rinfo->dir_entries + i;
1550 		struct ceph_vino vino;
1551 		struct inode *in;
1552 		int rc;
1553 
1554 		vino.ino = le64_to_cpu(rde->inode.in->ino);
1555 		vino.snap = le64_to_cpu(rde->inode.in->snapid);
1556 
1557 		in = ceph_get_inode(req->r_dentry->d_sb, vino);
1558 		if (IS_ERR(in)) {
1559 			err = PTR_ERR(in);
1560 			dout("new_inode badness got %d\n", err);
1561 			continue;
1562 		}
1563 		rc = ceph_fill_inode(in, NULL, &rde->inode, NULL, session,
1564 				     -1, &req->r_caps_reservation);
1565 		if (rc < 0) {
1566 			pr_err("ceph_fill_inode badness on %p got %d\n",
1567 			       in, rc);
1568 			err = rc;
1569 			if (in->i_state & I_NEW) {
1570 				ihold(in);
1571 				discard_new_inode(in);
1572 			}
1573 		} else if (in->i_state & I_NEW) {
1574 			unlock_new_inode(in);
1575 		}
1576 
1577 		iput(in);
1578 	}
1579 
1580 	return err;
1581 }
1582 
1583 void ceph_readdir_cache_release(struct ceph_readdir_cache_control *ctl)
1584 {
1585 	if (ctl->page) {
1586 		kunmap(ctl->page);
1587 		put_page(ctl->page);
1588 		ctl->page = NULL;
1589 	}
1590 }
1591 
1592 static int fill_readdir_cache(struct inode *dir, struct dentry *dn,
1593 			      struct ceph_readdir_cache_control *ctl,
1594 			      struct ceph_mds_request *req)
1595 {
1596 	struct ceph_inode_info *ci = ceph_inode(dir);
1597 	unsigned nsize = PAGE_SIZE / sizeof(struct dentry*);
1598 	unsigned idx = ctl->index % nsize;
1599 	pgoff_t pgoff = ctl->index / nsize;
1600 
1601 	if (!ctl->page || pgoff != page_index(ctl->page)) {
1602 		ceph_readdir_cache_release(ctl);
1603 		if (idx == 0)
1604 			ctl->page = grab_cache_page(&dir->i_data, pgoff);
1605 		else
1606 			ctl->page = find_lock_page(&dir->i_data, pgoff);
1607 		if (!ctl->page) {
1608 			ctl->index = -1;
1609 			return idx == 0 ? -ENOMEM : 0;
1610 		}
1611 		/* reading/filling the cache are serialized by
1612 		 * i_rwsem, no need to use page lock */
1613 		unlock_page(ctl->page);
1614 		ctl->dentries = kmap(ctl->page);
1615 		if (idx == 0)
1616 			memset(ctl->dentries, 0, PAGE_SIZE);
1617 	}
1618 
1619 	if (req->r_dir_release_cnt == atomic64_read(&ci->i_release_count) &&
1620 	    req->r_dir_ordered_cnt == atomic64_read(&ci->i_ordered_count)) {
1621 		dout("readdir cache dn %p idx %d\n", dn, ctl->index);
1622 		ctl->dentries[idx] = dn;
1623 		ctl->index++;
1624 	} else {
1625 		dout("disable readdir cache\n");
1626 		ctl->index = -1;
1627 	}
1628 	return 0;
1629 }
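
/*
 * Cache geometry example: with 4K pages and 8-byte dentry pointers,
 * nsize is 512 entries per page, so ctl->index 1000 lands at pgoff 1,
 * idx 488.  Only idx 0 may allocate a fresh page (grab_cache_page);
 * later slots just look the page up (find_lock_page) and, on a miss,
 * disable the cache (ctl->index = -1) rather than fail, since a
 * partially filled page is useless to readdir.
 */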
1630 
1631 int ceph_readdir_prepopulate(struct ceph_mds_request *req,
1632 			     struct ceph_mds_session *session)
1633 {
1634 	struct dentry *parent = req->r_dentry;
1635 	struct ceph_inode_info *ci = ceph_inode(d_inode(parent));
1636 	struct ceph_mds_reply_info_parsed *rinfo = &req->r_reply_info;
1637 	struct qstr dname;
1638 	struct dentry *dn;
1639 	struct inode *in;
1640 	int err = 0, skipped = 0, ret, i;
1641 	u32 frag = le32_to_cpu(req->r_args.readdir.frag);
1642 	u32 last_hash = 0;
1643 	u32 fpos_offset;
1644 	struct ceph_readdir_cache_control cache_ctl = {};
1645 
1646 	if (test_bit(CEPH_MDS_R_ABORTED, &req->r_req_flags))
1647 		return readdir_prepopulate_inodes_only(req, session);
1648 
1649 	if (rinfo->hash_order) {
1650 		if (req->r_path2) {
1651 			last_hash = ceph_str_hash(ci->i_dir_layout.dl_dir_hash,
1652 						  req->r_path2,
1653 						  strlen(req->r_path2));
1654 			last_hash = ceph_frag_value(last_hash);
1655 		} else if (rinfo->offset_hash) {
1656 			/* mds understands offset_hash */
1657 			WARN_ON_ONCE(req->r_readdir_offset != 2);
1658 			last_hash = le32_to_cpu(req->r_args.readdir.offset_hash);
1659 		}
1660 	}
1661 
1662 	if (rinfo->dir_dir &&
1663 	    le32_to_cpu(rinfo->dir_dir->frag) != frag) {
1664 		dout("readdir_prepopulate got new frag %x -> %x\n",
1665 		     frag, le32_to_cpu(rinfo->dir_dir->frag));
1666 		frag = le32_to_cpu(rinfo->dir_dir->frag);
1667 		if (!rinfo->hash_order)
1668 			req->r_readdir_offset = 2;
1669 	}
1670 
1671 	if (le32_to_cpu(rinfo->head->op) == CEPH_MDS_OP_LSSNAP) {
1672 		dout("readdir_prepopulate %d items under SNAPDIR dn %p\n",
1673 		     rinfo->dir_nr, parent);
1674 	} else {
1675 		dout("readdir_prepopulate %d items under dn %p\n",
1676 		     rinfo->dir_nr, parent);
1677 		if (rinfo->dir_dir)
1678 			ceph_fill_dirfrag(d_inode(parent), rinfo->dir_dir);
1679 
1680 		if (ceph_frag_is_leftmost(frag) &&
1681 		    req->r_readdir_offset == 2 &&
1682 		    !(rinfo->hash_order && last_hash)) {
1683 			/* note dir version at start of readdir so we can
1684 			 * tell if any dentries get dropped */
1685 			req->r_dir_release_cnt =
1686 				atomic64_read(&ci->i_release_count);
1687 			req->r_dir_ordered_cnt =
1688 				atomic64_read(&ci->i_ordered_count);
1689 			req->r_readdir_cache_idx = 0;
1690 		}
1691 	}
1692 
1693 	cache_ctl.index = req->r_readdir_cache_idx;
1694 	fpos_offset = req->r_readdir_offset;
1695 
1696 	/* FIXME: release caps/leases if error occurs */
1697 	for (i = 0; i < rinfo->dir_nr; i++) {
1698 		struct ceph_mds_reply_dir_entry *rde = rinfo->dir_entries + i;
1699 		struct ceph_vino tvino;
1700 
1701 		dname.name = rde->name;
1702 		dname.len = rde->name_len;
1703 		dname.hash = full_name_hash(parent, dname.name, dname.len);
1704 
1705 		tvino.ino = le64_to_cpu(rde->inode.in->ino);
1706 		tvino.snap = le64_to_cpu(rde->inode.in->snapid);
1707 
1708 		if (rinfo->hash_order) {
1709 			u32 hash = ceph_str_hash(ci->i_dir_layout.dl_dir_hash,
1710 						 rde->name, rde->name_len);
1711 			hash = ceph_frag_value(hash);
1712 			if (hash != last_hash)
1713 				fpos_offset = 2;
1714 			last_hash = hash;
1715 			rde->offset = ceph_make_fpos(hash, fpos_offset++, true);
1716 		} else {
1717 			rde->offset = ceph_make_fpos(frag, fpos_offset++, false);
1718 		}
1719 
1720 retry_lookup:
1721 		dn = d_lookup(parent, &dname);
1722 		dout("d_lookup on parent=%p name=%.*s got %p\n",
1723 		     parent, dname.len, dname.name, dn);
1724 
1725 		if (!dn) {
1726 			dn = d_alloc(parent, &dname);
1727 			dout("d_alloc %p '%.*s' = %p\n", parent,
1728 			     dname.len, dname.name, dn);
1729 			if (!dn) {
1730 				dout("d_alloc badness\n");
1731 				err = -ENOMEM;
1732 				goto out;
1733 			}
1734 		} else if (d_really_is_positive(dn) &&
1735 			   (ceph_ino(d_inode(dn)) != tvino.ino ||
1736 			    ceph_snap(d_inode(dn)) != tvino.snap)) {
1737 			struct ceph_dentry_info *di = ceph_dentry(dn);
1738 			dout(" dn %p points to wrong inode %p\n",
1739 			     dn, d_inode(dn));
1740 
1741 			spin_lock(&dn->d_lock);
1742 			if (di->offset > 0 &&
1743 			    di->lease_shared_gen ==
1744 			    atomic_read(&ci->i_shared_gen)) {
1745 				__ceph_dir_clear_ordered(ci);
1746 				di->offset = 0;
1747 			}
1748 			spin_unlock(&dn->d_lock);
1749 
1750 			d_delete(dn);
1751 			dput(dn);
1752 			goto retry_lookup;
1753 		}
1754 
1755 		/* inode */
1756 		if (d_really_is_positive(dn)) {
1757 			in = d_inode(dn);
1758 		} else {
1759 			in = ceph_get_inode(parent->d_sb, tvino);
1760 			if (IS_ERR(in)) {
1761 				dout("new_inode badness\n");
1762 				d_drop(dn);
1763 				dput(dn);
1764 				err = PTR_ERR(in);
1765 				goto out;
1766 			}
1767 		}
1768 
1769 		ret = ceph_fill_inode(in, NULL, &rde->inode, NULL, session,
1770 				      -1, &req->r_caps_reservation);
1771 		if (ret < 0) {
1772 			pr_err("ceph_fill_inode badness on %p\n", in);
1773 			if (d_really_is_negative(dn)) {
1774 				if (in->i_state & I_NEW) {
1775 					ihold(in);
1776 					discard_new_inode(in);
1777 				}
1778 				iput(in);
1779 			}
1780 			d_drop(dn);
1781 			err = ret;
1782 			goto next_item;
1783 		}
1784 		if (in->i_state & I_NEW)
1785 			unlock_new_inode(in);
1786 
1787 		if (d_really_is_negative(dn)) {
1788 			if (ceph_security_xattr_deadlock(in)) {
1789 				dout(" skip splicing dn %p to inode %p"
1790 				     " (security xattr deadlock)\n", dn, in);
1791 				iput(in);
1792 				skipped++;
1793 				goto next_item;
1794 			}
1795 
1796 			err = splice_dentry(&dn, in);
1797 			if (err < 0)
1798 				goto next_item;
1799 		}
1800 
1801 		ceph_dentry(dn)->offset = rde->offset;
1802 
1803 		update_dentry_lease(d_inode(parent), dn,
1804 				    rde->lease, req->r_session,
1805 				    req->r_request_started);
1806 
1807 		if (err == 0 && skipped == 0 && cache_ctl.index >= 0) {
1808 			ret = fill_readdir_cache(d_inode(parent), dn,
1809 						 &cache_ctl, req);
1810 			if (ret < 0)
1811 				err = ret;
1812 		}
1813 next_item:
1814 		dput(dn);
1815 	}
1816 out:
1817 	if (err == 0 && skipped == 0) {
1818 		set_bit(CEPH_MDS_R_DID_PREPOPULATE, &req->r_req_flags);
1819 		req->r_readdir_cache_idx = cache_ctl.index;
1820 	}
1821 	ceph_readdir_cache_release(&cache_ctl);
1822 	dout("readdir_prepopulate done\n");
1823 	return err;
1824 }
1825 
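/*
 * Update the locally-cached inode size. Returns true if the new size
 * should be reported back to the MDS, in which case the caller is
 * expected to follow up with a cap check, e.g. (sketch):
 *
 *	if (ceph_inode_set_size(inode, new_size))
 *		ceph_check_caps(ceph_inode(inode), 0, NULL);
 */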
1826 bool ceph_inode_set_size(struct inode *inode, loff_t size)
1827 {
1828 	struct ceph_inode_info *ci = ceph_inode(inode);
1829 	bool ret;
1830 
1831 	spin_lock(&ci->i_ceph_lock);
1832 	dout("set_size %p %llu -> %llu\n", inode, i_size_read(inode), size);
1833 	i_size_write(inode, size);
1834 	ceph_fscache_update(inode);
1835 	inode->i_blocks = calc_inode_blocks(size);
1836 
1837 	ret = __ceph_should_report_size(ci);
1838 
1839 	spin_unlock(&ci->i_ceph_lock);
1840 
1841 	return ret;
1842 }
1843 
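/*
 * Queue deferred work for this inode on the fs client's inode_wq.
 * A reference to the inode is taken for the queued work; if the work
 * item was already queued, the extra reference is dropped right away.
 */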
1844 void ceph_queue_inode_work(struct inode *inode, int work_bit)
1845 {
1846 	struct ceph_fs_client *fsc = ceph_inode_to_client(inode);
1847 	struct ceph_inode_info *ci = ceph_inode(inode);
1848 	set_bit(work_bit, &ci->i_work_mask);
1849 
1850 	ihold(inode);
1851 	if (queue_work(fsc->inode_wq, &ci->i_work)) {
1852 		dout("queue_inode_work %p, mask=%lx\n", inode, ci->i_work_mask);
1853 	} else {
1854 		dout("queue_inode_work %p already queued, mask=%lx\n",
1855 		     inode, ci->i_work_mask);
1856 		iput(inode);
1857 	}
1858 }
1859 
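/*
 * Invalidate the pagecache for this inode, typically because the MDS
 * is revoking the Fc (file cache) capability. i_rdcache_gen is
 * sampled under i_ceph_lock before the invalidation and re-checked
 * afterwards; if it changed in the meantime, the invalidation raced
 * with the cap being re-issued and is not acknowledged yet.
 */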
1860 static void ceph_do_invalidate_pages(struct inode *inode)
1861 {
1862 	struct ceph_inode_info *ci = ceph_inode(inode);
1863 	u32 orig_gen;
1864 	int check = 0;
1865 
1866 	ceph_fscache_invalidate(inode, false);
1867 
1868 	mutex_lock(&ci->i_truncate_mutex);
1869 
1870 	if (ceph_inode_is_shutdown(inode)) {
1871 		pr_warn_ratelimited("%s: inode %llx.%llx is shut down\n",
1872 				    __func__, ceph_vinop(inode));
1873 		mapping_set_error(inode->i_mapping, -EIO);
1874 		truncate_pagecache(inode, 0);
1875 		mutex_unlock(&ci->i_truncate_mutex);
1876 		goto out;
1877 	}
1878 
1879 	spin_lock(&ci->i_ceph_lock);
1880 	dout("invalidate_pages %p gen %d revoking %d\n", inode,
1881 	     ci->i_rdcache_gen, ci->i_rdcache_revoking);
1882 	if (ci->i_rdcache_revoking != ci->i_rdcache_gen) {
1883 		if (__ceph_caps_revoking_other(ci, NULL, CEPH_CAP_FILE_CACHE))
1884 			check = 1;
1885 		spin_unlock(&ci->i_ceph_lock);
1886 		mutex_unlock(&ci->i_truncate_mutex);
1887 		goto out;
1888 	}
1889 	orig_gen = ci->i_rdcache_gen;
1890 	spin_unlock(&ci->i_ceph_lock);
1891 
1892 	if (invalidate_inode_pages2(inode->i_mapping) < 0) {
1893 		pr_err("invalidate_inode_pages2 %llx.%llx failed\n",
1894 		       ceph_vinop(inode));
1895 	}
1896 
1897 	spin_lock(&ci->i_ceph_lock);
1898 	if (orig_gen == ci->i_rdcache_gen &&
1899 	    orig_gen == ci->i_rdcache_revoking) {
1900 		dout("invalidate_pages %p gen %d successful\n", inode,
1901 		     ci->i_rdcache_gen);
1902 		ci->i_rdcache_revoking--;
1903 		check = 1;
1904 	} else {
1905 		dout("invalidate_pages %p gen %d raced, now %d revoking %d\n",
1906 		     inode, orig_gen, ci->i_rdcache_gen,
1907 		     ci->i_rdcache_revoking);
1908 		if (__ceph_caps_revoking_other(ci, NULL, CEPH_CAP_FILE_CACHE))
1909 			check = 1;
1910 	}
1911 	spin_unlock(&ci->i_ceph_lock);
1912 	mutex_unlock(&ci->i_truncate_mutex);
1913 out:
1914 	if (check)
1915 		ceph_check_caps(ci, 0, NULL);
1916 }
1917 
1918 /*
1919  * Make sure any pending truncation is applied before doing anything
1920  * that may depend on it.
1921  */
1922 void __ceph_do_pending_vmtruncate(struct inode *inode)
1923 {
1924 	struct ceph_inode_info *ci = ceph_inode(inode);
1925 	u64 to;
1926 	int wrbuffer_refs, finish = 0;
1927 
1928 	mutex_lock(&ci->i_truncate_mutex);
1929 retry:
1930 	spin_lock(&ci->i_ceph_lock);
1931 	if (ci->i_truncate_pending == 0) {
1932 		dout("__do_pending_vmtruncate %p none pending\n", inode);
1933 		spin_unlock(&ci->i_ceph_lock);
1934 		mutex_unlock(&ci->i_truncate_mutex);
1935 		return;
1936 	}
1937 
1938 	/*
1939 	 * make sure any dirty snapped pages are flushed before we
1940 	 * possibly truncate them, so write AND block!
1941 	 */
1942 	if (ci->i_wrbuffer_ref_head < ci->i_wrbuffer_ref) {
1943 		spin_unlock(&ci->i_ceph_lock);
1944 		dout("__do_pending_vmtruncate %p flushing snaps first\n",
1945 		     inode);
1946 		filemap_write_and_wait_range(&inode->i_data, 0,
1947 					     inode->i_sb->s_maxbytes);
1948 		goto retry;
1949 	}
1950 
1951 	/* there should be no reader or writer */
1952 	WARN_ON_ONCE(ci->i_rd_ref || ci->i_wr_ref);
1953 
1954 	to = ci->i_truncate_size;
1955 	wrbuffer_refs = ci->i_wrbuffer_ref;
1956 	dout("__do_pending_vmtruncate %p (%d) to %lld\n", inode,
1957 	     ci->i_truncate_pending, to);
1958 	spin_unlock(&ci->i_ceph_lock);
1959 
1960 	ceph_fscache_resize(inode, to);
1961 	truncate_pagecache(inode, to);
1962 
1963 	spin_lock(&ci->i_ceph_lock);
1964 	if (to == ci->i_truncate_size) {
1965 		ci->i_truncate_pending = 0;
1966 		finish = 1;
1967 	}
1968 	spin_unlock(&ci->i_ceph_lock);
1969 	if (!finish)
1970 		goto retry;
1971 
1972 	mutex_unlock(&ci->i_truncate_mutex);
1973 
1974 	if (wrbuffer_refs == 0)
1975 		ceph_check_caps(ci, 0, NULL);
1976 
1977 	wake_up_all(&ci->i_cap_wq);
1978 }
1979 
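/*
 * Workqueue handler for deferred inode work: run every operation
 * whose bit is set in i_work_mask, then drop the inode reference
 * taken in ceph_queue_inode_work().
 */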
1980 static void ceph_inode_work(struct work_struct *work)
1981 {
1982 	struct ceph_inode_info *ci = container_of(work, struct ceph_inode_info,
1983 						 i_work);
1984 	struct inode *inode = &ci->vfs_inode;
1985 
1986 	if (test_and_clear_bit(CEPH_I_WORK_WRITEBACK, &ci->i_work_mask)) {
1987 		dout("writeback %p\n", inode);
1988 		filemap_fdatawrite(&inode->i_data);
1989 	}
1990 	if (test_and_clear_bit(CEPH_I_WORK_INVALIDATE_PAGES, &ci->i_work_mask))
1991 		ceph_do_invalidate_pages(inode);
1992 
1993 	if (test_and_clear_bit(CEPH_I_WORK_VMTRUNCATE, &ci->i_work_mask))
1994 		__ceph_do_pending_vmtruncate(inode);
1995 
1996 	if (test_and_clear_bit(CEPH_I_WORK_CHECK_CAPS, &ci->i_work_mask))
1997 		ceph_check_caps(ci, 0, NULL);
1998 
1999 	if (test_and_clear_bit(CEPH_I_WORK_FLUSH_SNAPS, &ci->i_work_mask))
2000 		ceph_flush_snaps(ci, NULL);
2001 
2002 	iput(inode);
2003 }
2004 
2005 /*
2006  * symlinks
2007  */
2008 static const struct inode_operations ceph_symlink_iops = {
2009 	.get_link = simple_get_link,
2010 	.setattr = ceph_setattr,
2011 	.getattr = ceph_getattr,
2012 	.listxattr = ceph_listxattr,
2013 };
2014 
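/*
 * Apply attribute changes. For each attribute, either apply the
 * change locally and mark the corresponding capability dirty (when
 * we hold the relevant EXCL cap), or encode it into a SETATTR
 * request for the MDS and release the now-stale SHARED caps.
 */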
2015 int __ceph_setattr(struct inode *inode, struct iattr *attr)
2016 {
2017 	struct ceph_inode_info *ci = ceph_inode(inode);
2018 	unsigned int ia_valid = attr->ia_valid;
2019 	struct ceph_mds_request *req;
2020 	struct ceph_mds_client *mdsc = ceph_sb_to_client(inode->i_sb)->mdsc;
2021 	struct ceph_cap_flush *prealloc_cf;
2022 	int issued;
2023 	int release = 0, dirtied = 0;
2024 	int mask = 0;
2025 	int err = 0;
2026 	int inode_dirty_flags = 0;
2027 	bool lock_snap_rwsem = false;
2028 
2029 	prealloc_cf = ceph_alloc_cap_flush();
2030 	if (!prealloc_cf)
2031 		return -ENOMEM;
2032 
2033 	req = ceph_mdsc_create_request(mdsc, CEPH_MDS_OP_SETATTR,
2034 				       USE_AUTH_MDS);
2035 	if (IS_ERR(req)) {
2036 		ceph_free_cap_flush(prealloc_cf);
2037 		return PTR_ERR(req);
2038 	}
2039 
2040 	spin_lock(&ci->i_ceph_lock);
2041 	issued = __ceph_caps_issued(ci, NULL);
2042 
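	/*
	 * If we may dirty caps (some EXCL or FILE_WR cap is issued) and
	 * no head snap context is attached yet, take snap_rwsem so the
	 * snap context cannot change underneath us; re-sample the issued
	 * caps after retaking i_ceph_lock.
	 */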
2043 	if (!ci->i_head_snapc &&
2044 	    (issued & (CEPH_CAP_ANY_EXCL | CEPH_CAP_FILE_WR))) {
2045 		lock_snap_rwsem = true;
2046 		if (!down_read_trylock(&mdsc->snap_rwsem)) {
2047 			spin_unlock(&ci->i_ceph_lock);
2048 			down_read(&mdsc->snap_rwsem);
2049 			spin_lock(&ci->i_ceph_lock);
2050 			issued = __ceph_caps_issued(ci, NULL);
2051 		}
2052 	}
2053 
2054 	dout("setattr %p issued %s\n", inode, ceph_cap_string(issued));
2055 
2056 	if (ia_valid & ATTR_UID) {
2057 		dout("setattr %p uid %d -> %d\n", inode,
2058 		     from_kuid(&init_user_ns, inode->i_uid),
2059 		     from_kuid(&init_user_ns, attr->ia_uid));
2060 		if (issued & CEPH_CAP_AUTH_EXCL) {
2061 			inode->i_uid = attr->ia_uid;
2062 			dirtied |= CEPH_CAP_AUTH_EXCL;
2063 		} else if ((issued & CEPH_CAP_AUTH_SHARED) == 0 ||
2064 			   !uid_eq(attr->ia_uid, inode->i_uid)) {
2065 			req->r_args.setattr.uid = cpu_to_le32(
2066 				from_kuid(&init_user_ns, attr->ia_uid));
2067 			mask |= CEPH_SETATTR_UID;
2068 			release |= CEPH_CAP_AUTH_SHARED;
2069 		}
2070 	}
2071 	if (ia_valid & ATTR_GID) {
2072 		dout("setattr %p gid %d -> %d\n", inode,
2073 		     from_kgid(&init_user_ns, inode->i_gid),
2074 		     from_kgid(&init_user_ns, attr->ia_gid));
2075 		if (issued & CEPH_CAP_AUTH_EXCL) {
2076 			inode->i_gid = attr->ia_gid;
2077 			dirtied |= CEPH_CAP_AUTH_EXCL;
2078 		} else if ((issued & CEPH_CAP_AUTH_SHARED) == 0 ||
2079 			   !gid_eq(attr->ia_gid, inode->i_gid)) {
2080 			req->r_args.setattr.gid = cpu_to_le32(
2081 				from_kgid(&init_user_ns, attr->ia_gid));
2082 			mask |= CEPH_SETATTR_GID;
2083 			release |= CEPH_CAP_AUTH_SHARED;
2084 		}
2085 	}
2086 	if (ia_valid & ATTR_MODE) {
2087 		dout("setattr %p mode 0%o -> 0%o\n", inode, inode->i_mode,
2088 		     attr->ia_mode);
2089 		if (issued & CEPH_CAP_AUTH_EXCL) {
2090 			inode->i_mode = attr->ia_mode;
2091 			dirtied |= CEPH_CAP_AUTH_EXCL;
2092 		} else if ((issued & CEPH_CAP_AUTH_SHARED) == 0 ||
2093 			   attr->ia_mode != inode->i_mode) {
2094 			inode->i_mode = attr->ia_mode;
2095 			req->r_args.setattr.mode = cpu_to_le32(attr->ia_mode);
2096 			mask |= CEPH_SETATTR_MODE;
2097 			release |= CEPH_CAP_AUTH_SHARED;
2098 		}
2099 	}
2100 
2101 	if (ia_valid & ATTR_ATIME) {
2102 		dout("setattr %p atime %lld.%ld -> %lld.%ld\n", inode,
2103 		     inode->i_atime.tv_sec, inode->i_atime.tv_nsec,
2104 		     attr->ia_atime.tv_sec, attr->ia_atime.tv_nsec);
2105 		if (issued & CEPH_CAP_FILE_EXCL) {
2106 			ci->i_time_warp_seq++;
2107 			inode->i_atime = attr->ia_atime;
2108 			dirtied |= CEPH_CAP_FILE_EXCL;
2109 		} else if ((issued & CEPH_CAP_FILE_WR) &&
2110 			   timespec64_compare(&inode->i_atime,
2111 					    &attr->ia_atime) < 0) {
2112 			inode->i_atime = attr->ia_atime;
2113 			dirtied |= CEPH_CAP_FILE_WR;
2114 		} else if ((issued & CEPH_CAP_FILE_SHARED) == 0 ||
2115 			   !timespec64_equal(&inode->i_atime, &attr->ia_atime)) {
2116 			ceph_encode_timespec64(&req->r_args.setattr.atime,
2117 					       &attr->ia_atime);
2118 			mask |= CEPH_SETATTR_ATIME;
2119 			release |= CEPH_CAP_FILE_SHARED |
2120 				   CEPH_CAP_FILE_RD | CEPH_CAP_FILE_WR;
2121 		}
2122 	}
2123 	if (ia_valid & ATTR_SIZE) {
2124 		loff_t isize = i_size_read(inode);
2125 
2126 		dout("setattr %p size %lld -> %lld\n", inode, isize, attr->ia_size);
2127 		if ((issued & CEPH_CAP_FILE_EXCL) && attr->ia_size >= isize) {
2128 			if (attr->ia_size > isize) {
2129 				i_size_write(inode, attr->ia_size);
2130 				inode->i_blocks = calc_inode_blocks(attr->ia_size);
2131 				ci->i_reported_size = attr->ia_size;
2132 				dirtied |= CEPH_CAP_FILE_EXCL;
2133 				ia_valid |= ATTR_MTIME;
2134 			}
2135 		} else if ((issued & CEPH_CAP_FILE_SHARED) == 0 ||
2136 			   attr->ia_size != isize) {
2137 			req->r_args.setattr.size = cpu_to_le64(attr->ia_size);
2138 			req->r_args.setattr.old_size = cpu_to_le64(isize);
2139 			mask |= CEPH_SETATTR_SIZE;
2140 			release |= CEPH_CAP_FILE_SHARED | CEPH_CAP_FILE_EXCL |
2141 				   CEPH_CAP_FILE_RD | CEPH_CAP_FILE_WR;
2142 		}
2143 	}
2144 	if (ia_valid & ATTR_MTIME) {
2145 		dout("setattr %p mtime %lld.%ld -> %lld.%ld\n", inode,
2146 		     inode->i_mtime.tv_sec, inode->i_mtime.tv_nsec,
2147 		     attr->ia_mtime.tv_sec, attr->ia_mtime.tv_nsec);
2148 		if (issued & CEPH_CAP_FILE_EXCL) {
2149 			ci->i_time_warp_seq++;
2150 			inode->i_mtime = attr->ia_mtime;
2151 			dirtied |= CEPH_CAP_FILE_EXCL;
2152 		} else if ((issued & CEPH_CAP_FILE_WR) &&
2153 			   timespec64_compare(&inode->i_mtime,
2154 					    &attr->ia_mtime) < 0) {
2155 			inode->i_mtime = attr->ia_mtime;
2156 			dirtied |= CEPH_CAP_FILE_WR;
2157 		} else if ((issued & CEPH_CAP_FILE_SHARED) == 0 ||
2158 			   !timespec64_equal(&inode->i_mtime, &attr->ia_mtime)) {
2159 			ceph_encode_timespec64(&req->r_args.setattr.mtime,
2160 					       &attr->ia_mtime);
2161 			mask |= CEPH_SETATTR_MTIME;
2162 			release |= CEPH_CAP_FILE_SHARED |
2163 				   CEPH_CAP_FILE_RD | CEPH_CAP_FILE_WR;
2164 		}
2165 	}
2166 
2167 	/* these do nothing */
2168 	if (ia_valid & ATTR_CTIME) {
2169 		bool only = (ia_valid & (ATTR_SIZE|ATTR_MTIME|ATTR_ATIME|
2170 					 ATTR_MODE|ATTR_UID|ATTR_GID)) == 0;
2171 		dout("setattr %p ctime %lld.%ld -> %lld.%ld (%s)\n", inode,
2172 		     inode->i_ctime.tv_sec, inode->i_ctime.tv_nsec,
2173 		     attr->ia_ctime.tv_sec, attr->ia_ctime.tv_nsec,
2174 		     only ? "ctime only" : "ignored");
2175 		if (only) {
2176 			/*
2177 			 * if the kernel wants to dirty ctime but nothing
2178 			 * else, we need to choose a cap to dirty under, or
2179 			 * do an almost-no-op setattr
2180 			 */
2181 			if (issued & CEPH_CAP_AUTH_EXCL)
2182 				dirtied |= CEPH_CAP_AUTH_EXCL;
2183 			else if (issued & CEPH_CAP_FILE_EXCL)
2184 				dirtied |= CEPH_CAP_FILE_EXCL;
2185 			else if (issued & CEPH_CAP_XATTR_EXCL)
2186 				dirtied |= CEPH_CAP_XATTR_EXCL;
2187 			else
2188 				mask |= CEPH_SETATTR_CTIME;
2189 		}
2190 	}
2191 	if (ia_valid & ATTR_FILE)
2192 		dout("setattr %p ATTR_FILE ... hrm!\n", inode);
2193 
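	/* apply locally-dirtied attrs: mark caps dirty and update ctime */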
2194 	if (dirtied) {
2195 		inode_dirty_flags = __ceph_mark_dirty_caps(ci, dirtied,
2196 							   &prealloc_cf);
2197 		inode->i_ctime = attr->ia_ctime;
2198 	}
2199 
2200 	release &= issued;
2201 	spin_unlock(&ci->i_ceph_lock);
2202 	if (lock_snap_rwsem)
2203 		up_read(&mdsc->snap_rwsem);
2204 
2205 	if (inode_dirty_flags)
2206 		__mark_inode_dirty(inode, inode_dirty_flags);
2207 
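	/* anything we couldn't change locally is sent to the MDS as a setattr */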
2208 	if (mask) {
2209 		req->r_inode = inode;
2210 		ihold(inode);
2211 		req->r_inode_drop = release;
2212 		req->r_args.setattr.mask = cpu_to_le32(mask);
2213 		req->r_num_caps = 1;
2214 		req->r_stamp = attr->ia_ctime;
2215 		err = ceph_mdsc_do_request(mdsc, NULL, req);
2216 	}
2217 	dout("setattr %p result=%d (%s locally, %d remote)\n", inode, err,
2218 	     ceph_cap_string(dirtied), mask);
2219 
2220 	ceph_mdsc_put_request(req);
2221 	ceph_free_cap_flush(prealloc_cf);
2222 
2223 	if (err >= 0 && (mask & CEPH_SETATTR_SIZE))
2224 		__ceph_do_pending_vmtruncate(inode);
2225 
2226 	return err;
2227 }
2228 
2229 /*
2230  * setattr
2231  */
2232 int ceph_setattr(struct user_namespace *mnt_userns, struct dentry *dentry,
2233 		 struct iattr *attr)
2234 {
2235 	struct inode *inode = d_inode(dentry);
2236 	struct ceph_fs_client *fsc = ceph_inode_to_client(inode);
2237 	int err;
2238 
2239 	if (ceph_snap(inode) != CEPH_NOSNAP)
2240 		return -EROFS;
2241 
2242 	if (ceph_inode_is_shutdown(inode))
2243 		return -ESTALE;
2244 
2245 	err = setattr_prepare(&init_user_ns, dentry, attr);
2246 	if (err != 0)
2247 		return err;
2248 
2249 	if ((attr->ia_valid & ATTR_SIZE) &&
2250 	    attr->ia_size > max(i_size_read(inode), fsc->max_file_size))
2251 		return -EFBIG;
2252 
2253 	if ((attr->ia_valid & ATTR_SIZE) &&
2254 	    ceph_quota_is_max_bytes_exceeded(inode, attr->ia_size))
2255 		return -EDQUOT;
2256 
2257 	err = __ceph_setattr(inode, attr);
2258 
2259 	if (err >= 0 && (attr->ia_valid & ATTR_MODE))
2260 		err = posix_acl_chmod(&init_user_ns, inode, attr->ia_mode);
2261 
2262 	return err;
2263 }
2264 
2265 int ceph_try_to_choose_auth_mds(struct inode *inode, int mask)
2266 {
2267 	int issued = ceph_caps_issued(ceph_inode(inode));
2268 
2269 	/*
2270 	 * If any 'x' caps are issued, we can simply choose the auth MDS
2271 	 * instead of a random replica MDS, because the loner client can
2272 	 * only be granted 'x' caps while the Locker is in the LOCK_EXEC
2273 	 * state. If we sent the getattr request to a replica MDS
2274 	 * instead, it would have to auth-pin and try to rdlock from the
2275 	 * auth MDS, forcing the auth MDS to transition the Locker state
2276 	 * to LOCK_SYNC; once the request completed, the lock state
2277 	 * would change back again.
2278 	 *
2279 	 * Those Locker state transitions are expensive, and usually
2280 	 * also require revoking caps from other clients.
2281 	 */
2282 	if (((mask & CEPH_CAP_ANY_SHARED) && (issued & CEPH_CAP_ANY_EXCL))
2283 	    || (mask & CEPH_STAT_RSTAT))
2284 		return USE_AUTH_MDS;
2285 	else
2286 		return USE_ANY_MDS;
2287 }
2288 
2289 /*
2290  * Verify that we have a lease on the given mask.  If not,
2291  * do a getattr against an mds.
2292  */
2293 int __ceph_do_getattr(struct inode *inode, struct page *locked_page,
2294 		      int mask, bool force)
2295 {
2296 	struct ceph_fs_client *fsc = ceph_sb_to_client(inode->i_sb);
2297 	struct ceph_mds_client *mdsc = fsc->mdsc;
2298 	struct ceph_mds_request *req;
2299 	int mode;
2300 	int err;
2301 
2302 	if (ceph_snap(inode) == CEPH_SNAPDIR) {
2303 		dout("do_getattr inode %p SNAPDIR\n", inode);
2304 		return 0;
2305 	}
2306 
2307 	dout("do_getattr inode %p mask %s mode 0%o\n",
2308 	     inode, ceph_cap_string(mask), inode->i_mode);
2309 	if (!force && ceph_caps_issued_mask_metric(ceph_inode(inode), mask, 1))
2310 		return 0;
2311 
2312 	mode = ceph_try_to_choose_auth_mds(inode, mask);
2313 	req = ceph_mdsc_create_request(mdsc, CEPH_MDS_OP_GETATTR, mode);
2314 	if (IS_ERR(req))
2315 		return PTR_ERR(req);
2316 	req->r_inode = inode;
2317 	ihold(inode);
2318 	req->r_num_caps = 1;
2319 	req->r_args.getattr.mask = cpu_to_le32(mask);
2320 	req->r_locked_page = locked_page;
2321 	err = ceph_mdsc_do_request(mdsc, NULL, req);
2322 	if (locked_page && err == 0) {
2323 		u64 inline_version = req->r_reply_info.targeti.inline_version;
2324 		if (inline_version == 0) {
2325 			/* the reply is supposed to contain inline data */
2326 			err = -EINVAL;
2327 		} else if (inline_version == CEPH_INLINE_NONE) {
2328 			err = -ENODATA;
2329 		} else {
2330 			err = req->r_reply_info.targeti.inline_len;
2331 		}
2332 	}
2333 	ceph_mdsc_put_request(req);
2334 	dout("do_getattr result=%d\n", err);
2335 	return err;
2336 }
2337 
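/*
 * Fetch the value of a ceph virtual xattr directly from the MDS.
 * Returns the length of the value on success. With a zero-sized
 * buffer only the length is returned; -ERANGE means the value does
 * not fit in the buffer supplied.
 */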
2338 int ceph_do_getvxattr(struct inode *inode, const char *name, void *value,
2339 		      size_t size)
2340 {
2341 	struct ceph_fs_client *fsc = ceph_sb_to_client(inode->i_sb);
2342 	struct ceph_mds_client *mdsc = fsc->mdsc;
2343 	struct ceph_mds_request *req;
2344 	int mode = USE_AUTH_MDS;
2345 	int err;
2346 	char *xattr_value;
2347 	size_t xattr_value_len;
2348 
2349 	req = ceph_mdsc_create_request(mdsc, CEPH_MDS_OP_GETVXATTR, mode);
2350 	if (IS_ERR(req)) {
2351 		err = PTR_ERR(req);
2352 		goto out;
2353 	}
2354 
2355 	req->r_path2 = kstrdup(name, GFP_NOFS);
2356 	if (!req->r_path2) {
2357 		err = -ENOMEM;
2358 		goto put;
2359 	}
2360 
2361 	ihold(inode);
2362 	req->r_inode = inode;
2363 	err = ceph_mdsc_do_request(mdsc, NULL, req);
2364 	if (err < 0)
2365 		goto put;
2366 
2367 	xattr_value = req->r_reply_info.xattr_info.xattr_value;
2368 	xattr_value_len = req->r_reply_info.xattr_info.xattr_value_len;
2369 
2370 	dout("do_getvxattr xattr_value_len:%zu, size:%zu\n", xattr_value_len, size);
2371 
2372 	err = (int)xattr_value_len;
2373 	if (size == 0)
2374 		goto put;
2375 
2376 	if (xattr_value_len > size) {
2377 		err = -ERANGE;
2378 		goto put;
2379 	}
2380 
2381 	memcpy(value, xattr_value, xattr_value_len);
2382 put:
2383 	ceph_mdsc_put_request(req);
2384 out:
2385 	dout("do_getvxattr result=%d\n", err);
2386 	return err;
2387 }
2388 
2389 
2390 /*
2391  * Check inode permissions.  We verify we have a valid value for
2392  * the AUTH cap, then call the generic handler.
2393  */
2394 int ceph_permission(struct user_namespace *mnt_userns, struct inode *inode,
2395 		    int mask)
2396 {
2397 	int err;
2398 
2399 	if (mask & MAY_NOT_BLOCK)
2400 		return -ECHILD;
2401 
2402 	err = ceph_do_getattr(inode, CEPH_CAP_AUTH_SHARED, false);
2403 
2404 	if (!err)
2405 		err = generic_permission(&init_user_ns, inode, mask);
2406 	return err;
2407 }
2408 
2409 /* Craft a mask of needed caps given a set of requested statx attrs. */
2410 static int statx_to_caps(u32 want, umode_t mode)
2411 {
2412 	int mask = 0;
2413 
2414 	if (want & (STATX_MODE|STATX_UID|STATX_GID|STATX_CTIME|STATX_BTIME))
2415 		mask |= CEPH_CAP_AUTH_SHARED;
2416 
2417 	if (want & (STATX_NLINK|STATX_CTIME)) {
2418 		/*
2419 		 * The link count for directories depends on inode->i_subdirs,
2420 		 * and that is only updated when Fs caps are held.
2421 		 */
2422 		if (S_ISDIR(mode))
2423 			mask |= CEPH_CAP_FILE_SHARED;
2424 		else
2425 			mask |= CEPH_CAP_LINK_SHARED;
2426 	}
2427 
2428 	if (want & (STATX_ATIME|STATX_MTIME|STATX_CTIME|STATX_SIZE|
2429 		    STATX_BLOCKS))
2430 		mask |= CEPH_CAP_FILE_SHARED;
2431 
2432 	if (want & (STATX_CTIME))
2433 		mask |= CEPH_CAP_XATTR_SHARED;
2434 
2435 	return mask;
2436 }
2437 
2438 /*
2439  * Get all the attributes. If we have sufficient caps for the requested attrs,
2440  * then we can avoid talking to the MDS at all.
2441  */
2442 int ceph_getattr(struct user_namespace *mnt_userns, const struct path *path,
2443 		 struct kstat *stat, u32 request_mask, unsigned int flags)
2444 {
2445 	struct inode *inode = d_inode(path->dentry);
2446 	struct ceph_inode_info *ci = ceph_inode(inode);
2447 	u32 valid_mask = STATX_BASIC_STATS;
2448 	int err = 0;
2449 
2450 	if (ceph_inode_is_shutdown(inode))
2451 		return -ESTALE;
2452 
2453 	/* Skip the getattr altogether if we're asked not to sync */
2454 	if ((flags & AT_STATX_SYNC_TYPE) != AT_STATX_DONT_SYNC) {
2455 		err = ceph_do_getattr(inode,
2456 				statx_to_caps(request_mask, inode->i_mode),
2457 				flags & AT_STATX_FORCE_SYNC);
2458 		if (err)
2459 			return err;
2460 	}
2461 
2462 	generic_fillattr(&init_user_ns, inode, stat);
2463 	stat->ino = ceph_present_inode(inode);
2464 
2465 	/*
2466 	 * btime on newly-allocated inodes is 0, so if this is still set to
2467 	 * that, then assume that it's not valid.
2468 	 */
2469 	if (ci->i_btime.tv_sec || ci->i_btime.tv_nsec) {
2470 		stat->btime = ci->i_btime;
2471 		valid_mask |= STATX_BTIME;
2472 	}
2473 
2474 	if (ceph_snap(inode) == CEPH_NOSNAP)
2475 		stat->dev = inode->i_sb->s_dev;
2476 	else
2477 		stat->dev = ci->i_snapid_map ? ci->i_snapid_map->dev : 0;
2478 
2479 	if (S_ISDIR(inode->i_mode)) {
2480 		if (ceph_test_mount_opt(ceph_sb_to_client(inode->i_sb),
2481 					RBYTES))
2482 			stat->size = ci->i_rbytes;
2483 		else
2484 			stat->size = ci->i_files + ci->i_subdirs;
2485 		stat->blocks = 0;
2486 		stat->blksize = 65536;
2487 		/*
2488 		 * Some applications rely on the st_nlink value of
2489 		 * directories being either 0 (if unlinked) or
2490 		 * 2 + number of subdirectories.
2491 		 */
2492 		if (stat->nlink == 1)
2493 			/* '.' + '..' + subdirs */
2494 			stat->nlink = 1 + 1 + ci->i_subdirs;
2495 	}
2496 
2497 	stat->result_mask = request_mask & valid_mask;
2498 	return err;
2499 }
2500 
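/*
 * Mark an inode as shut down and purge all of its caps. Purging may
 * owe inode references and a pagecache invalidation; both debts are
 * settled once i_ceph_lock has been dropped.
 */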
2501 void ceph_inode_shutdown(struct inode *inode)
2502 {
2503 	struct ceph_inode_info *ci = ceph_inode(inode);
2504 	struct rb_node *p;
2505 	int iputs = 0;
2506 	bool invalidate = false;
2507 
2508 	spin_lock(&ci->i_ceph_lock);
2509 	ci->i_ceph_flags |= CEPH_I_SHUTDOWN;
2510 	p = rb_first(&ci->i_caps);
2511 	while (p) {
2512 		struct ceph_cap *cap = rb_entry(p, struct ceph_cap, ci_node);
2513 
2514 		p = rb_next(p);
2515 		iputs += ceph_purge_inode_cap(inode, cap, &invalidate);
2516 	}
2517 	spin_unlock(&ci->i_ceph_lock);
2518 
2519 	if (invalidate)
2520 		ceph_queue_invalidate(inode);
2521 	while (iputs--)
2522 		iput(inode);
2523 }
2524