xref: /openbmc/linux/fs/ceph/inode.c (revision ec9595c080c6f0ba3ebcfc3013eac8f38b868b78)
1 // SPDX-License-Identifier: GPL-2.0
2 #include <linux/ceph/ceph_debug.h>
3 
4 #include <linux/module.h>
5 #include <linux/fs.h>
6 #include <linux/slab.h>
7 #include <linux/string.h>
8 #include <linux/uaccess.h>
9 #include <linux/kernel.h>
10 #include <linux/writeback.h>
11 #include <linux/vmalloc.h>
12 #include <linux/xattr.h>
13 #include <linux/posix_acl.h>
14 #include <linux/random.h>
15 #include <linux/sort.h>
16 #include <linux/iversion.h>
17 
18 #include "super.h"
19 #include "mds_client.h"
20 #include "cache.h"
21 #include <linux/ceph/decode.h>
22 
23 /*
24  * Ceph inode operations
25  *
26  * Implement basic inode helpers (get, alloc) and inode ops (getattr,
27  * setattr, etc.), xattr helpers, and helpers for assimilating
28  * metadata returned by the MDS into our cache.
29  *
30  * Also define helpers for doing asynchronous writeback, invalidation,
31  * and truncation for the benefit of those who can't afford to block
32  * (typically because they are in the message handler path).
33  */
34 
35 static const struct inode_operations ceph_symlink_iops;
36 
37 static void ceph_inode_work(struct work_struct *work);
38 
39 /*
40  * find or create an inode, given the ceph ino number
41  */
42 static int ceph_set_ino_cb(struct inode *inode, void *data)
43 {
44 	struct ceph_inode_info *ci = ceph_inode(inode);
45 	struct ceph_mds_client *mdsc = ceph_sb_to_mdsc(inode->i_sb);
46 
47 	ci->i_vino = *(struct ceph_vino *)data;
48 	inode->i_ino = ceph_vino_to_ino_t(ci->i_vino);
49 	inode_set_iversion_raw(inode, 0);
50 	percpu_counter_inc(&mdsc->metric.total_inodes);
51 
52 	return 0;
53 }
54 
55 /**
56  * ceph_new_inode - allocate a new inode in advance of an expected create
57  * @dir: parent directory for new inode
58  * @dentry: dentry that may eventually point to new inode
59  * @mode: mode of new inode
60  * @as_ctx: pointer to inherited security context
61  *
62  * Allocate a new inode in advance of an operation to create a new inode.
63  * This allocates the inode and sets up the acl_sec_ctx with appropriate
64  * info for the new inode.
65  *
66  * Returns a pointer to the new inode or an ERR_PTR.
67  */
68 struct inode *ceph_new_inode(struct inode *dir, struct dentry *dentry,
69 			     umode_t *mode, struct ceph_acl_sec_ctx *as_ctx)
70 {
71 	int err;
72 	struct inode *inode;
73 
74 	inode = new_inode(dir->i_sb);
75 	if (!inode)
76 		return ERR_PTR(-ENOMEM);
77 
78 	if (!S_ISLNK(*mode)) {
79 		err = ceph_pre_init_acls(dir, mode, as_ctx);
80 		if (err < 0)
81 			goto out_err;
82 	}
83 
84 	err = ceph_security_init_secctx(dentry, *mode, as_ctx);
85 	if (err < 0)
86 		goto out_err;
87 
88 	inode->i_state = 0;
89 	inode->i_mode = *mode;
90 	return inode;
91 out_err:
92 	iput(inode);
93 	return ERR_PTR(err);
94 }
95 
96 void ceph_as_ctx_to_req(struct ceph_mds_request *req,
97 			struct ceph_acl_sec_ctx *as_ctx)
98 {
99 	if (as_ctx->pagelist) {
100 		req->r_pagelist = as_ctx->pagelist;
101 		as_ctx->pagelist = NULL;
102 	}
103 }
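
/*
 * A minimal sketch (not a real call site; "example_precreate" and its
 * error handling are hypothetical) of how ceph_new_inode() and
 * ceph_as_ctx_to_req() are meant to be used together on a create path:
 * allocate the inode and security context up front, then hand the
 * prepared xattr pagelist to the MDS request.
 */
#if 0	/* illustrative only, not compiled */
static int example_precreate(struct inode *dir, struct dentry *dentry,
			     umode_t mode, struct ceph_mds_request *req)
{
	struct ceph_acl_sec_ctx as_ctx = {};
	struct inode *inode;
	int err = 0;

	inode = ceph_new_inode(dir, dentry, &mode, &as_ctx);
	if (IS_ERR(inode))
		return PTR_ERR(inode);

	/* transfer the prepared security/ACL pagelist to the request */
	ceph_as_ctx_to_req(req, &as_ctx);

	/* ... fill in the rest of req and submit it ... */

	if (err)
		iput(inode);	/* drop the preallocated inode on failure */
	ceph_release_acl_sec_ctx(&as_ctx);
	return err;
}
#endif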
104 
105 /**
106  * ceph_get_inode - find or create/hash a new inode
107  * @sb: superblock to search and allocate in
108  * @vino: vino to search for
109  * @newino: optional new inode to insert if one isn't found (may be NULL)
110  *
111  * Search for or insert a new inode into the hash for the given vino, and
112  * return a reference to it. If @newino is non-NULL, its reference is consumed.
113  */
114 struct inode *ceph_get_inode(struct super_block *sb, struct ceph_vino vino,
115 			     struct inode *newino)
116 {
117 	struct inode *inode;
118 
119 	if (ceph_vino_is_reserved(vino))
120 		return ERR_PTR(-EREMOTEIO);
121 
122 	if (newino) {
123 		inode = inode_insert5(newino, (unsigned long)vino.ino,
124 				      ceph_ino_compare, ceph_set_ino_cb, &vino);
125 		if (inode != newino)
126 			iput(newino);
127 	} else {
128 		inode = iget5_locked(sb, (unsigned long)vino.ino,
129 				     ceph_ino_compare, ceph_set_ino_cb, &vino);
130 	}
131 
132 	if (!inode) {
133 		dout("No inode found for %llx.%llx\n", vino.ino, vino.snap);
134 		return ERR_PTR(-ENOMEM);
135 	}
136 
137 	dout("get_inode on %llu=%llx.%llx got %p new %d\n", ceph_present_inode(inode),
138 	     ceph_vinop(inode), inode, !!(inode->i_state & I_NEW));
139 	return inode;
140 }
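
/*
 * A minimal usage sketch (the caller is hypothetical, but it mirrors the
 * pattern used by the readdir/trace handlers later in this file): look an
 * inode up by vino and, if it came back I_NEW, either populate and unlock
 * it or discard it again.
 */
#if 0	/* illustrative only, not compiled */
static struct inode *example_lookup(struct super_block *sb,
				    struct ceph_vino vino)
{
	struct inode *inode = ceph_get_inode(sb, vino, NULL);

	if (IS_ERR(inode))
		return inode;

	if (inode->i_state & I_NEW) {
		/* ... fill the inode, e.g. via ceph_fill_inode() ... */
		unlock_new_inode(inode);
		/* on a fill error, call discard_new_inode() instead */
	}
	return inode;
}
#endif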
141 
142 /*
143  * get/construct snapdir inode for a given directory
144  */
145 struct inode *ceph_get_snapdir(struct inode *parent)
146 {
147 	struct ceph_vino vino = {
148 		.ino = ceph_ino(parent),
149 		.snap = CEPH_SNAPDIR,
150 	};
151 	struct inode *inode = ceph_get_inode(parent->i_sb, vino, NULL);
152 	struct ceph_inode_info *ci = ceph_inode(inode);
153 
154 	if (IS_ERR(inode))
155 		return inode;
156 
157 	if (!S_ISDIR(parent->i_mode)) {
158 		pr_warn_once("bad snapdir parent type (mode=0%o)\n",
159 			     parent->i_mode);
160 		goto err;
161 	}
162 
163 	if (!(inode->i_state & I_NEW) && !S_ISDIR(inode->i_mode)) {
164 		pr_warn_once("bad snapdir inode type (mode=0%o)\n",
165 			     inode->i_mode);
166 		goto err;
167 	}
168 
169 	inode->i_mode = parent->i_mode;
170 	inode->i_uid = parent->i_uid;
171 	inode->i_gid = parent->i_gid;
172 	inode->i_mtime = parent->i_mtime;
173 	inode->i_ctime = parent->i_ctime;
174 	inode->i_atime = parent->i_atime;
175 	ci->i_rbytes = 0;
176 	ci->i_btime = ceph_inode(parent)->i_btime;
177 
178 	if (inode->i_state & I_NEW) {
179 		inode->i_op = &ceph_snapdir_iops;
180 		inode->i_fop = &ceph_snapdir_fops;
181 		ci->i_snap_caps = CEPH_CAP_PIN; /* so we can open */
182 		unlock_new_inode(inode);
183 	}
184 
185 	return inode;
186 err:
187 	if ((inode->i_state & I_NEW))
188 		discard_new_inode(inode);
189 	else
190 		iput(inode);
191 	return ERR_PTR(-ENOTDIR);
192 }
193 
194 const struct inode_operations ceph_file_iops = {
195 	.permission = ceph_permission,
196 	.setattr = ceph_setattr,
197 	.getattr = ceph_getattr,
198 	.listxattr = ceph_listxattr,
199 	.get_inode_acl = ceph_get_acl,
200 	.set_acl = ceph_set_acl,
201 };
202 
203 
204 /*
205  * We use a 'frag tree' to keep track of the MDS's directory fragments
206  * for a given inode (usually there is just a single fragment).  We
207  * need to know when a child frag is delegated to a new MDS, or when
208  * it is flagged as replicated, so we can direct our requests
209  * accordingly.
210  */
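
/*
 * A short worked example of the frag encoding (the values are chosen for
 * illustration only): a frag is a (bits, value) pair over the 24-bit hash
 * space, so ceph_frag_make(0, 0) covers the whole range, and splitting it
 * by 2 yields four children that partition that range.
 */
#if 0	/* illustrative only, not compiled */
static void example_frag_split(void)
{
	u32 root = ceph_frag_make(0, 0);
	u32 v = ceph_frag_value(0xcafef00d);	/* arbitrary hash value */
	int i, hits = 0;

	for (i = 0; i < 4; i++) {
		u32 child = ceph_frag_make_child(root, 2, i);

		if (ceph_frag_contains_value(child, v))
			hits++;
	}
	WARN_ON(hits != 1);	/* exactly one child contains any value */
}
#endif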
211 
212 /*
213  * find/create a frag in the tree
214  */
215 static struct ceph_inode_frag *__get_or_create_frag(struct ceph_inode_info *ci,
216 						    u32 f)
217 {
218 	struct rb_node **p;
219 	struct rb_node *parent = NULL;
220 	struct ceph_inode_frag *frag;
221 	int c;
222 
223 	p = &ci->i_fragtree.rb_node;
224 	while (*p) {
225 		parent = *p;
226 		frag = rb_entry(parent, struct ceph_inode_frag, node);
227 		c = ceph_frag_compare(f, frag->frag);
228 		if (c < 0)
229 			p = &(*p)->rb_left;
230 		else if (c > 0)
231 			p = &(*p)->rb_right;
232 		else
233 			return frag;
234 	}
235 
236 	frag = kmalloc(sizeof(*frag), GFP_NOFS);
237 	if (!frag)
238 		return ERR_PTR(-ENOMEM);
239 
240 	frag->frag = f;
241 	frag->split_by = 0;
242 	frag->mds = -1;
243 	frag->ndist = 0;
244 
245 	rb_link_node(&frag->node, parent, p);
246 	rb_insert_color(&frag->node, &ci->i_fragtree);
247 
248 	dout("get_or_create_frag added %llx.%llx frag %x\n",
249 	     ceph_vinop(&ci->netfs.inode), f);
250 	return frag;
251 }
252 
253 /*
254  * find a specific frag @f
255  */
256 struct ceph_inode_frag *__ceph_find_frag(struct ceph_inode_info *ci, u32 f)
257 {
258 	struct rb_node *n = ci->i_fragtree.rb_node;
259 
260 	while (n) {
261 		struct ceph_inode_frag *frag =
262 			rb_entry(n, struct ceph_inode_frag, node);
263 		int c = ceph_frag_compare(f, frag->frag);
264 		if (c < 0)
265 			n = n->rb_left;
266 		else if (c > 0)
267 			n = n->rb_right;
268 		else
269 			return frag;
270 	}
271 	return NULL;
272 }
273 
274 /*
275  * Choose frag containing the given value @v.  If @pfrag is
276  * specified, copy the frag delegation info to the caller if
277  * it is present.
278  */
279 static u32 __ceph_choose_frag(struct ceph_inode_info *ci, u32 v,
280 			      struct ceph_inode_frag *pfrag, int *found)
281 {
282 	u32 t = ceph_frag_make(0, 0);
283 	struct ceph_inode_frag *frag;
284 	unsigned nway, i;
285 	u32 n;
286 
287 	if (found)
288 		*found = 0;
289 
290 	while (1) {
291 		WARN_ON(!ceph_frag_contains_value(t, v));
292 		frag = __ceph_find_frag(ci, t);
293 		if (!frag)
294 			break; /* t is a leaf */
295 		if (frag->split_by == 0) {
296 			if (pfrag)
297 				memcpy(pfrag, frag, sizeof(*pfrag));
298 			if (found)
299 				*found = 1;
300 			break;
301 		}
302 
303 		/* choose child */
304 		nway = 1 << frag->split_by;
305 		dout("choose_frag(%x) %x splits by %d (%d ways)\n", v, t,
306 		     frag->split_by, nway);
307 		for (i = 0; i < nway; i++) {
308 			n = ceph_frag_make_child(t, frag->split_by, i);
309 			if (ceph_frag_contains_value(n, v)) {
310 				t = n;
311 				break;
312 			}
313 		}
314 		BUG_ON(i == nway);
315 	}
316 	dout("choose_frag(%x) = %x\n", v, t);
317 
318 	return t;
319 }
320 
321 u32 ceph_choose_frag(struct ceph_inode_info *ci, u32 v,
322 		     struct ceph_inode_frag *pfrag, int *found)
323 {
324 	u32 ret;
325 	mutex_lock(&ci->i_fragtree_mutex);
326 	ret = __ceph_choose_frag(ci, v, pfrag, found);
327 	mutex_unlock(&ci->i_fragtree_mutex);
328 	return ret;
329 }
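
/*
 * A minimal sketch of how a caller might map a dentry name to the frag
 * (and thus the MDS) that owns it; the hashing follows the usage in
 * ceph_readdir_prepopulate() below, and "example_frag_for_name" is a
 * hypothetical helper, not an existing one.
 */
#if 0	/* illustrative only, not compiled */
static int example_frag_for_name(struct ceph_inode_info *ci,
				 const char *name, int len)
{
	struct ceph_inode_frag pfrag;
	int found = 0;
	u32 h = ceph_str_hash(ci->i_dir_layout.dl_dir_hash, name, len);

	ceph_choose_frag(ci, ceph_frag_value(h), &pfrag, &found);
	return found ? pfrag.mds : -1;	/* -1: no delegation info cached */
}
#endif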
330 
331 /*
332  * Process dirfrag (delegation) info from the mds.  Include leaf
333  * fragment in tree ONLY if ndist > 0.  Otherwise, only
334  * branches/splits are included in i_fragtree.
335  */
336 static int ceph_fill_dirfrag(struct inode *inode,
337 			     struct ceph_mds_reply_dirfrag *dirinfo)
338 {
339 	struct ceph_inode_info *ci = ceph_inode(inode);
340 	struct ceph_inode_frag *frag;
341 	u32 id = le32_to_cpu(dirinfo->frag);
342 	int mds = le32_to_cpu(dirinfo->auth);
343 	int ndist = le32_to_cpu(dirinfo->ndist);
344 	int diri_auth = -1;
345 	int i;
346 	int err = 0;
347 
348 	spin_lock(&ci->i_ceph_lock);
349 	if (ci->i_auth_cap)
350 		diri_auth = ci->i_auth_cap->mds;
351 	spin_unlock(&ci->i_ceph_lock);
352 
353 	if (mds == -1) /* CDIR_AUTH_PARENT */
354 		mds = diri_auth;
355 
356 	mutex_lock(&ci->i_fragtree_mutex);
357 	if (ndist == 0 && mds == diri_auth) {
358 		/* no delegation info needed. */
359 		frag = __ceph_find_frag(ci, id);
360 		if (!frag)
361 			goto out;
362 		if (frag->split_by == 0) {
363 			/* tree leaf, remove */
364 			dout("fill_dirfrag removed %llx.%llx frag %x"
365 			     " (no ref)\n", ceph_vinop(inode), id);
366 			rb_erase(&frag->node, &ci->i_fragtree);
367 			kfree(frag);
368 		} else {
369 			/* tree branch, keep and clear */
370 			dout("fill_dirfrag cleared %llx.%llx frag %x"
371 			     " referral\n", ceph_vinop(inode), id);
372 			frag->mds = -1;
373 			frag->ndist = 0;
374 		}
375 		goto out;
376 	}
377 
378 
379 	/* find/add this frag to store mds delegation info */
380 	frag = __get_or_create_frag(ci, id);
381 	if (IS_ERR(frag)) {
382 		/* this is not the end of the world; we can continue
383 		   with bad/inaccurate delegation info */
384 		pr_err("fill_dirfrag ENOMEM on mds ref %llx.%llx fg %x\n",
385 		       ceph_vinop(inode), le32_to_cpu(dirinfo->frag));
386 		err = -ENOMEM;
387 		goto out;
388 	}
389 
390 	frag->mds = mds;
391 	frag->ndist = min_t(u32, ndist, CEPH_MAX_DIRFRAG_REP);
392 	for (i = 0; i < frag->ndist; i++)
393 		frag->dist[i] = le32_to_cpu(dirinfo->dist[i]);
394 	dout("fill_dirfrag %llx.%llx frag %x ndist=%d\n",
395 	     ceph_vinop(inode), frag->frag, frag->ndist);
396 
397 out:
398 	mutex_unlock(&ci->i_fragtree_mutex);
399 	return err;
400 }
401 
402 static int frag_tree_split_cmp(const void *l, const void *r)
403 {
404 	struct ceph_frag_tree_split *ls = (struct ceph_frag_tree_split*)l;
405 	struct ceph_frag_tree_split *rs = (struct ceph_frag_tree_split*)r;
406 	return ceph_frag_compare(le32_to_cpu(ls->frag),
407 				 le32_to_cpu(rs->frag));
408 }
409 
410 static bool is_frag_child(u32 f, struct ceph_inode_frag *frag)
411 {
412 	if (!frag)
413 		return f == ceph_frag_make(0, 0);
414 	if (ceph_frag_bits(f) != ceph_frag_bits(frag->frag) + frag->split_by)
415 		return false;
416 	return ceph_frag_contains_value(frag->frag, ceph_frag_value(f));
417 }
418 
419 static int ceph_fill_fragtree(struct inode *inode,
420 			      struct ceph_frag_tree_head *fragtree,
421 			      struct ceph_mds_reply_dirfrag *dirinfo)
422 {
423 	struct ceph_inode_info *ci = ceph_inode(inode);
424 	struct ceph_inode_frag *frag, *prev_frag = NULL;
425 	struct rb_node *rb_node;
426 	unsigned i, split_by, nsplits;
427 	u32 id;
428 	bool update = false;
429 
430 	mutex_lock(&ci->i_fragtree_mutex);
431 	nsplits = le32_to_cpu(fragtree->nsplits);
432 	if (nsplits != ci->i_fragtree_nsplits) {
433 		update = true;
434 	} else if (nsplits) {
435 		i = get_random_u32_below(nsplits);
436 		id = le32_to_cpu(fragtree->splits[i].frag);
437 		if (!__ceph_find_frag(ci, id))
438 			update = true;
439 	} else if (!RB_EMPTY_ROOT(&ci->i_fragtree)) {
440 		rb_node = rb_first(&ci->i_fragtree);
441 		frag = rb_entry(rb_node, struct ceph_inode_frag, node);
442 		if (frag->frag != ceph_frag_make(0, 0) || rb_next(rb_node))
443 			update = true;
444 	}
445 	if (!update && dirinfo) {
446 		id = le32_to_cpu(dirinfo->frag);
447 		if (id != __ceph_choose_frag(ci, id, NULL, NULL))
448 			update = true;
449 	}
450 	if (!update)
451 		goto out_unlock;
452 
453 	if (nsplits > 1) {
454 		sort(fragtree->splits, nsplits, sizeof(fragtree->splits[0]),
455 		     frag_tree_split_cmp, NULL);
456 	}
457 
458 	dout("fill_fragtree %llx.%llx\n", ceph_vinop(inode));
459 	rb_node = rb_first(&ci->i_fragtree);
460 	for (i = 0; i < nsplits; i++) {
461 		id = le32_to_cpu(fragtree->splits[i].frag);
462 		split_by = le32_to_cpu(fragtree->splits[i].by);
463 		if (split_by == 0 || ceph_frag_bits(id) + split_by > 24) {
464 			pr_err("fill_fragtree %llx.%llx invalid split %d/%u, "
465 			       "frag %x split by %d\n", ceph_vinop(inode),
466 			       i, nsplits, id, split_by);
467 			continue;
468 		}
469 		frag = NULL;
470 		while (rb_node) {
471 			frag = rb_entry(rb_node, struct ceph_inode_frag, node);
472 			if (ceph_frag_compare(frag->frag, id) >= 0) {
473 				if (frag->frag != id)
474 					frag = NULL;
475 				else
476 					rb_node = rb_next(rb_node);
477 				break;
478 			}
479 			rb_node = rb_next(rb_node);
480 			/* delete stale split/leaf node */
481 			if (frag->split_by > 0 ||
482 			    !is_frag_child(frag->frag, prev_frag)) {
483 				rb_erase(&frag->node, &ci->i_fragtree);
484 				if (frag->split_by > 0)
485 					ci->i_fragtree_nsplits--;
486 				kfree(frag);
487 			}
488 			frag = NULL;
489 		}
490 		if (!frag) {
491 			frag = __get_or_create_frag(ci, id);
492 			if (IS_ERR(frag))
493 				continue;
494 		}
495 		if (frag->split_by == 0)
496 			ci->i_fragtree_nsplits++;
497 		frag->split_by = split_by;
498 		dout(" frag %x split by %d\n", frag->frag, frag->split_by);
499 		prev_frag = frag;
500 	}
501 	while (rb_node) {
502 		frag = rb_entry(rb_node, struct ceph_inode_frag, node);
503 		rb_node = rb_next(rb_node);
504 		/* delete stale split/leaf node */
505 		if (frag->split_by > 0 ||
506 		    !is_frag_child(frag->frag, prev_frag)) {
507 			rb_erase(&frag->node, &ci->i_fragtree);
508 			if (frag->split_by > 0)
509 				ci->i_fragtree_nsplits--;
510 			kfree(frag);
511 		}
512 	}
513 out_unlock:
514 	mutex_unlock(&ci->i_fragtree_mutex);
515 	return 0;
516 }
517 
518 /*
519  * initialize a newly allocated inode.
520  */
521 struct inode *ceph_alloc_inode(struct super_block *sb)
522 {
523 	struct ceph_inode_info *ci;
524 	int i;
525 
526 	ci = alloc_inode_sb(sb, ceph_inode_cachep, GFP_NOFS);
527 	if (!ci)
528 		return NULL;
529 
530 	dout("alloc_inode %p\n", &ci->netfs.inode);
531 
532 	/* Set parameters for the netfs library */
533 	netfs_inode_init(&ci->netfs, &ceph_netfs_ops);
534 
535 	spin_lock_init(&ci->i_ceph_lock);
536 
537 	ci->i_version = 0;
538 	ci->i_inline_version = 0;
539 	ci->i_time_warp_seq = 0;
540 	ci->i_ceph_flags = 0;
541 	atomic64_set(&ci->i_ordered_count, 1);
542 	atomic64_set(&ci->i_release_count, 1);
543 	atomic64_set(&ci->i_complete_seq[0], 0);
544 	atomic64_set(&ci->i_complete_seq[1], 0);
545 	ci->i_symlink = NULL;
546 
547 	ci->i_max_bytes = 0;
548 	ci->i_max_files = 0;
549 
550 	memset(&ci->i_dir_layout, 0, sizeof(ci->i_dir_layout));
551 	memset(&ci->i_cached_layout, 0, sizeof(ci->i_cached_layout));
552 	RCU_INIT_POINTER(ci->i_layout.pool_ns, NULL);
553 
554 	ci->i_fragtree = RB_ROOT;
555 	mutex_init(&ci->i_fragtree_mutex);
556 
557 	ci->i_xattrs.blob = NULL;
558 	ci->i_xattrs.prealloc_blob = NULL;
559 	ci->i_xattrs.dirty = false;
560 	ci->i_xattrs.index = RB_ROOT;
561 	ci->i_xattrs.count = 0;
562 	ci->i_xattrs.names_size = 0;
563 	ci->i_xattrs.vals_size = 0;
564 	ci->i_xattrs.version = 0;
565 	ci->i_xattrs.index_version = 0;
566 
567 	ci->i_caps = RB_ROOT;
568 	ci->i_auth_cap = NULL;
569 	ci->i_dirty_caps = 0;
570 	ci->i_flushing_caps = 0;
571 	INIT_LIST_HEAD(&ci->i_dirty_item);
572 	INIT_LIST_HEAD(&ci->i_flushing_item);
573 	ci->i_prealloc_cap_flush = NULL;
574 	INIT_LIST_HEAD(&ci->i_cap_flush_list);
575 	init_waitqueue_head(&ci->i_cap_wq);
576 	ci->i_hold_caps_max = 0;
577 	INIT_LIST_HEAD(&ci->i_cap_delay_list);
578 	INIT_LIST_HEAD(&ci->i_cap_snaps);
579 	ci->i_head_snapc = NULL;
580 	ci->i_snap_caps = 0;
581 
582 	ci->i_last_rd = ci->i_last_wr = jiffies - 3600 * HZ;
583 	for (i = 0; i < CEPH_FILE_MODE_BITS; i++)
584 		ci->i_nr_by_mode[i] = 0;
585 
586 	mutex_init(&ci->i_truncate_mutex);
587 	ci->i_truncate_seq = 0;
588 	ci->i_truncate_size = 0;
589 	ci->i_truncate_pending = 0;
590 
591 	ci->i_max_size = 0;
592 	ci->i_reported_size = 0;
593 	ci->i_wanted_max_size = 0;
594 	ci->i_requested_max_size = 0;
595 
596 	ci->i_pin_ref = 0;
597 	ci->i_rd_ref = 0;
598 	ci->i_rdcache_ref = 0;
599 	ci->i_wr_ref = 0;
600 	ci->i_wb_ref = 0;
601 	ci->i_fx_ref = 0;
602 	ci->i_wrbuffer_ref = 0;
603 	ci->i_wrbuffer_ref_head = 0;
604 	atomic_set(&ci->i_filelock_ref, 0);
605 	atomic_set(&ci->i_shared_gen, 1);
606 	ci->i_rdcache_gen = 0;
607 	ci->i_rdcache_revoking = 0;
608 
609 	INIT_LIST_HEAD(&ci->i_unsafe_dirops);
610 	INIT_LIST_HEAD(&ci->i_unsafe_iops);
611 	spin_lock_init(&ci->i_unsafe_lock);
612 
613 	ci->i_snap_realm = NULL;
614 	INIT_LIST_HEAD(&ci->i_snap_realm_item);
615 	INIT_LIST_HEAD(&ci->i_snap_flush_item);
616 
617 	INIT_WORK(&ci->i_work, ceph_inode_work);
618 	ci->i_work_mask = 0;
619 	memset(&ci->i_btime, '\0', sizeof(ci->i_btime));
620 	return &ci->netfs.inode;
621 }
622 
623 void ceph_free_inode(struct inode *inode)
624 {
625 	struct ceph_inode_info *ci = ceph_inode(inode);
626 
627 	kfree(ci->i_symlink);
628 	kmem_cache_free(ceph_inode_cachep, ci);
629 }
630 
631 void ceph_evict_inode(struct inode *inode)
632 {
633 	struct ceph_inode_info *ci = ceph_inode(inode);
634 	struct ceph_mds_client *mdsc = ceph_sb_to_mdsc(inode->i_sb);
635 	struct ceph_inode_frag *frag;
636 	struct rb_node *n;
637 
638 	dout("evict_inode %p ino %llx.%llx\n", inode, ceph_vinop(inode));
639 
640 	percpu_counter_dec(&mdsc->metric.total_inodes);
641 
642 	truncate_inode_pages_final(&inode->i_data);
643 	if (inode->i_state & I_PINNING_FSCACHE_WB)
644 		ceph_fscache_unuse_cookie(inode, true);
645 	clear_inode(inode);
646 
647 	ceph_fscache_unregister_inode_cookie(ci);
648 
649 	__ceph_remove_caps(ci);
650 
651 	if (__ceph_has_quota(ci, QUOTA_GET_ANY))
652 		ceph_adjust_quota_realms_count(inode, false);
653 
654 	/*
655 	 * we may still have a snap_realm reference if there are stray
656 	 * caps in i_snap_caps.
657 	 */
658 	if (ci->i_snap_realm) {
659 		if (ceph_snap(inode) == CEPH_NOSNAP) {
660 			dout(" dropping residual ref to snap realm %p\n",
661 			     ci->i_snap_realm);
662 			ceph_change_snap_realm(inode, NULL);
663 		} else {
664 			ceph_put_snapid_map(mdsc, ci->i_snapid_map);
665 			ci->i_snap_realm = NULL;
666 		}
667 	}
668 
669 	while ((n = rb_first(&ci->i_fragtree)) != NULL) {
670 		frag = rb_entry(n, struct ceph_inode_frag, node);
671 		rb_erase(n, &ci->i_fragtree);
672 		kfree(frag);
673 	}
674 	ci->i_fragtree_nsplits = 0;
675 
676 	__ceph_destroy_xattrs(ci);
677 	if (ci->i_xattrs.blob)
678 		ceph_buffer_put(ci->i_xattrs.blob);
679 	if (ci->i_xattrs.prealloc_blob)
680 		ceph_buffer_put(ci->i_xattrs.prealloc_blob);
681 
682 	ceph_put_string(rcu_dereference_raw(ci->i_layout.pool_ns));
683 	ceph_put_string(rcu_dereference_raw(ci->i_cached_layout.pool_ns));
684 }
685 
686 static inline blkcnt_t calc_inode_blocks(u64 size)
687 {
688 	return (size + (1<<9) - 1) >> 9;
689 }
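
/*
 * A note on calc_inode_blocks() above: i_blocks counts 512-byte sectors,
 * and the helper rounds up, so size 1 maps to 1 block, size 512 to 1
 * block, and size 513 to 2 blocks.
 */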
690 
691 /*
692  * Helpers to fill in size, ctime, mtime, and atime.  We have to be
693  * careful because either the client or MDS may have more up to date
694  * info, depending on which capabilities are held, and whether
695  * time_warp_seq or truncate_seq have increased.  (Ordinarily, mtime
696  * and size are monotonically increasing, except when utimes() or
697  * truncate() increments the corresponding _seq values.)
698  */
699 int ceph_fill_file_size(struct inode *inode, int issued,
700 			u32 truncate_seq, u64 truncate_size, u64 size)
701 {
702 	struct ceph_inode_info *ci = ceph_inode(inode);
703 	int queue_trunc = 0;
704 	loff_t isize = i_size_read(inode);
705 
706 	if (ceph_seq_cmp(truncate_seq, ci->i_truncate_seq) > 0 ||
707 	    (truncate_seq == ci->i_truncate_seq && size > isize)) {
708 		dout("size %lld -> %llu\n", isize, size);
709 		if (size > 0 && S_ISDIR(inode->i_mode)) {
710 			pr_err("fill_file_size non-zero size for directory\n");
711 			size = 0;
712 		}
713 		i_size_write(inode, size);
714 		inode->i_blocks = calc_inode_blocks(size);
715 		/*
716 		 * If we're expanding, then we should be able to just update
717 		 * the existing cookie.
718 		 */
719 		if (size > isize)
720 			ceph_fscache_update(inode);
721 		ci->i_reported_size = size;
722 		if (truncate_seq != ci->i_truncate_seq) {
723 			dout("truncate_seq %u -> %u\n",
724 			     ci->i_truncate_seq, truncate_seq);
725 			ci->i_truncate_seq = truncate_seq;
726 
727 			/* the MDS should have revoked these caps */
728 			WARN_ON_ONCE(issued & (CEPH_CAP_FILE_EXCL |
729 					       CEPH_CAP_FILE_RD |
730 					       CEPH_CAP_FILE_WR |
731 					       CEPH_CAP_FILE_LAZYIO));
732 			/*
733 			 * Queue a pending truncation if we hold caps that
734 			 * allow caching file data, or if the file is opened
735 			 * or mmapped; in either case stale pages may be
736 			 * cached locally and need to be invalidated.
737 			 */
738 			if ((issued & (CEPH_CAP_FILE_CACHE|
739 				       CEPH_CAP_FILE_BUFFER)) ||
740 			    mapping_mapped(inode->i_mapping) ||
741 			    __ceph_is_file_opened(ci)) {
742 				ci->i_truncate_pending++;
743 				queue_trunc = 1;
744 			}
745 		}
746 	}
747 	if (ceph_seq_cmp(truncate_seq, ci->i_truncate_seq) >= 0 &&
748 	    ci->i_truncate_size != truncate_size) {
749 		dout("truncate_size %lld -> %llu\n", ci->i_truncate_size,
750 		     truncate_size);
751 		ci->i_truncate_size = truncate_size;
752 	}
753 	return queue_trunc;
754 }
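
/*
 * A note on the seq comparisons above: ceph_seq_cmp() performs a
 * wraparound-safe 32-bit comparison (a sketch of the idea, assuming the
 * usual signed-difference implementation in super.h):
 */
#if 0	/* illustrative only, not compiled */
static void example_seq_cmp(void)
{
	/* 0x1 is "newer" than 0xffffffff: the u32 counter wrapped */
	WARN_ON(ceph_seq_cmp(0x1, 0xffffffff) <= 0);
	/* equal seqs compare equal */
	WARN_ON(ceph_seq_cmp(5, 5) != 0);
}
#endif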
755 
756 void ceph_fill_file_time(struct inode *inode, int issued,
757 			 u64 time_warp_seq, struct timespec64 *ctime,
758 			 struct timespec64 *mtime, struct timespec64 *atime)
759 {
760 	struct ceph_inode_info *ci = ceph_inode(inode);
761 	int warn = 0;
762 
763 	if (issued & (CEPH_CAP_FILE_EXCL|
764 		      CEPH_CAP_FILE_WR|
765 		      CEPH_CAP_FILE_BUFFER|
766 		      CEPH_CAP_AUTH_EXCL|
767 		      CEPH_CAP_XATTR_EXCL)) {
768 		if (ci->i_version == 0 ||
769 		    timespec64_compare(ctime, &inode->i_ctime) > 0) {
770 			dout("ctime %lld.%09ld -> %lld.%09ld inc w/ cap\n",
771 			     inode->i_ctime.tv_sec, inode->i_ctime.tv_nsec,
772 			     ctime->tv_sec, ctime->tv_nsec);
773 			inode->i_ctime = *ctime;
774 		}
775 		if (ci->i_version == 0 ||
776 		    ceph_seq_cmp(time_warp_seq, ci->i_time_warp_seq) > 0) {
777 			/* the MDS did a utimes() */
778 			dout("mtime %lld.%09ld -> %lld.%09ld "
779 			     "tw %d -> %d\n",
780 			     inode->i_mtime.tv_sec, inode->i_mtime.tv_nsec,
781 			     mtime->tv_sec, mtime->tv_nsec,
782 			     ci->i_time_warp_seq, (int)time_warp_seq);
783 
784 			inode->i_mtime = *mtime;
785 			inode->i_atime = *atime;
786 			ci->i_time_warp_seq = time_warp_seq;
787 		} else if (time_warp_seq == ci->i_time_warp_seq) {
788 			/* nobody did utimes(); take the max */
789 			if (timespec64_compare(mtime, &inode->i_mtime) > 0) {
790 				dout("mtime %lld.%09ld -> %lld.%09ld inc\n",
791 				     inode->i_mtime.tv_sec,
792 				     inode->i_mtime.tv_nsec,
793 				     mtime->tv_sec, mtime->tv_nsec);
794 				inode->i_mtime = *mtime;
795 			}
796 			if (timespec64_compare(atime, &inode->i_atime) > 0) {
797 				dout("atime %lld.%09ld -> %lld.%09ld inc\n",
798 				     inode->i_atime.tv_sec,
799 				     inode->i_atime.tv_nsec,
800 				     atime->tv_sec, atime->tv_nsec);
801 				inode->i_atime = *atime;
802 			}
803 		} else if (issued & CEPH_CAP_FILE_EXCL) {
804 			/* we did a utimes(); ignore mds values */
805 		} else {
806 			warn = 1;
807 		}
808 	} else {
809 		/* we have no write|excl caps; whatever the MDS says is true */
810 		if (ceph_seq_cmp(time_warp_seq, ci->i_time_warp_seq) >= 0) {
811 			inode->i_ctime = *ctime;
812 			inode->i_mtime = *mtime;
813 			inode->i_atime = *atime;
814 			ci->i_time_warp_seq = time_warp_seq;
815 		} else {
816 			warn = 1;
817 		}
818 	}
819 	if (warn) /* time_warp_seq shouldn't go backwards */
820 		dout("%p mds time_warp_seq %llu < %u\n",
821 		     inode, time_warp_seq, ci->i_time_warp_seq);
822 }
823 
824 /*
825  * Populate an inode based on info from mds.  May be called on new or
826  * existing inodes.
827  */
828 int ceph_fill_inode(struct inode *inode, struct page *locked_page,
829 		    struct ceph_mds_reply_info_in *iinfo,
830 		    struct ceph_mds_reply_dirfrag *dirinfo,
831 		    struct ceph_mds_session *session, int cap_fmode,
832 		    struct ceph_cap_reservation *caps_reservation)
833 {
834 	struct ceph_mds_client *mdsc = ceph_sb_to_mdsc(inode->i_sb);
835 	struct ceph_mds_reply_inode *info = iinfo->in;
836 	struct ceph_inode_info *ci = ceph_inode(inode);
837 	int issued, new_issued, info_caps;
838 	struct timespec64 mtime, atime, ctime;
839 	struct ceph_buffer *xattr_blob = NULL;
840 	struct ceph_buffer *old_blob = NULL;
841 	struct ceph_string *pool_ns = NULL;
842 	struct ceph_cap *new_cap = NULL;
843 	int err = 0;
844 	bool wake = false;
845 	bool queue_trunc = false;
846 	bool new_version = false;
847 	bool fill_inline = false;
848 	umode_t mode = le32_to_cpu(info->mode);
849 	dev_t rdev = le32_to_cpu(info->rdev);
850 
851 	lockdep_assert_held(&mdsc->snap_rwsem);
852 
853 	dout("%s %p ino %llx.%llx v %llu had %llu\n", __func__,
854 	     inode, ceph_vinop(inode), le64_to_cpu(info->version),
855 	     ci->i_version);
856 
857 	/* Once I_NEW is cleared, we can't change type or dev numbers */
858 	if (inode->i_state & I_NEW) {
859 		inode->i_mode = mode;
860 	} else {
861 		if (inode_wrong_type(inode, mode)) {
862 			pr_warn_once("inode type changed! (ino %llx.%llx is 0%o, mds says 0%o)\n",
863 				     ceph_vinop(inode), inode->i_mode, mode);
864 			return -ESTALE;
865 		}
866 
867 		if ((S_ISCHR(mode) || S_ISBLK(mode)) && inode->i_rdev != rdev) {
868 			pr_warn_once("dev inode rdev changed! (ino %llx.%llx is %u:%u, mds says %u:%u)\n",
869 				     ceph_vinop(inode), MAJOR(inode->i_rdev),
870 				     MINOR(inode->i_rdev), MAJOR(rdev),
871 				     MINOR(rdev));
872 			return -ESTALE;
873 		}
874 	}
875 
876 	info_caps = le32_to_cpu(info->cap.caps);
877 
878 	/* prealloc new cap struct */
879 	if (info_caps && ceph_snap(inode) == CEPH_NOSNAP) {
880 		new_cap = ceph_get_cap(mdsc, caps_reservation);
881 		if (!new_cap)
882 			return -ENOMEM;
883 	}
884 
885 	/*
886 	 * prealloc xattr data, if it looks like we'll need it.  only
887 	 * if len > 4 (meaning there are actually xattrs; the first 4
888 	 * bytes are the xattr count).
889 	 */
890 	if (iinfo->xattr_len > 4) {
891 		xattr_blob = ceph_buffer_new(iinfo->xattr_len, GFP_NOFS);
892 		if (!xattr_blob)
893 			pr_err("%s ENOMEM xattr blob %d bytes\n", __func__,
894 			       iinfo->xattr_len);
895 	}
896 
897 	if (iinfo->pool_ns_len > 0)
898 		pool_ns = ceph_find_or_create_string(iinfo->pool_ns_data,
899 						     iinfo->pool_ns_len);
900 
901 	if (ceph_snap(inode) != CEPH_NOSNAP && !ci->i_snapid_map)
902 		ci->i_snapid_map = ceph_get_snapid_map(mdsc, ceph_snap(inode));
903 
904 	spin_lock(&ci->i_ceph_lock);
905 
906 	/*
907 	 * The version provided by the MDS is odd if the inode value is
908 	 * projected (unstable) and even if it is stable.  Skip the update
909 	 * if we have newer stable info (ours >= theirs, e.g. due to racing
910 	 * mds replies), unless we are getting projected (unstable) info
911 	 * (in which case the version is odd, and we want ours > theirs).
912 	 *   us   them
913 	 *   2    2     skip
914 	 *   3    2     skip
915 	 *   3    3     update
916 	 */
917 	if (ci->i_version == 0 ||
918 	    ((info->cap.flags & CEPH_CAP_FLAG_AUTH) &&
919 	     le64_to_cpu(info->version) > (ci->i_version & ~1)))
920 		new_version = true;
921 
922 	/* Update change_attribute */
923 	inode_set_max_iversion_raw(inode, iinfo->change_attr);
924 
925 	__ceph_caps_issued(ci, &issued);
926 	issued |= __ceph_caps_dirty(ci);
927 	new_issued = ~issued & info_caps;
928 
929 	/* directories have fl_stripe_unit set to zero */
930 	if (le32_to_cpu(info->layout.fl_stripe_unit))
931 		inode->i_blkbits =
932 			fls(le32_to_cpu(info->layout.fl_stripe_unit)) - 1;
933 	else
934 		inode->i_blkbits = CEPH_BLOCK_SHIFT;
935 
936 	__ceph_update_quota(ci, iinfo->max_bytes, iinfo->max_files);
937 
938 	if ((new_version || (new_issued & CEPH_CAP_AUTH_SHARED)) &&
939 	    (issued & CEPH_CAP_AUTH_EXCL) == 0) {
940 		inode->i_mode = mode;
941 		inode->i_uid = make_kuid(&init_user_ns, le32_to_cpu(info->uid));
942 		inode->i_gid = make_kgid(&init_user_ns, le32_to_cpu(info->gid));
943 		dout("%p mode 0%o uid.gid %d.%d\n", inode, inode->i_mode,
944 		     from_kuid(&init_user_ns, inode->i_uid),
945 		     from_kgid(&init_user_ns, inode->i_gid));
946 		ceph_decode_timespec64(&ci->i_btime, &iinfo->btime);
947 		ceph_decode_timespec64(&ci->i_snap_btime, &iinfo->snap_btime);
948 	}
949 
950 	if ((new_version || (new_issued & CEPH_CAP_LINK_SHARED)) &&
951 	    (issued & CEPH_CAP_LINK_EXCL) == 0)
952 		set_nlink(inode, le32_to_cpu(info->nlink));
953 
954 	if (new_version || (new_issued & CEPH_CAP_ANY_RD)) {
955 		/* be careful with mtime, atime, size */
956 		ceph_decode_timespec64(&atime, &info->atime);
957 		ceph_decode_timespec64(&mtime, &info->mtime);
958 		ceph_decode_timespec64(&ctime, &info->ctime);
959 		ceph_fill_file_time(inode, issued,
960 				le32_to_cpu(info->time_warp_seq),
961 				&ctime, &mtime, &atime);
962 	}
963 
964 	if (new_version || (info_caps & CEPH_CAP_FILE_SHARED)) {
965 		ci->i_files = le64_to_cpu(info->files);
966 		ci->i_subdirs = le64_to_cpu(info->subdirs);
967 	}
968 
969 	if (new_version ||
970 	    (new_issued & (CEPH_CAP_ANY_FILE_RD | CEPH_CAP_ANY_FILE_WR))) {
971 		s64 old_pool = ci->i_layout.pool_id;
972 		struct ceph_string *old_ns;
973 
974 		ceph_file_layout_from_legacy(&ci->i_layout, &info->layout);
975 		old_ns = rcu_dereference_protected(ci->i_layout.pool_ns,
976 					lockdep_is_held(&ci->i_ceph_lock));
977 		rcu_assign_pointer(ci->i_layout.pool_ns, pool_ns);
978 
979 		if (ci->i_layout.pool_id != old_pool || pool_ns != old_ns)
980 			ci->i_ceph_flags &= ~CEPH_I_POOL_PERM;
981 
982 		pool_ns = old_ns;
983 
984 		queue_trunc = ceph_fill_file_size(inode, issued,
985 					le32_to_cpu(info->truncate_seq),
986 					le64_to_cpu(info->truncate_size),
987 					le64_to_cpu(info->size));
988 		/* only update max_size on auth cap */
989 		if ((info->cap.flags & CEPH_CAP_FLAG_AUTH) &&
990 		    ci->i_max_size != le64_to_cpu(info->max_size)) {
991 			dout("max_size %lld -> %llu\n", ci->i_max_size,
992 					le64_to_cpu(info->max_size));
993 			ci->i_max_size = le64_to_cpu(info->max_size);
994 		}
995 	}
996 
997 	/* layout and rstat are not tracked by capability, update them if
998 	 * the inode info is from auth mds */
999 	if (new_version || (info->cap.flags & CEPH_CAP_FLAG_AUTH)) {
1000 		if (S_ISDIR(inode->i_mode)) {
1001 			ci->i_dir_layout = iinfo->dir_layout;
1002 			ci->i_rbytes = le64_to_cpu(info->rbytes);
1003 			ci->i_rfiles = le64_to_cpu(info->rfiles);
1004 			ci->i_rsubdirs = le64_to_cpu(info->rsubdirs);
1005 			ci->i_dir_pin = iinfo->dir_pin;
1006 			ci->i_rsnaps = iinfo->rsnaps;
1007 			ceph_decode_timespec64(&ci->i_rctime, &info->rctime);
1008 		}
1009 	}
1010 
1011 	/* xattrs */
1012 	/* note that if i_xattrs.len <= 4, i_xattrs.data will still be NULL. */
1013 	if ((ci->i_xattrs.version == 0 || !(issued & CEPH_CAP_XATTR_EXCL))  &&
1014 	    le64_to_cpu(info->xattr_version) > ci->i_xattrs.version) {
1015 		if (ci->i_xattrs.blob)
1016 			old_blob = ci->i_xattrs.blob;
1017 		ci->i_xattrs.blob = xattr_blob;
1018 		if (xattr_blob)
1019 			memcpy(ci->i_xattrs.blob->vec.iov_base,
1020 			       iinfo->xattr_data, iinfo->xattr_len);
1021 		ci->i_xattrs.version = le64_to_cpu(info->xattr_version);
1022 		ceph_forget_all_cached_acls(inode);
1023 		ceph_security_invalidate_secctx(inode);
1024 		xattr_blob = NULL;
1025 	}
1026 
1027 	/* finally update i_version */
1028 	if (le64_to_cpu(info->version) > ci->i_version)
1029 		ci->i_version = le64_to_cpu(info->version);
1030 
1031 	inode->i_mapping->a_ops = &ceph_aops;
1032 
1033 	switch (inode->i_mode & S_IFMT) {
1034 	case S_IFIFO:
1035 	case S_IFBLK:
1036 	case S_IFCHR:
1037 	case S_IFSOCK:
1038 		inode->i_blkbits = PAGE_SHIFT;
1039 		init_special_inode(inode, inode->i_mode, rdev);
1040 		inode->i_op = &ceph_file_iops;
1041 		break;
1042 	case S_IFREG:
1043 		inode->i_op = &ceph_file_iops;
1044 		inode->i_fop = &ceph_file_fops;
1045 		break;
1046 	case S_IFLNK:
1047 		inode->i_op = &ceph_symlink_iops;
1048 		if (!ci->i_symlink) {
1049 			u32 symlen = iinfo->symlink_len;
1050 			char *sym;
1051 
1052 			spin_unlock(&ci->i_ceph_lock);
1053 
1054 			if (symlen != i_size_read(inode)) {
1055 				pr_err("%s %llx.%llx BAD symlink "
1056 					"size %lld\n", __func__,
1057 					ceph_vinop(inode),
1058 					i_size_read(inode));
1059 				i_size_write(inode, symlen);
1060 				inode->i_blocks = calc_inode_blocks(symlen);
1061 			}
1062 
1063 			err = -ENOMEM;
1064 			sym = kstrndup(iinfo->symlink, symlen, GFP_NOFS);
1065 			if (!sym)
1066 				goto out;
1067 
1068 			spin_lock(&ci->i_ceph_lock);
1069 			if (!ci->i_symlink)
1070 				ci->i_symlink = sym;
1071 			else
1072 				kfree(sym); /* lost a race */
1073 		}
1074 		inode->i_link = ci->i_symlink;
1075 		break;
1076 	case S_IFDIR:
1077 		inode->i_op = &ceph_dir_iops;
1078 		inode->i_fop = &ceph_dir_fops;
1079 		break;
1080 	default:
1081 		pr_err("%s %llx.%llx BAD mode 0%o\n", __func__,
1082 		       ceph_vinop(inode), inode->i_mode);
1083 	}
1084 
1085 	/* were we issued a capability? */
1086 	if (info_caps) {
1087 		if (ceph_snap(inode) == CEPH_NOSNAP) {
1088 			ceph_add_cap(inode, session,
1089 				     le64_to_cpu(info->cap.cap_id),
1090 				     info_caps,
1091 				     le32_to_cpu(info->cap.wanted),
1092 				     le32_to_cpu(info->cap.seq),
1093 				     le32_to_cpu(info->cap.mseq),
1094 				     le64_to_cpu(info->cap.realm),
1095 				     info->cap.flags, &new_cap);
1096 
1097 			/* set dir completion flag? */
1098 			if (S_ISDIR(inode->i_mode) &&
1099 			    ci->i_files == 0 && ci->i_subdirs == 0 &&
1100 			    (info_caps & CEPH_CAP_FILE_SHARED) &&
1101 			    (issued & CEPH_CAP_FILE_EXCL) == 0 &&
1102 			    !__ceph_dir_is_complete(ci)) {
1103 				dout(" marking %p complete (empty)\n", inode);
1104 				i_size_write(inode, 0);
1105 				__ceph_dir_set_complete(ci,
1106 					atomic64_read(&ci->i_release_count),
1107 					atomic64_read(&ci->i_ordered_count));
1108 			}
1109 
1110 			wake = true;
1111 		} else {
1112 			dout(" %p got snap_caps %s\n", inode,
1113 			     ceph_cap_string(info_caps));
1114 			ci->i_snap_caps |= info_caps;
1115 		}
1116 	}
1117 
1118 	if (iinfo->inline_version > 0 &&
1119 	    iinfo->inline_version >= ci->i_inline_version) {
1120 		int cache_caps = CEPH_CAP_FILE_CACHE | CEPH_CAP_FILE_LAZYIO;
1121 		ci->i_inline_version = iinfo->inline_version;
1122 		if (ceph_has_inline_data(ci) &&
1123 		    (locked_page || (info_caps & cache_caps)))
1124 			fill_inline = true;
1125 	}
1126 
1127 	if (cap_fmode >= 0) {
1128 		if (!info_caps)
1129 			pr_warn("mds issued no caps on %llx.%llx\n",
1130 				ceph_vinop(inode));
1131 		__ceph_touch_fmode(ci, mdsc, cap_fmode);
1132 	}
1133 
1134 	spin_unlock(&ci->i_ceph_lock);
1135 
1136 	ceph_fscache_register_inode_cookie(inode);
1137 
1138 	if (fill_inline)
1139 		ceph_fill_inline_data(inode, locked_page,
1140 				      iinfo->inline_data, iinfo->inline_len);
1141 
1142 	if (wake)
1143 		wake_up_all(&ci->i_cap_wq);
1144 
1145 	/* queue truncate if we saw i_size decrease */
1146 	if (queue_trunc)
1147 		ceph_queue_vmtruncate(inode);
1148 
1149 	/* populate frag tree */
1150 	if (S_ISDIR(inode->i_mode))
1151 		ceph_fill_fragtree(inode, &info->fragtree, dirinfo);
1152 
1153 	/* update delegation info? */
1154 	if (dirinfo)
1155 		ceph_fill_dirfrag(inode, dirinfo);
1156 
1157 	err = 0;
1158 out:
1159 	if (new_cap)
1160 		ceph_put_cap(mdsc, new_cap);
1161 	ceph_buffer_put(old_blob);
1162 	ceph_buffer_put(xattr_blob);
1163 	ceph_put_string(pool_ns);
1164 	return err;
1165 }
1166 
1167 /*
1168  * caller should hold session s_mutex and dentry->d_lock.
1169  */
1170 static void __update_dentry_lease(struct inode *dir, struct dentry *dentry,
1171 				  struct ceph_mds_reply_lease *lease,
1172 				  struct ceph_mds_session *session,
1173 				  unsigned long from_time,
1174 				  struct ceph_mds_session **old_lease_session)
1175 {
1176 	struct ceph_dentry_info *di = ceph_dentry(dentry);
1177 	unsigned mask = le16_to_cpu(lease->mask);
1178 	unsigned long duration = le32_to_cpu(lease->duration_ms);
1179 	unsigned long ttl = from_time + (duration * HZ) / 1000;
1180 	unsigned long half_ttl = from_time + (duration * HZ / 2) / 1000;
1181 
1182 	dout("update_dentry_lease %p duration %lu ms ttl %lu\n",
1183 	     dentry, duration, ttl);
1184 
1185 	/* only track leases on regular dentries */
1186 	if (ceph_snap(dir) != CEPH_NOSNAP)
1187 		return;
1188 
1189 	if (mask & CEPH_LEASE_PRIMARY_LINK)
1190 		di->flags |= CEPH_DENTRY_PRIMARY_LINK;
1191 	else
1192 		di->flags &= ~CEPH_DENTRY_PRIMARY_LINK;
1193 
1194 	di->lease_shared_gen = atomic_read(&ceph_inode(dir)->i_shared_gen);
1195 	if (!(mask & CEPH_LEASE_VALID)) {
1196 		__ceph_dentry_dir_lease_touch(di);
1197 		return;
1198 	}
1199 
1200 	if (di->lease_gen == atomic_read(&session->s_cap_gen) &&
1201 	    time_before(ttl, di->time))
1202 		return;  /* we already have a newer lease. */
1203 
1204 	if (di->lease_session && di->lease_session != session) {
1205 		*old_lease_session = di->lease_session;
1206 		di->lease_session = NULL;
1207 	}
1208 
1209 	if (!di->lease_session)
1210 		di->lease_session = ceph_get_mds_session(session);
1211 	di->lease_gen = atomic_read(&session->s_cap_gen);
1212 	di->lease_seq = le32_to_cpu(lease->seq);
1213 	di->lease_renew_after = half_ttl;
1214 	di->lease_renew_from = 0;
1215 	di->time = ttl;
1216 
1217 	__ceph_dentry_lease_touch(di);
1218 }
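
/*
 * A worked example of the ttl arithmetic above (HZ value chosen for
 * illustration): with duration_ms = 30000 and HZ = 250, ttl is
 * from_time + 7500 jiffies (30s) and half_ttl is from_time + 3750
 * jiffies, so a renewal is attempted halfway through the lease lifetime.
 */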
1219 
1220 static inline void update_dentry_lease(struct inode *dir, struct dentry *dentry,
1221 					struct ceph_mds_reply_lease *lease,
1222 					struct ceph_mds_session *session,
1223 					unsigned long from_time)
1224 {
1225 	struct ceph_mds_session *old_lease_session = NULL;
1226 	spin_lock(&dentry->d_lock);
1227 	__update_dentry_lease(dir, dentry, lease, session, from_time,
1228 			      &old_lease_session);
1229 	spin_unlock(&dentry->d_lock);
1230 	ceph_put_mds_session(old_lease_session);
1231 }
1232 
1233 /*
1234  * update dentry lease without having parent inode locked
1235  */
1236 static void update_dentry_lease_careful(struct dentry *dentry,
1237 					struct ceph_mds_reply_lease *lease,
1238 					struct ceph_mds_session *session,
1239 					unsigned long from_time,
1240 					char *dname, u32 dname_len,
1241 					struct ceph_vino *pdvino,
1242 					struct ceph_vino *ptvino)
1243 
1244 {
1245 	struct inode *dir;
1246 	struct ceph_mds_session *old_lease_session = NULL;
1247 
1248 	spin_lock(&dentry->d_lock);
1249 	/* make sure dentry's name matches target */
1250 	if (dentry->d_name.len != dname_len ||
1251 	    memcmp(dentry->d_name.name, dname, dname_len))
1252 		goto out_unlock;
1253 
1254 	dir = d_inode(dentry->d_parent);
1255 	/* make sure parent matches dvino */
1256 	if (!ceph_ino_compare(dir, pdvino))
1257 		goto out_unlock;
1258 
1259 	/* make sure dentry's inode matches target. NULL ptvino means that
1260 	 * we expect a negative dentry */
1261 	if (ptvino) {
1262 		if (d_really_is_negative(dentry))
1263 			goto out_unlock;
1264 		if (!ceph_ino_compare(d_inode(dentry), ptvino))
1265 			goto out_unlock;
1266 	} else {
1267 		if (d_really_is_positive(dentry))
1268 			goto out_unlock;
1269 	}
1270 
1271 	__update_dentry_lease(dir, dentry, lease, session,
1272 			      from_time, &old_lease_session);
1273 out_unlock:
1274 	spin_unlock(&dentry->d_lock);
1275 	ceph_put_mds_session(old_lease_session);
1276 }
1277 
1278 /*
1279  * splice a dentry to an inode.
1280  * caller must hold directory i_rwsem for this to be safe.
1281  */
1282 static int splice_dentry(struct dentry **pdn, struct inode *in)
1283 {
1284 	struct dentry *dn = *pdn;
1285 	struct dentry *realdn;
1286 
1287 	BUG_ON(d_inode(dn));
1288 
1289 	if (S_ISDIR(in->i_mode)) {
1290 		/* If inode is directory, d_splice_alias() below will remove
1291 		 * 'realdn' from its origin parent. We need to ensure that
1292 		 * origin parent's readdir cache will not reference 'realdn'
1293 		 */
1294 		realdn = d_find_any_alias(in);
1295 		if (realdn) {
1296 			struct ceph_dentry_info *di = ceph_dentry(realdn);
1297 			spin_lock(&realdn->d_lock);
1298 
1299 			realdn->d_op->d_prune(realdn);
1300 
1301 			di->time = jiffies;
1302 			di->lease_shared_gen = 0;
1303 			di->offset = 0;
1304 
1305 			spin_unlock(&realdn->d_lock);
1306 			dput(realdn);
1307 		}
1308 	}
1309 
1310 	/* dn must be unhashed */
1311 	if (!d_unhashed(dn))
1312 		d_drop(dn);
1313 	realdn = d_splice_alias(in, dn);
1314 	if (IS_ERR(realdn)) {
1315 		pr_err("splice_dentry error %ld %p inode %p ino %llx.%llx\n",
1316 		       PTR_ERR(realdn), dn, in, ceph_vinop(in));
1317 		return PTR_ERR(realdn);
1318 	}
1319 
1320 	if (realdn) {
1321 		dout("dn %p (%d) spliced with %p (%d) "
1322 		     "inode %p ino %llx.%llx\n",
1323 		     dn, d_count(dn),
1324 		     realdn, d_count(realdn),
1325 		     d_inode(realdn), ceph_vinop(d_inode(realdn)));
1326 		dput(dn);
1327 		*pdn = realdn;
1328 	} else {
1329 		BUG_ON(!ceph_dentry(dn));
1330 		dout("dn %p attached to %p ino %llx.%llx\n",
1331 		     dn, d_inode(dn), ceph_vinop(d_inode(dn)));
1332 	}
1333 	return 0;
1334 }
1335 
1336 /*
1337  * Incorporate results into the local cache.  This is either just
1338  * one inode, or a directory, dentry, and possibly linked-to inode (e.g.,
1339  * after a lookup).
1340  *
1341  * A reply may contain:
1342  *   a directory inode along with a dentry, and/or
1343  *   a target inode.
1344  *
1345  * Called with snap_rwsem (read).
1346  */
1347 int ceph_fill_trace(struct super_block *sb, struct ceph_mds_request *req)
1348 {
1349 	struct ceph_mds_session *session = req->r_session;
1350 	struct ceph_mds_reply_info_parsed *rinfo = &req->r_reply_info;
1351 	struct inode *in = NULL;
1352 	struct ceph_vino tvino, dvino;
1353 	struct ceph_fs_client *fsc = ceph_sb_to_client(sb);
1354 	int err = 0;
1355 
1356 	dout("fill_trace %p is_dentry %d is_target %d\n", req,
1357 	     rinfo->head->is_dentry, rinfo->head->is_target);
1358 
1359 	if (!rinfo->head->is_target && !rinfo->head->is_dentry) {
1360 		dout("fill_trace reply is empty!\n");
1361 		if (rinfo->head->result == 0 && req->r_parent)
1362 			ceph_invalidate_dir_request(req);
1363 		return 0;
1364 	}
1365 
1366 	if (rinfo->head->is_dentry) {
1367 		struct inode *dir = req->r_parent;
1368 
1369 		if (dir) {
1370 			err = ceph_fill_inode(dir, NULL, &rinfo->diri,
1371 					      rinfo->dirfrag, session, -1,
1372 					      &req->r_caps_reservation);
1373 			if (err < 0)
1374 				goto done;
1375 		} else {
1376 			WARN_ON_ONCE(1);
1377 		}
1378 
1379 		if (dir && req->r_op == CEPH_MDS_OP_LOOKUPNAME &&
1380 		    test_bit(CEPH_MDS_R_PARENT_LOCKED, &req->r_req_flags) &&
1381 		    !test_bit(CEPH_MDS_R_ABORTED, &req->r_req_flags)) {
1382 			struct qstr dname;
1383 			struct dentry *dn, *parent;
1384 
1385 			BUG_ON(!rinfo->head->is_target);
1386 			BUG_ON(req->r_dentry);
1387 
1388 			parent = d_find_any_alias(dir);
1389 			BUG_ON(!parent);
1390 
1391 			dname.name = rinfo->dname;
1392 			dname.len = rinfo->dname_len;
1393 			dname.hash = full_name_hash(parent, dname.name, dname.len);
1394 			tvino.ino = le64_to_cpu(rinfo->targeti.in->ino);
1395 			tvino.snap = le64_to_cpu(rinfo->targeti.in->snapid);
1396 retry_lookup:
1397 			dn = d_lookup(parent, &dname);
1398 			dout("d_lookup on parent=%p name=%.*s got %p\n",
1399 			     parent, dname.len, dname.name, dn);
1400 
1401 			if (!dn) {
1402 				dn = d_alloc(parent, &dname);
1403 				dout("d_alloc %p '%.*s' = %p\n", parent,
1404 				     dname.len, dname.name, dn);
1405 				if (!dn) {
1406 					dput(parent);
1407 					err = -ENOMEM;
1408 					goto done;
1409 				}
1410 				err = 0;
1411 			} else if (d_really_is_positive(dn) &&
1412 				   (ceph_ino(d_inode(dn)) != tvino.ino ||
1413 				    ceph_snap(d_inode(dn)) != tvino.snap)) {
1414 				dout(" dn %p points to wrong inode %p\n",
1415 				     dn, d_inode(dn));
1416 				ceph_dir_clear_ordered(dir);
1417 				d_delete(dn);
1418 				dput(dn);
1419 				goto retry_lookup;
1420 			}
1421 
1422 			req->r_dentry = dn;
1423 			dput(parent);
1424 		}
1425 	}
1426 
1427 	if (rinfo->head->is_target) {
1428 		/* Should be filled in by handle_reply */
1429 		BUG_ON(!req->r_target_inode);
1430 
1431 		in = req->r_target_inode;
1432 		err = ceph_fill_inode(in, req->r_locked_page, &rinfo->targeti,
1433 				NULL, session,
1434 				(!test_bit(CEPH_MDS_R_ABORTED, &req->r_req_flags) &&
1435 				 !test_bit(CEPH_MDS_R_ASYNC, &req->r_req_flags) &&
1436 				 rinfo->head->result == 0) ?  req->r_fmode : -1,
1437 				&req->r_caps_reservation);
1438 		if (err < 0) {
1439 			pr_err("ceph_fill_inode badness %p %llx.%llx\n",
1440 				in, ceph_vinop(in));
1441 			req->r_target_inode = NULL;
1442 			if (in->i_state & I_NEW)
1443 				discard_new_inode(in);
1444 			else
1445 				iput(in);
1446 			goto done;
1447 		}
1448 		if (in->i_state & I_NEW)
1449 			unlock_new_inode(in);
1450 	}
1451 
1452 	/*
1453 	 * ignore null lease/binding on snapdir ENOENT, or else we
1454 	 * will have trouble splicing in the virtual snapdir later
1455 	 */
1456 	if (rinfo->head->is_dentry &&
1457             !test_bit(CEPH_MDS_R_ABORTED, &req->r_req_flags) &&
1458 	    test_bit(CEPH_MDS_R_PARENT_LOCKED, &req->r_req_flags) &&
1459 	    (rinfo->head->is_target || strncmp(req->r_dentry->d_name.name,
1460 					       fsc->mount_options->snapdir_name,
1461 					       req->r_dentry->d_name.len))) {
1462 		/*
1463 		 * lookup link rename   : null -> possibly existing inode
1464 		 * mknod symlink mkdir  : null -> new inode
1465 		 * unlink               : linked -> null
1466 		 */
1467 		struct inode *dir = req->r_parent;
1468 		struct dentry *dn = req->r_dentry;
1469 		bool have_dir_cap, have_lease;
1470 
1471 		BUG_ON(!dn);
1472 		BUG_ON(!dir);
1473 		BUG_ON(d_inode(dn->d_parent) != dir);
1474 
1475 		dvino.ino = le64_to_cpu(rinfo->diri.in->ino);
1476 		dvino.snap = le64_to_cpu(rinfo->diri.in->snapid);
1477 
1478 		BUG_ON(ceph_ino(dir) != dvino.ino);
1479 		BUG_ON(ceph_snap(dir) != dvino.snap);
1480 
1481 		/* do we have a lease on the whole dir? */
1482 		have_dir_cap =
1483 			(le32_to_cpu(rinfo->diri.in->cap.caps) &
1484 			 CEPH_CAP_FILE_SHARED);
1485 
1486 		/* do we have a dn lease? */
1487 		have_lease = have_dir_cap ||
1488 			le32_to_cpu(rinfo->dlease->duration_ms);
1489 		if (!have_lease)
1490 			dout("fill_trace  no dentry lease or dir cap\n");
1491 
1492 		/* rename? */
1493 		if (req->r_old_dentry && req->r_op == CEPH_MDS_OP_RENAME) {
1494 			struct inode *olddir = req->r_old_dentry_dir;
1495 			BUG_ON(!olddir);
1496 
1497 			dout(" src %p '%pd' dst %p '%pd'\n",
1498 			     req->r_old_dentry,
1499 			     req->r_old_dentry,
1500 			     dn, dn);
1501 			dout("fill_trace doing d_move %p -> %p\n",
1502 			     req->r_old_dentry, dn);
1503 
1504 			/* d_move screws up sibling dentries' offsets */
1505 			ceph_dir_clear_ordered(dir);
1506 			ceph_dir_clear_ordered(olddir);
1507 
1508 			d_move(req->r_old_dentry, dn);
1509 			dout(" src %p '%pd' dst %p '%pd'\n",
1510 			     req->r_old_dentry,
1511 			     req->r_old_dentry,
1512 			     dn, dn);
1513 
1514 			/* ensure target dentry is invalidated, despite
1515 			   rehashing bug in vfs_rename_dir */
1516 			ceph_invalidate_dentry_lease(dn);
1517 
1518 			dout("dn %p gets new offset %lld\n", req->r_old_dentry,
1519 			     ceph_dentry(req->r_old_dentry)->offset);
1520 
1521 			/* swap r_dentry and r_old_dentry in case
1522 			 * splice_dentry() gets called later. This is safe
1523 			 * because no other place will use them */
1524 			req->r_dentry = req->r_old_dentry;
1525 			req->r_old_dentry = dn;
1526 			dn = req->r_dentry;
1527 		}
1528 
1529 		/* null dentry? */
1530 		if (!rinfo->head->is_target) {
1531 			dout("fill_trace null dentry\n");
1532 			if (d_really_is_positive(dn)) {
1533 				dout("d_delete %p\n", dn);
1534 				ceph_dir_clear_ordered(dir);
1535 				d_delete(dn);
1536 			} else if (have_lease) {
1537 				if (d_unhashed(dn))
1538 					d_add(dn, NULL);
1539 			}
1540 
1541 			if (!d_unhashed(dn) && have_lease)
1542 				update_dentry_lease(dir, dn,
1543 						    rinfo->dlease, session,
1544 						    req->r_request_started);
1545 			goto done;
1546 		}
1547 
1548 		/* attach proper inode */
1549 		if (d_really_is_negative(dn)) {
1550 			ceph_dir_clear_ordered(dir);
1551 			ihold(in);
1552 			err = splice_dentry(&req->r_dentry, in);
1553 			if (err < 0)
1554 				goto done;
1555 			dn = req->r_dentry;  /* may have spliced */
1556 		} else if (d_really_is_positive(dn) && d_inode(dn) != in) {
1557 			dout(" %p links to %p %llx.%llx, not %llx.%llx\n",
1558 			     dn, d_inode(dn), ceph_vinop(d_inode(dn)),
1559 			     ceph_vinop(in));
1560 			d_invalidate(dn);
1561 			have_lease = false;
1562 		}
1563 
1564 		if (have_lease) {
1565 			update_dentry_lease(dir, dn,
1566 					    rinfo->dlease, session,
1567 					    req->r_request_started);
1568 		}
1569 		dout(" final dn %p\n", dn);
1570 	} else if ((req->r_op == CEPH_MDS_OP_LOOKUPSNAP ||
1571 		    req->r_op == CEPH_MDS_OP_MKSNAP) &&
1572 	           test_bit(CEPH_MDS_R_PARENT_LOCKED, &req->r_req_flags) &&
1573 		   !test_bit(CEPH_MDS_R_ABORTED, &req->r_req_flags)) {
1574 		struct inode *dir = req->r_parent;
1575 
1576 		/* fill out a snapdir LOOKUPSNAP dentry */
1577 		BUG_ON(!dir);
1578 		BUG_ON(ceph_snap(dir) != CEPH_SNAPDIR);
1579 		BUG_ON(!req->r_dentry);
1580 		dout(" linking snapped dir %p to dn %p\n", in, req->r_dentry);
1581 		ceph_dir_clear_ordered(dir);
1582 		ihold(in);
1583 		err = splice_dentry(&req->r_dentry, in);
1584 		if (err < 0)
1585 			goto done;
1586 	} else if (rinfo->head->is_dentry && req->r_dentry) {
1587 		/* parent inode is not locked, be careful */
1588 		struct ceph_vino *ptvino = NULL;
1589 		dvino.ino = le64_to_cpu(rinfo->diri.in->ino);
1590 		dvino.snap = le64_to_cpu(rinfo->diri.in->snapid);
1591 		if (rinfo->head->is_target) {
1592 			tvino.ino = le64_to_cpu(rinfo->targeti.in->ino);
1593 			tvino.snap = le64_to_cpu(rinfo->targeti.in->snapid);
1594 			ptvino = &tvino;
1595 		}
1596 		update_dentry_lease_careful(req->r_dentry, rinfo->dlease,
1597 					    session, req->r_request_started,
1598 					    rinfo->dname, rinfo->dname_len,
1599 					    &dvino, ptvino);
1600 	}
1601 done:
1602 	dout("fill_trace done err=%d\n", err);
1603 	return err;
1604 }
1605 
1606 /*
1607  * Prepopulate our cache with readdir results, leases, etc.
1608  */
1609 static int readdir_prepopulate_inodes_only(struct ceph_mds_request *req,
1610 					   struct ceph_mds_session *session)
1611 {
1612 	struct ceph_mds_reply_info_parsed *rinfo = &req->r_reply_info;
1613 	int i, err = 0;
1614 
1615 	for (i = 0; i < rinfo->dir_nr; i++) {
1616 		struct ceph_mds_reply_dir_entry *rde = rinfo->dir_entries + i;
1617 		struct ceph_vino vino;
1618 		struct inode *in;
1619 		int rc;
1620 
1621 		vino.ino = le64_to_cpu(rde->inode.in->ino);
1622 		vino.snap = le64_to_cpu(rde->inode.in->snapid);
1623 
1624 		in = ceph_get_inode(req->r_dentry->d_sb, vino, NULL);
1625 		if (IS_ERR(in)) {
1626 			err = PTR_ERR(in);
1627 			dout("new_inode badness got %d\n", err);
1628 			continue;
1629 		}
1630 		rc = ceph_fill_inode(in, NULL, &rde->inode, NULL, session,
1631 				     -1, &req->r_caps_reservation);
1632 		if (rc < 0) {
1633 			pr_err("ceph_fill_inode badness on %p got %d\n",
1634 			       in, rc);
1635 			err = rc;
1636 			if (in->i_state & I_NEW) {
1637 				ihold(in);
1638 				discard_new_inode(in);
1639 			}
1640 		} else if (in->i_state & I_NEW) {
1641 			unlock_new_inode(in);
1642 		}
1643 
1644 		iput(in);
1645 	}
1646 
1647 	return err;
1648 }
1649 
1650 void ceph_readdir_cache_release(struct ceph_readdir_cache_control *ctl)
1651 {
1652 	if (ctl->page) {
1653 		kunmap(ctl->page);
1654 		put_page(ctl->page);
1655 		ctl->page = NULL;
1656 	}
1657 }
1658 
1659 static int fill_readdir_cache(struct inode *dir, struct dentry *dn,
1660 			      struct ceph_readdir_cache_control *ctl,
1661 			      struct ceph_mds_request *req)
1662 {
1663 	struct ceph_inode_info *ci = ceph_inode(dir);
1664 	unsigned nsize = PAGE_SIZE / sizeof(struct dentry*);
1665 	unsigned idx = ctl->index % nsize;
1666 	pgoff_t pgoff = ctl->index / nsize;
1667 
1668 	if (!ctl->page || pgoff != page_index(ctl->page)) {
1669 		ceph_readdir_cache_release(ctl);
1670 		if (idx == 0)
1671 			ctl->page = grab_cache_page(&dir->i_data, pgoff);
1672 		else
1673 			ctl->page = find_lock_page(&dir->i_data, pgoff);
1674 		if (!ctl->page) {
1675 			ctl->index = -1;
1676 			return idx == 0 ? -ENOMEM : 0;
1677 		}
1678 		/* reading/filling the cache are serialized by
1679 		 * i_rwsem, no need to use page lock */
1680 		unlock_page(ctl->page);
1681 		ctl->dentries = kmap(ctl->page);
1682 		if (idx == 0)
1683 			memset(ctl->dentries, 0, PAGE_SIZE);
1684 	}
1685 
1686 	if (req->r_dir_release_cnt == atomic64_read(&ci->i_release_count) &&
1687 	    req->r_dir_ordered_cnt == atomic64_read(&ci->i_ordered_count)) {
1688 		dout("readdir cache dn %p idx %d\n", dn, ctl->index);
1689 		ctl->dentries[idx] = dn;
1690 		ctl->index++;
1691 	} else {
1692 		dout("disable readdir cache\n");
1693 		ctl->index = -1;
1694 	}
1695 	return 0;
1696 }
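
/*
 * A worked example of the cache indexing above: with 4 KiB pages and
 * 8-byte dentry pointers, nsize is 512, so readdir cache index 1000
 * lands on pgoff 1 at slot 488 within that page.
 */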
1697 
1698 int ceph_readdir_prepopulate(struct ceph_mds_request *req,
1699 			     struct ceph_mds_session *session)
1700 {
1701 	struct dentry *parent = req->r_dentry;
1702 	struct ceph_inode_info *ci = ceph_inode(d_inode(parent));
1703 	struct ceph_mds_reply_info_parsed *rinfo = &req->r_reply_info;
1704 	struct qstr dname;
1705 	struct dentry *dn;
1706 	struct inode *in;
1707 	int err = 0, skipped = 0, ret, i;
1708 	u32 frag = le32_to_cpu(req->r_args.readdir.frag);
1709 	u32 last_hash = 0;
1710 	u32 fpos_offset;
1711 	struct ceph_readdir_cache_control cache_ctl = {};
1712 
1713 	if (test_bit(CEPH_MDS_R_ABORTED, &req->r_req_flags))
1714 		return readdir_prepopulate_inodes_only(req, session);
1715 
1716 	if (rinfo->hash_order) {
1717 		if (req->r_path2) {
1718 			last_hash = ceph_str_hash(ci->i_dir_layout.dl_dir_hash,
1719 						  req->r_path2,
1720 						  strlen(req->r_path2));
1721 			last_hash = ceph_frag_value(last_hash);
1722 		} else if (rinfo->offset_hash) {
1723 			/* mds understands offset_hash */
1724 			WARN_ON_ONCE(req->r_readdir_offset != 2);
1725 			last_hash = le32_to_cpu(req->r_args.readdir.offset_hash);
1726 		}
1727 	}
1728 
1729 	if (rinfo->dir_dir &&
1730 	    le32_to_cpu(rinfo->dir_dir->frag) != frag) {
1731 		dout("readdir_prepopulate got new frag %x -> %x\n",
1732 		     frag, le32_to_cpu(rinfo->dir_dir->frag));
1733 		frag = le32_to_cpu(rinfo->dir_dir->frag);
1734 		if (!rinfo->hash_order)
1735 			req->r_readdir_offset = 2;
1736 	}
1737 
1738 	if (le32_to_cpu(rinfo->head->op) == CEPH_MDS_OP_LSSNAP) {
1739 		dout("readdir_prepopulate %d items under SNAPDIR dn %p\n",
1740 		     rinfo->dir_nr, parent);
1741 	} else {
1742 		dout("readdir_prepopulate %d items under dn %p\n",
1743 		     rinfo->dir_nr, parent);
1744 		if (rinfo->dir_dir)
1745 			ceph_fill_dirfrag(d_inode(parent), rinfo->dir_dir);
1746 
1747 		if (ceph_frag_is_leftmost(frag) &&
1748 		    req->r_readdir_offset == 2 &&
1749 		    !(rinfo->hash_order && last_hash)) {
1750 			/* note dir version at start of readdir so we can
1751 			 * tell if any dentries get dropped */
1752 			req->r_dir_release_cnt =
1753 				atomic64_read(&ci->i_release_count);
1754 			req->r_dir_ordered_cnt =
1755 				atomic64_read(&ci->i_ordered_count);
1756 			req->r_readdir_cache_idx = 0;
1757 		}
1758 	}
1759 
1760 	cache_ctl.index = req->r_readdir_cache_idx;
1761 	fpos_offset = req->r_readdir_offset;
1762 
1763 	/* FIXME: release caps/leases if error occurs */
1764 	for (i = 0; i < rinfo->dir_nr; i++) {
1765 		struct ceph_mds_reply_dir_entry *rde = rinfo->dir_entries + i;
1766 		struct ceph_vino tvino;
1767 
1768 		dname.name = rde->name;
1769 		dname.len = rde->name_len;
1770 		dname.hash = full_name_hash(parent, dname.name, dname.len);
1771 
1772 		tvino.ino = le64_to_cpu(rde->inode.in->ino);
1773 		tvino.snap = le64_to_cpu(rde->inode.in->snapid);
1774 
1775 		if (rinfo->hash_order) {
1776 			u32 hash = ceph_str_hash(ci->i_dir_layout.dl_dir_hash,
1777 						 rde->name, rde->name_len);
1778 			hash = ceph_frag_value(hash);
1779 			if (hash != last_hash)
1780 				fpos_offset = 2;
1781 			last_hash = hash;
1782 			rde->offset = ceph_make_fpos(hash, fpos_offset++, true);
1783 		} else {
1784 			rde->offset = ceph_make_fpos(frag, fpos_offset++, false);
1785 		}
1786 
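		/*
		 * Find or allocate the dentry; if an existing dentry
		 * points at the wrong inode, invalidate it and retry.
		 */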
1787 retry_lookup:
1788 		dn = d_lookup(parent, &dname);
1789 		dout("d_lookup on parent=%p name=%.*s got %p\n",
1790 		     parent, dname.len, dname.name, dn);
1791 
1792 		if (!dn) {
1793 			dn = d_alloc(parent, &dname);
1794 			dout("d_alloc %p '%.*s' = %p\n", parent,
1795 			     dname.len, dname.name, dn);
1796 			if (!dn) {
1797 				dout("d_alloc badness\n");
1798 				err = -ENOMEM;
1799 				goto out;
1800 			}
1801 		} else if (d_really_is_positive(dn) &&
1802 			   (ceph_ino(d_inode(dn)) != tvino.ino ||
1803 			    ceph_snap(d_inode(dn)) != tvino.snap)) {
1804 			struct ceph_dentry_info *di = ceph_dentry(dn);
1805 			dout(" dn %p points to wrong inode %p\n",
1806 			     dn, d_inode(dn));
1807 
1808 			spin_lock(&dn->d_lock);
1809 			if (di->offset > 0 &&
1810 			    di->lease_shared_gen ==
1811 			    atomic_read(&ci->i_shared_gen)) {
1812 				__ceph_dir_clear_ordered(ci);
1813 				di->offset = 0;
1814 			}
1815 			spin_unlock(&dn->d_lock);
1816 
1817 			d_delete(dn);
1818 			dput(dn);
1819 			goto retry_lookup;
1820 		}
1821 
1822 		/* inode */
1823 		if (d_really_is_positive(dn)) {
1824 			in = d_inode(dn);
1825 		} else {
1826 			in = ceph_get_inode(parent->d_sb, tvino, NULL);
1827 			if (IS_ERR(in)) {
1828 				dout("new_inode badness\n");
1829 				d_drop(dn);
1830 				dput(dn);
1831 				err = PTR_ERR(in);
1832 				goto out;
1833 			}
1834 		}
1835 
1836 		ret = ceph_fill_inode(in, NULL, &rde->inode, NULL, session,
1837 				      -1, &req->r_caps_reservation);
1838 		if (ret < 0) {
1839 			pr_err("ceph_fill_inode badness on %p\n", in);
1840 			if (d_really_is_negative(dn)) {
1841 				if (in->i_state & I_NEW) {
1842 					ihold(in);
1843 					discard_new_inode(in);
1844 				}
1845 				iput(in);
1846 			}
1847 			d_drop(dn);
1848 			err = ret;
1849 			goto next_item;
1850 		}
1851 		if (in->i_state & I_NEW)
1852 			unlock_new_inode(in);
1853 
1854 		if (d_really_is_negative(dn)) {
1855 			if (ceph_security_xattr_deadlock(in)) {
1856 				dout(" skip splicing dn %p to inode %p"
1857 				     " (security xattr deadlock)\n", dn, in);
1858 				iput(in);
1859 				skipped++;
1860 				goto next_item;
1861 			}
1862 
1863 			err = splice_dentry(&dn, in);
1864 			if (err < 0)
1865 				goto next_item;
1866 		}
1867 
1868 		ceph_dentry(dn)->offset = rde->offset;
1869 
1870 		update_dentry_lease(d_inode(parent), dn,
1871 				    rde->lease, req->r_session,
1872 				    req->r_request_started);
1873 
1874 		if (err == 0 && skipped == 0 && cache_ctl.index >= 0) {
1875 			ret = fill_readdir_cache(d_inode(parent), dn,
1876 						 &cache_ctl, req);
1877 			if (ret < 0)
1878 				err = ret;
1879 		}
1880 next_item:
1881 		dput(dn);
1882 	}
1883 out:
1884 	if (err == 0 && skipped == 0) {
1885 		set_bit(CEPH_MDS_R_DID_PREPOPULATE, &req->r_req_flags);
1886 		req->r_readdir_cache_idx = cache_ctl.index;
1887 	}
1888 	ceph_readdir_cache_release(&cache_ctl);
1889 	dout("readdir_prepopulate done\n");
1890 	return err;
1891 }
1892 
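/*
 * Update the locally cached file size.  Returns true if the new size
 * should be reported back to the MDS (i.e. the caller should check
 * caps).
 */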
1893 bool ceph_inode_set_size(struct inode *inode, loff_t size)
1894 {
1895 	struct ceph_inode_info *ci = ceph_inode(inode);
1896 	bool ret;
1897 
1898 	spin_lock(&ci->i_ceph_lock);
1899 	dout("set_size %p %llu -> %llu\n", inode, i_size_read(inode), size);
1900 	i_size_write(inode, size);
1901 	ceph_fscache_update(inode);
1902 	inode->i_blocks = calc_inode_blocks(size);
1903 
1904 	ret = __ceph_should_report_size(ci);
1905 
1906 	spin_unlock(&ci->i_ceph_lock);
1907 
1908 	return ret;
1909 }
1910 
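/*
 * Queue deferred work on the inode, pinning it with an inode
 * reference until the work runs.  Callers that must not block
 * (e.g. message handlers) use this, roughly:
 *
 *	ceph_queue_inode_work(inode, CEPH_I_WORK_INVALIDATE_PAGES);
 *
 * ceph_inode_work() drops the reference once the queued bits have
 * been handled.
 */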
1911 void ceph_queue_inode_work(struct inode *inode, int work_bit)
1912 {
1913 	struct ceph_fs_client *fsc = ceph_inode_to_client(inode);
1914 	struct ceph_inode_info *ci = ceph_inode(inode);
1915 	set_bit(work_bit, &ci->i_work_mask);
1916 
1917 	ihold(inode);
1918 	if (queue_work(fsc->inode_wq, &ci->i_work)) {
1919 		dout("queue_inode_work %p, mask=%lx\n", inode, ci->i_work_mask);
1920 	} else {
1921 		dout("queue_inode_work %p already queued, mask=%lx\n",
1922 		     inode, ci->i_work_mask);
1923 		iput(inode);
1924 	}
1925 }
1926 
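/*
 * Invalidate cached pages, typically because FILE_CACHE caps are
 * being revoked.  i_rdcache_gen/i_rdcache_revoking are sampled
 * before and re-checked after the invalidation to detect races with
 * new cached reads.
 */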
1927 static void ceph_do_invalidate_pages(struct inode *inode)
1928 {
1929 	struct ceph_inode_info *ci = ceph_inode(inode);
1930 	u32 orig_gen;
1931 	int check = 0;
1932 
1933 	ceph_fscache_invalidate(inode, false);
1934 
1935 	mutex_lock(&ci->i_truncate_mutex);
1936 
1937 	if (ceph_inode_is_shutdown(inode)) {
1938 		pr_warn_ratelimited("%s: inode %llx.%llx is shut down\n",
1939 				    __func__, ceph_vinop(inode));
1940 		mapping_set_error(inode->i_mapping, -EIO);
1941 		truncate_pagecache(inode, 0);
1942 		mutex_unlock(&ci->i_truncate_mutex);
1943 		goto out;
1944 	}
1945 
1946 	spin_lock(&ci->i_ceph_lock);
1947 	dout("invalidate_pages %p gen %d revoking %d\n", inode,
1948 	     ci->i_rdcache_gen, ci->i_rdcache_revoking);
1949 	if (ci->i_rdcache_revoking != ci->i_rdcache_gen) {
1950 		if (__ceph_caps_revoking_other(ci, NULL, CEPH_CAP_FILE_CACHE))
1951 			check = 1;
1952 		spin_unlock(&ci->i_ceph_lock);
1953 		mutex_unlock(&ci->i_truncate_mutex);
1954 		goto out;
1955 	}
1956 	orig_gen = ci->i_rdcache_gen;
1957 	spin_unlock(&ci->i_ceph_lock);
1958 
1959 	if (invalidate_inode_pages2(inode->i_mapping) < 0) {
1960 		pr_err("invalidate_inode_pages2 %llx.%llx failed\n",
1961 		       ceph_vinop(inode));
1962 	}
1963 
1964 	spin_lock(&ci->i_ceph_lock);
1965 	if (orig_gen == ci->i_rdcache_gen &&
1966 	    orig_gen == ci->i_rdcache_revoking) {
1967 		dout("invalidate_pages %p gen %d successful\n", inode,
1968 		     ci->i_rdcache_gen);
1969 		ci->i_rdcache_revoking--;
1970 		check = 1;
1971 	} else {
1972 		dout("invalidate_pages %p gen %d raced, now %d revoking %d\n",
1973 		     inode, orig_gen, ci->i_rdcache_gen,
1974 		     ci->i_rdcache_revoking);
1975 		if (__ceph_caps_revoking_other(ci, NULL, CEPH_CAP_FILE_CACHE))
1976 			check = 1;
1977 	}
1978 	spin_unlock(&ci->i_ceph_lock);
1979 	mutex_unlock(&ci->i_truncate_mutex);
1980 out:
1981 	if (check)
1982 		ceph_check_caps(ci, 0);
1983 }
1984 
1985 /*
1986  * Make sure any pending truncation is applied before doing anything
1987  * that may depend on it.
1988  */
1989 void __ceph_do_pending_vmtruncate(struct inode *inode)
1990 {
1991 	struct ceph_inode_info *ci = ceph_inode(inode);
1992 	u64 to;
1993 	int wrbuffer_refs, finish = 0;
1994 
1995 	mutex_lock(&ci->i_truncate_mutex);
1996 retry:
1997 	spin_lock(&ci->i_ceph_lock);
1998 	if (ci->i_truncate_pending == 0) {
1999 		dout("__do_pending_vmtruncate %p none pending\n", inode);
2000 		spin_unlock(&ci->i_ceph_lock);
2001 		mutex_unlock(&ci->i_truncate_mutex);
2002 		return;
2003 	}
2004 
2005 	/*
2006 	 * make sure any dirty snapped pages are flushed before we
2007 	 * possibly truncate them, so write AND block!
2008 	 */
2009 	if (ci->i_wrbuffer_ref_head < ci->i_wrbuffer_ref) {
2010 		spin_unlock(&ci->i_ceph_lock);
2011 		dout("__do_pending_vmtruncate %p flushing snaps first\n",
2012 		     inode);
2013 		filemap_write_and_wait_range(&inode->i_data, 0,
2014 					     inode->i_sb->s_maxbytes);
2015 		goto retry;
2016 	}
2017 
2018 	/* there should be no reader or writer */
2019 	WARN_ON_ONCE(ci->i_rd_ref || ci->i_wr_ref);
2020 
2021 	to = ci->i_truncate_size;
2022 	wrbuffer_refs = ci->i_wrbuffer_ref;
2023 	dout("__do_pending_vmtruncate %p (%d) to %lld\n", inode,
2024 	     ci->i_truncate_pending, to);
2025 	spin_unlock(&ci->i_ceph_lock);
2026 
2027 	ceph_fscache_resize(inode, to);
2028 	truncate_pagecache(inode, to);
2029 
2030 	spin_lock(&ci->i_ceph_lock);
2031 	if (to == ci->i_truncate_size) {
2032 		ci->i_truncate_pending = 0;
2033 		finish = 1;
2034 	}
2035 	spin_unlock(&ci->i_ceph_lock);
2036 	if (!finish)
2037 		goto retry;
2038 
2039 	mutex_unlock(&ci->i_truncate_mutex);
2040 
2041 	if (wrbuffer_refs == 0)
2042 		ceph_check_caps(ci, 0);
2043 
2044 	wake_up_all(&ci->i_cap_wq);
2045 }
2046 
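/*
 * Workqueue handler for deferred inode work: each CEPH_I_WORK_* bit
 * set by ceph_queue_inode_work() is acted on once, then the inode
 * reference taken at queue time is dropped.
 */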
2047 static void ceph_inode_work(struct work_struct *work)
2048 {
2049 	struct ceph_inode_info *ci = container_of(work, struct ceph_inode_info,
2050 						 i_work);
2051 	struct inode *inode = &ci->netfs.inode;
2052 
2053 	if (test_and_clear_bit(CEPH_I_WORK_WRITEBACK, &ci->i_work_mask)) {
2054 		dout("writeback %p\n", inode);
2055 		filemap_fdatawrite(&inode->i_data);
2056 	}
2057 	if (test_and_clear_bit(CEPH_I_WORK_INVALIDATE_PAGES, &ci->i_work_mask))
2058 		ceph_do_invalidate_pages(inode);
2059 
2060 	if (test_and_clear_bit(CEPH_I_WORK_VMTRUNCATE, &ci->i_work_mask))
2061 		__ceph_do_pending_vmtruncate(inode);
2062 
2063 	if (test_and_clear_bit(CEPH_I_WORK_CHECK_CAPS, &ci->i_work_mask))
2064 		ceph_check_caps(ci, 0);
2065 
2066 	if (test_and_clear_bit(CEPH_I_WORK_FLUSH_SNAPS, &ci->i_work_mask))
2067 		ceph_flush_snaps(ci, NULL);
2068 
2069 	iput(inode);
2070 }
2071 
2072 /*
2073  * symlinks
2074  */
2075 static const struct inode_operations ceph_symlink_iops = {
2076 	.get_link = simple_get_link,
2077 	.setattr = ceph_setattr,
2078 	.getattr = ceph_getattr,
2079 	.listxattr = ceph_listxattr,
2080 };
2081 
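/*
 * Apply an attribute change.  Each attribute is either dirtied
 * locally (when suitable EXCL caps are held) or encoded into a
 * CEPH_MDS_OP_SETATTR request for the MDS, dropping the
 * corresponding SHARED caps via r_inode_drop.
 */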
2082 int __ceph_setattr(struct inode *inode, struct iattr *attr)
2083 {
2084 	struct ceph_inode_info *ci = ceph_inode(inode);
2085 	unsigned int ia_valid = attr->ia_valid;
2086 	struct ceph_mds_request *req;
2087 	struct ceph_mds_client *mdsc = ceph_sb_to_client(inode->i_sb)->mdsc;
2088 	struct ceph_cap_flush *prealloc_cf;
2089 	int issued;
2090 	int release = 0, dirtied = 0;
2091 	int mask = 0;
2092 	int err = 0;
2093 	int inode_dirty_flags = 0;
2094 	bool lock_snap_rwsem = false;
2095 
2096 	prealloc_cf = ceph_alloc_cap_flush();
2097 	if (!prealloc_cf)
2098 		return -ENOMEM;
2099 
2100 	req = ceph_mdsc_create_request(mdsc, CEPH_MDS_OP_SETATTR,
2101 				       USE_AUTH_MDS);
2102 	if (IS_ERR(req)) {
2103 		ceph_free_cap_flush(prealloc_cf);
2104 		return PTR_ERR(req);
2105 	}
2106 
2107 	spin_lock(&ci->i_ceph_lock);
2108 	issued = __ceph_caps_issued(ci, NULL);
2109 
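	/*
	 * If caps may get dirtied here (some EXCL or FILE_WR cap is
	 * issued) and no head snap context is attached yet, hold
	 * snap_rwsem across the update: try to take it without
	 * dropping i_ceph_lock, else drop the lock, block, and
	 * re-sample 'issued'.
	 */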
2110 	if (!ci->i_head_snapc &&
2111 	    (issued & (CEPH_CAP_ANY_EXCL | CEPH_CAP_FILE_WR))) {
2112 		lock_snap_rwsem = true;
2113 		if (!down_read_trylock(&mdsc->snap_rwsem)) {
2114 			spin_unlock(&ci->i_ceph_lock);
2115 			down_read(&mdsc->snap_rwsem);
2116 			spin_lock(&ci->i_ceph_lock);
2117 			issued = __ceph_caps_issued(ci, NULL);
2118 		}
2119 	}
2120 
2121 	dout("setattr %p issued %s\n", inode, ceph_cap_string(issued));
2122 
2123 	if (ia_valid & ATTR_UID) {
2124 		dout("setattr %p uid %d -> %d\n", inode,
2125 		     from_kuid(&init_user_ns, inode->i_uid),
2126 		     from_kuid(&init_user_ns, attr->ia_uid));
2127 		if (issued & CEPH_CAP_AUTH_EXCL) {
2128 			inode->i_uid = attr->ia_uid;
2129 			dirtied |= CEPH_CAP_AUTH_EXCL;
2130 		} else if ((issued & CEPH_CAP_AUTH_SHARED) == 0 ||
2131 			   !uid_eq(attr->ia_uid, inode->i_uid)) {
2132 			req->r_args.setattr.uid = cpu_to_le32(
2133 				from_kuid(&init_user_ns, attr->ia_uid));
2134 			mask |= CEPH_SETATTR_UID;
2135 			release |= CEPH_CAP_AUTH_SHARED;
2136 		}
2137 	}
2138 	if (ia_valid & ATTR_GID) {
2139 		dout("setattr %p gid %d -> %d\n", inode,
2140 		     from_kgid(&init_user_ns, inode->i_gid),
2141 		     from_kgid(&init_user_ns, attr->ia_gid));
2142 		if (issued & CEPH_CAP_AUTH_EXCL) {
2143 			inode->i_gid = attr->ia_gid;
2144 			dirtied |= CEPH_CAP_AUTH_EXCL;
2145 		} else if ((issued & CEPH_CAP_AUTH_SHARED) == 0 ||
2146 			   !gid_eq(attr->ia_gid, inode->i_gid)) {
2147 			req->r_args.setattr.gid = cpu_to_le32(
2148 				from_kgid(&init_user_ns, attr->ia_gid));
2149 			mask |= CEPH_SETATTR_GID;
2150 			release |= CEPH_CAP_AUTH_SHARED;
2151 		}
2152 	}
2153 	if (ia_valid & ATTR_MODE) {
2154 		dout("setattr %p mode 0%o -> 0%o\n", inode, inode->i_mode,
2155 		     attr->ia_mode);
2156 		if (issued & CEPH_CAP_AUTH_EXCL) {
2157 			inode->i_mode = attr->ia_mode;
2158 			dirtied |= CEPH_CAP_AUTH_EXCL;
2159 		} else if ((issued & CEPH_CAP_AUTH_SHARED) == 0 ||
2160 			   attr->ia_mode != inode->i_mode) {
2161 			inode->i_mode = attr->ia_mode;
2162 			req->r_args.setattr.mode = cpu_to_le32(attr->ia_mode);
2163 			mask |= CEPH_SETATTR_MODE;
2164 			release |= CEPH_CAP_AUTH_SHARED;
2165 		}
2166 	}
2167 
2168 	if (ia_valid & ATTR_ATIME) {
2169 		dout("setattr %p atime %lld.%ld -> %lld.%ld\n", inode,
2170 		     inode->i_atime.tv_sec, inode->i_atime.tv_nsec,
2171 		     attr->ia_atime.tv_sec, attr->ia_atime.tv_nsec);
2172 		if (issued & CEPH_CAP_FILE_EXCL) {
2173 			ci->i_time_warp_seq++;
2174 			inode->i_atime = attr->ia_atime;
2175 			dirtied |= CEPH_CAP_FILE_EXCL;
2176 		} else if ((issued & CEPH_CAP_FILE_WR) &&
2177 			   timespec64_compare(&inode->i_atime,
2178 					    &attr->ia_atime) < 0) {
2179 			inode->i_atime = attr->ia_atime;
2180 			dirtied |= CEPH_CAP_FILE_WR;
2181 		} else if ((issued & CEPH_CAP_FILE_SHARED) == 0 ||
2182 			   !timespec64_equal(&inode->i_atime, &attr->ia_atime)) {
2183 			ceph_encode_timespec64(&req->r_args.setattr.atime,
2184 					       &attr->ia_atime);
2185 			mask |= CEPH_SETATTR_ATIME;
2186 			release |= CEPH_CAP_FILE_SHARED |
2187 				   CEPH_CAP_FILE_RD | CEPH_CAP_FILE_WR;
2188 		}
2189 	}
2190 	if (ia_valid & ATTR_SIZE) {
2191 		loff_t isize = i_size_read(inode);
2192 
2193 		dout("setattr %p size %lld -> %lld\n", inode, isize, attr->ia_size);
2194 		if ((issued & CEPH_CAP_FILE_EXCL) && attr->ia_size >= isize) {
2195 			if (attr->ia_size > isize) {
2196 				i_size_write(inode, attr->ia_size);
2197 				inode->i_blocks = calc_inode_blocks(attr->ia_size);
2198 				ci->i_reported_size = attr->ia_size;
2199 				dirtied |= CEPH_CAP_FILE_EXCL;
2200 				ia_valid |= ATTR_MTIME;
2201 			}
2202 		} else if ((issued & CEPH_CAP_FILE_SHARED) == 0 ||
2203 			   attr->ia_size != isize) {
2204 			req->r_args.setattr.size = cpu_to_le64(attr->ia_size);
2205 			req->r_args.setattr.old_size = cpu_to_le64(isize);
2206 			mask |= CEPH_SETATTR_SIZE;
2207 			release |= CEPH_CAP_FILE_SHARED | CEPH_CAP_FILE_EXCL |
2208 				   CEPH_CAP_FILE_RD | CEPH_CAP_FILE_WR;
2209 		}
2210 	}
2211 	if (ia_valid & ATTR_MTIME) {
2212 		dout("setattr %p mtime %lld.%ld -> %lld.%ld\n", inode,
2213 		     inode->i_mtime.tv_sec, inode->i_mtime.tv_nsec,
2214 		     attr->ia_mtime.tv_sec, attr->ia_mtime.tv_nsec);
2215 		if (issued & CEPH_CAP_FILE_EXCL) {
2216 			ci->i_time_warp_seq++;
2217 			inode->i_mtime = attr->ia_mtime;
2218 			dirtied |= CEPH_CAP_FILE_EXCL;
2219 		} else if ((issued & CEPH_CAP_FILE_WR) &&
2220 			   timespec64_compare(&inode->i_mtime,
2221 					    &attr->ia_mtime) < 0) {
2222 			inode->i_mtime = attr->ia_mtime;
2223 			dirtied |= CEPH_CAP_FILE_WR;
2224 		} else if ((issued & CEPH_CAP_FILE_SHARED) == 0 ||
2225 			   !timespec64_equal(&inode->i_mtime, &attr->ia_mtime)) {
2226 			ceph_encode_timespec64(&req->r_args.setattr.mtime,
2227 					       &attr->ia_mtime);
2228 			mask |= CEPH_SETATTR_MTIME;
2229 			release |= CEPH_CAP_FILE_SHARED |
2230 				   CEPH_CAP_FILE_RD | CEPH_CAP_FILE_WR;
2231 		}
2232 	}
2233 
2234 	/* these do nothing */
2235 	if (ia_valid & ATTR_CTIME) {
2236 		bool only = (ia_valid & (ATTR_SIZE|ATTR_MTIME|ATTR_ATIME|
2237 					 ATTR_MODE|ATTR_UID|ATTR_GID)) == 0;
2238 		dout("setattr %p ctime %lld.%ld -> %lld.%ld (%s)\n", inode,
2239 		     inode->i_ctime.tv_sec, inode->i_ctime.tv_nsec,
2240 		     attr->ia_ctime.tv_sec, attr->ia_ctime.tv_nsec,
2241 		     only ? "ctime only" : "ignored");
2242 		if (only) {
2243 			/*
2244 			 * if the kernel wants to dirty ctime but nothing
2245 			 * else, we need to choose a cap to dirty under, or
2246 			 * do an almost-no-op setattr
2247 			 */
2248 			if (issued & CEPH_CAP_AUTH_EXCL)
2249 				dirtied |= CEPH_CAP_AUTH_EXCL;
2250 			else if (issued & CEPH_CAP_FILE_EXCL)
2251 				dirtied |= CEPH_CAP_FILE_EXCL;
2252 			else if (issued & CEPH_CAP_XATTR_EXCL)
2253 				dirtied |= CEPH_CAP_XATTR_EXCL;
2254 			else
2255 				mask |= CEPH_SETATTR_CTIME;
2256 		}
2257 	}
2258 	if (ia_valid & ATTR_FILE)
2259 		dout("setattr %p ATTR_FILE ... hrm!\n", inode);
2260 
2261 	if (dirtied) {
2262 		inode_dirty_flags = __ceph_mark_dirty_caps(ci, dirtied,
2263 							   &prealloc_cf);
2264 		inode->i_ctime = attr->ia_ctime;
2265 		inode_inc_iversion_raw(inode);
2266 	}
2267 
2268 	release &= issued;
2269 	spin_unlock(&ci->i_ceph_lock);
2270 	if (lock_snap_rwsem)
2271 		up_read(&mdsc->snap_rwsem);
2272 
2273 	if (inode_dirty_flags)
2274 		__mark_inode_dirty(inode, inode_dirty_flags);
2275 
2276 	if (mask) {
2277 		req->r_inode = inode;
2278 		ihold(inode);
2279 		req->r_inode_drop = release;
2280 		req->r_args.setattr.mask = cpu_to_le32(mask);
2281 		req->r_num_caps = 1;
2282 		req->r_stamp = attr->ia_ctime;
2283 		err = ceph_mdsc_do_request(mdsc, NULL, req);
2284 	}
2285 	dout("setattr %p result=%d (%s locally, %d remote)\n", inode, err,
2286 	     ceph_cap_string(dirtied), mask);
2287 
2288 	ceph_mdsc_put_request(req);
2289 	ceph_free_cap_flush(prealloc_cf);
2290 
2291 	if (err >= 0 && (mask & CEPH_SETATTR_SIZE))
2292 		__ceph_do_pending_vmtruncate(inode);
2293 
2294 	return err;
2295 }
2296 
2297 /*
2298  * setattr
2299  */
2300 int ceph_setattr(struct mnt_idmap *idmap, struct dentry *dentry,
2301 		 struct iattr *attr)
2302 {
2303 	struct inode *inode = d_inode(dentry);
2304 	struct ceph_fs_client *fsc = ceph_inode_to_client(inode);
2305 	int err;
2306 
2307 	if (ceph_snap(inode) != CEPH_NOSNAP)
2308 		return -EROFS;
2309 
2310 	if (ceph_inode_is_shutdown(inode))
2311 		return -ESTALE;
2312 
2313 	err = setattr_prepare(&nop_mnt_idmap, dentry, attr);
2314 	if (err != 0)
2315 		return err;
2316 
2317 	if ((attr->ia_valid & ATTR_SIZE) &&
2318 	    attr->ia_size > max(i_size_read(inode), fsc->max_file_size))
2319 		return -EFBIG;
2320 
2321 	if ((attr->ia_valid & ATTR_SIZE) &&
2322 	    ceph_quota_is_max_bytes_exceeded(inode, attr->ia_size))
2323 		return -EDQUOT;
2324 
2325 	err = __ceph_setattr(inode, attr);
2326 
2327 	if (err >= 0 && (attr->ia_valid & ATTR_MODE))
2328 		err = posix_acl_chmod(&nop_mnt_idmap, dentry, attr->ia_mode);
2329 
2330 	return err;
2331 }
2332 
2333 int ceph_try_to_choose_auth_mds(struct inode *inode, int mask)
2334 {
2335 	int issued = ceph_caps_issued(ceph_inode(inode));
2336 
2337 	/*
2338 	 * If any 'x' caps are issued we can just choose the auth MDS
2339 	 * instead of a random replica MDS, because only when the
2340 	 * Locker is in the LOCK_EXEC state can the loner client get
2341 	 * the 'x' caps.  If we sent the getattr request to a replica
2342 	 * MDS instead, it would have to auth pin and try to rdlock
2343 	 * from the auth MDS, forcing the auth MDS to do a Locker
2344 	 * state transition to LOCK_SYNC, after which the lock state
2345 	 * would change back again.
2346 	 *
2347 	 * These Locker state transitions are expensive and usually
2348 	 * require revoking caps from clients.
2349 	 *
2350 	 * For the 'Xs' caps needed by getxattr we also choose the
2351 	 * auth MDS, because the MDS side code is buggy: setxattr
2352 	 * won't notify the replica MDSes when values change, so a
2353 	 * replica MDS may return stale values.  Though this will be
2354 	 * fixed in the MDS code, it still makes sense for old ceph.
2355 	 */
2356 	if (((mask & CEPH_CAP_ANY_SHARED) && (issued & CEPH_CAP_ANY_EXCL))
2357 	    || (mask & (CEPH_STAT_RSTAT | CEPH_STAT_CAP_XATTR)))
2358 		return USE_AUTH_MDS;
2359 	else
2360 		return USE_ANY_MDS;
2361 }
2362 
2363 /*
2364  * Verify that we have a lease on the given mask.  If not,
2365  * do a getattr against an mds.
2366  */
2367 int __ceph_do_getattr(struct inode *inode, struct page *locked_page,
2368 		      int mask, bool force)
2369 {
2370 	struct ceph_fs_client *fsc = ceph_sb_to_client(inode->i_sb);
2371 	struct ceph_mds_client *mdsc = fsc->mdsc;
2372 	struct ceph_mds_request *req;
2373 	int mode;
2374 	int err;
2375 
2376 	if (ceph_snap(inode) == CEPH_SNAPDIR) {
2377 		dout("do_getattr inode %p SNAPDIR\n", inode);
2378 		return 0;
2379 	}
2380 
2381 	dout("do_getattr inode %p mask %s mode 0%o\n",
2382 	     inode, ceph_cap_string(mask), inode->i_mode);
2383 	if (!force && ceph_caps_issued_mask_metric(ceph_inode(inode), mask, 1))
2384 		return 0;
2385 
2386 	mode = ceph_try_to_choose_auth_mds(inode, mask);
2387 	req = ceph_mdsc_create_request(mdsc, CEPH_MDS_OP_GETATTR, mode);
2388 	if (IS_ERR(req))
2389 		return PTR_ERR(req);
2390 	req->r_inode = inode;
2391 	ihold(inode);
2392 	req->r_num_caps = 1;
2393 	req->r_args.getattr.mask = cpu_to_le32(mask);
2394 	req->r_locked_page = locked_page;
2395 	err = ceph_mdsc_do_request(mdsc, NULL, req);
2396 	if (locked_page && err == 0) {
2397 		u64 inline_version = req->r_reply_info.targeti.inline_version;
2398 		if (inline_version == 0) {
2399 			/* the reply is supposed to contain inline data */
2400 			err = -EINVAL;
2401 		} else if (inline_version == CEPH_INLINE_NONE ||
2402 			   inline_version == 1) {
2403 			err = -ENODATA;
2404 		} else {
2405 			err = req->r_reply_info.targeti.inline_len;
2406 		}
2407 	}
2408 	ceph_mdsc_put_request(req);
2409 	dout("do_getattr result=%d\n", err);
2410 	return err;
2411 }
2412 
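/*
 * Fetch a ceph virtual xattr directly from the MDS; requires a
 * server that supports the GETVXATTR op.  As with getxattr(), a zero
 * 'size' returns just the value's length, and a too-small buffer
 * yields -ERANGE.
 */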
2413 int ceph_do_getvxattr(struct inode *inode, const char *name, void *value,
2414 		      size_t size)
2415 {
2416 	struct ceph_fs_client *fsc = ceph_sb_to_client(inode->i_sb);
2417 	struct ceph_mds_client *mdsc = fsc->mdsc;
2418 	struct ceph_mds_request *req;
2419 	int mode = USE_AUTH_MDS;
2420 	int err;
2421 	char *xattr_value;
2422 	size_t xattr_value_len;
2423 
2424 	req = ceph_mdsc_create_request(mdsc, CEPH_MDS_OP_GETVXATTR, mode);
2425 	if (IS_ERR(req)) {
2426 		err = PTR_ERR(req);
2427 		goto out;
2428 	}
2429 
2430 	req->r_feature_needed = CEPHFS_FEATURE_OP_GETVXATTR;
2431 	req->r_path2 = kstrdup(name, GFP_NOFS);
2432 	if (!req->r_path2) {
2433 		err = -ENOMEM;
2434 		goto put;
2435 	}
2436 
2437 	ihold(inode);
2438 	req->r_inode = inode;
2439 	err = ceph_mdsc_do_request(mdsc, NULL, req);
2440 	if (err < 0)
2441 		goto put;
2442 
2443 	xattr_value = req->r_reply_info.xattr_info.xattr_value;
2444 	xattr_value_len = req->r_reply_info.xattr_info.xattr_value_len;
2445 
2446 	dout("do_getvxattr xattr_value_len:%zu, size:%zu\n", xattr_value_len, size);
2447 
2448 	err = (int)xattr_value_len;
2449 	if (size == 0)
2450 		goto put;
2451 
2452 	if (xattr_value_len > size) {
2453 		err = -ERANGE;
2454 		goto put;
2455 	}
2456 
2457 	memcpy(value, xattr_value, xattr_value_len);
2458 put:
2459 	ceph_mdsc_put_request(req);
2460 out:
2461 	dout("do_getvxattr result=%d\n", err);
2462 	return err;
2463 }
2464 
2465 
2466 /*
2467  * Check inode permissions.  We verify we have a valid value for
2468  * the AUTH cap, then call the generic handler.
2469  */
2470 int ceph_permission(struct mnt_idmap *idmap, struct inode *inode,
2471 		    int mask)
2472 {
2473 	int err;
2474 
2475 	if (mask & MAY_NOT_BLOCK)
2476 		return -ECHILD;
2477 
2478 	err = ceph_do_getattr(inode, CEPH_CAP_AUTH_SHARED, false);
2479 
2480 	if (!err)
2481 		err = generic_permission(&nop_mnt_idmap, inode, mask);
2482 	return err;
2483 }
2484 
2485 /* Craft a mask of needed caps given a set of requested statx attrs. */
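/*
 * For example, STATX_SIZE alone needs only CEPH_CAP_FILE_SHARED,
 * while STATX_CTIME pulls in AUTH, LINK (or FILE, for directories)
 * and XATTR shared caps, since ctime changes whenever any of those
 * attribute groups change.
 */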
2486 static int statx_to_caps(u32 want, umode_t mode)
2487 {
2488 	int mask = 0;
2489 
2490 	if (want & (STATX_MODE|STATX_UID|STATX_GID|STATX_CTIME|STATX_BTIME|STATX_CHANGE_COOKIE))
2491 		mask |= CEPH_CAP_AUTH_SHARED;
2492 
2493 	if (want & (STATX_NLINK|STATX_CTIME|STATX_CHANGE_COOKIE)) {
2494 		/*
2495 		 * The link count for directories depends on inode->i_subdirs,
2496 		 * and that is only updated when Fs caps are held.
2497 		 */
2498 		if (S_ISDIR(mode))
2499 			mask |= CEPH_CAP_FILE_SHARED;
2500 		else
2501 			mask |= CEPH_CAP_LINK_SHARED;
2502 	}
2503 
2504 	if (want & (STATX_ATIME|STATX_MTIME|STATX_CTIME|STATX_SIZE|STATX_BLOCKS|STATX_CHANGE_COOKIE))
2505 		mask |= CEPH_CAP_FILE_SHARED;
2506 
2507 	if (want & (STATX_CTIME|STATX_CHANGE_COOKIE))
2508 		mask |= CEPH_CAP_XATTR_SHARED;
2509 
2510 	return mask;
2511 }
2512 
2513 /*
2514  * Get all the attributes. If we have sufficient caps for the requested attrs,
2515  * then we can avoid talking to the MDS at all.
2516  */
2517 int ceph_getattr(struct mnt_idmap *idmap, const struct path *path,
2518 		 struct kstat *stat, u32 request_mask, unsigned int flags)
2519 {
2520 	struct inode *inode = d_inode(path->dentry);
2521 	struct super_block *sb = inode->i_sb;
2522 	struct ceph_inode_info *ci = ceph_inode(inode);
2523 	u32 valid_mask = STATX_BASIC_STATS;
2524 	int err = 0;
2525 
2526 	if (ceph_inode_is_shutdown(inode))
2527 		return -ESTALE;
2528 
2529 	/* Skip the getattr altogether if we're asked not to sync */
2530 	if ((flags & AT_STATX_SYNC_TYPE) != AT_STATX_DONT_SYNC) {
2531 		err = ceph_do_getattr(inode,
2532 				statx_to_caps(request_mask, inode->i_mode),
2533 				flags & AT_STATX_FORCE_SYNC);
2534 		if (err)
2535 			return err;
2536 	}
2537 
2538 	generic_fillattr(&nop_mnt_idmap, inode, stat);
2539 	stat->ino = ceph_present_inode(inode);
2540 
2541 	/*
2542 	 * btime on newly-allocated inodes is 0, so if this is still set to
2543 	 * that, then assume that it's not valid.
2544 	 */
2545 	if (ci->i_btime.tv_sec || ci->i_btime.tv_nsec) {
2546 		stat->btime = ci->i_btime;
2547 		valid_mask |= STATX_BTIME;
2548 	}
2549 
2550 	if (request_mask & STATX_CHANGE_COOKIE) {
2551 		stat->change_cookie = inode_peek_iversion_raw(inode);
2552 		valid_mask |= STATX_CHANGE_COOKIE;
2553 	}
2554 
2555 	if (ceph_snap(inode) == CEPH_NOSNAP)
2556 		stat->dev = sb->s_dev;
2557 	else
2558 		stat->dev = ci->i_snapid_map ? ci->i_snapid_map->dev : 0;
2559 
2560 	if (S_ISDIR(inode->i_mode)) {
2561 		if (ceph_test_mount_opt(ceph_sb_to_client(sb), RBYTES)) {
2562 			stat->size = ci->i_rbytes;
2563 		} else if (ceph_snap(inode) == CEPH_SNAPDIR) {
2564 			struct ceph_inode_info *pci;
2565 			struct ceph_snap_realm *realm;
2566 			struct inode *parent;
2567 
2568 			parent = ceph_lookup_inode(sb, ceph_ino(inode));
2569 			if (IS_ERR(parent))
2570 				return PTR_ERR(parent);
2571 
2572 			pci = ceph_inode(parent);
2573 			spin_lock(&pci->i_ceph_lock);
2574 			realm = pci->i_snap_realm;
2575 			if (realm)
2576 				stat->size = realm->num_snaps;
2577 			else
2578 				stat->size = 0;
2579 			spin_unlock(&pci->i_ceph_lock);
2580 			iput(parent);
2581 		} else {
2582 			stat->size = ci->i_files + ci->i_subdirs;
2583 		}
2584 		stat->blocks = 0;
2585 		stat->blksize = 65536;
2586 		/*
2587 		 * Some applications rely on the st_nlink value of
2588 		 * directories being either 0 (if unlinked) or
2589 		 * 2 + number of subdirectories.
2590 		 */
2591 		if (stat->nlink == 1)
2592 			/* '.' + '..' + subdirs */
2593 			stat->nlink = 1 + 1 + ci->i_subdirs;
2594 	}
2595 
2596 	stat->attributes_mask |= STATX_ATTR_CHANGE_MONOTONIC;
2597 	stat->attributes |= STATX_ATTR_CHANGE_MONOTONIC;
2598 	stat->result_mask = request_mask & valid_mask;
2599 	return err;
2600 }
2601 
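/*
 * Mark the inode as shut down (e.g. on forced unmount or when the
 * client has been fenced off) and purge its caps.  Later operations
 * observe ceph_inode_is_shutdown() and fail with -ESTALE or -EIO.
 */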
2602 void ceph_inode_shutdown(struct inode *inode)
2603 {
2604 	struct ceph_inode_info *ci = ceph_inode(inode);
2605 	struct rb_node *p;
2606 	int iputs = 0;
2607 	bool invalidate = false;
2608 
2609 	spin_lock(&ci->i_ceph_lock);
2610 	ci->i_ceph_flags |= CEPH_I_SHUTDOWN;
2611 	p = rb_first(&ci->i_caps);
2612 	while (p) {
2613 		struct ceph_cap *cap = rb_entry(p, struct ceph_cap, ci_node);
2614 
2615 		p = rb_next(p);
2616 		iputs += ceph_purge_inode_cap(inode, cap, &invalidate);
2617 	}
2618 	spin_unlock(&ci->i_ceph_lock);
2619 
2620 	if (invalidate)
2621 		ceph_queue_invalidate(inode);
2622 	while (iputs--)
2623 		iput(inode);
2624 }
2625